Compare commits
134 Commits
SHA1 (author and date columns not preserved in this view):

15de250c2c, a7b0fcc21e, 4999fa6263, 1f54be66c9, 2e1c3a8338, fe934eb7a0, 62a0c2f348, cfeef3a9eb, 52009788ee, 2acf4d874d,
f951d7d33f, 20fe24f348, 2e5cc41945, bc76b20e6d, 3f6befe012, ee201cfb84, 66ec12e869, addfc99ff4, 91f0faa72d, 2deebe4d73,
5bc57ea004, c63bd05458, b4933f4c74, e7748c603b, 6a5d782d6c, 89fad8f566, 01cac89867, dfb4729b02, 6ab5f823b3, bf1ceab6ed,
dbaff495c8, f65caa66bf, 2f455e18ef, 7b155f384d, fd405239d9, 8698156e27, 2f0f218ad9, 3cc75b4bab, 0a0f8470d7, e46026f1fb,
fef5089d7e, a844bd70da, cbc01bd1b9, e096cc1101, 0dc559d3cf, 34f537adad, 9558628537, 940bf7081a, 38c31c3b4e, 253272d757,
9b1bd8065f, b2a5cf57ad, 33c51ee75d, d8aa107fae, 82e02d0734, dae59bb3e1, e0e7fc8e52, 9abc84c583, db6540772c, 840ebfe625,
953282dbf4, 788d1199ac, f6a8f718a8, f225bf6f01, b5f03a380b, 140c75f613, 6a59f67fdc, 5943747001, f26f18d29d, 9b58d72f52,
0c885d6a04, 70b51c0a5a, 560660ae11, 5436855b67, 7351e5ed2c, 7ee993fadf, 9bf459e82f, 545090ff17, bd8074507f, cfc7b22c4c,
5f1c637508, d888e0a6d7, 10e808e1bd, d9b03ca38b, 608d75b348, cee3cee4ef, 09e51323f0, da6f702129, 02a83e7c6e, 83263e1737,
1f7ac22b60, 747debae56, 00b4186469, b087dabf4f, e00eb0a069, d4e49ffd06, 0f34a190ea, df2fb8f5b3, 80d2a6046b, 24273b826f,
68d2616e35, f506d39339, bc58c9ec7e, b57a52cd85, 8631be42ac, 09367369ef, 23b8c95cc4, e61392b057, 7deba20395, 274c097f84,
1c7cea1af4, 4406496d2f, a15fa4840c, c9030660d6, fdeda769d0, 53edd26578, 2433cdd6d6, 77b34c278e, 90e993fd9a, 251054d8c9,
d4bb7cec69, a3f6b04345, ba4a5053dd, 6b47a259c3, c9ec13cf1f, 906a6ab837, d0e478a9f8, b560b64d33, 057af41716, a44b8abd48,
8778ecaed5, a02542ada3, ea17c6883f, 706306645b
Cargo.lock (generated, 2265 changed lines): file diff suppressed because it is too large.
@@ -48,6 +48,7 @@ members = [
    "sdk",
    "sdk-c",
    "scripts",
    "stake-monitor",
    "sys-tuner",
    "transaction-status",
    "upload-perf",
@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "1.0.11"
version = "1.0.22"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,29 +11,32 @@ edition = "2018"
[dependencies]
bincode = "1.2.1"
crossbeam-channel = "0.3"
ed25519-dalek = "=1.0.0-pre.1"
ed25519-dalek = "=1.0.0-pre.3"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-client = { path = "../client", version = "1.0.11" }
solana-storage-program = { path = "../programs/storage", version = "1.0.11" }
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-client = { path = "../client", version = "1.0.22" }
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
thiserror = "1.0"
serde = "1.0.104"
serde_json = "1.0.46"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.0.11" }
solana-chacha = { path = "../chacha", version = "1.0.11" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.11" }
solana-ledger = { path = "../ledger", version = "1.0.11" }
solana-logger = { path = "../logger", version = "1.0.11" }
solana-perf = { path = "../perf", version = "1.0.11" }
solana-sdk = { path = "../sdk", version = "1.0.11" }
solana-core = { path = "../core", version = "1.0.11" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.11" }
solana-metrics = { path = "../metrics", version = "1.0.11" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-chacha = { path = "../chacha", version = "1.0.22" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-perf = { path = "../perf", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }

[dev-dependencies]
hex = "0.4.0"

[lib]
name = "solana_archiver_lib"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,7 +1,7 @@
use crate::result::ArchiverError;
use crossbeam_channel::unbounded;
use rand::{thread_rng, Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use rand::{thread_rng, Rng};
use rand_chacha::{rand_core::SeedableRng, ChaChaRng};
use solana_archiver_utils::sample_file;
use solana_chacha::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
use solana_client::{
@@ -199,7 +199,7 @@ impl Archiver {

        info!("Connecting to the cluster via {:?}", cluster_entrypoint);
        let (nodes, _) =
            match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 1) {
            match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 2) {
                Ok(nodes_and_archivers) => nodes_and_archivers,
                Err(e) => {
                    //shutdown services before exiting
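The import change above tracks the rand 0.6 → 0.7 and rand_chacha 0.1 → 0.2 bumps made throughout this compare. A minimal sketch (not taken from the repository) of how a ChaCha stream is seeded under the new crates, with `SeedableRng` now coming from `rand_chacha::rand_core`:

```rust
// Sketch only: seeding a deterministic ChaCha RNG under rand 0.7 / rand_chacha 0.2.
use rand::{thread_rng, Rng};
use rand_chacha::{rand_core::SeedableRng, ChaChaRng};

fn demo_seeding() -> u64 {
    // Derive a 32-byte seed from the thread-local RNG, then build a
    // reproducible ChaCha stream from that seed.
    let seed: [u8; 32] = thread_rng().gen();
    let mut rng = ChaChaRng::from_seed(seed);
    rng.gen()
}
```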
@@ -1,4 +1,3 @@
use serde_json;
use solana_client::client_error;
use solana_ledger::blockstore;
use solana_sdk::transport;
@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "1.0.11"
version = "1.0.22"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,16 +10,19 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
solana-chacha = { path = "../chacha", version = "1.0.11" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.11" }
solana-ledger = { path = "../ledger", version = "1.0.11" }
solana-logger = { path = "../logger", version = "1.0.11" }
solana-perf = { path = "../perf", version = "1.0.11" }
solana-sdk = { path = "../sdk", version = "1.0.11" }
rand = "0.7.0"
solana-chacha = { path = "../chacha", version = "1.0.22" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-perf = { path = "../perf", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }

[dev-dependencies]
hex = "0.4.0"

[lib]
name = "solana_archiver_utils"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.0.11"
version = "1.0.22"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,14 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.2"
solana-clap-utils = { path = "../clap-utils", version = "1.0.11" }
solana-core = { path = "../core", version = "1.0.11" }
solana-logger = { path = "../logger", version = "1.0.11" }
solana-metrics = { path = "../metrics", version = "1.0.11" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.11" }
solana-net-utils = { path = "../net-utils", version = "1.0.11" }
solana-sdk = { path = "../sdk", version = "1.0.11" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.0.11"
version = "1.0.22"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,14 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "1.0.11" }
solana-ledger = { path = "../ledger", version = "1.0.11" }
solana-logger = { path = "../logger", version = "1.0.11" }
solana-runtime = { path = "../runtime", version = "1.0.11" }
solana-measure = { path = "../measure", version = "1.0.11" }
solana-sdk = { path = "../sdk", version = "1.0.11" }
rand = "0.6.5"
solana-core = { path = "../core", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-measure = { path = "../measure", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
rand = "0.7.0"
crossbeam-channel = "0.3"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -246,7 +246,7 @@ fn main() {
            poh_recorder.lock().unwrap().set_bank(&bank);
            assert!(poh_recorder.lock().unwrap().bank().is_some());
            if bank.slot() > 32 {
                bank_forks.set_root(root, &None);
                bank_forks.set_root(root, &None, None);
                root += 1;
            }
            debug!(
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.0.11"
version = "1.0.22"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,21 +14,24 @@ itertools = "0.8.2"
log = "0.4.8"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.6.5"
rand = "0.7.0"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.11" }
solana-core = { path = "../core", version = "1.0.11" }
solana-genesis = { path = "../genesis", version = "1.0.11" }
solana-client = { path = "../client", version = "1.0.11" }
solana-faucet = { path = "../faucet", version = "1.0.11" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.11" }
solana-logger = { path = "../logger", version = "1.0.11" }
solana-metrics = { path = "../metrics", version = "1.0.11" }
solana-net-utils = { path = "../net-utils", version = "1.0.11" }
solana-runtime = { path = "../runtime", version = "1.0.11" }
solana-sdk = { path = "../sdk", version = "1.0.11" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-genesis = { path = "../genesis", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.22" }
solana-faucet = { path = "../faucet", version = "1.0.22" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.0.11" }
solana-local-cluster = { path = "../local-cluster", version = "1.0.22" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,14 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.0.11"
version = "1.0.22"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.0.11" }
solana-core = { path = "../core", version = "1.0.11" }
solana-logger = { path = "../logger", version = "1.0.11" }
solana-net-utils = { path = "../net-utils", version = "1.0.11" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.0.11"
version = "1.0.22"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,24 +14,27 @@ log = "0.4.8"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.11" }
solana-core = { path = "../core", version = "1.0.11" }
solana-genesis = { path = "../genesis", version = "1.0.11" }
solana-client = { path = "../client", version = "1.0.11" }
solana-faucet = { path = "../faucet", version = "1.0.11" }
solana-librapay = { path = "../programs/librapay", version = "1.0.11", optional = true }
solana-logger = { path = "../logger", version = "1.0.11" }
solana-metrics = { path = "../metrics", version = "1.0.11" }
solana-measure = { path = "../measure", version = "1.0.11" }
solana-net-utils = { path = "../net-utils", version = "1.0.11" }
solana-runtime = { path = "../runtime", version = "1.0.11" }
solana-sdk = { path = "../sdk", version = "1.0.11" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.11", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-genesis = { path = "../genesis", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.22" }
solana-faucet = { path = "../faucet", version = "1.0.22" }
#solana-librapay = { path = "../programs/librapay", version = "1.0.20", optional = true }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-measure = { path = "../measure", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
#solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.20", optional = true }

[dev-dependencies]
serial_test = "0.3.2"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.0.11" }
solana-local-cluster = { path = "../local-cluster", version = "1.0.22" }

[features]
move = ["solana-librapay", "solana-move-loader-program"]
#[features]
#move = ["solana-librapay", "solana-move-loader-program"]

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "1.0.11"
version = "1.0.22"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,15 +10,18 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.11" }
solana-chacha = { path = "../chacha", version = "1.0.11" }
solana-ledger = { path = "../ledger", version = "1.0.11" }
solana-logger = { path = "../logger", version = "1.0.11" }
solana-perf = { path = "../perf", version = "1.0.11" }
solana-sdk = { path = "../sdk", version = "1.0.11" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.22" }
solana-chacha = { path = "../chacha", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-perf = { path = "../perf", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }

[dev-dependencies]
hex-literal = "0.2.1"

[lib]
name = "solana_chacha_cuda"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "1.0.11"
version = "1.0.22"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,3 +10,6 @@ edition = "2018"
[build-dependencies]
cc = "1.0.49"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "1.0.11"
version = "1.0.22"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,16 +10,19 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.11" }
solana-ledger = { path = "../ledger", version = "1.0.11" }
solana-logger = { path = "../logger", version = "1.0.11" }
solana-perf = { path = "../perf", version = "1.0.11" }
solana-sdk = { path = "../sdk", version = "1.0.11" }
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-perf = { path = "../perf", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }

[dev-dependencies]
hex-literal = "0.2.1"

[lib]
name = "solana_chacha"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,6 +2,16 @@
# Build steps that run after the primary pipeline on pushes and tags.
# Pull requests to not run these steps.
steps:
  - command: "ci/publish-tarball.sh"
    timeout_in_minutes: 60
    name: "publish tarball"
  - command: "ci/publish-docs.sh"
    timeout_in_minutes: 15
    name: "publish docs"
  - command: "ci/publish-bpf-sdk.sh"
    timeout_in_minutes: 5
    name: "publish bpf sdk"
  - wait
  - command: "sdk/docker-solana/build.sh"
    timeout_in_minutes: 60
    name: "publish docker"
@@ -9,12 +19,6 @@ steps:
    timeout_in_minutes: 240
    name: "publish crate"
    branches: "!master"
  - command: "ci/publish-bpf-sdk.sh"
    timeout_in_minutes: 5
    name: "publish bpf sdk"
  - command: "ci/publish-tarball.sh"
    timeout_in_minutes: 60
    name: "publish tarball"
  - command: "ci/publish-docs.sh"
    timeout_in_minutes: 15
    name: "publish docs"
  # - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
  #   name: "move"
  #   timeout_in_minutes: 20
ci/buildkite-tests.yml (new file, 26 lines)

@@ -0,0 +1,26 @@
# These steps are conditionally triggered by ci/buildkite.yml when files
# other than those in docs/ are modified

steps:
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
    name: "coverage"
    timeout_in_minutes: 30
  - wait
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
    name: "stable"
    timeout_in_minutes: 60
    artifact_paths: "log-*.txt"
  - wait
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 40
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"
  - command: "ci/test-bench.sh"
    name: "bench"
    timeout_in_minutes: 30
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
    name: "local-cluster"
    timeout_in_minutes: 45
    artifact_paths: "log-*.txt"
@@ -1,42 +1,25 @@
# Build steps that run on pushes and pull requests.
# If files other than those in docs/ were modified, this will be followed up by
# ci/buildkite-tests.yml
#
# Release tags use buildkite-release.yml instead

steps:
  - command: "ci/shellcheck.sh"
    name: "shellcheck"
    timeout_in_minutes: 5
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
    name: "checks"
    timeout_in_minutes: 20
  - command: "ci/shellcheck.sh"
    name: "shellcheck"
    timeout_in_minutes: 5

  - wait
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 40
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"
  - command: "ci/test-bench.sh"
    name: "bench"
    timeout_in_minutes: 30
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
    name: "stable"
    timeout_in_minutes: 60
    artifact_paths: "log-*.txt"
    agents:
      - "queue=rpc-test-capable"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
    name: "move"
    timeout_in_minutes: 20
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
    name: "local-cluster"
    timeout_in_minutes: 45
    artifact_paths: "log-*.txt"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
    name: "coverage"
    timeout_in_minutes: 30
    agents:
      - "queue=rpc-test-capable"

  - command: "ci/maybe-trigger-tests.sh"
    name: "maybe-trigger-tests"
    timeout_in_minutes: 2

  - wait

  - trigger: "solana-secondary"
    branches: "!pull/*"
    async: true
@@ -49,7 +49,7 @@ else
  # ~/.cargo
  ARGS+=(--volume "$PWD:/home")
fi
ARGS+=(--env "CARGO_HOME=/home/.cargo")
ARGS+=(--env "HOME=/home" --env "CARGO_HOME=/home/.cargo")

# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.42.0
FROM solanalabs/rust:1.43.0
ARG date

RUN set -x \
@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.42.0
FROM rust:1.43.0

# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
ci/maybe-trigger-tests.sh (new executable file, 21 lines)

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."

annotate() {
  ${BUILDKITE:-false} && {
    buildkite-agent annotate "$@"
  }
}

# Skip if only the docs have been modified
ci/affects-files.sh \
  \!^docs/ \
|| {
  annotate --style info \
    "Skipping all further tests as only docs/ files were modified"
  exit 0
}

annotate --style info "Triggering tests"
buildkite-agent pipeline upload ci/buildkite-tests.yml
@@ -71,7 +71,7 @@ echo --- Creating release tarball
  export CHANNEL

  source ci/rust-version.sh stable
  scripts/cargo-install-all.sh +"$rust_stable" --use-move solana-release
  scripts/cargo-install-all.sh +"$rust_stable" solana-release

  tar cvf solana-release-$TARGET.tar solana-release
  bzip2 solana-release-$TARGET.tar
@@ -95,9 +95,8 @@ fi
source ci/upload-ci-artifact.sh

for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do
  upload-ci-artifact "$file"

  if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
    upload-ci-artifact "$file"
    echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
    continue
  fi
@@ -1,28 +1,30 @@
#
# This file maintains the rust versions for use by CI.
#
# Build with stable rust, updating the stable toolchain if necessary:
#   $ source ci/rust-version.sh stable
#   $ cargo +"$rust_stable" build
#
# Build with nightly rust, updating the nightly toolchain if necessary:
#   $ source ci/rust-version.sh nightly
#   $ cargo +"$rust_nightly" build
#
# Obtain the environment variables without any automatic toolchain updating:
#   $ source ci/rust-version.sh
#
# Obtain the environment variables updating both stable and nightly, only stable, or
# only nightly:
#   $ source ci/rust-version.sh all
#   $ source ci/rust-version.sh stable
#   $ source ci/rust-version.sh nightly

# Then to build with either stable or nightly:
#   $ cargo +"$rust_stable" build
#   $ cargo +"$rust_nightly" build
#

if [[ -n $RUST_STABLE_VERSION ]]; then
  stable_version="$RUST_STABLE_VERSION"
else
  stable_version=1.42.0
  stable_version=1.43.0
fi

if [[ -n $RUST_NIGHTLY_VERSION ]]; then
  nightly_version="$RUST_NIGHTLY_VERSION"
else
  nightly_version=2020-03-12
  nightly_version=2020-04-23
fi

@@ -51,6 +53,10 @@ export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"
  nightly)
    rustup_install "$rust_nightly"
    ;;
  all)
    rustup_install "$rust_stable"
    rustup_install "$rust_nightly"
    ;;
  *)
    echo "Note: ignoring unknown argument: $1"
    ;;
@@ -25,7 +25,7 @@ source ci/_
source ci/upload-ci-artifact.sh

eval "$(ci/channel-info.sh)"
source ci/rust-version.sh nightly
source ci/rust-version.sh all

set -o pipefail
export RUST_BACKTRACE=1
@@ -22,7 +22,7 @@ _ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings

_ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ docs/build.sh
@@ -38,11 +38,16 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105

# Limit compiler jobs to reduce memory usage
# on machines with 1gb/thread of memory
NPROC=$(nproc)
NPROC=$((NPROC>16 ? 16 : NPROC))

echo "Executing $testName"
case $testName in
test-stable)
  _ cargo +"$rust_stable" test --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
  _ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
  _ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
  #_ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
  ;;
test-stable-perf)
  ci/affects-files.sh \
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.0.11"
version = "1.0.22"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.11" }
solana-sdk = { path = "../sdk", version = "1.0.11" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"
@@ -20,3 +20,6 @@ chrono = "0.4"

[lib]
name = "solana_clap_utils"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
clap-utils/src/commitment.rs (new file, 18 lines)

@@ -0,0 +1,18 @@
use crate::ArgConstant;
use clap::Arg;

pub const COMMITMENT_ARG: ArgConstant<'static> = ArgConstant {
    name: "commitment",
    long: "commitment",
    help: "Return information at the selected commitment level",
};

pub fn commitment_arg<'a, 'b>() -> Arg<'a, 'b> {
    Arg::with_name(COMMITMENT_ARG.name)
        .long(COMMITMENT_ARG.long)
        .takes_value(true)
        .possible_values(&["recent", "root", "max"])
        .default_value("recent")
        .value_name("COMMITMENT_LEVEL")
        .help(COMMITMENT_ARG.help)
}
@@ -7,6 +7,7 @@ use clap::ArgMatches;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
    clock::UnixTimestamp,
    commitment_config::CommitmentConfig,
    native_token::sol_to_lamports,
    pubkey::Pubkey,
    signature::{read_keypair_file, Keypair, Signature, Signer},
@@ -162,6 +163,15 @@ pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option<u64> {
    value_of(matches, name).map(sol_to_lamports)
}

pub fn commitment_of(matches: &ArgMatches<'_>, name: &str) -> Option<CommitmentConfig> {
    matches.value_of(name).map(|value| match value {
        "max" => CommitmentConfig::max(),
        "recent" => CommitmentConfig::recent(),
        "root" => CommitmentConfig::root(),
        _ => CommitmentConfig::default(),
    })
}

#[cfg(test)]
mod tests {
    use super::*;
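The new `commitment_arg()` helper (clap-utils/src/commitment.rs above) and this `commitment_of()` parser are what replace the ad-hoc `--confirmed` flags removed later in this compare. A sketch of how the two would typically be wired into a clap 2.x binary; the binary name is illustrative, not from the repository:

```rust
// Hypothetical consumer of the new clap-utils helpers.
use clap::App;
use solana_clap_utils::{
    commitment::{commitment_arg, COMMITMENT_ARG},
    input_parsers::commitment_of,
};

fn main() {
    let matches = App::new("commitment-demo")
        .arg(commitment_arg()) // adds --commitment <COMMITMENT_LEVEL>, default "recent"
        .get_matches();

    // Maps "recent" / "root" / "max" to the corresponding CommitmentConfig.
    let commitment = commitment_of(&matches, COMMITMENT_ARG.long)
        .expect("commitment has a default value, so a value is always present");
    println!("using commitment: {:?}", commitment);
}
```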
@@ -42,6 +42,7 @@ impl std::fmt::Debug for DisplayError {
    }
}

pub mod commitment;
pub mod input_parsers;
pub mod input_validators;
pub mod keypair;
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.11"
version = "1.0.22"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,3 +15,6 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
url = "2.1.1"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,10 +1,6 @@
// Wallet settings that can be configured for long-term use
use serde_derive::{Deserialize, Serialize};
use std::{
    fs::{create_dir_all, File},
    io::{self, Write},
    path::Path,
};
use std::io;
use url::Url;

lazy_static! {
@@ -46,23 +42,11 @@ impl Default for Config {

impl Config {
    pub fn load(config_file: &str) -> Result<Self, io::Error> {
        let file = File::open(config_file.to_string())?;
        let config = serde_yaml::from_reader(file)
            .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
        Ok(config)
        crate::load_config_file(config_file)
    }

    pub fn save(&self, config_file: &str) -> Result<(), io::Error> {
        let serialized = serde_yaml::to_string(self)
            .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;

        if let Some(outdir) = Path::new(&config_file).parent() {
            create_dir_all(outdir)?;
        }
        let mut file = File::create(config_file)?;
        file.write_all(&serialized.into_bytes())?;

        Ok(())
        crate::save_config_file(self, config_file)
    }

    pub fn compute_websocket_url(json_rpc_url: &str) -> String {
@@ -76,17 +60,38 @@ impl Config {
        ws_url
            .set_scheme(if is_secure { "wss" } else { "ws" })
            .expect("unable to set scheme");
        let ws_port = match json_rpc_url.port() {
            Some(port) => port + 1,
            None => {
                if is_secure {
                    8901
                } else {
                    8900
                }
            }
        };
        ws_url.set_port(Some(ws_port)).expect("unable to set port");
        if let Some(port) = json_rpc_url.port() {
            ws_url.set_port(Some(port + 1)).expect("unable to set port");
        }
        ws_url.to_string()
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn compute_websocket_url() {
        assert_eq!(
            Config::compute_websocket_url(&"http://devnet.solana.com"),
            "ws://devnet.solana.com/".to_string()
        );

        assert_eq!(
            Config::compute_websocket_url(&"https://devnet.solana.com"),
            "wss://devnet.solana.com/".to_string()
        );

        assert_eq!(
            Config::compute_websocket_url(&"http://example.com:8899"),
            "ws://example.com:8900/".to_string()
        );
        assert_eq!(
            Config::compute_websocket_url(&"https://example.com:1234"),
            "wss://example.com:1235/".to_string()
        );

        assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
    }
}
@@ -3,3 +3,37 @@ extern crate lazy_static;

mod config;
pub use config::{Config, CONFIG_FILE};

use std::{
    fs::{create_dir_all, File},
    io::{self, Write},
    path::Path,
};

pub fn load_config_file<T, P>(config_file: P) -> Result<T, io::Error>
where
    T: serde::de::DeserializeOwned,
    P: AsRef<Path>,
{
    let file = File::open(config_file)?;
    let config = serde_yaml::from_reader(file)
        .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
    Ok(config)
}

pub fn save_config_file<T, P>(config: &T, config_file: P) -> Result<(), io::Error>
where
    T: serde::ser::Serialize,
    P: AsRef<Path>,
{
    let serialized = serde_yaml::to_string(config)
        .map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;

    if let Some(outdir) = config_file.as_ref().parent() {
        create_dir_all(outdir)?;
    }
    let mut file = File::create(config_file)?;
    file.write_all(&serialized.into_bytes())?;

    Ok(())
}
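These generic helpers let any tool persist its own YAML config the same way the `solana` CLI does. A sketch under the assumption of a hypothetical caller-defined struct and file path (neither is from the repository; the consumer crate is assumed to depend on serde_derive and solana-cli-config):

```rust
// Hypothetical consumer of the new generic load/save helpers.
use serde_derive::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, Default)]
struct ToolConfig {
    json_rpc_url: String,
    keypair_path: String,
}

fn main() -> Result<(), std::io::Error> {
    let cfg = ToolConfig {
        json_rpc_url: "http://127.0.0.1:8899".to_string(),
        keypair_path: "/tmp/id.json".to_string(),
    };

    // save_config_file serializes to YAML and creates parent directories as needed.
    solana_cli_config::save_config_file(&cfg, "/tmp/tool/config.yml")?;

    // load_config_file deserializes any DeserializeOwned type from the same file.
    let loaded: ToolConfig = solana_cli_config::load_config_file("/tmp/tool/config.yml")?;
    println!("{:?}", loaded);
    Ok(())
}
```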
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.11"
version = "1.0.22"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,30 +26,34 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.46"
solana-budget-program = { path = "../programs/budget", version = "1.0.11" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.11" }
solana-cli-config = { path = "../cli-config", version = "1.0.11" }
solana-client = { path = "../client", version = "1.0.11" }
solana-config-program = { path = "../programs/config", version = "1.0.11" }
solana-faucet = { path = "../faucet", version = "1.0.11" }
solana-logger = { path = "../logger", version = "1.0.11" }
solana-net-utils = { path = "../net-utils", version = "1.0.11" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.11" }
solana-runtime = { path = "../runtime", version = "1.0.11" }
solana-sdk = { path = "../sdk", version = "1.0.11" }
solana-stake-program = { path = "../programs/stake", version = "1.0.11" }
solana-storage-program = { path = "../programs/storage", version = "1.0.11" }
solana-vote-program = { path = "../programs/vote", version = "1.0.11" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.11" }
solana-budget-program = { path = "../programs/budget", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-cli-config = { path = "../cli-config", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.22" }
solana-config-program = { path = "../programs/config", version = "1.0.22" }
solana-faucet = { path = "../faucet", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.22" }
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.22" }
titlecase = "1.1.0"
thiserror = "1.0.11"
url = "2.1.1"

[dev-dependencies]
solana-core = { path = "../core", version = "1.0.11" }
solana-budget-program = { path = "../programs/budget", version = "1.0.11" }
solana-core = { path = "../core", version = "1.0.22" }
solana-budget-program = { path = "../programs/budget", version = "1.0.22" }
tempfile = "3.1.0"

[[bin]]
name = "solana"
path = "src/main.rs"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
cli/src/cli.rs (108 changed lines)

@@ -188,6 +188,9 @@ pub enum CliCommand {
        commitment_config: CommitmentConfig,
    },
    GetGenesisHash,
    GetEpoch {
        commitment_config: CommitmentConfig,
    },
    GetSlot {
        commitment_config: CommitmentConfig,
    },
@@ -402,6 +405,7 @@ pub enum CliCommand {
        to: Pubkey,
        from: SignerIndex,
        sign_only: bool,
        no_wait: bool,
        blockhash_query: BlockhashQuery,
        nonce_account: Option<Pubkey>,
        nonce_authority: SignerIndex,
@@ -534,7 +538,7 @@ impl CliConfig<'_> {
        if !self.signers.is_empty() {
            self.signers[0].try_pubkey()
        } else {
            Err(SignerError::CustomError(
            Err(SignerError::Custom(
                "Default keypair must be set if pubkey arg not provided".to_string(),
            ))
        }
@@ -583,6 +587,7 @@ pub fn parse_command(
            command: CliCommand::GetGenesisHash,
            signers: vec![],
        }),
        ("epoch", Some(matches)) => parse_get_epoch(matches),
        ("slot", Some(matches)) => parse_get_slot(matches),
        ("transaction-count", Some(matches)) => parse_get_transaction_count(matches),
        ("leader-schedule", Some(_matches)) => Ok(CliCommandInfo {
@@ -898,6 +903,7 @@ pub fn parse_command(
            let lamports = lamports_of_sol(matches, "amount").unwrap();
            let to = pubkey_of_signer(matches, "to", wallet_manager)?.unwrap();
            let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
            let no_wait = matches.is_present("no_wait");
            let blockhash_query = BlockhashQuery::new_from_matches(matches);
            let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?;
            let (nonce_authority, nonce_authority_pubkey) =
@@ -923,6 +929,7 @@ pub fn parse_command(
                lamports,
                to,
                sign_only,
                no_wait,
                blockhash_query,
                nonce_account,
                nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
@@ -1148,12 +1155,48 @@ fn process_balance(
    }
}

fn process_confirm(rpc_client: &RpcClient, signature: &Signature) -> ProcessResult {
    match rpc_client.get_signature_status(&signature.to_string()) {
fn process_confirm(
    rpc_client: &RpcClient,
    config: &CliConfig,
    signature: &Signature,
) -> ProcessResult {
    match rpc_client.get_signature_status_with_commitment_and_history(
        &signature,
        CommitmentConfig::max(),
        true,
    ) {
        Ok(status) => {
            if let Some(result) = status {
                match result {
                    Ok(_) => Ok("Confirmed".to_string()),
                    Ok(_) => {
                        if config.verbose {
                            match rpc_client.get_confirmed_transaction(
                                signature,
                                solana_transaction_status::TransactionEncoding::Binary,
                            ) {
                                Ok(confirmed_transaction) => {
                                    println!("\nTransaction:");
                                    crate::display::println_transaction(
                                        &confirmed_transaction
                                            .transaction
                                            .transaction
                                            .decode()
                                            .expect("Successful decode"),
                                        &confirmed_transaction.transaction.meta,
                                        " ",
                                    );
                                    println!();
                                    Ok(format!("Confirmed in slot {}", confirmed_transaction.slot))
                                }
                                Err(err) => Ok(format!(
                                    "Confirmed. Unable to get confirmed transaction details: {}",
                                    err
                                )),
                            }
                        } else {
                            Ok("Confirmed".to_string())
                        }
                    }
                    Err(err) => Ok(format!("Transaction failed with error: {}", err)),
                }
            } else {
@@ -1181,6 +1224,7 @@ fn process_show_account(
    );
    println_name_value("Owner:", &account.owner.to_string());
    println_name_value("Executable:", &account.executable.to_string());
    println_name_value("Rent Epoch:", &account.rent_epoch.to_string());

    if let Some(output_file) = output_file {
        let mut f = File::create(output_file)?;
@@ -1483,6 +1527,7 @@ fn process_transfer(
    to: &Pubkey,
    from: SignerIndex,
    sign_only: bool,
    no_wait: bool,
    blockhash_query: &BlockhashQuery,
    nonce_account: Option<&Pubkey>,
    nonce_authority: SignerIndex,
@@ -1529,7 +1574,11 @@ fn process_transfer(
            &fee_calculator,
            &tx.message,
        )?;
        let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
        let result = if no_wait {
            rpc_client.send_transaction(&tx)
        } else {
            rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers)
        };
        log_instruction_custom_error::<SystemError>(result)
    }
}
@@ -1597,6 +1646,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
        CliCommand::GetEpochInfo { commitment_config } => {
            process_get_epoch_info(&rpc_client, *commitment_config)
        }
        CliCommand::GetEpoch { commitment_config } => {
            process_get_epoch(&rpc_client, *commitment_config)
        }
        CliCommand::GetSlot { commitment_config } => {
            process_get_slot(&rpc_client, *commitment_config)
        }
@@ -2018,7 +2070,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
        // Cancel a contract by contract Pubkey
        CliCommand::Cancel(pubkey) => process_cancel(&rpc_client, config, &pubkey),
        // Confirm the last client transaction by signature
        CliCommand::Confirm(signature) => process_confirm(&rpc_client, signature),
        CliCommand::Confirm(signature) => process_confirm(&rpc_client, config, signature),
        // If client has positive balance, pay lamports to another address
        CliCommand::Pay(PayCommand {
            lamports,
@@ -2072,6 +2124,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
            to,
            from,
            sign_only,
            no_wait,
            ref blockhash_query,
            ref nonce_account,
            nonce_authority,
@@ -2083,6 +2136,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
            to,
            *from,
            *sign_only,
            *no_wait,
            blockhash_query,
            nonce_account.as_ref(),
            *nonce_authority,
@@ -2157,7 +2211,7 @@ pub fn request_and_confirm_airdrop(
    log_instruction_custom_error::<SystemError>(result)
}

pub fn log_instruction_custom_error<E>(result: ClientResult<String>) -> ProcessResult
pub fn log_instruction_custom_error<E>(result: ClientResult<Signature>) -> ProcessResult
where
    E: 'static + std::error::Error + DecodeError<E> + FromPrimitive,
{
@@ -2165,7 +2219,7 @@ where
        Err(err) => {
            if let ClientErrorKind::TransactionError(TransactionError::InstructionError(
                _,
                InstructionError::CustomError(code),
                InstructionError::Custom(code),
            )) = err.kind()
            {
                if let Some(specific_error) = E::decode_custom_error_to_enum(*code) {
@@ -2174,7 +2228,7 @@ where
            }
            Err(err.into())
        }
        Ok(sig) => Ok(sig),
        Ok(sig) => Ok(sig.to_string()),
    }
}

@@ -2489,6 +2543,12 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
                .validator(is_valid_signer)
                .help("Source account of funds (if different from client local account)"),
        )
        .arg(
            Arg::with_name("no_wait")
                .long("no-wait")
                .takes_value(false)
                .help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"),
        )
        .offline_args()
        .arg(nonce_arg())
        .arg(nonce_authority_arg())
@@ -3549,6 +3609,33 @@ mod tests {
                to: to_pubkey,
                from: 0,
                sign_only: false,
                no_wait: false,
                blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
                nonce_account: None,
                nonce_authority: 0,
                fee_payer: 0,
            },
            signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
        }
    );

    // Test Transfer no-wait
    let test_transfer = test_commands.clone().get_matches_from(vec![
        "test",
        "transfer",
        "--no-wait",
        &to_string,
        "42",
    ]);
    assert_eq!(
        parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
        CliCommandInfo {
            command: CliCommand::Transfer {
                lamports: 42_000_000_000,
                to: to_pubkey,
                from: 0,
                sign_only: false,
                no_wait: true,
                blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
                nonce_account: None,
                nonce_authority: 0,
@@ -3578,6 +3665,7 @@ mod tests {
                to: to_pubkey,
                from: 0,
                sign_only: true,
                no_wait: false,
                blockhash_query: BlockhashQuery::None(blockhash),
                nonce_account: None,
                nonce_authority: 0,
@@ -3612,6 +3700,7 @@ mod tests {
                to: to_pubkey,
                from: 0,
                sign_only: false,
                no_wait: false,
                blockhash_query: BlockhashQuery::FeeCalculator(
                    blockhash_query::Source::Cluster,
                    blockhash
@@ -3650,6 +3739,7 @@ mod tests {
                to: to_pubkey,
                from: 0,
                sign_only: false,
                no_wait: false,
                blockhash_query: BlockhashQuery::FeeCalculator(
                    blockhash_query::Source::NonceAccount(nonce_address),
                    blockhash
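The `--no-wait` branch added to `process_transfer` above boils down to calling `RpcClient::send_transaction` instead of the spinner-based confirmation loop. A rough sketch of that fire-and-forget path, assuming a reachable RPC endpoint and a funded payer keypair (both assumptions for the example, not repository code):

```rust
// Sketch of the no-wait send path: submit and return without waiting for confirmation.
use solana_client::rpc_client::RpcClient;
use solana_sdk::{pubkey::Pubkey, signature::Keypair, system_transaction};

fn transfer_no_wait(
    rpc_client: &RpcClient,
    payer: &Keypair,
    to: &Pubkey,
    lamports: u64,
) -> Result<(), Box<dyn std::error::Error>> {
    let (blockhash, _fee_calculator) = rpc_client.get_recent_blockhash()?;
    let tx = system_transaction::transfer(payer, to, lamports, blockhash);
    // Equivalent of `--no-wait`: the signature comes back immediately;
    // confirmation can be checked later with `solana confirm`.
    let signature = rpc_client.send_transaction(&tx)?;
    println!("submitted: {}", signature);
    Ok(())
}
```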
@@ -8,7 +8,12 @@ use crate::{
|
||||
use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
|
||||
use console::{style, Emoji};
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::signer_from_path};
|
||||
use solana_clap_utils::{
|
||||
commitment::{commitment_arg, COMMITMENT_ARG},
|
||||
input_parsers::*,
|
||||
input_validators::*,
|
||||
keypair::signer_from_path,
|
||||
};
|
||||
use solana_client::{
|
||||
pubsub_client::{PubsubClient, SlotInfoMessage},
|
||||
rpc_client::RpcClient,
|
||||
@@ -91,14 +96,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
SubCommand::with_name("epoch-info")
|
||||
.about("Get information about the current epoch")
|
||||
.alias("get-epoch-info")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return information at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("genesis-hash")
|
||||
@@ -108,26 +106,16 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.subcommand(
|
||||
SubCommand::with_name("slot").about("Get current slot")
|
||||
.alias("get-slot")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return slot at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("epoch").about("Get current epoch")
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("transaction-count").about("Get current transaction count")
|
||||
.alias("get-transaction-count")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return count at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("ping")
|
||||
@@ -167,14 +155,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.default_value("15")
|
||||
.help("Wait up to timeout seconds for transaction confirmation"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Wait until the transaction is confirmed at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("live-slots")
|
||||
@@ -225,20 +206,13 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
SubCommand::with_name("validators")
|
||||
.about("Show summary information about the current validators")
|
||||
.alias("show-validators")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return information at maximum-lockout commitment level",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("lamports")
|
||||
.long("lamports")
|
||||
.takes_value(false)
|
||||
.help("Display balance in lamports instead of SOL"),
|
||||
),
|
||||
)
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -271,11 +245,7 @@ pub fn parse_cluster_ping(
|
||||
None
|
||||
};
|
||||
let timeout = Duration::from_secs(value_t_or_exit!(matches, "timeout", u64));
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::Ping {
|
||||
lamports,
|
||||
@@ -302,11 +272,7 @@ pub fn parse_get_block_time(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
|
||||
}
|
||||
|
||||
pub fn parse_get_epoch_info(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetEpochInfo { commitment_config },
|
||||
signers: vec![],
|
||||
@@ -314,23 +280,23 @@ pub fn parse_get_epoch_info(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
|
||||
}
|
||||
|
||||
pub fn parse_get_slot(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetSlot { commitment_config },
|
||||
signers: vec![],
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_get_epoch(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetEpoch { commitment_config },
|
||||
signers: vec![],
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetTransactionCount { commitment_config },
|
||||
signers: vec![],
|
||||
@@ -356,11 +322,7 @@ pub fn parse_show_stakes(
|
||||
|
||||
pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowValidators {
|
||||
@@ -431,25 +393,35 @@ pub fn process_catchup(
        }

        let slot_distance = rpc_slot as i64 - node_slot as i64;
        let slots_per_second =
            (previous_slot_distance - slot_distance) as f64 / f64::from(sleep_interval);
        let time_remaining = (slot_distance as f64 / slots_per_second).round();
        let time_remaining = if !time_remaining.is_normal() || time_remaining <= 0.0 {
            "".to_string()
        } else {
            format!(
                ". Time remaining: {}",
                humantime::format_duration(Duration::from_secs_f64(time_remaining))
            )
        };

        progress_bar.set_message(&format!(
            "Validator is {} slots away (us:{} them:{}){}",
            "{} slots behind (us:{} them:{}){}",
            slot_distance,
            node_slot,
            rpc_slot,
            if previous_rpc_slot == std::u64::MAX {
                "".to_string()
            } else {
                let slots_per_second =
                    (previous_slot_distance - slot_distance) as f64 / f64::from(sleep_interval);

                format!(
                    " and {} at {:.1} slots/second",
                    ", {} at {:.1} slots/second{}",
                    if slots_per_second < 0.0 {
                        "falling behind"
                    } else {
                        "gaining"
                    },
                    slots_per_second,
                    time_remaining
                )
            }
        ));
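The reworked catch-up message derives a rate and an ETA from two successive slot-distance samples. A compact, std-only sketch of that arithmetic (the function name and fixed sample interval are illustrative, not part of the PR):

```rust
use std::time::Duration;

/// Estimate catch-up progress from two successive slot-distance samples taken
/// `sleep_interval` seconds apart. Returns (slots_per_second, time_remaining).
fn catchup_estimate(
    previous_slot_distance: i64,
    slot_distance: i64,
    sleep_interval: u32,
) -> (f64, Option<Duration>) {
    let slots_per_second =
        (previous_slot_distance - slot_distance) as f64 / f64::from(sleep_interval);
    let time_remaining = (slot_distance as f64 / slots_per_second).round();
    // A negative or non-finite value means the node is falling behind (or the
    // rate is zero), so no ETA is reported.
    let time_remaining = if !time_remaining.is_normal() || time_remaining <= 0.0 {
        None
    } else {
        Some(Duration::from_secs_f64(time_remaining))
    };
    (slots_per_second, time_remaining)
}

fn main() {
    // Was 500 slots behind, now 450, sampled 2 seconds apart: 25 slots/s, ~18s left.
    let (rate, eta) = catchup_estimate(500, 450, 2);
    println!("{} slots/second, eta: {:?}", rate, eta);
}
```
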
@@ -575,6 +547,14 @@ pub fn process_get_slot(
    Ok(slot.to_string())
}

pub fn process_get_epoch(
    rpc_client: &RpcClient,
    commitment_config: CommitmentConfig,
) -> ProcessResult {
    let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
    Ok(epoch_info.epoch.to_string())
}

pub fn parse_show_block_production(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
    let epoch = value_t!(matches, "epoch", Epoch).ok();
    let slot_limit = value_t!(matches, "slot_limit", u64).ok();
@@ -1280,6 +1260,19 @@ mod tests {
            }
        );

        let test_get_epoch = test_commands
            .clone()
            .get_matches_from(vec!["test", "epoch"]);
        assert_eq!(
            parse_command(&test_get_epoch, &default_keypair_file, None).unwrap(),
            CliCommandInfo {
                command: CliCommand::GetEpoch {
                    commitment_config: CommitmentConfig::recent(),
                },
                signers: vec![],
            }
        );

        let test_transaction_count = test_commands
            .clone()
            .get_matches_from(vec!["test", "transaction-count"]);
@@ -1302,7 +1295,8 @@ mod tests {
            "2",
            "-t",
            "3",
            "--confirmed",
            "--commitment",
            "max",
        ]);
        assert_eq!(
            parse_command(&test_ping, &default_keypair_file, None).unwrap(),
@@ -1312,7 +1306,7 @@ mod tests {
                interval: Duration::from_secs(1),
                count: Some(2),
                timeout: Duration::from_secs(3),
                commitment_config: CommitmentConfig::default(),
                commitment_config: CommitmentConfig::max(),
            },
            signers: vec![default_keypair.into()],
        }
@@ -1,6 +1,11 @@
use crate::cli::SettingType;
use console::style;
use solana_sdk::hash::Hash;
use solana_sdk::{
    hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
    transaction::Transaction,
};
use solana_transaction_status::RpcTransactionStatusMeta;
use std::io;

// Pretty print a "name value"
pub fn println_name_value(name: &str, value: &str) {
@@ -49,3 +54,140 @@ pub fn println_signers(
    }
    println!();
}
pub fn write_transaction<W: io::Write>(
    w: &mut W,
    transaction: &Transaction,
    transaction_status: &Option<RpcTransactionStatusMeta>,
    prefix: &str,
) -> io::Result<()> {
    let message = &transaction.message;
    writeln!(
        w,
        "{}Recent Blockhash: {:?}",
        prefix, message.recent_blockhash
    )?;
    for (signature_index, signature) in transaction.signatures.iter().enumerate() {
        writeln!(
            w,
            "{}Signature {}: {:?}",
            prefix, signature_index, signature
        )?;
    }
    writeln!(w, "{}{:?}", prefix, message.header)?;
    for (account_index, account) in message.account_keys.iter().enumerate() {
        writeln!(w, "{}Account {}: {:?}", prefix, account_index, account)?;
    }
    for (instruction_index, instruction) in message.instructions.iter().enumerate() {
        let program_pubkey = message.account_keys[instruction.program_id_index as usize];
        writeln!(w, "{}Instruction {}", prefix, instruction_index)?;
        writeln!(
            w,
            "{} Program: {} ({})",
            prefix, program_pubkey, instruction.program_id_index
        )?;
        for (account_index, account) in instruction.accounts.iter().enumerate() {
            let account_pubkey = message.account_keys[*account as usize];
            writeln!(
                w,
                "{} Account {}: {} ({})",
                prefix, account_index, account_pubkey, account
            )?;
        }

        let mut raw = true;
        if program_pubkey == solana_vote_program::id() {
            if let Ok(vote_instruction) = limited_deserialize::<
                solana_vote_program::vote_instruction::VoteInstruction,
            >(&instruction.data)
            {
                writeln!(w, "{} {:?}", prefix, vote_instruction)?;
                raw = false;
            }
        } else if program_pubkey == solana_stake_program::id() {
            if let Ok(stake_instruction) = limited_deserialize::<
                solana_stake_program::stake_instruction::StakeInstruction,
            >(&instruction.data)
            {
                writeln!(w, "{} {:?}", prefix, stake_instruction)?;
                raw = false;
            }
        } else if program_pubkey == solana_sdk::system_program::id() {
            if let Ok(system_instruction) = limited_deserialize::<
                solana_sdk::system_instruction::SystemInstruction,
            >(&instruction.data)
            {
                writeln!(w, "{} {:?}", prefix, system_instruction)?;
                raw = false;
            }
        }

        if raw {
            writeln!(w, "{} Data: {:?}", prefix, instruction.data)?;
        }
    }

    if let Some(transaction_status) = transaction_status {
        writeln!(
            w,
            "{}Status: {}",
            prefix,
            match &transaction_status.status {
                Ok(_) => "Ok".into(),
                Err(err) => err.to_string(),
            }
        )?;
        writeln!(
            w,
            "{} Fee: {} SOL",
            prefix,
            lamports_to_sol(transaction_status.fee)
        )?;
        assert_eq!(
            transaction_status.pre_balances.len(),
            transaction_status.post_balances.len()
        );
        for (i, (pre, post)) in transaction_status
            .pre_balances
            .iter()
            .zip(transaction_status.post_balances.iter())
            .enumerate()
        {
            if pre == post {
                writeln!(
                    w,
                    "{} Account {} balance: {} SOL",
                    prefix,
                    i,
                    lamports_to_sol(*pre)
                )?;
            } else {
                writeln!(
                    w,
                    "{} Account {} balance: {} SOL -> {} SOL",
                    prefix,
                    i,
                    lamports_to_sol(*pre),
                    lamports_to_sol(*post)
                )?;
            }
        }
    } else {
        writeln!(w, "{}Status: Unavailable", prefix)?;
    }

    Ok(())
}

pub fn println_transaction(
    transaction: &Transaction,
    transaction_status: &Option<RpcTransactionStatusMeta>,
    prefix: &str,
) {
    let mut w = Vec::new();
    if write_transaction(&mut w, transaction, transaction_status, prefix).is_ok() {
        if let Ok(s) = String::from_utf8(w) {
            print!("{}", s);
        }
    }
}
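The balance section at the end of `write_transaction` is a simple zip over pre/post balances. A standalone sketch of the same formatting (std only; the 10^9 lamports-per-SOL factor is written out here because `lamports_to_sol` comes from `solana-sdk`):

```rust
use std::io::{self, Write};

const LAMPORTS_PER_SOL: u64 = 1_000_000_000;

fn lamports_to_sol(lamports: u64) -> f64 {
    lamports as f64 / LAMPORTS_PER_SOL as f64
}

/// Print one line per account: the balance alone when it is unchanged, or an
/// `a SOL -> b SOL` transition when it changed, mirroring write_transaction.
fn write_balance_changes<W: Write>(w: &mut W, pre: &[u64], post: &[u64]) -> io::Result<()> {
    assert_eq!(pre.len(), post.len());
    for (i, (pre, post)) in pre.iter().zip(post.iter()).enumerate() {
        if pre == post {
            writeln!(w, "  Account {} balance: {} SOL", i, lamports_to_sol(*pre))?;
        } else {
            writeln!(
                w,
                "  Account {} balance: {} SOL -> {} SOL",
                i,
                lamports_to_sol(*pre),
                lamports_to_sol(*post)
            )?;
        }
    }
    Ok(())
}

fn main() -> io::Result<()> {
    write_balance_changes(&mut io::stdout(), &[1_000_000_000, 5_000], &[999_995_000, 5_000])
}
```
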
@@ -266,8 +266,8 @@ pub fn process_claim_storage_reward(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
|
||||
Ok(signature_str)
|
||||
let signature = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
|
||||
Ok(signature.to_string())
|
||||
}
|
||||
|
||||
pub fn process_show_storage_account(
|
||||
|
@@ -4,7 +4,11 @@ use crate::cli::{
|
||||
ProcessResult, SignerIndex,
|
||||
};
|
||||
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*};
|
||||
use solana_clap_utils::{
|
||||
commitment::{commitment_arg, COMMITMENT_ARG},
|
||||
input_parsers::*,
|
||||
input_validators::*,
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
@@ -159,14 +163,6 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
SubCommand::with_name("vote-account")
|
||||
.about("Show the contents of a vote account")
|
||||
.alias("show-vote-account")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return information at maximum-lockout commitment level",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
@@ -181,7 +177,8 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.long("lamports")
|
||||
.takes_value(false)
|
||||
.help("Display balance in lamports instead of SOL"),
|
||||
),
|
||||
)
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("withdraw-from-vote-account")
|
||||
@@ -322,11 +319,7 @@ pub fn parse_vote_get_account_command(
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowVoteAccount {
|
||||
pubkey: vote_account_pubkey,
|
||||
|
@@ -338,6 +338,7 @@ fn test_create_account_with_seed() {
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_address),
|
||||
nonce_authority: 0,
|
||||
@@ -358,6 +359,7 @@ fn test_create_account_with_seed() {
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_address),
|
||||
sign_only.blockhash,
|
||||
|
@@ -68,6 +68,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@@ -95,6 +96,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@@ -110,6 +112,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@@ -147,6 +150,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
nonce_hash,
|
||||
@@ -187,6 +191,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
@@ -202,6 +207,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
sign_only.blockhash,
|
||||
@@ -269,6 +275,7 @@ fn test_transfer_multisession_signing() {
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@@ -293,6 +300,7 @@ fn test_transfer_multisession_signing() {
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@@ -314,6 +322,7 @@ fn test_transfer_multisession_signing() {
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.0.11"
|
||||
version = "1.0.22"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -19,10 +19,10 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.46"
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.11" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.11" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.0.11" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.11" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.22" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.0.22" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
|
||||
thiserror = "1.0"
|
||||
tungstenite = "0.10.1"
|
||||
url = "2.1.1"
|
||||
@@ -31,4 +31,7 @@ url = "2.1.1"
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-core = "14.0.5"
|
||||
jsonrpc-http-server = "14.0.6"
|
||||
solana-logger = { path = "../logger", version = "1.0.11" }
|
||||
solana-logger = { path = "../logger", version = "1.0.22" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -40,7 +40,7 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
fn send(
|
||||
&self,
|
||||
request: &RpcRequest,
|
||||
params: serde_json::Value,
|
||||
_params: serde_json::Value,
|
||||
_retries: usize,
|
||||
) -> Result<serde_json::Value> {
|
||||
if let Some(value) = self.mocks.write().unwrap().remove(request) {
|
||||
@@ -50,17 +50,6 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
return Ok(Value::Null);
|
||||
}
|
||||
let val = match request {
|
||||
RpcRequest::ConfirmTransaction => {
|
||||
if let Some(params_array) = params.as_array() {
|
||||
if let Value::String(param_string) = ¶ms_array[0] {
|
||||
Value::Bool(param_string == SIGNATURE)
|
||||
} else {
|
||||
Value::Null
|
||||
}
|
||||
} else {
|
||||
Value::Null
|
||||
}
|
||||
}
|
||||
RpcRequest::GetBalance => serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: Value::Number(Number::from(50)),
|
||||
@@ -87,7 +76,7 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: serde_json::to_value(FeeRateGovernor::default()).unwrap(),
|
||||
})?,
|
||||
RpcRequest::GetSignatureStatus => {
|
||||
RpcRequest::GetSignatureStatuses => {
|
||||
let status: transaction::Result<()> = if self.url == "account_in_use" {
|
||||
Err(TransactionError::AccountInUse)
|
||||
} else if self.url == "instruction_error" {
|
||||
@@ -101,10 +90,12 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
let status = if self.url == "sig_not_found" {
|
||||
None
|
||||
} else {
|
||||
let err = status.clone().err();
|
||||
Some(TransactionStatus {
|
||||
status,
|
||||
slot: 1,
|
||||
confirmations: Some(0),
|
||||
confirmations: None,
|
||||
err,
|
||||
})
|
||||
};
|
||||
serde_json::to_value(Response {
|
||||
|
@@ -16,7 +16,10 @@ use log::*;
|
||||
use serde_json::{json, Value};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT},
|
||||
clock::{
|
||||
Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT,
|
||||
MAX_HASH_AGE_IN_SECONDS,
|
||||
},
|
||||
commitment_config::CommitmentConfig,
|
||||
epoch_schedule::EpochSchedule,
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
@@ -27,12 +30,13 @@ use solana_sdk::{
|
||||
signers::Signers,
|
||||
transaction::{self, Transaction, TransactionError},
|
||||
};
|
||||
use solana_transaction_status::{ConfirmedBlock, TransactionStatus};
|
||||
use solana_transaction_status::{
|
||||
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
error,
|
||||
net::SocketAddr,
|
||||
str::FromStr,
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
@@ -71,7 +75,7 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
|
||||
    pub fn confirm_transaction(&self, signature: &str) -> ClientResult<bool> {
    pub fn confirm_transaction(&self, signature: &Signature) -> ClientResult<bool> {
        Ok(self
            .confirm_transaction_with_commitment(signature, CommitmentConfig::default())?
            .value)
@@ -79,55 +83,94 @@ impl RpcClient {

    pub fn confirm_transaction_with_commitment(
        &self,
        signature: &str,
        signature: &Signature,
        commitment_config: CommitmentConfig,
    ) -> RpcResult<bool> {
        let response = self
            .client
            .send(
                &RpcRequest::ConfirmTransaction,
                json!([signature, commitment_config]),
                0,
            )
            .map_err(|err| err.into_with_command("ConfirmTransaction"))?;
        let Response { context, value } = self.get_signature_statuses(&[*signature])?;

        serde_json::from_value::<Response<bool>>(response)
            .map_err(|err| ClientError::new_with_command(err.into(), "ConfirmTransaction"))
        Ok(Response {
            context,
            value: value[0]
                .as_ref()
                .filter(|result| result.satisfies_commitment(commitment_config))
                .map(|result| result.status.is_ok())
                .unwrap_or_default(),
        })
    }

    pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<String> {
    pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
        let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
        let signature =
        let response =
            self.client
                .send(&RpcRequest::SendTransaction, json!([serialized_encoded]), 5)?;
        if signature.as_str().is_none() {
            Err(RpcError::ForUser("Received result of an unexpected type".to_string()).into())
        } else {
            Ok(signature.as_str().unwrap().to_string())

        match response.as_str() {
            None => {
                Err(RpcError::ForUser("Received result of an unexpected type".to_string()).into())
            }
            Some(signature_base58_str) => signature_base58_str
                .parse::<Signature>()
                .map_err(|err| RpcError::ParseError(err.to_string()).into()),
        }
    }
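With these changes the client API deals in strongly typed `Signature` values instead of base58 `String`s end to end. A rough caller-side sketch under that assumption (the RPC URL and signature literal are placeholders, and error handling is collapsed into `expect` for brevity):

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{commitment_config::CommitmentConfig, signature::Signature};

fn main() {
    // Placeholder URL; point at a real JSON RPC endpoint in practice.
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());

    // Signatures now parse into the typed Signature before any RPC call.
    let signature: Signature =
        "5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"
            .parse()
            .expect("valid base58 signature");

    // The confirmation check is now layered on getSignatureStatuses.
    let confirmed = rpc_client
        .confirm_transaction_with_commitment(&signature, CommitmentConfig::recent())
        .expect("rpc request failed")
        .value;
    println!("confirmed: {}", confirmed);
}
```
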
||||
pub fn get_signature_status(
|
||||
&self,
|
||||
signature: &str,
|
||||
signature: &Signature,
|
||||
) -> ClientResult<Option<transaction::Result<()>>> {
|
||||
self.get_signature_status_with_commitment(signature, CommitmentConfig::default())
|
||||
}
|
||||
|
||||
pub fn get_signature_statuses(
|
||||
&self,
|
||||
signatures: &[Signature],
|
||||
) -> RpcResult<Vec<Option<TransactionStatus>>> {
|
||||
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
|
||||
let signature_status =
|
||||
self.client
|
||||
.send(&RpcRequest::GetSignatureStatuses, json!([signatures]), 5)?;
|
||||
Ok(serde_json::from_value(signature_status)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?)
|
||||
}
|
||||
|
||||
pub fn get_signature_status_with_commitment(
|
||||
&self,
|
||||
signature: &str,
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<Option<transaction::Result<()>>> {
|
||||
let signature_status = self.client.send(
|
||||
&RpcRequest::GetSignatureStatus,
|
||||
json!([[signature.to_string()], commitment_config]),
|
||||
&RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()]]),
|
||||
5,
|
||||
)?;
|
||||
let result: Response<Vec<Option<TransactionStatus>>> =
|
||||
serde_json::from_value(signature_status).unwrap();
|
||||
serde_json::from_value(signature_status)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
|
||||
Ok(result.value[0]
|
||||
.clone()
|
||||
.filter(|result| result.satisfies_commitment(commitment_config))
|
||||
.map(|status_meta| status_meta.status))
|
||||
}
|
||||
|
||||
pub fn get_signature_status_with_commitment_and_history(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
search_transaction_history: bool,
|
||||
) -> ClientResult<Option<transaction::Result<()>>> {
|
||||
let signature_status = self.client.send(
|
||||
&RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()], {
|
||||
"searchTransactionHistory": search_transaction_history
|
||||
}]),
|
||||
5,
|
||||
)?;
|
||||
let result: Response<Vec<Option<TransactionStatus>>> =
|
||||
serde_json::from_value(signature_status)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
|
||||
Ok(result.value[0]
|
||||
.clone()
|
||||
.filter(|result| result.satisfies_commitment(commitment_config))
|
||||
.map(|status_meta| status_meta.status))
|
||||
}
|
||||
|
||||
@@ -176,9 +219,17 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<ConfirmedBlock> {
|
||||
self.get_confirmed_block_with_encoding(slot, TransactionEncoding::Json)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block_with_encoding(
|
||||
&self,
|
||||
slot: Slot,
|
||||
encoding: TransactionEncoding,
|
||||
) -> ClientResult<ConfirmedBlock> {
|
||||
let response = self
|
||||
.client
|
||||
.send(&RpcRequest::GetConfirmedBlock, json!([slot]), 0)
|
||||
.send(&RpcRequest::GetConfirmedBlock, json!([slot, encoding]), 0)
|
||||
.map_err(|err| err.into_with_command("GetConfirmedBlock"))?;
|
||||
|
||||
serde_json::from_value(response)
|
||||
@@ -203,6 +254,44 @@ impl RpcClient {
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedBlocks"))
|
||||
}
|
||||
|
||||
pub fn get_confirmed_signatures_for_address(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
start_slot: Slot,
|
||||
end_slot: Slot,
|
||||
) -> ClientResult<Vec<Signature>> {
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::GetConfirmedSignaturesForAddress,
|
||||
json!([address, start_slot, end_slot]),
|
||||
0,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("GetConfirmedSignaturesForAddress"))?;
|
||||
|
||||
serde_json::from_value(response).map_err(|err| {
|
||||
ClientError::new_with_command(err.into(), "GetConfirmedSignaturesForAddress")
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_confirmed_transaction(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
encoding: TransactionEncoding,
|
||||
) -> ClientResult<ConfirmedTransaction> {
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::GetConfirmedTransaction,
|
||||
json!([signature.to_string(), encoding]),
|
||||
0,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("GetConfirmedTransaction"))?;
|
||||
|
||||
serde_json::from_value(response)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedTransaction"))
|
||||
}
|
||||
|
||||
pub fn get_block_time(&self, slot: Slot) -> ClientResult<UnixTimestamp> {
|
||||
let response = self
|
||||
.client
|
||||
@@ -325,13 +414,13 @@ impl RpcClient {
|
||||
&self,
|
||||
transaction: &mut Transaction,
|
||||
signer_keys: &T,
|
||||
) -> ClientResult<String> {
|
||||
) -> ClientResult<Signature> {
|
||||
let mut send_retries = 20;
|
||||
loop {
|
||||
let mut status_retries = 15;
|
||||
let signature_str = self.send_transaction(transaction)?;
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
let status = loop {
|
||||
let status = self.get_signature_status(&signature_str)?;
|
||||
let status = self.get_signature_status(&signature)?;
|
||||
if status.is_none() {
|
||||
status_retries -= 1;
|
||||
if status_retries == 0 {
|
||||
@@ -347,7 +436,7 @@ impl RpcClient {
|
||||
};
|
||||
send_retries = if let Some(result) = status.clone() {
|
||||
match result {
|
||||
Ok(_) => return Ok(signature_str),
|
||||
Ok(_) => return Ok(signature),
|
||||
Err(TransactionError::AccountInUse) => {
|
||||
// Fetch a new blockhash and re-sign the transaction before sending it again
|
||||
self.resign_transaction(transaction, signer_keys)?;
|
||||
@@ -809,10 +898,9 @@ impl RpcClient {
|
||||
) -> ClientResult<()> {
|
||||
let now = Instant::now();
|
||||
loop {
|
||||
if let Ok(Some(_)) = self.get_signature_status_with_commitment(
|
||||
&signature.to_string(),
|
||||
commitment_config.clone(),
|
||||
) {
|
||||
if let Ok(Some(_)) =
|
||||
self.get_signature_status_with_commitment(&signature, commitment_config.clone())
|
||||
{
|
||||
break;
|
||||
}
|
||||
if now.elapsed().as_secs() > 15 {
|
||||
@@ -832,14 +920,13 @@ impl RpcClient {
|
||||
trace!("check_signature: {:?}", signature);
|
||||
|
||||
for _ in 0..30 {
|
||||
let response = self.client.send(
|
||||
&RpcRequest::ConfirmTransaction,
|
||||
json!([signature.to_string(), CommitmentConfig::recent()]),
|
||||
0,
|
||||
);
|
||||
|
||||
let response =
|
||||
self.confirm_transaction_with_commitment(signature, CommitmentConfig::recent());
|
||||
match response {
|
||||
Ok(Value::Bool(signature_status)) => {
|
||||
Ok(Response {
|
||||
value: signature_status,
|
||||
..
|
||||
}) => {
|
||||
if signature_status {
|
||||
trace!("Response found signature");
|
||||
} else {
|
||||
@@ -848,12 +935,6 @@ impl RpcClient {
|
||||
|
||||
return signature_status;
|
||||
}
|
||||
Ok(other) => {
|
||||
debug!(
|
||||
"check_signature request failed, expected bool, got: {:?}",
|
||||
other
|
||||
);
|
||||
}
|
||||
Err(err) => {
|
||||
debug!("check_signature request failed: {:?}", err);
|
||||
}
|
||||
@@ -925,20 +1006,20 @@ impl RpcClient {
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::GetSignatureStatus,
|
||||
json!([[signature.to_string()], CommitmentConfig::recent().ok()]),
|
||||
&RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()]]),
|
||||
1,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("GetSignatureStatus"))?;
|
||||
let result: Response<Vec<Option<TransactionStatus>>> =
|
||||
serde_json::from_value(response).unwrap();
|
||||
.map_err(|err| err.into_with_command("GetSignatureStatuses"))?;
|
||||
let result: Response<Vec<Option<TransactionStatus>>> = serde_json::from_value(response)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
|
||||
|
||||
let confirmations = result.value[0]
|
||||
.clone()
|
||||
.ok_or_else(|| {
|
||||
ClientError::new_with_command(
|
||||
ClientErrorKind::Custom("signature not found".to_string()),
|
||||
"GetSignatureStatus",
|
||||
"GetSignatureStatuses",
|
||||
)
|
||||
})?
|
||||
.confirmations
|
||||
@@ -950,7 +1031,7 @@ impl RpcClient {
|
||||
&self,
|
||||
transaction: &mut Transaction,
|
||||
signer_keys: &T,
|
||||
) -> ClientResult<String> {
|
||||
) -> ClientResult<Signature> {
|
||||
let mut confirmations = 0;
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
@@ -961,23 +1042,21 @@ impl RpcClient {
|
||||
));
|
||||
|
||||
let mut send_retries = 20;
|
||||
let signature_str = loop {
|
||||
let signature = loop {
|
||||
let mut status_retries = 15;
|
||||
let (signature_str, status) = loop {
|
||||
let signature_str = self.send_transaction(transaction)?;
|
||||
let (signature, status) = loop {
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
|
||||
// Get recent commitment in order to count confirmations for successful transactions
|
||||
let status = self.get_signature_status_with_commitment(
|
||||
&signature_str,
|
||||
CommitmentConfig::recent(),
|
||||
)?;
|
||||
let status = self
|
||||
.get_signature_status_with_commitment(&signature, CommitmentConfig::recent())?;
|
||||
if status.is_none() {
|
||||
status_retries -= 1;
|
||||
if status_retries == 0 {
|
||||
break (signature_str, status);
|
||||
break (signature, status);
|
||||
}
|
||||
} else {
|
||||
break (signature_str, status);
|
||||
break (signature, status);
|
||||
}
|
||||
|
||||
if cfg!(not(test)) {
|
||||
@@ -1002,7 +1081,7 @@ impl RpcClient {
|
||||
if let Some(result) = status {
|
||||
match result {
|
||||
Ok(_) => {
|
||||
break signature_str;
|
||||
break signature;
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(err.into());
|
||||
@@ -1015,19 +1094,14 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
};
|
||||
let signature = Signature::from_str(&signature_str).map_err(|_| {
|
||||
ClientError::from(ClientErrorKind::Custom(format!(
|
||||
"Returned string {} cannot be parsed as a signature",
|
||||
signature_str
|
||||
)))
|
||||
})?;
|
||||
let now = Instant::now();
|
||||
loop {
|
||||
// Return when default (max) commitment is reached
|
||||
// Failed transactions have already been eliminated, `is_some` check is sufficient
|
||||
if self.get_signature_status(&signature_str)?.is_some() {
|
||||
if self.get_signature_status(&signature)?.is_some() {
|
||||
progress_bar.set_message("Transaction confirmed");
|
||||
progress_bar.finish_and_clear();
|
||||
return Ok(signature_str);
|
||||
return Ok(signature);
|
||||
}
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Waiting for confirmations",
|
||||
@@ -1035,7 +1109,14 @@ impl RpcClient {
|
||||
MAX_LOCKOUT_HISTORY + 1,
|
||||
));
|
||||
sleep(Duration::from_millis(500));
|
||||
confirmations = self.get_num_blocks_since_signature_confirmation(&signature)?;
|
||||
confirmations = self
|
||||
.get_num_blocks_since_signature_confirmation(&signature)
|
||||
.unwrap_or(confirmations);
|
||||
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
|
||||
return Err(
|
||||
RpcError::ForUser("transaction not finalized. This can happen when a transaction lands in an abandoned fork. Please retry.".to_string()).into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1189,7 +1270,7 @@ mod tests {
|
||||
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
|
||||
|
||||
let signature = rpc_client.send_transaction(&tx);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.parse().unwrap());
|
||||
|
||||
let rpc_client = RpcClient::new_mock("fails".to_string());
|
||||
|
||||
@@ -1212,18 +1293,17 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_get_signature_status() {
|
||||
let signature = Signature::default();
|
||||
|
||||
let rpc_client = RpcClient::new_mock("succeeds".to_string());
|
||||
let signature = "good_signature";
|
||||
let status = rpc_client.get_signature_status(&signature).unwrap();
|
||||
assert_eq!(status, Some(Ok(())));
|
||||
|
||||
let rpc_client = RpcClient::new_mock("sig_not_found".to_string());
|
||||
let signature = "sig_not_found";
|
||||
let status = rpc_client.get_signature_status(&signature).unwrap();
|
||||
assert_eq!(status, None);
|
||||
|
||||
let rpc_client = RpcClient::new_mock("account_in_use".to_string());
|
||||
let signature = "account_in_use";
|
||||
let status = rpc_client.get_signature_status(&signature).unwrap();
|
||||
assert_eq!(status, Some(Err(TransactionError::AccountInUse)));
|
||||
}
|
||||
|
@@ -3,7 +3,6 @@ use thiserror::Error;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub enum RpcRequest {
|
||||
ConfirmTransaction,
|
||||
DeregisterNode,
|
||||
ValidatorExit,
|
||||
GetAccountInfo,
|
||||
@@ -12,6 +11,8 @@ pub enum RpcRequest {
|
||||
GetClusterNodes,
|
||||
GetConfirmedBlock,
|
||||
GetConfirmedBlocks,
|
||||
GetConfirmedSignaturesForAddress,
|
||||
GetConfirmedTransaction,
|
||||
GetEpochInfo,
|
||||
GetEpochSchedule,
|
||||
GetGenesisHash,
|
||||
@@ -22,7 +23,7 @@ pub enum RpcRequest {
|
||||
GetRecentBlockhash,
|
||||
GetFeeCalculatorForBlockhash,
|
||||
GetFeeRateGovernor,
|
||||
GetSignatureStatus,
|
||||
GetSignatureStatuses,
|
||||
GetSlot,
|
||||
GetSlotLeader,
|
||||
GetStorageTurn,
|
||||
@@ -44,7 +45,6 @@ impl RpcRequest {
|
||||
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
|
||||
let jsonrpc = "2.0";
|
||||
let method = match self {
|
||||
RpcRequest::ConfirmTransaction => "confirmTransaction",
|
||||
RpcRequest::DeregisterNode => "deregisterNode",
|
||||
RpcRequest::ValidatorExit => "validatorExit",
|
||||
RpcRequest::GetAccountInfo => "getAccountInfo",
|
||||
@@ -53,6 +53,8 @@ impl RpcRequest {
|
||||
RpcRequest::GetClusterNodes => "getClusterNodes",
|
||||
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
|
||||
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
|
||||
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
|
||||
RpcRequest::GetEpochInfo => "getEpochInfo",
|
||||
RpcRequest::GetEpochSchedule => "getEpochSchedule",
|
||||
RpcRequest::GetGenesisHash => "getGenesisHash",
|
||||
@@ -63,7 +65,7 @@ impl RpcRequest {
|
||||
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
|
||||
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
|
||||
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
|
||||
RpcRequest::GetSignatureStatus => "getSignatureStatus",
|
||||
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
|
||||
RpcRequest::GetSlot => "getSlot",
|
||||
RpcRequest::GetSlotLeader => "getSlotLeader",
|
||||
RpcRequest::GetStorageTurn => "getStorageTurn",
|
||||
|
@@ -4,7 +4,7 @@ use solana_sdk::{
    clock::{Epoch, Slot},
    fee_calculator::{FeeCalculator, FeeRateGovernor},
    pubkey::Pubkey,
    transaction::Result,
    transaction::{Result, TransactionError},
};
use std::{collections::HashMap, net::SocketAddr, str::FromStr};

@@ -54,6 +54,12 @@ pub struct RpcKeyedAccount {
    pub account: RpcAccount,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureResult {
    pub err: Option<TransactionError>,
}

/// A duplicate representation of a Message for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
@@ -186,3 +192,19 @@ pub struct RpcStorageTurn {
    pub blockhash: String,
    pub slot: Slot,
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct RpcAccountBalance {
    pub address: String,
    pub lamports: u64,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSupply {
    pub total: u64,
    pub circulating: u64,
    pub non_circulating: u64,
    pub non_circulating_accounts: Vec<String>,
}
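The `#[serde(rename_all = "camelCase")]` attribute on these response types is what turns the snake_case Rust fields into camelCase JSON keys on the wire. A small self-contained sketch of that behavior (the struct below is a local copy for illustration, not the crate's type):

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct SupplyExample {
    total: u64,
    circulating: u64,
    non_circulating: u64,
    non_circulating_accounts: Vec<String>,
}

fn main() {
    let supply = SupplyExample {
        total: 1_000,
        circulating: 900,
        non_circulating: 100,
        non_circulating_accounts: vec!["ExampleAccount111111111111111111111111111111".to_string()],
    };
    // Serializes with keys total, circulating, nonCirculating, nonCirculatingAccounts.
    println!("{}", serde_json::to_string_pretty(&supply).unwrap());
}
```
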
@@ -471,7 +471,7 @@ impl SyncClient for ThinClient {
|
||||
) -> TransportResult<Option<transaction::Result<()>>> {
|
||||
let status = self
|
||||
.rpc_client()
|
||||
.get_signature_status(&signature.to_string())
|
||||
.get_signature_status(&signature)
|
||||
.map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
@@ -488,7 +488,7 @@ impl SyncClient for ThinClient {
|
||||
) -> TransportResult<Option<transaction::Result<()>>> {
|
||||
let status = self
|
||||
.rpc_client()
|
||||
.get_signature_status_with_commitment(&signature.to_string(), commitment_config)
|
||||
.get_signature_status_with_commitment(&signature, commitment_config)
|
||||
.map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
|
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.0.11"
|
||||
version = "1.0.22"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@@ -36,41 +36,41 @@ log = "0.4.8"
|
||||
nix = "0.17.0"
|
||||
num_cpus = "1.0.0"
|
||||
num-traits = "0.2"
|
||||
rand = "0.6.5"
|
||||
rand_chacha = "0.1.1"
|
||||
rand = "0.7.0"
|
||||
rand_chacha = "0.2.2"
|
||||
rayon = "1.2.0"
|
||||
regex = "1.3.4"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.46"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.0.11" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.11" }
|
||||
solana-client = { path = "../client", version = "1.0.11" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.0.11" }
|
||||
solana-faucet = { path = "../faucet", version = "1.0.11" }
|
||||
ed25519-dalek = "=1.0.0-pre.1"
|
||||
solana-ledger = { path = "../ledger", version = "1.0.11" }
|
||||
solana-logger = { path = "../logger", version = "1.0.11" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.11" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.11" }
|
||||
solana-measure = { path = "../measure", version = "1.0.11" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.11" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.0.11" }
|
||||
solana-perf = { path = "../perf", version = "1.0.11" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.11" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.11" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.0.11" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.11" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.11" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.0.11" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.0.11" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.0.22" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
|
||||
solana-client = { path = "../client", version = "1.0.22" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.0.22" }
|
||||
solana-faucet = { path = "../faucet", version = "1.0.22" }
|
||||
ed25519-dalek = "=1.0.0-pre.3"
|
||||
solana-ledger = { path = "../ledger", version = "1.0.22" }
|
||||
solana-logger = { path = "../logger", version = "1.0.22" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.22" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.22" }
|
||||
solana-measure = { path = "../measure", version = "1.0.22" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.0.22" }
|
||||
solana-perf = { path = "../perf", version = "1.0.22" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.22" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.22" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.0.22" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.0.22" }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.11" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.22" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
@@ -104,3 +104,6 @@ name = "cluster_info"
|
||||
[[bench]]
|
||||
name = "chacha"
|
||||
required-features = ["chacha"]
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -4,7 +4,7 @@
|
||||
// hash on gossip. Monitor gossip for messages from validators in the --trusted-validators
|
||||
// set and halt the node if a mismatch is detected.
|
||||
|
||||
use crate::cluster_info::ClusterInfo;
|
||||
use crate::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES};
|
||||
use solana_ledger::{
|
||||
snapshot_package::SnapshotPackage, snapshot_package::SnapshotPackageReceiver,
|
||||
snapshot_package::SnapshotPackageSender,
|
||||
@@ -94,6 +94,10 @@ impl AccountsHashVerifier {
|
||||
hashes.push((snapshot_package.root, snapshot_package.hash));
|
||||
}
|
||||
|
||||
while hashes.len() > MAX_SNAPSHOT_HASHES {
|
||||
hashes.remove(0);
|
||||
}
|
||||
|
||||
if halt_on_trusted_validator_accounts_hash_mismatch {
|
||||
let mut slot_to_hash = HashMap::new();
|
||||
for (slot, hash) in hashes.iter() {
|
||||
@@ -119,6 +123,7 @@ impl AccountsHashVerifier {
|
||||
slot_to_hash: &mut HashMap<Slot, Hash>,
|
||||
) -> bool {
|
||||
let mut verified_count = 0;
|
||||
let mut highest_slot = 0;
|
||||
if let Some(trusted_validators) = trusted_validators.as_ref() {
|
||||
for trusted_validator in trusted_validators {
|
||||
let cluster_info_r = cluster_info.read().unwrap();
|
||||
@@ -140,6 +145,7 @@ impl AccountsHashVerifier {
|
||||
verified_count += 1;
|
||||
}
|
||||
} else {
|
||||
highest_slot = std::cmp::max(*slot, highest_slot);
|
||||
slot_to_hash.insert(*slot, *hash);
|
||||
}
|
||||
}
|
||||
@@ -147,6 +153,10 @@ impl AccountsHashVerifier {
|
||||
}
|
||||
}
|
||||
inc_new_counter_info!("accounts_hash_verifier-hashes_verified", verified_count);
|
||||
datapoint_info!(
|
||||
"accounts_hash_verifier",
|
||||
("highest_slot_verified", highest_slot, i64),
|
||||
);
|
||||
false
|
||||
}
|
||||
|
||||
@@ -197,4 +207,57 @@ mod tests {
|
||||
&mut slot_to_hash,
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_hashes() {
|
||||
solana_logger::setup();
|
||||
use std::path::PathBuf;
|
||||
use tempfile::TempDir;
|
||||
let keypair = Keypair::new();
|
||||
|
||||
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
|
||||
let trusted_validators = HashSet::new();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let mut hashes = vec![];
|
||||
for i in 0..MAX_SNAPSHOT_HASHES + 1 {
|
||||
let snapshot_links = TempDir::new().unwrap();
|
||||
let snapshot_package = SnapshotPackage {
|
||||
hash: hash(&[i as u8]),
|
||||
root: 100 + i as u64,
|
||||
slot_deltas: vec![],
|
||||
snapshot_links,
|
||||
tar_output_file: PathBuf::from("."),
|
||||
storages: vec![],
|
||||
};
|
||||
|
||||
AccountsHashVerifier::process_snapshot(
|
||||
snapshot_package,
|
||||
&cluster_info,
|
||||
&Some(trusted_validators.clone()),
|
||||
false,
|
||||
&None,
|
||||
&mut hashes,
|
||||
&exit,
|
||||
0,
|
||||
);
|
||||
}
|
||||
let cluster_info_r = cluster_info.read().unwrap();
|
||||
let cluster_hashes = cluster_info_r
|
||||
.get_accounts_hash_for_node(&keypair.pubkey())
|
||||
.unwrap();
|
||||
info!("{:?}", cluster_hashes);
|
||||
assert_eq!(hashes.len(), MAX_SNAPSHOT_HASHES);
|
||||
assert_eq!(cluster_hashes.len(), MAX_SNAPSHOT_HASHES);
|
||||
assert_eq!(cluster_hashes[0], (101, hash(&[1])));
|
||||
assert_eq!(
|
||||
cluster_hashes[MAX_SNAPSHOT_HASHES - 1],
|
||||
(
|
||||
100 + MAX_SNAPSHOT_HASHES as u64,
|
||||
hash(&[MAX_SNAPSHOT_HASHES as u8])
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -1980,13 +1980,23 @@ mod tests {
|
||||
{
|
||||
if let EncodedTransaction::Json(transaction) = transaction {
|
||||
if transaction.signatures[0] == success_signature.to_string() {
|
||||
assert_eq!(meta.unwrap().status, Ok(()));
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(meta.err, None);
|
||||
assert_eq!(meta.status, Ok(()));
|
||||
} else if transaction.signatures[0] == ix_error_signature.to_string() {
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(
|
||||
meta.unwrap().status,
|
||||
meta.err,
|
||||
Some(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::Custom(1)
|
||||
))
|
||||
);
|
||||
assert_eq!(
|
||||
meta.status,
|
||||
Err(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::CustomError(1)
|
||||
InstructionError::Custom(1)
|
||||
))
|
||||
);
|
||||
} else {
|
||||
|
@@ -12,8 +12,6 @@
|
||||
//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
|
||||
//!
|
||||
//! Bank needs to provide an interface for us to query the stake weight
|
||||
use crate::crds_value::CompressionType::*;
|
||||
use crate::crds_value::EpochIncompleteSlots;
|
||||
use crate::packet::limited_deserialize;
|
||||
use crate::streamer::{PacketReceiver, PacketSender};
|
||||
use crate::{
|
||||
@@ -21,20 +19,28 @@ use crate::{
|
||||
crds_gossip::CrdsGossip,
|
||||
crds_gossip_error::CrdsGossipError,
|
||||
crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
|
||||
crds_value::{self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, SnapshotHash, Vote},
|
||||
crds_value::{
|
||||
self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, SnapshotHash, Vote, MAX_WALLCLOCK,
|
||||
},
|
||||
packet::{Packet, PACKET_DATA_SIZE},
|
||||
result::{Error, Result},
|
||||
sendmmsg::{multicast, send_mmsg},
|
||||
weighted_shuffle::{weighted_best, weighted_shuffle},
|
||||
};
|
||||
|
||||
use rand::distributions::{Distribution, WeightedIndex};
|
||||
use rand::SeedableRng;
|
||||
use rand_chacha::ChaChaRng;
|
||||
use solana_sdk::sanitize::{Sanitize, SanitizeError};
|
||||
|
||||
use bincode::{serialize, serialized_size};
|
||||
use compression::prelude::*;
|
||||
use core::cmp;
|
||||
use itertools::Itertools;
|
||||
use rayon::iter::IntoParallelIterator;
|
||||
use rayon::iter::ParallelIterator;
|
||||
use rayon::ThreadPool;
|
||||
use solana_ledger::{bank_forks::BankForks, staking_utils};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_measure::thread_mem_usage;
|
||||
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
|
||||
use solana_net_utils::{
|
||||
@@ -81,9 +87,6 @@ const MAX_PROTOCOL_HEADER_SIZE: u64 = 214;
|
||||
/// 128MB/PACKET_DATA_SIZE
|
||||
const MAX_GOSSIP_TRAFFIC: usize = 128_000_000 / PACKET_DATA_SIZE;
|
||||
|
||||
const NUM_BITS_PER_BYTE: u64 = 8;
|
||||
const MIN_SIZE_TO_COMPRESS_GZIP: u64 = 64;
|
||||
|
||||
/// Keep the number of snapshot hashes a node publishes under MAX_PROTOCOL_PAYLOAD_SIZE
|
||||
pub const MAX_SNAPSHOT_HASHES: usize = 16;
|
||||
|
||||
@@ -95,6 +98,12 @@ pub enum ClusterInfoError {
    BadGossipAddress,
}
#[derive(Clone)]
pub struct DataBudget {
    bytes: usize,           // amount of bytes we have in the budget to send
    last_timestamp_ms: u64, // Last time that we upped the bytes count,
    // used to detect when to up the bytes budget again
}
#[derive(Clone)]
pub struct ClusterInfo {
    /// The network
    pub gossip: CrdsGossip,
@@ -103,6 +112,8 @@ pub struct ClusterInfo {
    /// The network entrypoint
    entrypoint: Option<ContactInfo>,
    last_datapoint_submit: Instant,

    outbound_budget: DataBudget,
}
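Per its field comments, `DataBudget` throttles outbound gossip: the byte allowance is topped up based on how long it has been since `last_timestamp_ms`, and sends draw it down. A rough token-bucket sketch of that idea (the refill rate, cap, and method names below are assumptions for illustration, not the PR's actual logic):

```rust
struct DataBudgetExample {
    bytes: usize,           // bytes still available to send
    last_timestamp_ms: u64, // last time the budget was topped up
}

impl DataBudgetExample {
    /// Top up the budget in proportion to elapsed time, capped at a maximum.
    fn update(&mut self, now_ms: u64, bytes_per_ms: usize, max_bytes: usize) {
        let elapsed_ms = now_ms.saturating_sub(self.last_timestamp_ms) as usize;
        if elapsed_ms > 0 {
            self.bytes = (self.bytes + elapsed_ms * bytes_per_ms).min(max_bytes);
            self.last_timestamp_ms = now_ms;
        }
    }

    /// Try to spend `amount` bytes; returns false when the budget is exhausted.
    fn take(&mut self, amount: usize) -> bool {
        if self.bytes >= amount {
            self.bytes -= amount;
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut budget = DataBudgetExample { bytes: 0, last_timestamp_ms: 0 };
    budget.update(10, 1_000, 12 * 1024 * 1024); // 10 ms elapsed at ~1 KB/ms
    assert!(budget.take(4_096));
    assert!(!budget.take(usize::MAX));
}
```
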
#[derive(Default, Clone)]
@@ -143,6 +154,15 @@ pub struct PruneData {
    pub wallclock: u64,
}

impl Sanitize for PruneData {
    fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
        if self.wallclock >= MAX_WALLCLOCK {
            return Err(SanitizeError::ValueOutOfBounds);
        }
        Ok(())
    }
}
|
||||
impl Signable for PruneData {
|
||||
fn pubkey(&self) -> Pubkey {
|
||||
self.pubkey
|
||||
@@ -188,6 +208,14 @@ pub fn make_accounts_hashes_message(
|
||||
Some(CrdsValue::new_signed(message, keypair))
|
||||
}
|
||||
|
||||
fn distance(a: u64, b: u64) -> u64 {
    if a > b {
        a - b
    } else {
        b - a
    }
}
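`distance` replaces the raw `timestamp() - p.wallclock` subtraction in the live-peer check further down. Using the symmetric difference keeps the comparison meaningful when a peer's advertised wallclock is ahead of the local clock, where a plain `u64` subtraction would underflow (a panic in debug builds, a huge wrapped value in release). For example (the timeout constant here is illustrative, not the crate's):

```rust
fn distance(a: u64, b: u64) -> u64 {
    if a > b {
        a - b
    } else {
        b - a
    }
}

fn main() {
    let local_now: u64 = 1_000_000;
    let peer_wallclock: u64 = 1_000_250; // peer clock slightly ahead of ours
    const TIMEOUT_MS: u64 = 15_000; // illustrative timeout
    // `local_now - peer_wallclock` would underflow; the symmetric distance does not.
    assert!(distance(local_now, peer_wallclock) <= TIMEOUT_MS);
}
```
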
// TODO These messages should go through the gpu pipeline for spam filtering
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
@@ -199,6 +227,31 @@ enum Protocol {
|
||||
PruneMessage(Pubkey, PruneData),
|
||||
}
|
||||
|
||||
impl Sanitize for Protocol {
    fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
        match self {
            Protocol::PullRequest(filter, val) => {
                filter.sanitize()?;
                val.sanitize()
            }
            Protocol::PullResponse(_, val) => val.sanitize(),
            Protocol::PushMessage(_, val) => val.sanitize(),
            Protocol::PruneMessage(_, val) => val.sanitize(),
        }
    }
}

// Rating for pull requests
// A response table is generated as a
// 2-d table arranged by target nodes and a
// list of responses for that node,
// to/responses_index is a location in that table.
struct ResponseScore {
    to: usize,              // to, index of who the response is to
    responses_index: usize, // index into the list of responses for a given to
    score: u64,             // Relative score of the response
}
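The comment above describes a 2-D response table flattened into `(to, responses_index, score)` entries; presumably higher-scored responses are favored when the outbound budget cannot cover everything. A simplified, std-only sketch of one way to do that selection (greedy by score rather than weighted sampling, purely illustrative and not the PR's algorithm):

```rust
#[derive(Debug)]
struct ResponseScoreExample {
    to: usize,              // index of the requesting node
    responses_index: usize, // index into that node's response list
    score: u64,             // relative priority of sending this response
}

/// Pick responses in descending score order until the byte budget is spent.
fn select_responses(
    mut scored: Vec<ResponseScoreExample>,
    sizes: &[Vec<usize>], // sizes[to][responses_index] = serialized size in bytes
    mut budget_bytes: usize,
) -> Vec<ResponseScoreExample> {
    scored.sort_by(|a, b| b.score.cmp(&a.score));
    let mut picked = Vec::new();
    for entry in scored {
        let size = sizes[entry.to][entry.responses_index];
        if size <= budget_bytes {
            budget_bytes -= size;
            picked.push(entry);
        }
    }
    picked
}

fn main() {
    let sizes = vec![vec![512, 2048], vec![1024]];
    let scored = vec![
        ResponseScoreExample { to: 0, responses_index: 0, score: 10 },
        ResponseScoreExample { to: 0, responses_index: 1, score: 50 },
        ResponseScoreExample { to: 1, responses_index: 0, score: 30 },
    ];
    println!("{:?}", select_responses(scored, &sizes, 3072));
}
```
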
impl ClusterInfo {
|
||||
/// Without a valid keypair gossip will not function. Only useful for tests.
|
||||
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
|
||||
@@ -211,9 +264,13 @@ impl ClusterInfo {
|
||||
keypair,
|
||||
entrypoint: None,
|
||||
last_datapoint_submit: Instant::now(),
|
||||
outbound_budget: DataBudget {
|
||||
bytes: 0,
|
||||
last_timestamp_ms: 0,
|
||||
},
|
||||
};
|
||||
let id = contact_info.id;
|
||||
me.gossip.set_self(&id);
|
||||
me.gossip.set_self(&contact_info.id);
|
||||
me.gossip.set_shred_version(contact_info.shred_version);
|
||||
me.insert_self(contact_info);
|
||||
me.push_self(&HashMap::new());
|
||||
me
|
||||
@@ -266,51 +323,59 @@ impl ClusterInfo {
|
||||
let now = timestamp();
|
||||
let mut spy_nodes = 0;
|
||||
let mut archivers = 0;
|
||||
let mut different_shred_nodes = 0;
|
||||
let my_pubkey = self.my_data().id;
|
||||
let my_shred_version = self.my_data().shred_version;
|
||||
let nodes: Vec<_> = self
|
||||
.all_peers()
|
||||
.into_iter()
|
||||
.map(|(node, last_updated)| {
|
||||
.filter_map(|(node, last_updated)| {
|
||||
if Self::is_spy_node(&node) {
|
||||
spy_nodes += 1;
|
||||
} else if Self::is_archiver(&node) {
|
||||
archivers += 1;
|
||||
}
|
||||
fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String {
|
||||
if ContactInfo::is_valid_address(addr) {
|
||||
if &addr.ip() == default_ip {
|
||||
addr.port().to_string()
|
||||
} else {
|
||||
addr.to_string()
|
||||
}
|
||||
} else {
|
||||
"none".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
let ip_addr = node.gossip.ip();
|
||||
format!(
|
||||
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
|
||||
if ContactInfo::is_valid_address(&node.gossip) {
|
||||
ip_addr.to_string()
|
||||
} else {
|
||||
"none".to_string()
|
||||
},
|
||||
if node.id == my_pubkey { "me" } else { "" }.to_string(),
|
||||
now.saturating_sub(last_updated),
|
||||
node.id.to_string(),
|
||||
addr_to_string(&ip_addr, &node.gossip),
|
||||
addr_to_string(&ip_addr, &node.tpu),
|
||||
addr_to_string(&ip_addr, &node.tpu_forwards),
|
||||
addr_to_string(&ip_addr, &node.tvu),
|
||||
addr_to_string(&ip_addr, &node.tvu_forwards),
|
||||
addr_to_string(&ip_addr, &node.repair),
|
||||
addr_to_string(&ip_addr, &node.serve_repair),
|
||||
addr_to_string(&ip_addr, &node.storage_addr),
|
||||
addr_to_string(&ip_addr, &node.rpc),
|
||||
addr_to_string(&ip_addr, &node.rpc_pubsub),
|
||||
node.shred_version,
|
||||
)
|
||||
if my_shred_version != 0 && (node.shred_version != 0 && node.shred_version != my_shred_version) {
|
||||
different_shred_nodes += 1;
|
||||
None
|
||||
} else {
|
||||
fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String {
|
||||
if ContactInfo::is_valid_address(addr) {
|
||||
if &addr.ip() == default_ip {
|
||||
addr.port().to_string()
|
||||
} else {
|
||||
addr.to_string()
|
||||
}
|
||||
} else {
|
||||
"none".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
let ip_addr = node.gossip.ip();
|
||||
Some(format!(
|
||||
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
|
||||
if ContactInfo::is_valid_address(&node.gossip) {
|
||||
ip_addr.to_string()
|
||||
} else {
|
||||
"none".to_string()
|
||||
},
|
||||
if node.id == my_pubkey { "me" } else { "" }.to_string(),
|
||||
now.saturating_sub(last_updated),
|
||||
node.id.to_string(),
|
||||
addr_to_string(&ip_addr, &node.gossip),
|
||||
addr_to_string(&ip_addr, &node.tpu),
|
||||
addr_to_string(&ip_addr, &node.tpu_forwards),
|
||||
addr_to_string(&ip_addr, &node.tvu),
|
||||
addr_to_string(&ip_addr, &node.tvu_forwards),
|
||||
addr_to_string(&ip_addr, &node.repair),
|
||||
addr_to_string(&ip_addr, &node.serve_repair),
|
||||
addr_to_string(&ip_addr, &node.storage_addr),
|
||||
addr_to_string(&ip_addr, &node.rpc),
|
||||
addr_to_string(&ip_addr, &node.rpc_pubsub),
|
||||
node.shred_version,
|
||||
))
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -320,7 +385,7 @@ impl ClusterInfo {
|
||||
------------------+-------+----------------------------------------------+\
|
||||
------+------+------+------+------+------+------+------+------+------+--------\n\
|
||||
{}\
|
||||
Nodes: {}{}{}",
|
||||
Nodes: {}{}{}{}",
|
||||
nodes.join(""),
|
||||
nodes.len() - spy_nodes - archivers,
|
||||
if archivers > 0 {
|
||||
@@ -332,119 +397,29 @@ impl ClusterInfo {
|
||||
format!("\nSpies: {}", spy_nodes)
|
||||
} else {
|
||||
"".to_string()
|
||||
},
|
||||
if spy_nodes > 0 {
|
||||
format!(
|
||||
"\nNodes with different shred version: {}",
|
||||
different_shred_nodes
|
||||
)
|
||||
} else {
|
||||
"".to_string()
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
pub fn compress_incomplete_slots(incomplete_slots: &BTreeSet<Slot>) -> EpochIncompleteSlots {
|
||||
if !incomplete_slots.is_empty() {
|
||||
let first_slot = incomplete_slots
|
||||
.iter()
|
||||
.next()
|
||||
.expect("expected to find at least one slot");
|
||||
let last_slot = incomplete_slots
|
||||
.iter()
|
||||
.next_back()
|
||||
.expect("expected to find last slot");
|
||||
let num_uncompressed_bits = last_slot.saturating_sub(*first_slot) + 1;
|
||||
let num_uncompressed_bytes = if num_uncompressed_bits % NUM_BITS_PER_BYTE > 0 {
|
||||
1
|
||||
} else {
|
||||
0
|
||||
} + num_uncompressed_bits / NUM_BITS_PER_BYTE;
|
||||
let mut uncompressed = vec![0u8; num_uncompressed_bytes as usize];
|
||||
incomplete_slots.iter().for_each(|slot| {
|
||||
let offset_from_first_slot = slot.saturating_sub(*first_slot);
|
||||
let index = offset_from_first_slot / NUM_BITS_PER_BYTE;
|
||||
let bit_index = offset_from_first_slot % NUM_BITS_PER_BYTE;
|
||||
uncompressed[index as usize] |= 1 << bit_index;
|
||||
});
|
||||
if num_uncompressed_bytes >= MIN_SIZE_TO_COMPRESS_GZIP {
|
||||
if let Ok(compressed) = uncompressed
|
||||
.iter()
|
||||
.cloned()
|
||||
.encode(&mut GZipEncoder::new(), Action::Finish)
|
||||
.collect::<std::result::Result<Vec<u8>, _>>()
|
||||
{
|
||||
return EpochIncompleteSlots {
|
||||
first: *first_slot,
|
||||
compression: GZip,
|
||||
compressed_list: compressed,
|
||||
};
|
||||
}
|
||||
} else {
|
||||
return EpochIncompleteSlots {
|
||||
first: *first_slot,
|
||||
compression: Uncompressed,
|
||||
compressed_list: uncompressed,
|
||||
};
|
||||
}
|
||||
}
|
||||
EpochIncompleteSlots::default()
|
||||
}
|
||||
|
||||
fn bitmap_to_slot_list(first: Slot, bitmap: &[u8]) -> BTreeSet<Slot> {
|
||||
let mut old_incomplete_slots: BTreeSet<Slot> = BTreeSet::new();
|
||||
bitmap.iter().enumerate().for_each(|(i, val)| {
|
||||
if *val != 0 {
|
||||
(0..8).for_each(|bit_index| {
|
||||
if (1 << bit_index & *val) != 0 {
|
||||
let slot = first + i as u64 * NUM_BITS_PER_BYTE + bit_index as u64;
|
||||
old_incomplete_slots.insert(slot);
|
||||
}
|
||||
})
|
||||
}
|
||||
});
|
||||
old_incomplete_slots
|
||||
}
|
||||
|
||||
pub fn decompress_incomplete_slots(slots: &EpochIncompleteSlots) -> BTreeSet<Slot> {
|
||||
match slots.compression {
|
||||
Uncompressed => Self::bitmap_to_slot_list(slots.first, &slots.compressed_list),
|
||||
GZip => {
|
||||
if let Ok(decompressed) = slots
|
||||
.compressed_list
|
||||
.iter()
|
||||
.cloned()
|
||||
.decode(&mut GZipDecoder::new())
|
||||
.collect::<std::result::Result<Vec<u8>, _>>()
|
||||
{
|
||||
Self::bitmap_to_slot_list(slots.first, &decompressed)
|
||||
} else {
|
||||
BTreeSet::new()
|
||||
}
|
||||
}
|
||||
BZip2 => {
|
||||
if let Ok(decompressed) = slots
|
||||
.compressed_list
|
||||
.iter()
|
||||
.cloned()
|
||||
.decode(&mut BZip2Decoder::new())
|
||||
.collect::<std::result::Result<Vec<u8>, _>>()
|
||||
{
|
||||
Self::bitmap_to_slot_list(slots.first, &decompressed)
|
||||
} else {
|
||||
BTreeSet::new()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
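
To make the slot-compression scheme above concrete, here is a minimal, self-contained sketch of the bitmap packing that compress_incomplete_slots and bitmap_to_slot_list perform, with the gzip step left out. The helper names are illustrative and not part of the codebase.

use std::collections::BTreeSet;

const NUM_BITS_PER_BYTE: u64 = 8;

// Pack a slot set into (first_slot, bitmap): one bit per slot offset from the first slot.
fn slots_to_bitmap(slots: &BTreeSet<u64>) -> Option<(u64, Vec<u8>)> {
    let first = *slots.iter().next()?;
    let last = *slots.iter().next_back()?;
    let num_bits = last - first + 1;
    let num_bytes = (num_bits + NUM_BITS_PER_BYTE - 1) / NUM_BITS_PER_BYTE;
    let mut bitmap = vec![0u8; num_bytes as usize];
    for slot in slots {
        let offset = slot - first;
        bitmap[(offset / NUM_BITS_PER_BYTE) as usize] |= 1u8 << (offset % NUM_BITS_PER_BYTE) as u32;
    }
    Some((first, bitmap))
}

// Inverse of the packing: expand (first_slot, bitmap) back into the slot set.
fn bitmap_to_slots(first: u64, bitmap: &[u8]) -> BTreeSet<u64> {
    let mut slots = BTreeSet::new();
    for (i, byte) in bitmap.iter().enumerate() {
        for bit in 0..NUM_BITS_PER_BYTE {
            if (*byte >> bit) & 1 == 1 {
                slots.insert(first + i as u64 * NUM_BITS_PER_BYTE + bit);
            }
        }
    }
    slots
}

fn main() {
    // Same slot values as the test_compress_incomplete_slots case further down.
    let slots: BTreeSet<u64> = [80, 100, 104, 10_000].iter().cloned().collect();
    let (first, bitmap) = slots_to_bitmap(&slots).unwrap();
    assert_eq!(first, 80);
    assert_eq!(bitmap_to_slots(first, &bitmap), slots);
}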
|
||||
|
||||
pub fn push_epoch_slots(
|
||||
&mut self,
|
||||
id: Pubkey,
|
||||
root: Slot,
|
||||
_root: Slot,
|
||||
min: Slot,
|
||||
slots: BTreeSet<Slot>,
|
||||
incomplete_slots: &BTreeSet<Slot>,
|
||||
_slots: BTreeSet<Slot>,
|
||||
_incomplete_slots: &BTreeSet<Slot>,
|
||||
) {
|
||||
let compressed = Self::compress_incomplete_slots(incomplete_slots);
|
||||
let now = timestamp();
|
||||
let entry = CrdsValue::new_signed(
|
||||
CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots::new(id, root, min, slots, vec![compressed], now),
|
||||
),
|
||||
CrdsData::EpochSlots(0, EpochSlots::new(id, min, now)),
|
||||
&self.keypair,
|
||||
);
|
||||
self.gossip
|
||||
@@ -1003,7 +978,7 @@ impl ClusterInfo {
|
||||
let mut num_live_peers = 1i64;
|
||||
peers.iter().for_each(|p| {
|
||||
// A peer is considered live if they generated their contact info recently
|
||||
if timestamp() - p.wallclock <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
|
||||
if distance(timestamp(), p.wallclock) <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
|
||||
num_live_peers += 1;
|
||||
}
|
||||
});
|
||||
@@ -1282,6 +1257,10 @@ impl ClusterInfo {
|
||||
entrypoint.shred_version, entrypoint.id
|
||||
);
|
||||
self_info.shred_version = entrypoint.shred_version;
|
||||
obj.write()
|
||||
.unwrap()
|
||||
.gossip
|
||||
.set_shred_version(entrypoint.shred_version);
|
||||
obj.write().unwrap().insert_self(self_info);
|
||||
adopt_shred_version = false;
|
||||
}
|
||||
@@ -1321,6 +1300,7 @@ impl ClusterInfo {
|
||||
let from_addr = packet.meta.addr();
|
||||
limited_deserialize(&packet.data[..packet.meta.size])
|
||||
.into_iter()
|
||||
.filter(|r: &Protocol| r.sanitize().is_ok())
|
||||
.for_each(|request| match request {
|
||||
Protocol::PullRequest(filter, caller) => {
|
||||
let start = allocated.get();
|
||||
@@ -1419,20 +1399,43 @@ impl ClusterInfo {
|
||||
})
|
||||
});
|
||||
// process the collected pulls together
|
||||
let rsp = Self::handle_pull_requests(me, recycler, gossip_pull_data);
|
||||
let rsp = Self::handle_pull_requests(me, recycler, gossip_pull_data, stakes);
|
||||
if let Some(rsp) = rsp {
|
||||
let _ignore_disconnect = response_sender.send(rsp);
|
||||
}
|
||||
}
|
||||
|
||||
    // A pull request carries a bloom filter of the entries the requesting node already has;
    // this node responds with the values it detects the requester is missing.
|
||||
fn handle_pull_requests(
|
||||
me: &Arc<RwLock<Self>>,
|
||||
recycler: &PacketsRecycler,
|
||||
requests: Vec<PullData>,
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
) -> Option<Packets> {
|
||||
// split the requests into addrs and filters
|
||||
let mut caller_and_filters = vec![];
|
||||
let mut addrs = vec![];
|
||||
let mut time = Measure::start("handle_pull_requests");
|
||||
{
|
||||
let mut cluster_info = me.write().unwrap();
|
||||
|
||||
let now = timestamp();
|
||||
const INTERVAL_MS: u64 = 100;
|
||||
// allow 50kBps per staked validator, epoch slots + votes ~= 1.5kB/slot ~= 4kB/s
|
||||
const BYTES_PER_INTERVAL: usize = 5000;
|
||||
const MAX_BUDGET_MULTIPLE: usize = 5; // allow budget build-up to 5x the interval default
|
||||
|
||||
if now - cluster_info.outbound_budget.last_timestamp_ms > INTERVAL_MS {
|
||||
let len = std::cmp::max(stakes.len(), 2);
|
||||
cluster_info.outbound_budget.bytes += len * BYTES_PER_INTERVAL;
|
||||
cluster_info.outbound_budget.bytes = std::cmp::min(
|
||||
cluster_info.outbound_budget.bytes,
|
||||
MAX_BUDGET_MULTIPLE * len * BYTES_PER_INTERVAL,
|
||||
);
|
||||
cluster_info.outbound_budget.last_timestamp_ms = now;
|
||||
}
|
||||
}
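
The block above implements a refill-and-cap byte budget for outbound pull responses, scaled by the number of staked nodes. A stripped-down sketch of the same pattern, reusing the constants from the hunk above; the struct and method names are illustrative, not the actual types in this crate.

struct OutboundBudget {
    bytes: usize,           // bytes currently available to spend
    last_timestamp_ms: u64, // last time the budget was refilled
}

impl OutboundBudget {
    // Refill once per interval, scaled by the number of staked nodes, and cap the
    // carry-over at a small multiple of one interval's allowance.
    fn refill(&mut self, now_ms: u64, num_staked_nodes: usize) {
        const INTERVAL_MS: u64 = 100;
        const BYTES_PER_INTERVAL: usize = 5000;
        const MAX_BUDGET_MULTIPLE: usize = 5;
        if now_ms.saturating_sub(self.last_timestamp_ms) > INTERVAL_MS {
            let n = num_staked_nodes.max(2);
            self.bytes =
                (self.bytes + n * BYTES_PER_INTERVAL).min(MAX_BUDGET_MULTIPLE * n * BYTES_PER_INTERVAL);
            self.last_timestamp_ms = now_ms;
        }
    }

    // Charge the budget after sending, never going below zero.
    fn charge(&mut self, sent_bytes: usize) {
        self.bytes = self.bytes.saturating_sub(sent_bytes);
    }
}

fn main() {
    let mut budget = OutboundBudget { bytes: 0, last_timestamp_ms: 0 };
    budget.refill(200, 10);
    assert_eq!(budget.bytes, 50_000); // 10 staked nodes * 5000 bytes per interval
    budget.charge(12_000);
    assert_eq!(budget.bytes, 38_000);
}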
|
||||
for pull_data in requests {
|
||||
caller_and_filters.push((pull_data.caller, pull_data.filter));
|
||||
addrs.push(pull_data.from_addr);
|
||||
@@ -1444,30 +1447,101 @@ impl ClusterInfo {
|
||||
.unwrap()
|
||||
.gossip
|
||||
.process_pull_requests(caller_and_filters, now);
|
||||
let mut packets = Packets::new_with_recycler(recycler.clone(), 64, "handle_pull_requests");
|
||||
pull_responses
|
||||
|
||||
            // Drop responses that are empty or whose destination is unknown (unspecified IP or port 0)
|
||||
let pull_responses: Vec<_> = pull_responses
|
||||
.into_iter()
|
||||
.zip(addrs.into_iter())
|
||||
.for_each(|(response, from_addr)| {
|
||||
if !from_addr.ip().is_unspecified() && from_addr.port() != 0 {
|
||||
let len = response.len();
|
||||
trace!("get updates since response {}", len);
|
||||
inc_new_counter_debug!("cluster_info-pull_request-rsp", len);
|
||||
Self::split_gossip_messages(response)
|
||||
.into_iter()
|
||||
.for_each(|payload| {
|
||||
let protocol = Protocol::PullResponse(self_id, payload);
|
||||
// The remote node may not know its public IP:PORT. Instead of responding to the caller's
|
||||
// gossip addr, respond to the origin addr. The last origin addr is picked from the list of
|
||||
// addrs.
|
||||
packets
|
||||
.packets
|
||||
.push(Packet::from_data(&from_addr, protocol))
|
||||
})
|
||||
.filter_map(|(responses, from_addr)| {
|
||||
if !from_addr.ip().is_unspecified()
|
||||
&& from_addr.port() != 0
|
||||
&& !responses.is_empty()
|
||||
{
|
||||
Some((responses, from_addr))
|
||||
} else {
|
||||
trace!("Dropping Gossip pull response, as destination is unknown");
|
||||
None
|
||||
}
|
||||
});
|
||||
})
|
||||
.collect();
|
||||
|
||||
if pull_responses.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut stats: Vec<_> = pull_responses
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, (responses, _from_addr))| {
|
||||
let score: u64 = if stakes.get(&responses[0].pubkey()).is_some() {
|
||||
2
|
||||
} else {
|
||||
1
|
||||
};
|
||||
responses
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(j, _response)| ResponseScore {
|
||||
to: i,
|
||||
responses_index: j,
|
||||
score,
|
||||
})
|
||||
.collect::<Vec<ResponseScore>>()
|
||||
})
|
||||
.flatten()
|
||||
.collect();
|
||||
|
||||
stats.sort_by(|a, b| a.score.cmp(&b.score));
|
||||
let weights: Vec<_> = stats.iter().map(|stat| stat.score).collect();
|
||||
|
||||
let seed = [48u8; 32];
|
||||
let rng = &mut ChaChaRng::from_seed(seed);
|
||||
let weighted_index = WeightedIndex::new(weights).unwrap();
|
||||
|
||||
let mut packets = Packets::new_with_recycler(recycler.clone(), 64, "handle_pull_requests");
|
||||
let mut total_bytes = 0;
|
||||
let outbound_budget = me.read().unwrap().outbound_budget.bytes;
|
||||
let mut sent = HashSet::new();
|
||||
while sent.len() < stats.len() {
|
||||
let index = weighted_index.sample(rng);
|
||||
if sent.contains(&index) {
|
||||
continue;
|
||||
}
|
||||
sent.insert(index);
|
||||
let stat = &stats[index];
|
||||
let from_addr = pull_responses[stat.to].1;
|
||||
let response = pull_responses[stat.to].0[stat.responses_index].clone();
|
||||
let protocol = Protocol::PullResponse(self_id, vec![response]);
|
||||
packets
|
||||
.packets
|
||||
.push(Packet::from_data(&from_addr, protocol));
|
||||
let len = packets.packets.len();
|
||||
total_bytes += packets.packets[len - 1].meta.size;
|
||||
|
||||
if total_bytes > outbound_budget {
|
||||
inc_new_counter_info!("gossip_pull_request-no_budget", 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
{
|
||||
let mut cluster_info = me.write().unwrap();
|
||||
cluster_info.outbound_budget.bytes = cluster_info
|
||||
.outbound_budget
|
||||
.bytes
|
||||
.saturating_sub(total_bytes);
|
||||
}
|
||||
time.stop();
|
||||
inc_new_counter_info!("gossip_pull_request-sent_requests", sent.len());
|
||||
inc_new_counter_info!(
|
||||
"gossip_pull_request-dropped_requests",
|
||||
stats.len() - sent.len()
|
||||
);
|
||||
debug!(
|
||||
"handle_pull_requests: {} sent: {} total: {} total_bytes: {}",
|
||||
time,
|
||||
sent.len(),
|
||||
stats.len(),
|
||||
total_bytes
|
||||
);
|
||||
if packets.is_empty() {
|
||||
return None;
|
||||
}
|
||||
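
Putting the scoring, weighted sampling, and budget check together: the loop above serves responses in stake-weighted random order (responses headed to staked requesters get twice the weight) and stops once the byte budget runs out. Below is a rough std-only sketch of that draining order, with a toy PRNG standing in for the seeded ChaChaRng and WeightedIndex used above; all names and numbers here are illustrative.

// xorshift64: a toy deterministic PRNG, only for this sketch.
fn xorshift64(state: &mut u64) -> u64 {
    *state ^= *state << 13;
    *state ^= *state >> 7;
    *state ^= *state << 17;
    *state
}

// Pick one index with probability proportional to its (non-zero) weight.
fn weighted_pick(weights: &[u64], state: &mut u64) -> usize {
    let total: u64 = weights.iter().sum();
    let mut target = xorshift64(state) % total;
    for (i, w) in weights.iter().enumerate() {
        if target < *w {
            return i;
        }
        target -= *w;
    }
    unreachable!("total weight is non-zero")
}

fn main() {
    // Weight 2 for responses headed to staked nodes, 1 otherwise.
    let mut weights = vec![2u64, 1, 2, 1, 1];
    let sizes = [1200usize, 800, 1500, 600, 900]; // serialized packet sizes
    let mut budget = 3000usize;
    let mut rng_state = 48u64;
    let mut sent = Vec::new();
    while weights.iter().any(|w| *w > 0) {
        let i = weighted_pick(&weights, &mut rng_state);
        if sizes[i] > budget {
            break; // out of budget, the remaining responses are dropped
        }
        budget -= sizes[i];
        weights[i] = 0; // sample without replacement
        sent.push(i);
    }
    println!("sent responses {:?}, {} bytes of budget left", sent, budget);
}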
@@ -1577,7 +1651,10 @@ impl ClusterInfo {
|
||||
requests.push(more_reqs)
|
||||
}
|
||||
if num_requests >= MAX_GOSSIP_TRAFFIC {
|
||||
warn!("Too much gossip traffic, ignoring some messages");
|
||||
warn!(
|
||||
"Too much gossip traffic, ignoring some messages (requests={}, max requests={})",
|
||||
num_requests, MAX_GOSSIP_TRAFFIC
|
||||
);
|
||||
}
|
||||
let epoch_ms;
|
||||
let stakes: HashMap<_, _> = match bank_forks {
|
||||
@@ -1950,7 +2027,7 @@ impl Node {
|
||||
|
||||
fn report_time_spent(label: &str, time: &Duration, extra: &str) {
|
||||
let count = duration_as_ms(time);
|
||||
if count > 5 {
|
||||
if count > 100 {
|
||||
info!("{} took: {} ms {}", label, count, extra);
|
||||
}
|
||||
}
|
||||
@@ -2369,14 +2446,7 @@ mod tests {
|
||||
}
|
||||
let value = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots {
|
||||
from: Pubkey::default(),
|
||||
root: 0,
|
||||
lowest: 0,
|
||||
slots: btree_slots,
|
||||
stash: vec![],
|
||||
wallclock: 0,
|
||||
},
|
||||
EpochSlots::new(Pubkey::default(), 0, 0),
|
||||
));
|
||||
test_split_messages(value);
|
||||
}
|
||||
@@ -2388,39 +2458,19 @@ mod tests {
|
||||
let payload: Vec<CrdsValue> = vec![];
|
||||
let vec_size = serialized_size(&payload).unwrap();
|
||||
let desired_size = MAX_PROTOCOL_PAYLOAD_SIZE - vec_size;
|
||||
let mut value = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots {
|
||||
from: Pubkey::default(),
|
||||
root: 0,
|
||||
lowest: 0,
|
||||
slots: BTreeSet::new(),
|
||||
stash: vec![],
|
||||
wallclock: 0,
|
||||
},
|
||||
));
|
||||
let mut value = CrdsValue::new_unsigned(CrdsData::SnapshotHashes(SnapshotHash {
|
||||
from: Pubkey::default(),
|
||||
hashes: vec![],
|
||||
wallclock: 0,
|
||||
}));
|
||||
|
||||
let mut i = 0;
|
||||
while value.size() <= desired_size {
|
||||
let slots = (0..i).collect::<BTreeSet<_>>();
|
||||
if slots.len() > 200 {
|
||||
panic!(
|
||||
"impossible to match size: last {:?} vs desired {:?}",
|
||||
serialized_size(&value).unwrap(),
|
||||
desired_size
|
||||
);
|
||||
}
|
||||
value.data = CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots {
|
||||
from: Pubkey::default(),
|
||||
root: 0,
|
||||
lowest: 0,
|
||||
slots,
|
||||
stash: vec![],
|
||||
wallclock: 0,
|
||||
},
|
||||
);
|
||||
value.data = CrdsData::SnapshotHashes(SnapshotHash {
|
||||
from: Pubkey::default(),
|
||||
hashes: vec![(0, Hash::default()); i],
|
||||
wallclock: 0,
|
||||
});
|
||||
i += 1;
|
||||
}
|
||||
let split = ClusterInfo::split_gossip_messages(vec![value.clone()]);
|
||||
@@ -2550,11 +2600,9 @@ mod tests {
|
||||
node_keypair,
|
||||
);
|
||||
for i in 0..10 {
|
||||
let mut peer_root = 5;
|
||||
let mut peer_lowest = 0;
|
||||
if i >= 5 {
|
||||
// make these invalid for the upcoming repair request
|
||||
peer_root = 15;
|
||||
peer_lowest = 10;
|
||||
}
|
||||
let other_node_pubkey = Pubkey::new_rand();
|
||||
@@ -2562,14 +2610,7 @@ mod tests {
|
||||
cluster_info.insert_info(other_node.clone());
|
||||
let value = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots::new(
|
||||
other_node_pubkey,
|
||||
peer_root,
|
||||
peer_lowest,
|
||||
BTreeSet::new(),
|
||||
vec![],
|
||||
timestamp(),
|
||||
),
|
||||
EpochSlots::new(other_node_pubkey, peer_lowest, timestamp()),
|
||||
));
|
||||
let _ = cluster_info.gossip.crds.insert(value, timestamp());
|
||||
}
|
||||
@@ -2618,6 +2659,14 @@ mod tests {
|
||||
assert_eq!(MAX_PROTOCOL_HEADER_SIZE, max_protocol_size);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_protocol_sanitize() {
|
||||
let mut pd = PruneData::default();
|
||||
pd.wallclock = MAX_WALLCLOCK;
|
||||
let msg = Protocol::PruneMessage(Pubkey::default(), pd);
|
||||
assert_eq!(msg.sanitize(), Err(SanitizeError::ValueOutOfBounds));
|
||||
}
|
||||
|
||||
// computes the maximum size for pull request blooms
|
||||
fn max_bloom_size() -> usize {
|
||||
let filter_size = serialized_size(&CrdsFilter::default())
|
||||
@@ -2630,38 +2679,4 @@ mod tests {
|
||||
serialized_size(&protocol).expect("unable to serialize gossip protocol") as usize;
|
||||
PACKET_DATA_SIZE - (protocol_size - filter_size)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compress_incomplete_slots() {
|
||||
let mut incomplete_slots: BTreeSet<Slot> = BTreeSet::new();
|
||||
|
||||
assert_eq!(
|
||||
EpochIncompleteSlots::default(),
|
||||
ClusterInfo::compress_incomplete_slots(&incomplete_slots)
|
||||
);
|
||||
|
||||
incomplete_slots.insert(100);
|
||||
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
|
||||
assert_eq!(100, compressed.first);
|
||||
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
|
||||
assert_eq!(incomplete_slots, decompressed);
|
||||
|
||||
incomplete_slots.insert(104);
|
||||
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
|
||||
assert_eq!(100, compressed.first);
|
||||
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
|
||||
assert_eq!(incomplete_slots, decompressed);
|
||||
|
||||
incomplete_slots.insert(80);
|
||||
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
|
||||
assert_eq!(80, compressed.first);
|
||||
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
|
||||
assert_eq!(incomplete_slots, decompressed);
|
||||
|
||||
incomplete_slots.insert(10000);
|
||||
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
|
||||
assert_eq!(80, compressed.first);
|
||||
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
|
||||
assert_eq!(incomplete_slots, decompressed);
|
||||
}
|
||||
}
|
||||
|
@@ -1,3 +1,7 @@
|
||||
use crate::consensus::VOTE_THRESHOLD_SIZE;
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::inc_new_counter_info;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
|
||||
@@ -10,9 +14,11 @@ use std::{
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];
|
||||
|
||||
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
|
||||
pub struct BlockCommitment {
|
||||
    pub commitment: [u64; MAX_LOCKOUT_HISTORY],
    pub commitment: BlockCommitmentArray,
|
||||
}
|
||||
|
||||
impl BlockCommitment {
|
||||
@@ -25,23 +31,71 @@ impl BlockCommitment {
|
||||
assert!(confirmation_count > 0 && confirmation_count <= MAX_LOCKOUT_HISTORY);
|
||||
self.commitment[confirmation_count - 1]
|
||||
}
|
||||
|
||||
pub fn increase_rooted_stake(&mut self, stake: u64) {
|
||||
self.commitment[MAX_LOCKOUT_HISTORY] += stake;
|
||||
}
|
||||
|
||||
pub fn get_rooted_stake(&self) -> u64 {
|
||||
self.commitment[MAX_LOCKOUT_HISTORY]
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn new(commitment: [u64; MAX_LOCKOUT_HISTORY]) -> Self {
|
||||
pub(crate) fn new(commitment: BlockCommitmentArray) -> Self {
|
||||
Self { commitment }
|
||||
}
|
||||
}
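
The commitment array has one bucket per possible confirmation depth plus a trailing bucket for rooted stake, which is why BlockCommitmentArray is MAX_LOCKOUT_HISTORY + 1 entries long. A small sketch of that layout; MAX_LOCKOUT_HISTORY is assumed here to be 31, its value in the vote program at the time of this change.

const MAX_LOCKOUT_HISTORY: usize = 31; // assumed value, mirrors solana_vote_program
type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];

// Index i holds the stake voting with confirmation_count == i + 1.
fn increase_confirmation_stake(c: &mut BlockCommitmentArray, confirmation_count: usize, stake: u64) {
    assert!(confirmation_count > 0 && confirmation_count <= MAX_LOCKOUT_HISTORY);
    c[confirmation_count - 1] += stake;
}

// The extra trailing element holds the stake that has rooted the block.
fn increase_rooted_stake(c: &mut BlockCommitmentArray, stake: u64) {
    c[MAX_LOCKOUT_HISTORY] += stake;
}

fn main() {
    let mut commitment: BlockCommitmentArray = [0; MAX_LOCKOUT_HISTORY + 1];
    increase_confirmation_stake(&mut commitment, 1, 15);
    increase_rooted_stake(&mut commitment, 40);
    assert_eq!(commitment[0], 15);
    assert_eq!(commitment[MAX_LOCKOUT_HISTORY], 40);
}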
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct BlockCommitmentCache {
|
||||
block_commitment: HashMap<Slot, BlockCommitment>,
|
||||
largest_confirmed_root: Slot,
|
||||
total_stake: u64,
|
||||
bank: Arc<Bank>,
|
||||
blockstore: Arc<Blockstore>,
|
||||
root: Slot,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for BlockCommitmentCache {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("BlockCommitmentCache")
|
||||
.field("block_commitment", &self.block_commitment)
|
||||
.field("total_stake", &self.total_stake)
|
||||
.field(
|
||||
"bank",
|
||||
&format_args!("Bank({{current_slot: {:?}}})", self.bank.slot()),
|
||||
)
|
||||
.field("root", &self.root)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockCommitmentCache {
|
||||
pub fn new(block_commitment: HashMap<Slot, BlockCommitment>, total_stake: u64) -> Self {
|
||||
pub fn new(
|
||||
block_commitment: HashMap<Slot, BlockCommitment>,
|
||||
largest_confirmed_root: Slot,
|
||||
total_stake: u64,
|
||||
bank: Arc<Bank>,
|
||||
blockstore: Arc<Blockstore>,
|
||||
root: Slot,
|
||||
) -> Self {
|
||||
Self {
|
||||
block_commitment,
|
||||
largest_confirmed_root,
|
||||
total_stake,
|
||||
bank,
|
||||
blockstore,
|
||||
root,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
|
||||
Self {
|
||||
block_commitment: HashMap::default(),
|
||||
largest_confirmed_root: Slot::default(),
|
||||
total_stake: u64::default(),
|
||||
bank: Arc::new(Bank::default()),
|
||||
blockstore,
|
||||
root: Slot::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,45 +103,99 @@ impl BlockCommitmentCache {
|
||||
self.block_commitment.get(&slot)
|
||||
}
|
||||
|
||||
pub fn largest_confirmed_root(&self) -> Slot {
|
||||
self.largest_confirmed_root
|
||||
}
|
||||
|
||||
pub fn total_stake(&self) -> u64 {
|
||||
self.total_stake
|
||||
}
|
||||
|
||||
pub fn get_block_with_depth_commitment(
|
||||
&self,
|
||||
minimum_depth: usize,
|
||||
minimum_stake_percentage: f64,
|
||||
) -> Option<Slot> {
|
||||
self.block_commitment
|
||||
.iter()
|
||||
.filter(|&(_, block_commitment)| {
|
||||
let fork_stake_minimum_depth: u64 = block_commitment.commitment[minimum_depth..]
|
||||
.iter()
|
||||
.cloned()
|
||||
.sum();
|
||||
fork_stake_minimum_depth as f64 / self.total_stake as f64
|
||||
>= minimum_stake_percentage
|
||||
})
|
||||
.map(|(slot, _)| *slot)
|
||||
.max()
|
||||
pub fn bank(&self) -> Arc<Bank> {
|
||||
self.bank.clone()
|
||||
}
|
||||
|
||||
pub fn get_rooted_block_with_commitment(&self, minimum_stake_percentage: f64) -> Option<u64> {
|
||||
self.get_block_with_depth_commitment(MAX_LOCKOUT_HISTORY - 1, minimum_stake_percentage)
|
||||
pub fn slot(&self) -> Slot {
|
||||
self.bank.slot()
|
||||
}
|
||||
|
||||
pub fn root(&self) -> Slot {
|
||||
self.root
|
||||
}
|
||||
|
||||
pub fn get_confirmation_count(&self, slot: Slot) -> Option<usize> {
|
||||
self.get_lockout_count(slot, VOTE_THRESHOLD_SIZE)
|
||||
}
|
||||
|
||||
// Returns the lowest level at which at least `minimum_stake_percentage` of the total epoch
|
||||
// stake is locked out
|
||||
fn get_lockout_count(&self, slot: Slot, minimum_stake_percentage: f64) -> Option<usize> {
|
||||
self.get_block_commitment(slot).map(|block_commitment| {
|
||||
let iterator = block_commitment.commitment.iter().enumerate().rev();
|
||||
let mut sum = 0;
|
||||
for (i, stake) in iterator {
|
||||
sum += stake;
|
||||
if (sum as f64 / self.total_stake as f64) > minimum_stake_percentage {
|
||||
return i + 1;
|
||||
}
|
||||
}
|
||||
0
|
||||
})
|
||||
}
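
get_lockout_count walks the commitment array from the deepest level down and reports the first depth at which the accumulated stake crosses the threshold (VOTE_THRESHOLD_SIZE, 2/3 of the stake, in the confirmation-count case). A standalone sketch with a worked example; the function name and sample numbers are illustrative.

fn confirmation_count(commitment: &[u64], total_stake: u64, threshold: f64) -> usize {
    let mut sum = 0;
    for (i, stake) in commitment.iter().enumerate().rev() {
        sum += stake;
        if (sum as f64 / total_stake as f64) > threshold {
            return i + 1;
        }
    }
    0
}

fn main() {
    // 15 stake confirmed at depth 1 and 25 at depth 2, out of 50 total stake.
    let commitment = [15u64, 25, 0];
    assert_eq!(confirmation_count(&commitment, 50, 2.0 / 3.0), 1);
    assert_eq!(confirmation_count(&commitment, 50, 0.4), 2);
}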
|
||||
|
||||
pub fn is_confirmed_rooted(&self, slot: Slot) -> bool {
|
||||
slot <= self.largest_confirmed_root()
|
||||
&& (self.blockstore.is_root(slot) || self.bank.status_cache_ancestors().contains(&slot))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn new_for_tests_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
|
||||
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
|
||||
block_commitment.insert(0, BlockCommitment::default());
|
||||
Self {
|
||||
block_commitment,
|
||||
blockstore,
|
||||
total_stake: 42,
|
||||
largest_confirmed_root: Slot::default(),
|
||||
bank: Arc::new(Bank::default()),
|
||||
root: Slot::default(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn set_get_largest_confirmed_root(&mut self, root: Slot) {
|
||||
self.largest_confirmed_root = root;
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CommitmentAggregationData {
|
||||
bank: Arc<Bank>,
|
||||
root: Slot,
|
||||
total_staked: u64,
|
||||
}
|
||||
|
||||
impl CommitmentAggregationData {
|
||||
pub fn new(bank: Arc<Bank>, total_staked: u64) -> Self {
|
||||
Self { bank, total_staked }
|
||||
pub fn new(bank: Arc<Bank>, root: Slot, total_staked: u64) -> Self {
|
||||
Self {
|
||||
bank,
|
||||
root,
|
||||
total_staked,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_largest_confirmed_root(mut rooted_stake: Vec<(Slot, u64)>, total_stake: u64) -> Slot {
    rooted_stake.sort_by(|a, b| a.0.cmp(&b.0).reverse());
    let mut stake_sum = 0;
    for (root, stake) in rooted_stake {
        stake_sum += stake;
        if (stake_sum as f64 / total_stake as f64) > VOTE_THRESHOLD_SIZE {
            return root;
        }
    }
    0
}

pub struct AggregateCommitmentService {
|
||||
t_commitment: JoinHandle<()>,
|
||||
}
|
||||
@@ -144,18 +252,37 @@ impl AggregateCommitmentService {
|
||||
continue;
|
||||
}
|
||||
|
||||
let block_commitment = Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
|
||||
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
|
||||
let (block_commitment, rooted_stake) =
|
||||
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
|
||||
|
||||
let mut new_block_commitment =
|
||||
BlockCommitmentCache::new(block_commitment, aggregation_data.total_staked);
|
||||
let largest_confirmed_root =
|
||||
get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);
|
||||
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
largest_confirmed_root,
|
||||
aggregation_data.total_staked,
|
||||
aggregation_data.bank,
|
||||
block_commitment_cache.read().unwrap().blockstore.clone(),
|
||||
aggregation_data.root,
|
||||
);
|
||||
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
aggregate_commitment_time.stop();
|
||||
inc_new_counter_info!(
|
||||
"aggregate-commitment-ms",
|
||||
aggregate_commitment_time.as_ms() as usize
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn aggregate_commitment(ancestors: &[Slot], bank: &Bank) -> HashMap<Slot, BlockCommitment> {
|
||||
pub fn aggregate_commitment(
|
||||
ancestors: &[Slot],
|
||||
bank: &Bank,
|
||||
) -> (HashMap<Slot, BlockCommitment>, Vec<(Slot, u64)>) {
|
||||
assert!(!ancestors.is_empty());
|
||||
|
||||
// Check ancestors is sorted
|
||||
@@ -164,6 +291,7 @@ impl AggregateCommitmentService {
|
||||
}
|
||||
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake: Vec<(Slot, u64)> = Vec::new();
|
||||
for (_, (lamports, account)) in bank.vote_accounts().into_iter() {
|
||||
if lamports == 0 {
|
||||
continue;
|
||||
@@ -176,17 +304,19 @@ impl AggregateCommitmentService {
|
||||
let vote_state = vote_state.unwrap();
|
||||
Self::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
ancestors,
|
||||
lamports,
|
||||
);
|
||||
}
|
||||
|
||||
commitment
|
||||
(commitment, rooted_stake)
|
||||
}
|
||||
|
||||
fn aggregate_commitment_for_vote_account(
|
||||
commitment: &mut HashMap<Slot, BlockCommitment>,
|
||||
rooted_stake: &mut Vec<(Slot, u64)>,
|
||||
vote_state: &VoteState,
|
||||
ancestors: &[Slot],
|
||||
lamports: u64,
|
||||
@@ -199,12 +329,13 @@ impl AggregateCommitmentService {
|
||||
commitment
|
||||
.entry(*a)
|
||||
.or_insert_with(BlockCommitment::default)
|
||||
.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
|
||||
.increase_rooted_stake(lamports);
|
||||
} else {
|
||||
ancestors_index = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
rooted_stake.push((root, lamports));
|
||||
}
|
||||
|
||||
for vote in &vote_state.votes {
|
||||
@@ -231,6 +362,7 @@ impl AggregateCommitmentService {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::get_tmp_ledger_path;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_stake_program::stake_state;
|
||||
use solana_vote_program::vote_state::{self, VoteStateVersions};
|
||||
@@ -246,97 +378,98 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_block_with_depth_commitment() {
|
||||
fn test_get_confirmations() {
|
||||
let bank = Arc::new(Bank::default());
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
// Build BlockCommitmentCache with votes at depths 0 and 1 for 2 slots
|
||||
let mut cache0 = BlockCommitment::default();
|
||||
cache0.increase_confirmation_stake(1, 15);
|
||||
cache0.increase_confirmation_stake(2, 25);
|
||||
cache0.increase_confirmation_stake(1, 5);
|
||||
cache0.increase_confirmation_stake(2, 40);
|
||||
|
||||
let mut cache1 = BlockCommitment::default();
|
||||
cache1.increase_confirmation_stake(1, 10);
|
||||
cache1.increase_confirmation_stake(2, 20);
|
||||
cache1.increase_confirmation_stake(1, 40);
|
||||
cache1.increase_confirmation_stake(2, 5);
|
||||
|
||||
let mut cache2 = BlockCommitment::default();
|
||||
cache2.increase_confirmation_stake(1, 20);
|
||||
cache2.increase_confirmation_stake(2, 5);
|
||||
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
block_commitment.entry(1).or_insert(cache1.clone());
|
||||
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
|
||||
block_commitment.entry(2).or_insert(cache2.clone());
|
||||
let block_commitment_cache =
|
||||
BlockCommitmentCache::new(block_commitment, 0, 50, bank, blockstore, 0);
|
||||
|
||||
// Neither slot has rooted votes
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.1),
|
||||
None
|
||||
);
|
||||
// Neither slot meets the minimum level of commitment 0.6 at depth 1
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(1, 0.6),
|
||||
None
|
||||
);
|
||||
// Only slot 0 meets the minimum level of commitment 0.5 at depth 1
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(1, 0.5),
|
||||
Some(0)
|
||||
);
|
||||
// If multiple slots meet the minimum level of commitment, method should return the most recent
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(1, 0.4),
|
||||
Some(1)
|
||||
);
|
||||
// If multiple slots meet the minimum level of commitment, method should return the most recent
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(0, 0.6),
|
||||
Some(1)
|
||||
);
|
||||
// Neither slot meets the minimum level of commitment 0.9 at depth 0
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(0, 0.9),
|
||||
None
|
||||
);
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(0), Some(2));
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(1), Some(1));
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(2), Some(0),);
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(3), None,);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_rooted_block_with_commitment() {
|
||||
// Build BlockCommitmentCache with rooted votes
|
||||
let mut cache0 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
|
||||
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 40);
|
||||
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
|
||||
let mut cache1 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
|
||||
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 30);
|
||||
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
|
||||
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 2, 10);
|
||||
fn test_is_confirmed_rooted() {
|
||||
let bank = Arc::new(Bank::default());
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
blockstore.set_roots(&[0, 1]).unwrap();
|
||||
// Build BlockCommitmentCache with rooted slots
|
||||
let mut cache0 = BlockCommitment::default();
|
||||
cache0.increase_rooted_stake(50);
|
||||
let mut cache1 = BlockCommitment::default();
|
||||
cache1.increase_rooted_stake(40);
|
||||
let mut cache2 = BlockCommitment::default();
|
||||
cache2.increase_rooted_stake(20);
|
||||
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
block_commitment.entry(1).or_insert(cache1.clone());
|
||||
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
|
||||
block_commitment.entry(1).or_insert(cache0.clone());
|
||||
block_commitment.entry(2).or_insert(cache1.clone());
|
||||
block_commitment.entry(3).or_insert(cache2.clone());
|
||||
let largest_confirmed_root = 1;
|
||||
let block_commitment_cache = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
largest_confirmed_root,
|
||||
50,
|
||||
bank,
|
||||
blockstore,
|
||||
0,
|
||||
);
|
||||
|
||||
// Only slot 0 meets the minimum level of commitment 0.66 at root
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.66),
|
||||
Some(0)
|
||||
);
|
||||
// If multiple slots meet the minimum level of commitment, method should return the most recent
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.6),
|
||||
Some(1)
|
||||
);
|
||||
// Neither slot meets the minimum level of commitment 0.9 at root
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.9),
|
||||
None
|
||||
);
|
||||
assert!(block_commitment_cache.is_confirmed_rooted(0));
|
||||
assert!(block_commitment_cache.is_confirmed_rooted(1));
|
||||
assert!(!block_commitment_cache.is_confirmed_rooted(2));
|
||||
assert!(!block_commitment_cache.is_confirmed_rooted(3));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_largest_confirmed_root() {
|
||||
assert_eq!(get_largest_confirmed_root(vec![], 10), 0);
|
||||
let mut rooted_stake = vec![];
|
||||
rooted_stake.push((0, 5));
|
||||
rooted_stake.push((1, 5));
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 0);
|
||||
let mut rooted_stake = vec![];
|
||||
rooted_stake.push((1, 5));
|
||||
rooted_stake.push((0, 10));
|
||||
rooted_stake.push((2, 5));
|
||||
rooted_stake.push((1, 4));
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_1() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
let root = ancestors.last().unwrap();
|
||||
vote_state.root_slot = Some(*root);
|
||||
let root = ancestors.last().unwrap().clone();
|
||||
vote_state.root_slot = Some(root);
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
@@ -344,15 +477,17 @@ mod tests {
|
||||
|
||||
for a in ancestors {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_2() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
@@ -361,6 +496,7 @@ mod tests {
|
||||
vote_state.process_slot_vote_unchecked(*ancestors.last().unwrap());
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
@@ -369,7 +505,7 @@ mod tests {
|
||||
for a in ancestors {
|
||||
if a <= root {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else {
|
||||
let mut expected = BlockCommitment::default();
|
||||
@@ -377,12 +513,14 @@ mod tests {
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_3() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
@@ -393,6 +531,7 @@ mod tests {
|
||||
vote_state.process_slot_vote_unchecked(ancestors[6]);
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
@@ -401,7 +540,7 @@ mod tests {
|
||||
for (i, a) in ancestors.iter().enumerate() {
|
||||
if *a <= root {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if i <= 4 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
@@ -413,6 +552,7 @@ mod tests {
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -422,6 +562,8 @@ mod tests {
|
||||
mut genesis_config, ..
|
||||
} = create_genesis_config(10_000);
|
||||
|
||||
let rooted_stake_amount = 40;
|
||||
|
||||
let sk1 = Pubkey::new_rand();
|
||||
let pk1 = Pubkey::new_rand();
|
||||
let mut vote_account1 = vote_state::create_account(&pk1, &Pubkey::new_rand(), 0, 100);
|
||||
@@ -432,12 +574,36 @@ mod tests {
|
||||
let mut vote_account2 = vote_state::create_account(&pk2, &Pubkey::new_rand(), 0, 50);
|
||||
let stake_account2 =
|
||||
stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
|
||||
let sk3 = Pubkey::new_rand();
|
||||
let pk3 = Pubkey::new_rand();
|
||||
let mut vote_account3 = vote_state::create_account(&pk3, &Pubkey::new_rand(), 0, 1);
|
||||
let stake_account3 = stake_state::create_account(
|
||||
&sk3,
|
||||
&pk3,
|
||||
&vote_account3,
|
||||
&genesis_config.rent,
|
||||
rooted_stake_amount,
|
||||
);
|
||||
let sk4 = Pubkey::new_rand();
|
||||
let pk4 = Pubkey::new_rand();
|
||||
let mut vote_account4 = vote_state::create_account(&pk4, &Pubkey::new_rand(), 0, 1);
|
||||
let stake_account4 = stake_state::create_account(
|
||||
&sk4,
|
||||
&pk4,
|
||||
&vote_account4,
|
||||
&genesis_config.rent,
|
||||
rooted_stake_amount,
|
||||
);
|
||||
|
||||
genesis_config.accounts.extend(vec![
|
||||
(pk1, vote_account1.clone()),
|
||||
(sk1, stake_account1),
|
||||
(pk2, vote_account2.clone()),
|
||||
(sk2, stake_account2),
|
||||
(pk3, vote_account3.clone()),
|
||||
(sk3, stake_account3),
|
||||
(pk4, vote_account4.clone()),
|
||||
(sk4, stake_account4),
|
||||
]);
|
||||
|
||||
// Create bank
|
||||
@@ -457,7 +623,20 @@ mod tests {
|
||||
VoteState::to(&versioned, &mut vote_account2).unwrap();
|
||||
bank.store_account(&pk2, &vote_account2);
|
||||
|
||||
let commitment = AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);
|
||||
let mut vote_state3 = VoteState::from(&vote_account3).unwrap();
|
||||
vote_state3.root_slot = Some(1);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state3));
|
||||
VoteState::to(&versioned, &mut vote_account3).unwrap();
|
||||
bank.store_account(&pk3, &vote_account3);
|
||||
|
||||
let mut vote_state4 = VoteState::from(&vote_account4).unwrap();
|
||||
vote_state4.root_slot = Some(2);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state4));
|
||||
VoteState::to(&versioned, &mut vote_account4).unwrap();
|
||||
bank.store_account(&pk4, &vote_account4);
|
||||
|
||||
let (commitment, rooted_stake) =
|
||||
AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);
|
||||
|
||||
for a in ancestors {
|
||||
if a <= 3 {
|
||||
@@ -481,5 +660,7 @@ mod tests {
|
||||
assert!(commitment.get(&a).is_none());
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake.len(), 2);
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 100), 1)
|
||||
}
|
||||
}
|
||||
|
@@ -621,7 +621,7 @@ pub mod test {
|
||||
}
|
||||
let vote = tower.new_vote_from_bank(&bank, &my_vote_pubkey).0;
|
||||
if let Some(new_root) = tower.record_bank_vote(vote) {
|
||||
ReplayStage::handle_new_root(new_root, bank_forks, progress, &None);
|
||||
ReplayStage::handle_new_root(new_root, bank_forks, progress, &None, None);
|
||||
}
|
||||
|
||||
// Mark the vote for this bank under this node's pubkey so it will be
|
||||
|
@@ -1,6 +1,8 @@
|
||||
use crate::crds_value::MAX_WALLCLOCK;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
#[cfg(test)]
|
||||
use solana_sdk::rpc_port;
|
||||
use solana_sdk::sanitize::{Sanitize, SanitizeError};
|
||||
#[cfg(test)]
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::timing::timestamp;
|
||||
@@ -37,6 +39,15 @@ pub struct ContactInfo {
|
||||
pub shred_version: u16,
|
||||
}
|
||||
|
||||
impl Sanitize for ContactInfo {
    fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
        if self.wallclock >= MAX_WALLCLOCK {
            return Err(SanitizeError::ValueOutOfBounds);
        }
        Ok(())
    }
}

impl Ord for ContactInfo {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
self.id.cmp(&other.id)
|
||||
@@ -314,4 +325,12 @@ mod tests {
|
||||
ci.rpc = socketaddr!("127.0.0.1:234");
|
||||
assert!(ci.valid_client_facing_addr().is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sanitize() {
|
||||
let mut ci = ContactInfo::default();
|
||||
assert_eq!(ci.sanitize(), Ok(()));
|
||||
ci.wallclock = MAX_WALLCLOCK;
|
||||
assert_eq!(ci.sanitize(), Err(SanitizeError::ValueOutOfBounds));
|
||||
}
|
||||
}
|
||||
|
@@ -20,6 +20,7 @@ pub const CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS: usize = 500;
|
||||
pub struct CrdsGossip {
|
||||
pub crds: Crds,
|
||||
pub id: Pubkey,
|
||||
pub shred_version: u16,
|
||||
pub push: CrdsGossipPush,
|
||||
pub pull: CrdsGossipPull,
|
||||
}
|
||||
@@ -29,6 +30,7 @@ impl Default for CrdsGossip {
|
||||
CrdsGossip {
|
||||
crds: Crds::default(),
|
||||
id: Pubkey::default(),
|
||||
shred_version: 0,
|
||||
push: CrdsGossipPush::default(),
|
||||
pull: CrdsGossipPull::default(),
|
||||
}
|
||||
@@ -39,6 +41,9 @@ impl CrdsGossip {
|
||||
pub fn set_self(&mut self, id: &Pubkey) {
|
||||
self.id = *id;
|
||||
}
|
||||
pub fn set_shred_version(&mut self, shred_version: u16) {
|
||||
self.shred_version = shred_version;
|
||||
}
|
||||
|
||||
/// process a push message to the network
|
||||
pub fn process_push_message(
|
||||
@@ -122,6 +127,7 @@ impl CrdsGossip {
|
||||
&self.crds,
|
||||
stakes,
|
||||
&self.id,
|
||||
self.shred_version,
|
||||
self.pull.pull_request_time.len(),
|
||||
CRDS_GOSSIP_NUM_ACTIVE,
|
||||
)
|
||||
@@ -134,8 +140,14 @@ impl CrdsGossip {
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
bloom_size: usize,
|
||||
) -> Result<(Pubkey, Vec<CrdsFilter>, CrdsValue), CrdsGossipError> {
|
||||
self.pull
|
||||
.new_pull_request(&self.crds, &self.id, now, stakes, bloom_size)
|
||||
self.pull.new_pull_request(
|
||||
&self.crds,
|
||||
&self.id,
|
||||
self.shred_version,
|
||||
now,
|
||||
stakes,
|
||||
bloom_size,
|
||||
)
|
||||
}
|
||||
|
||||
/// time when a request to `from` was initiated
|
||||
|
@@ -14,7 +14,6 @@ use crate::crds::Crds;
|
||||
use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
|
||||
use crate::crds_gossip_error::CrdsGossipError;
|
||||
use crate::crds_value::{CrdsValue, CrdsValueLabel};
|
||||
use rand;
|
||||
use rand::distributions::{Distribution, WeightedIndex};
|
||||
use rand::Rng;
|
||||
use solana_runtime::bloom::Bloom;
|
||||
@@ -37,6 +36,13 @@ pub struct CrdsFilter {
|
||||
mask_bits: u32,
|
||||
}
|
||||
|
||||
impl solana_sdk::sanitize::Sanitize for CrdsFilter {
|
||||
fn sanitize(&self) -> std::result::Result<(), solana_sdk::sanitize::SanitizeError> {
|
||||
self.filter.sanitize()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl CrdsFilter {
|
||||
pub fn new_rand(num_items: usize, max_bytes: usize) -> Self {
|
||||
let max_bits = (max_bytes * 8) as f64;
|
||||
@@ -138,11 +144,12 @@ impl CrdsGossipPull {
|
||||
&self,
|
||||
crds: &Crds,
|
||||
self_id: &Pubkey,
|
||||
self_shred_version: u16,
|
||||
now: u64,
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
bloom_size: usize,
|
||||
) -> Result<(Pubkey, Vec<CrdsFilter>, CrdsValue), CrdsGossipError> {
|
||||
let options = self.pull_options(crds, &self_id, now, stakes);
|
||||
let options = self.pull_options(crds, &self_id, self_shred_version, now, stakes);
|
||||
if options.is_empty() {
|
||||
return Err(CrdsGossipError::NoPeers);
|
||||
}
|
||||
@@ -159,13 +166,20 @@ impl CrdsGossipPull {
|
||||
&self,
|
||||
crds: &'a Crds,
|
||||
self_id: &Pubkey,
|
||||
self_shred_version: u16,
|
||||
now: u64,
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
) -> Vec<(f32, &'a ContactInfo)> {
|
||||
crds.table
|
||||
.values()
|
||||
.filter_map(|v| v.value.contact_info())
|
||||
.filter(|v| v.id != *self_id && ContactInfo::is_valid_address(&v.gossip))
|
||||
.filter(|v| {
|
||||
v.id != *self_id
|
||||
&& ContactInfo::is_valid_address(&v.gossip)
|
||||
&& (self_shred_version == 0
|
||||
|| v.shred_version == 0
|
||||
|| self_shred_version == v.shred_version)
|
||||
})
|
||||
.map(|item| {
|
||||
let max_weight = f32::from(u16::max_value()) - 1.0;
|
||||
let req_time: u64 = *self.pull_request_time.get(&item.id).unwrap_or(&0);
|
||||
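
The new filter above (and the matching change in push_options further down) gates gossip peers on shred version: a node only pulls from and pushes to peers on its own shred version, with version 0 acting as a wildcard for spy or not-yet-versioned nodes on either side. The rule in isolation, as a small self-contained sketch:

// Sketch of the shred-version gating rule shared by pull_options and push_options.
fn shred_versions_compatible(self_shred_version: u16, peer_shred_version: u16) -> bool {
    self_shred_version == 0
        || peer_shred_version == 0
        || self_shred_version == peer_shred_version
}

fn main() {
    assert!(shred_versions_compatible(123, 123)); // same cluster
    assert!(shred_versions_compatible(0, 456)); // a spy node sees everyone
    assert!(shred_versions_compatible(123, 0)); // unversioned peers are still reachable
    assert!(!shred_versions_compatible(123, 456)); // mismatched shred versions are filtered out
}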
@@ -396,7 +410,7 @@ mod test {
|
||||
stakes.insert(id, i * 100);
|
||||
}
|
||||
let now = 1024;
|
||||
let mut options = node.pull_options(&crds, &me.label().pubkey(), now, &stakes);
|
||||
let mut options = node.pull_options(&crds, &me.label().pubkey(), 0, now, &stakes);
|
||||
assert!(!options.is_empty());
|
||||
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
|
||||
// check that the highest stake holder is also the heaviest weighted.
|
||||
@@ -406,6 +420,66 @@ mod test {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_no_pulls_from_different_shred_versions() {
|
||||
let mut crds = Crds::default();
|
||||
let stakes = HashMap::new();
|
||||
let node = CrdsGossipPull::default();
|
||||
|
||||
let gossip = socketaddr!("127.0.0.1:1234");
|
||||
|
||||
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
|
||||
id: Pubkey::new_rand(),
|
||||
shred_version: 123,
|
||||
gossip: gossip.clone(),
|
||||
..ContactInfo::default()
|
||||
}));
|
||||
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
|
||||
id: Pubkey::new_rand(),
|
||||
shred_version: 0,
|
||||
gossip: gossip.clone(),
|
||||
..ContactInfo::default()
|
||||
}));
|
||||
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
|
||||
id: Pubkey::new_rand(),
|
||||
shred_version: 123,
|
||||
gossip: gossip.clone(),
|
||||
..ContactInfo::default()
|
||||
}));
|
||||
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
|
||||
id: Pubkey::new_rand(),
|
||||
shred_version: 456,
|
||||
gossip: gossip.clone(),
|
||||
..ContactInfo::default()
|
||||
}));
|
||||
|
||||
crds.insert(me.clone(), 0).unwrap();
|
||||
crds.insert(spy.clone(), 0).unwrap();
|
||||
crds.insert(node_123.clone(), 0).unwrap();
|
||||
crds.insert(node_456.clone(), 0).unwrap();
|
||||
|
||||
// shred version 123 should ignore 456 nodes
|
||||
let options = node
|
||||
.pull_options(&crds, &me.label().pubkey(), 123, 0, &stakes)
|
||||
.iter()
|
||||
.map(|(_, c)| c.id)
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(options.len(), 2);
|
||||
assert!(options.contains(&spy.pubkey()));
|
||||
assert!(options.contains(&node_123.pubkey()));
|
||||
|
||||
// spy nodes will see all
|
||||
let options = node
|
||||
.pull_options(&crds, &spy.label().pubkey(), 0, 0, &stakes)
|
||||
.iter()
|
||||
.map(|(_, c)| c.id)
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(options.len(), 3);
|
||||
assert!(options.contains(&me.pubkey()));
|
||||
assert!(options.contains(&node_123.pubkey()));
|
||||
assert!(options.contains(&node_456.pubkey()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_new_pull_request() {
|
||||
let mut crds = Crds::default();
|
||||
@@ -416,13 +490,13 @@ mod test {
|
||||
let id = entry.label().pubkey();
|
||||
let node = CrdsGossipPull::default();
|
||||
assert_eq!(
|
||||
node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE),
|
||||
node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE),
|
||||
Err(CrdsGossipError::NoPeers)
|
||||
);
|
||||
|
||||
crds.insert(entry.clone(), 0).unwrap();
|
||||
assert_eq!(
|
||||
node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE),
|
||||
node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE),
|
||||
Err(CrdsGossipError::NoPeers)
|
||||
);
|
||||
|
||||
@@ -431,7 +505,7 @@ mod test {
|
||||
0,
|
||||
)));
|
||||
crds.insert(new.clone(), 0).unwrap();
|
||||
let req = node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE);
|
||||
let req = node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE);
|
||||
let (to, _, self_info) = req.unwrap();
|
||||
assert_eq!(to, new.label().pubkey());
|
||||
assert_eq!(self_info, entry);
|
||||
@@ -466,6 +540,7 @@ mod test {
|
||||
let req = node.new_pull_request(
|
||||
&crds,
|
||||
&node_pubkey,
|
||||
0,
|
||||
u64::max_value(),
|
||||
&HashMap::new(),
|
||||
PACKET_DATA_SIZE,
|
||||
@@ -495,6 +570,7 @@ mod test {
|
||||
&node_crds,
|
||||
&node_pubkey,
|
||||
0,
|
||||
0,
|
||||
&HashMap::new(),
|
||||
PACKET_DATA_SIZE,
|
||||
);
|
||||
@@ -567,6 +643,7 @@ mod test {
|
||||
&node_crds,
|
||||
&node_pubkey,
|
||||
0,
|
||||
0,
|
||||
&HashMap::new(),
|
||||
PACKET_DATA_SIZE,
|
||||
);
|
||||
|
@@ -236,13 +236,14 @@ impl CrdsGossipPush {
|
||||
crds: &Crds,
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
self_id: &Pubkey,
|
||||
self_shred_version: u16,
|
||||
network_size: usize,
|
||||
ratio: usize,
|
||||
) {
|
||||
let need = Self::compute_need(self.num_active, self.active_set.len(), ratio);
|
||||
let mut new_items = HashMap::new();
|
||||
|
||||
let options: Vec<_> = self.push_options(crds, &self_id, stakes);
|
||||
let options: Vec<_> = self.push_options(crds, &self_id, self_shred_version, stakes);
|
||||
if options.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -288,13 +289,20 @@ impl CrdsGossipPush {
|
||||
&self,
|
||||
crds: &'a Crds,
|
||||
self_id: &Pubkey,
|
||||
self_shred_version: u16,
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
) -> Vec<(f32, &'a ContactInfo)> {
|
||||
crds.table
|
||||
.values()
|
||||
.filter(|v| v.value.contact_info().is_some())
|
||||
.map(|v| (v.value.contact_info().unwrap(), v))
|
||||
.filter(|(info, _)| info.id != *self_id && ContactInfo::is_valid_address(&info.gossip))
|
||||
.filter(|(info, _)| {
|
||||
info.id != *self_id
|
||||
&& ContactInfo::is_valid_address(&info.gossip)
|
||||
&& (self_shred_version == 0
|
||||
|| info.shred_version == 0
|
||||
|| self_shred_version == info.shred_version)
|
||||
})
|
||||
.map(|(info, value)| {
|
||||
let max_weight = f32::from(u16::max_value()) - 1.0;
|
||||
let last_updated: u64 = value.local_timestamp;
|
||||
@@ -510,7 +518,7 @@ mod test {
|
||||
)));
|
||||
|
||||
assert_eq!(crds.insert(value1.clone(), 0), Ok(None));
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
|
||||
|
||||
assert!(push.active_set.get(&value1.label().pubkey()).is_some());
|
||||
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
@@ -520,7 +528,7 @@ mod test {
|
||||
assert!(push.active_set.get(&value2.label().pubkey()).is_none());
|
||||
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
|
||||
for _ in 0..30 {
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
|
||||
if push.active_set.get(&value2.label().pubkey()).is_some() {
|
||||
break;
|
||||
}
|
||||
@@ -533,7 +541,7 @@ mod test {
|
||||
));
|
||||
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
|
||||
}
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
|
||||
assert_eq!(push.active_set.len(), push.num_active);
|
||||
}
|
||||
#[test]
|
||||
@@ -551,7 +559,7 @@ mod test {
|
||||
crds.insert(peer.clone(), time).unwrap();
|
||||
stakes.insert(id, i * 100);
|
||||
}
|
||||
let mut options = push.push_options(&crds, &Pubkey::default(), &stakes);
|
||||
let mut options = push.push_options(&crds, &Pubkey::default(), 0, &stakes);
|
||||
assert!(!options.is_empty());
|
||||
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
|
||||
// check that the highest stake holder is also the heaviest weighted.
|
||||
@@ -560,6 +568,66 @@ mod test {
|
||||
10_000_u64
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_no_pushes_to_from_different_shred_versions() {
|
||||
let mut crds = Crds::default();
|
||||
let stakes = HashMap::new();
|
||||
let node = CrdsGossipPush::default();
|
||||
|
||||
let gossip = socketaddr!("127.0.0.1:1234");
|
||||
|
||||
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
|
||||
id: Pubkey::new_rand(),
|
||||
shred_version: 123,
|
||||
gossip: gossip.clone(),
|
||||
..ContactInfo::default()
|
||||
}));
|
||||
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
|
||||
id: Pubkey::new_rand(),
|
||||
shred_version: 0,
|
||||
gossip: gossip.clone(),
|
||||
..ContactInfo::default()
|
||||
}));
|
||||
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
|
||||
id: Pubkey::new_rand(),
|
||||
shred_version: 123,
|
||||
gossip: gossip.clone(),
|
||||
..ContactInfo::default()
|
||||
}));
|
||||
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
|
||||
id: Pubkey::new_rand(),
|
||||
shred_version: 456,
|
||||
gossip: gossip.clone(),
|
||||
..ContactInfo::default()
|
||||
}));
|
||||
|
||||
crds.insert(me.clone(), 0).unwrap();
|
||||
crds.insert(spy.clone(), 0).unwrap();
|
||||
crds.insert(node_123.clone(), 0).unwrap();
|
||||
crds.insert(node_456.clone(), 0).unwrap();
|
||||
|
||||
// shred version 123 should ignore 456 nodes
|
||||
let options = node
|
||||
.push_options(&crds, &me.label().pubkey(), 123, &stakes)
|
||||
.iter()
|
||||
.map(|(_, c)| c.id)
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(options.len(), 2);
|
||||
assert!(options.contains(&spy.pubkey()));
|
||||
assert!(options.contains(&node_123.pubkey()));
|
||||
|
||||
// spy nodes will see all
|
||||
let options = node
|
||||
.push_options(&crds, &spy.label().pubkey(), 0, &stakes)
|
||||
.iter()
|
||||
.map(|(_, c)| c.id)
|
||||
.collect::<Vec<_>>();
|
||||
assert_eq!(options.len(), 3);
|
||||
assert!(options.contains(&me.pubkey()));
|
||||
assert!(options.contains(&node_123.pubkey()));
|
||||
assert!(options.contains(&node_456.pubkey()));
|
||||
}
|
||||
#[test]
|
||||
fn test_new_push_messages() {
|
||||
let mut crds = Crds::default();
|
||||
@@ -569,7 +637,7 @@ mod test {
|
||||
0,
|
||||
)));
|
||||
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
|
||||
|
||||
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
@@ -606,7 +674,7 @@ mod test {
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), 0),
|
||||
Ok(None)
|
||||
);
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
|
||||
|
||||
// push 3's contact info to 1 and 2 and 3
|
||||
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
@@ -628,7 +696,7 @@ mod test {
|
||||
0,
|
||||
)));
|
||||
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
|
||||
|
||||
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
@@ -651,7 +719,7 @@ mod test {
|
||||
0,
|
||||
)));
|
||||
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
|
||||
|
||||
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
|
||||
ci.wallclock = 1;
|
||||
|
@@ -1,5 +1,6 @@
|
||||
use crate::contact_info::ContactInfo;
|
||||
use bincode::{serialize, serialized_size};
|
||||
use solana_sdk::sanitize::{Sanitize, SanitizeError};
|
||||
use solana_sdk::timing::timestamp;
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
@@ -14,10 +15,14 @@ use std::{
|
||||
fmt,
|
||||
};
|
||||
|
||||
pub const MAX_WALLCLOCK: u64 = 1_000_000_000_000_000;
|
||||
pub const MAX_SLOT: u64 = 1_000_000_000_000_000;
|
||||
|
||||
pub type VoteIndex = u8;
|
||||
pub const MAX_VOTES: VoteIndex = 32;
|
||||
|
||||
pub type EpochSlotIndex = u8;
|
||||
pub const MAX_EPOCH_SLOTS: EpochSlotIndex = 1;
|
||||
|
||||
/// CrdsValue that is replicated across the cluster
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
@@ -26,6 +31,13 @@ pub struct CrdsValue {
|
||||
pub data: CrdsData,
|
||||
}
|
||||
|
||||
impl Sanitize for CrdsValue {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
self.signature.sanitize()?;
|
||||
self.data.sanitize()
|
||||
}
|
||||
}
|
||||
|
||||
impl Signable for CrdsValue {
|
||||
fn pubkey(&self) -> Pubkey {
|
||||
self.pubkey()
|
||||
@@ -44,14 +56,8 @@ impl Signable for CrdsValue {
|
||||
}
|
||||
|
||||
fn verify(&self) -> bool {
|
||||
let sig_check = self
|
||||
.get_signature()
|
||||
.verify(&self.pubkey().as_ref(), self.signable_data().borrow());
|
||||
let data_check = match &self.data {
|
||||
CrdsData::Vote(ix, _) => *ix < MAX_VOTES,
|
||||
_ => true,
|
||||
};
|
||||
sig_check && data_check
|
||||
self.get_signature()
|
||||
.verify(&self.pubkey().as_ref(), self.signable_data().borrow())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,6 +93,39 @@ pub struct EpochIncompleteSlots {
|
||||
pub compressed_list: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Sanitize for EpochIncompleteSlots {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
if self.first >= MAX_SLOT {
|
||||
return Err(SanitizeError::InvalidValue);
|
||||
}
|
||||
        // The rest of the data doesn't matter since these values are no longer decompressed.
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Sanitize for CrdsData {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
match self {
|
||||
CrdsData::ContactInfo(val) => val.sanitize(),
|
||||
CrdsData::Vote(ix, val) => {
|
||||
if *ix >= MAX_VOTES {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
val.sanitize()
|
||||
}
|
||||
CrdsData::SnapshotHashes(val) => val.sanitize(),
|
||||
CrdsData::AccountsHashes(val) => val.sanitize(),
|
||||
CrdsData::EpochSlots(ix, val) => {
|
||||
if *ix as usize >= MAX_EPOCH_SLOTS as usize {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
val.sanitize()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct SnapshotHash {
|
||||
pub from: Pubkey,
|
||||
@@ -94,6 +133,20 @@ pub struct SnapshotHash {
|
||||
pub wallclock: u64,
|
||||
}
|
||||
|
||||
impl Sanitize for SnapshotHash {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
if self.wallclock >= MAX_WALLCLOCK {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
for (slot, _) in &self.hashes {
|
||||
if *slot >= MAX_SLOT {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
}
|
||||
self.from.sanitize()
|
||||
}
|
||||
}
|
||||
|
||||
impl SnapshotHash {
|
||||
pub fn new(from: Pubkey, hashes: Vec<(Slot, Hash)>) -> Self {
|
||||
Self {
|
||||
@@ -107,33 +160,47 @@ impl SnapshotHash {
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct EpochSlots {
|
||||
pub from: Pubkey,
|
||||
pub root: Slot,
|
||||
root: Slot,
|
||||
pub lowest: Slot,
|
||||
pub slots: BTreeSet<Slot>,
|
||||
pub stash: Vec<EpochIncompleteSlots>,
|
||||
slots: BTreeSet<Slot>,
|
||||
stash: Vec<EpochIncompleteSlots>,
|
||||
pub wallclock: u64,
|
||||
}
|
||||
|
||||
impl EpochSlots {
|
||||
pub fn new(
|
||||
from: Pubkey,
|
||||
root: Slot,
|
||||
lowest: Slot,
|
||||
slots: BTreeSet<Slot>,
|
||||
stash: Vec<EpochIncompleteSlots>,
|
||||
wallclock: u64,
|
||||
) -> Self {
|
||||
pub fn new(from: Pubkey, lowest: Slot, wallclock: u64) -> Self {
|
||||
Self {
|
||||
from,
|
||||
root,
|
||||
root: 0,
|
||||
lowest,
|
||||
slots,
|
||||
stash,
|
||||
slots: BTreeSet::new(),
|
||||
stash: vec![],
|
||||
wallclock,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Sanitize for EpochSlots {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
if self.wallclock >= MAX_WALLCLOCK {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
if self.lowest >= MAX_SLOT {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
if self.root >= MAX_SLOT {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
for slot in &self.slots {
|
||||
if *slot >= MAX_SLOT {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
}
|
||||
self.stash.sanitize()?;
|
||||
self.from.sanitize()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct Vote {
|
||||
pub from: Pubkey,
|
||||
@@ -141,6 +208,16 @@ pub struct Vote {
|
||||
pub wallclock: u64,
|
||||
}
|
||||
|
||||
impl Sanitize for Vote {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
if self.wallclock >= MAX_WALLCLOCK {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
self.from.sanitize()?;
|
||||
self.transaction.sanitize()
|
||||
}
|
||||
}
|
||||
|
||||
impl Vote {
|
||||
pub fn new(from: &Pubkey, transaction: Transaction, wallclock: u64) -> Self {
|
||||
Self {
|
||||
@@ -356,7 +433,7 @@ mod test {
|
||||
|
||||
let v = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots::new(Pubkey::default(), 0, 0, BTreeSet::new(), vec![], 0),
|
||||
EpochSlots::new(Pubkey::default(), 0, 0),
|
||||
));
|
||||
assert_eq!(v.wallclock(), 0);
|
||||
let key = v.clone().epoch_slots().unwrap().from;
|
||||
@@ -377,10 +454,9 @@ mod test {
|
||||
Vote::new(&keypair.pubkey(), test_tx(), timestamp()),
|
||||
));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
let btreeset: BTreeSet<Slot> = vec![1, 2, 3, 6, 8].into_iter().collect();
|
||||
v = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots::new(keypair.pubkey(), 0, 0, btreeset, vec![], timestamp()),
|
||||
EpochSlots::new(keypair.pubkey(), 0, timestamp()),
|
||||
));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
}
|
||||
@@ -395,9 +471,21 @@ mod test {
|
||||
),
|
||||
&keypair,
|
||||
);
|
||||
assert!(!vote.verify());
|
||||
assert!(vote.sanitize().is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_epoch_slots_index() {
|
||||
let keypair = Keypair::new();
|
||||
let item = CrdsValue::new_signed(
|
||||
CrdsData::Vote(
|
||||
MAX_VOTES,
|
||||
Vote::new(&keypair.pubkey(), test_tx(), timestamp()),
|
||||
),
|
||||
&keypair,
|
||||
);
|
||||
assert_eq!(item.sanitize(), Err(SanitizeError::ValueOutOfBounds));
|
||||
}
|
||||
#[test]
|
||||
fn test_compute_vote_index_empty() {
|
||||
for i in 0..MAX_VOTES {
|
||||
|
@@ -1,6 +1,8 @@
|
||||
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
|
||||
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_ledger::blockstore_db::Result as BlockstoreResult;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::datapoint_debug;
|
||||
use solana_sdk::clock::Slot;
|
||||
use std::string::ToString;
|
||||
@@ -11,13 +13,25 @@ use std::thread;
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
// - To try and keep the RocksDB size under 400GB:
|
||||
// Seeing about 1600b/shred, using 2000b/shred for margin, so 200m shreds can be stored in 400gb.
|
||||
// at 5k shreds/slot at 50k tps, this is 500k slots (~5 hours).
|
||||
// At idle, 60 shreds/slot this is about 4m slots (18 days)
|
||||
// This is chosen to allow enough time for
|
||||
// - To try and keep the RocksDB size under 512GB at 50k tps (100 slots take ~2GB).
|
||||
// - A validator to download a snapshot from a peer and boot from it
|
||||
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
|
||||
// to catch back up to where it was when it stopped
|
||||
pub const DEFAULT_MAX_LEDGER_SLOTS: u64 = 270_000;
|
||||
// Remove a fixed number of slots at a time, it's more efficient than doing it one-by-one
|
||||
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 200_000_000;
|
||||
|
||||
// Allow down to 50m, or 3.5 days at idle, 1hr at 50k load, around ~100GB
|
||||
pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
|
||||
|
||||
// Check for removing slots at this interval so we don't purge too often
|
||||
// and starve other blockstore users.
|
||||
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
|
||||
|
||||
// Remove a limited number of slots at a time, so the operation
|
||||
// does not take too long and block other blockstore users.
|
||||
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
|
||||
|
||||
pub struct LedgerCleanupService {
|
||||
@@ -36,7 +50,7 @@ impl LedgerCleanupService {
|
||||
max_ledger_slots
|
||||
);
|
||||
let exit = exit.clone();
|
||||
let mut next_purge_batch = max_ledger_slots;
|
||||
let mut last_purge_slot = 0;
|
||||
let t_cleanup = Builder::new()
|
||||
.name("solana-ledger-cleanup".to_string())
|
||||
.spawn(move || loop {
|
||||
@@ -47,7 +61,8 @@ impl LedgerCleanupService {
|
||||
&new_root_receiver,
|
||||
&blockstore,
|
||||
max_ledger_slots,
|
||||
&mut next_purge_batch,
|
||||
&mut last_purge_slot,
|
||||
DEFAULT_PURGE_SLOT_INTERVAL,
|
||||
) {
|
||||
match e {
|
||||
RecvTimeoutError::Disconnected => break,
|
||||
@@ -59,45 +74,123 @@ impl LedgerCleanupService {
|
||||
Self { t_cleanup }
|
||||
}
|
||||
|
||||
fn find_slots_to_clean(
|
||||
blockstore: &Arc<Blockstore>,
|
||||
root: Slot,
|
||||
max_ledger_shreds: u64,
|
||||
) -> (u64, Slot, Slot) {
|
||||
let mut shreds = Vec::new();
|
||||
let mut iterate_time = Measure::start("iterate_time");
|
||||
let mut total_shreds = 0;
|
||||
let mut first_slot = 0;
|
||||
for (i, (slot, meta)) in blockstore.slot_meta_iterator(0).unwrap().enumerate() {
|
||||
if i == 0 {
|
||||
first_slot = slot;
|
||||
debug!("purge: searching from slot: {}", slot);
|
||||
}
|
||||
// Not exact since non-full slots will have holes
|
||||
total_shreds += meta.received;
|
||||
shreds.push((slot, meta.received));
|
||||
if slot > root {
|
||||
break;
|
||||
}
|
||||
}
|
||||
iterate_time.stop();
|
||||
info!(
|
||||
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
|
||||
max_ledger_shreds,
|
||||
shreds.len(),
|
||||
total_shreds,
|
||||
iterate_time
|
||||
);
|
||||
if (total_shreds as u64) < max_ledger_shreds {
|
||||
return (0, 0, 0);
|
||||
}
|
||||
let mut cur_shreds = 0;
|
||||
let mut lowest_slot_to_clean = shreds[0].0;
|
||||
for (slot, num_shreds) in shreds.iter().rev() {
|
||||
cur_shreds += *num_shreds as u64;
|
||||
if cur_shreds > max_ledger_shreds {
|
||||
lowest_slot_to_clean = *slot;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
(cur_shreds, lowest_slot_to_clean, first_slot)
|
||||
}
|
||||
|
||||
fn cleanup_ledger(
|
||||
new_root_receiver: &Receiver<Slot>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
max_ledger_slots: u64,
|
||||
next_purge_batch: &mut u64,
|
||||
max_ledger_shreds: u64,
|
||||
last_purge_slot: &mut u64,
|
||||
purge_interval: u64,
|
||||
) -> Result<(), RecvTimeoutError> {
|
||||
let disk_utilization_pre = blockstore.storage_size();
|
||||
|
||||
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
|
||||
// Notify blockstore of impending purge
|
||||
if root > *next_purge_batch {
|
||||
//cleanup
|
||||
let lowest_slot = root - max_ledger_slots;
|
||||
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_slot;
|
||||
blockstore.purge_slots(0, Some(lowest_slot));
|
||||
*next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
|
||||
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
// Get the newest root
|
||||
while let Ok(new_root) = new_root_receiver.try_recv() {
|
||||
root = new_root;
|
||||
}
|
||||
|
||||
let disk_utilization_post = blockstore.storage_size();
|
||||
|
||||
if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) =
|
||||
(disk_utilization_pre, disk_utilization_post)
|
||||
{
|
||||
datapoint_debug!(
|
||||
"ledger_disk_utilization",
|
||||
("disk_utilization_pre", disk_utilization_pre as i64, i64),
|
||||
("disk_utilization_post", disk_utilization_post as i64, i64),
|
||||
(
|
||||
"disk_utilization_delta",
|
||||
(disk_utilization_pre as i64 - disk_utilization_post as i64),
|
||||
i64
|
||||
)
|
||||
if root - *last_purge_slot > purge_interval {
|
||||
let disk_utilization_pre = blockstore.storage_size();
|
||||
info!(
|
||||
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
|
||||
root, last_purge_slot, purge_interval, disk_utilization_pre
|
||||
);
|
||||
*last_purge_slot = root;
|
||||
|
||||
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
|
||||
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
|
||||
|
||||
if num_shreds_to_clean > 0 {
|
||||
debug!(
|
||||
"cleaning up to: {} shreds: {} first: {}",
|
||||
lowest_slot_to_clean, num_shreds_to_clean, first_slot
|
||||
);
|
||||
loop {
|
||||
let current_lowest =
|
||||
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
|
||||
|
||||
let mut slot_update_time = Measure::start("slot_update");
|
||||
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
|
||||
slot_update_time.stop();
|
||||
|
||||
let mut clean_time = Measure::start("ledger_clean");
|
||||
blockstore.purge_slots(first_slot, Some(current_lowest));
|
||||
clean_time.stop();
|
||||
|
||||
debug!(
|
||||
"ledger purge {} -> {}: {} {}",
|
||||
first_slot, current_lowest, slot_update_time, clean_time
|
||||
);
|
||||
first_slot += DEFAULT_PURGE_BATCH_SIZE;
|
||||
if current_lowest == lowest_slot_to_clean {
|
||||
break;
|
||||
}
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
}
|
||||
}
|
||||
|
||||
let disk_utilization_post = blockstore.storage_size();
|
||||
|
||||
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
|
||||
if let (Ok(pre), Ok(post)) = (pre, post) {
|
||||
datapoint_debug!(
|
||||
"ledger_disk_utilization",
|
||||
("disk_utilization_pre", pre as i64, i64),
|
||||
("disk_utilization_post", post as i64, i64),
|
||||
("disk_utilization_delta", (pre as i64 - post as i64), i64)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_cleanup.join()
|
||||
}
|
||||
@@ -111,6 +204,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_cleanup() {
|
||||
solana_logger::setup();
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&blockstore_path).unwrap();
|
||||
let (shreds, _) = make_many_slot_entries(0, 50, 5);
|
||||
@@ -118,10 +212,10 @@ mod tests {
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let (sender, receiver) = channel();
|
||||
|
||||
//send a signal to kill slots 0-40
|
||||
let mut next_purge_slot = 0;
|
||||
//send a signal to kill all but 5 shreds, which will be in the newest slots
|
||||
let mut last_purge_slot = 0;
|
||||
sender.send(50).unwrap();
|
||||
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot)
|
||||
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
|
||||
.unwrap();
|
||||
|
||||
//check that 0-40 don't exist
|
||||
@@ -134,6 +228,62 @@ mod tests {
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cleanup_speed() {
|
||||
solana_logger::setup();
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
let mut blockstore = Blockstore::open(&blockstore_path).unwrap();
|
||||
blockstore.set_no_compaction(true);
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let (sender, receiver) = channel();
|
||||
|
||||
let mut first_insert = Measure::start("first_insert");
|
||||
let initial_slots = 50;
|
||||
let initial_entries = 5;
|
||||
let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
first_insert.stop();
|
||||
info!("{}", first_insert);
|
||||
|
||||
let mut last_purge_slot = 0;
|
||||
let mut slot = initial_slots;
|
||||
let mut num_slots = 6;
|
||||
for _ in 0..5 {
|
||||
let mut insert_time = Measure::start("insert time");
|
||||
let batch_size = 2;
|
||||
let batches = num_slots / batch_size;
|
||||
for i in 0..batches {
|
||||
let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
if i % 100 == 0 {
|
||||
info!("inserting..{} of {}", i, batches);
|
||||
}
|
||||
}
|
||||
insert_time.stop();
|
||||
|
||||
let mut time = Measure::start("purge time");
|
||||
sender.send(slot + num_slots).unwrap();
|
||||
LedgerCleanupService::cleanup_ledger(
|
||||
&receiver,
|
||||
&blockstore,
|
||||
initial_slots,
|
||||
&mut last_purge_slot,
|
||||
10,
|
||||
)
|
||||
.unwrap();
|
||||
time.stop();
|
||||
info!(
|
||||
"slot: {} size: {} {} {}",
|
||||
slot, num_slots, insert_time, time
|
||||
);
|
||||
slot += num_slots;
|
||||
num_slots *= 2;
|
||||
}
|
||||
|
||||
drop(blockstore);
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compaction() {
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
@@ -142,7 +292,7 @@ mod tests {
|
||||
let n = 10_000;
|
||||
let batch_size = 100;
|
||||
let batches = n / batch_size;
|
||||
let max_ledger_slots = 100;
|
||||
let max_ledger_shreds = 100;
|
||||
|
||||
for i in 0..batches {
|
||||
let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1);
|
||||
@@ -158,8 +308,9 @@ mod tests {
|
||||
LedgerCleanupService::cleanup_ledger(
|
||||
&receiver,
|
||||
&blockstore,
|
||||
max_ledger_slots,
|
||||
max_ledger_shreds,
|
||||
&mut next_purge_batch,
|
||||
10,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@@ -170,7 +321,7 @@ mod tests {
|
||||
assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,);
|
||||
|
||||
// check that early slots don't exist
|
||||
let max_slot = n - max_ledger_slots;
|
||||
let max_slot = n - max_ledger_shreds - 1;
|
||||
blockstore
|
||||
.slot_meta_iterator(0)
|
||||
.unwrap()
|
||||
|
@@ -29,6 +29,7 @@ pub mod genesis_utils;
|
||||
pub mod gossip_service;
|
||||
pub mod ledger_cleanup_service;
|
||||
pub mod local_vote_signer_service;
|
||||
pub mod non_circulating_supply;
|
||||
pub mod packet;
|
||||
pub mod poh_recorder;
|
||||
pub mod poh_service;
|
||||
|
193
core/src/non_circulating_supply.rs
Normal file
193
core/src/non_circulating_supply.rs
Normal file
@@ -0,0 +1,193 @@
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_stake_program::stake_state::StakeState;
|
||||
use std::{collections::HashSet, sync::Arc};
|
||||
|
||||
pub struct NonCirculatingSupply {
|
||||
pub lamports: u64,
|
||||
pub accounts: Vec<Pubkey>,
|
||||
}
|
||||
|
||||
pub fn calculate_non_circulating_supply(bank: Arc<Bank>) -> NonCirculatingSupply {
|
||||
debug!("Updating Bank supply, epoch: {}", bank.epoch());
|
||||
let mut non_circulating_accounts_set: HashSet<Pubkey> = HashSet::new();
|
||||
|
||||
for key in non_circulating_accounts() {
|
||||
non_circulating_accounts_set.insert(key);
|
||||
}
|
||||
|
||||
let clock = bank.clock();
|
||||
let stake_accounts = bank.get_program_accounts(Some(&solana_stake_program::id()));
|
||||
for (pubkey, account) in stake_accounts.iter() {
|
||||
let stake_account = StakeState::from(&account).unwrap_or_default();
|
||||
match stake_account {
|
||||
StakeState::Initialized(meta) => {
|
||||
if meta.lockup.is_in_force(&clock, &HashSet::default())
|
||||
|| meta.authorized.withdrawer == withdraw_authority()
|
||||
{
|
||||
non_circulating_accounts_set.insert(*pubkey);
|
||||
}
|
||||
}
|
||||
StakeState::Stake(meta, _stake) => {
|
||||
if meta.lockup.is_in_force(&clock, &HashSet::default())
|
||||
|| meta.authorized.withdrawer == withdraw_authority()
|
||||
{
|
||||
non_circulating_accounts_set.insert(*pubkey);
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
let lamports = non_circulating_accounts_set
|
||||
.iter()
|
||||
.fold(0, |acc, pubkey| acc + bank.get_balance(&pubkey));
|
||||
|
||||
NonCirculatingSupply {
|
||||
lamports,
|
||||
accounts: non_circulating_accounts_set.into_iter().collect(),
|
||||
}
|
||||
}
|
||||
|
||||
// Mainnet-beta accounts that should be considered non-circulating
|
||||
solana_sdk::pubkeys!(
|
||||
non_circulating_accounts,
|
||||
[
|
||||
"9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA",
|
||||
"GK2zqSsXLA2rwVZk347RYhh6jJpRsCA69FjLW93ZGi3B",
|
||||
"HCV5dGFJXRrJ3jhDYA4DCeb9TEDTwGGYXtT3wHksu2Zr",
|
||||
"25odAafVXnd63L6Hq5Cx6xGmhKqkhE2y6UrLVuqUfWZj",
|
||||
"14FUT96s9swbmH7ZjpDvfEDywnAYy9zaNhv4xvezySGu",
|
||||
"HbZ5FfmKWNHC7uwk6TF1hVi6TCs7dtYfdjEcuPGgzFAg",
|
||||
"C7C8odR8oashR5Feyrq2tJKaXL18id1dSj2zbkDGL2C2",
|
||||
"APnSR52EC1eH676m7qTBHUJ1nrGpHYpV7XKPxgRDD8gX",
|
||||
"9ibqedFVnu5k4wo1mJRbH6KJ5HLBCyjpA9omPYkDeeT5",
|
||||
"FopBKzQkG9pkyQqjdMFBLMQ995pSkjy83ziR4aism4c6",
|
||||
"AiUHvJhTbMCcgFE2K26Ea9qCe74y3sFwqUt38iD5sfoR",
|
||||
"3DndE3W53QdHSfBJiSJgzDKGvKJBoQLVmRHvy5LtqYfG",
|
||||
"Eyr9P5XsjK2NUKNCnfu39eqpGoiLFgVAv1LSQgMZCwiQ",
|
||||
"DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ",
|
||||
"CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S",
|
||||
"7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2",
|
||||
"GdnSyH3YtwcxFvQrVVJMm1JhTS4QVX7MFsX56uJLUfiZ",
|
||||
"Mc5XB47H3DKJHym5RLa9mPzWv5snERsF3KNv5AauXK8",
|
||||
"7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri",
|
||||
"AG3m2bAibcY8raMt4oXEGqRHwX4FWKPPJVjZxn1LySDX",
|
||||
"5XdtyEDREHJXXW1CTtCsVjJRjBapAwK78ZquzvnNVRrV",
|
||||
"6yKHERk8rsbmJxvMpPuwPs1ct3hRiP7xaJF2tvnGU6nK",
|
||||
"CHmdL15akDcJgBkY6BP3hzs98Dqr6wbdDC5p8odvtSbq",
|
||||
"FR84wZQy3Y3j2gWz6pgETUiUoJtreMEuWfbg6573UCj9",
|
||||
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
|
||||
]
|
||||
);
|
||||
|
||||
// Withdraw authority for autostaked accounts on mainnet-beta
|
||||
solana_sdk::pubkeys!(
|
||||
withdraw_authority,
|
||||
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK"
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_sdk::{
|
||||
account::Account, epoch_schedule::EpochSchedule, genesis_config::GenesisConfig,
|
||||
};
|
||||
use solana_stake_program::stake_state::{Authorized, Lockup, Meta, StakeState};
|
||||
use std::{collections::BTreeMap, sync::Arc};
|
||||
|
||||
fn new_from_parent(parent: &Arc<Bank>) -> Bank {
|
||||
Bank::new_from_parent(parent, &Pubkey::default(), parent.slot() + 1)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_calculate_non_circulating_supply() {
|
||||
let mut accounts: BTreeMap<Pubkey, Account> = BTreeMap::new();
|
||||
let balance = 10;
|
||||
let num_genesis_accounts = 10;
|
||||
for _ in 0..num_genesis_accounts {
|
||||
accounts.insert(
|
||||
Pubkey::new_rand(),
|
||||
Account::new(balance, 0, &Pubkey::default()),
|
||||
);
|
||||
}
|
||||
let non_circulating_accounts = non_circulating_accounts();
|
||||
let num_non_circulating_accounts = non_circulating_accounts.len() as u64;
|
||||
for key in non_circulating_accounts.clone() {
|
||||
accounts.insert(key, Account::new(balance, 0, &Pubkey::default()));
|
||||
}
|
||||
|
||||
let num_stake_accounts = 3;
|
||||
for _ in 0..num_stake_accounts {
|
||||
let pubkey = Pubkey::new_rand();
|
||||
let meta = Meta {
|
||||
authorized: Authorized::auto(&pubkey),
|
||||
lockup: Lockup {
|
||||
epoch: 1,
|
||||
..Lockup::default()
|
||||
},
|
||||
..Meta::default()
|
||||
};
|
||||
let stake_account = Account::new_data_with_space(
|
||||
balance,
|
||||
&StakeState::Initialized(meta),
|
||||
std::mem::size_of::<StakeState>(),
|
||||
&solana_stake_program::id(),
|
||||
)
|
||||
.unwrap();
|
||||
accounts.insert(pubkey, stake_account);
|
||||
}
|
||||
|
||||
let slots_per_epoch = 32;
|
||||
let genesis_config = GenesisConfig {
|
||||
accounts,
|
||||
epoch_schedule: EpochSchedule::new(slots_per_epoch),
|
||||
..GenesisConfig::default()
|
||||
};
|
||||
let mut bank = Arc::new(Bank::new(&genesis_config));
|
||||
assert_eq!(
|
||||
bank.capitalization(),
|
||||
(num_genesis_accounts + num_non_circulating_accounts + num_stake_accounts) * balance
|
||||
);
|
||||
|
||||
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
|
||||
assert_eq!(
|
||||
non_circulating_supply.lamports,
|
||||
(num_non_circulating_accounts + num_stake_accounts) * balance
|
||||
);
|
||||
assert_eq!(
|
||||
non_circulating_supply.accounts.len(),
|
||||
num_non_circulating_accounts as usize + num_stake_accounts as usize
|
||||
);
|
||||
|
||||
bank = Arc::new(new_from_parent(&bank));
|
||||
let new_balance = 11;
|
||||
for key in non_circulating_accounts {
|
||||
bank.store_account(&key, &Account::new(new_balance, 0, &Pubkey::default()));
|
||||
}
|
||||
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
|
||||
assert_eq!(
|
||||
non_circulating_supply.lamports,
|
||||
(num_non_circulating_accounts * new_balance) + (num_stake_accounts * balance)
|
||||
);
|
||||
assert_eq!(
|
||||
non_circulating_supply.accounts.len(),
|
||||
num_non_circulating_accounts as usize + num_stake_accounts as usize
|
||||
);
|
||||
|
||||
// Advance bank an epoch, which should unlock stakes
|
||||
for _ in 0..slots_per_epoch {
|
||||
bank = Arc::new(new_from_parent(&bank));
|
||||
}
|
||||
assert_eq!(bank.epoch(), 1);
|
||||
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
|
||||
assert_eq!(
|
||||
non_circulating_supply.lamports,
|
||||
num_non_circulating_accounts * new_balance
|
||||
);
|
||||
assert_eq!(
|
||||
non_circulating_supply.accounts.len(),
|
||||
num_non_circulating_accounts as usize
|
||||
);
|
||||
}
|
||||
}
|
@@ -1,10 +1,8 @@
|
||||
//! The `poh_service` module implements a service that records the passing of
|
||||
//! "ticks", a measure of time in the PoH stream
|
||||
use crate::poh_recorder::PohRecorder;
|
||||
use core_affinity;
|
||||
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
|
||||
use solana_sdk::poh_config::PohConfig;
|
||||
use solana_sys_tuner;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread::{self, sleep, Builder, JoinHandle};
|
||||
|
@@ -213,49 +213,4 @@ mod tests {
|
||||
assert_eq!(packets[i].meta.addr(), saddr2);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
#[test]
|
||||
pub fn test_recv_mmsg_batch_size() {
|
||||
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let addr = reader.local_addr().unwrap();
|
||||
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
|
||||
const TEST_BATCH_SIZE: usize = 64;
|
||||
let sent = TEST_BATCH_SIZE;
|
||||
|
||||
let mut elapsed_in_max_batch = 0;
|
||||
(0..1000).for_each(|_| {
|
||||
for _ in 0..sent {
|
||||
let data = [0; PACKET_DATA_SIZE];
|
||||
sender.send_to(&data[..], &addr).unwrap();
|
||||
}
|
||||
let mut packets = vec![Packet::default(); TEST_BATCH_SIZE];
|
||||
let now = Instant::now();
|
||||
let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1;
|
||||
elapsed_in_max_batch += now.elapsed().as_nanos();
|
||||
assert_eq!(TEST_BATCH_SIZE, recv);
|
||||
});
|
||||
|
||||
let mut elapsed_in_small_batch = 0;
|
||||
(0..1000).for_each(|_| {
|
||||
for _ in 0..sent {
|
||||
let data = [0; PACKET_DATA_SIZE];
|
||||
sender.send_to(&data[..], &addr).unwrap();
|
||||
}
|
||||
let mut packets = vec![Packet::default(); 4];
|
||||
let mut recv = 0;
|
||||
let now = Instant::now();
|
||||
while let Ok(num) = recv_mmsg(&reader, &mut packets[..]) {
|
||||
recv += num.1;
|
||||
if recv >= TEST_BATCH_SIZE {
|
||||
break;
|
||||
}
|
||||
}
|
||||
elapsed_in_small_batch += now.elapsed().as_nanos();
|
||||
assert_eq!(TEST_BATCH_SIZE, recv);
|
||||
});
|
||||
|
||||
assert!(elapsed_in_max_batch <= elapsed_in_small_batch);
|
||||
}
|
||||
}
|
||||
|
@@ -26,6 +26,7 @@ use solana_metrics::inc_new_counter_info;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
genesis_config::GenesisConfig,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
@@ -66,7 +67,6 @@ impl Drop for Finalizer {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ReplayStageConfig {
|
||||
pub my_pubkey: Pubkey,
|
||||
pub vote_account: Pubkey,
|
||||
@@ -80,6 +80,7 @@ pub struct ReplayStageConfig {
|
||||
pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
pub transaction_status_sender: Option<TransactionStatusSender>,
|
||||
pub rewards_recorder_sender: Option<RewardsRecorderSender>,
|
||||
pub genesis_config: GenesisConfig,
|
||||
}
|
||||
|
||||
pub struct ReplayStage {
|
||||
@@ -183,6 +184,7 @@ impl ReplayStage {
|
||||
block_commitment_cache,
|
||||
transaction_status_sender,
|
||||
rewards_recorder_sender,
|
||||
genesis_config,
|
||||
} = config;
|
||||
|
||||
let (root_bank_sender, root_bank_receiver) = channel();
|
||||
@@ -192,7 +194,7 @@ impl ReplayStage {
|
||||
// Start the replay stage loop
|
||||
|
||||
let (lockouts_sender, commitment_service) =
|
||||
AggregateCommitmentService::new(&exit, block_commitment_cache);
|
||||
AggregateCommitmentService::new(&exit, block_commitment_cache.clone());
|
||||
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
let t_replay = Builder::new()
|
||||
@@ -246,6 +248,7 @@ impl ReplayStage {
|
||||
&slot_full_senders,
|
||||
transaction_status_sender.clone(),
|
||||
&verify_recyclers,
|
||||
&genesis_config,
|
||||
);
|
||||
datapoint_debug!(
|
||||
"replay_stage-memory",
|
||||
@@ -253,13 +256,15 @@ impl ReplayStage {
|
||||
);
|
||||
|
||||
let ancestors = Arc::new(bank_forks.read().unwrap().ancestors());
|
||||
let forks_root = bank_forks.read().unwrap().root();
|
||||
let start = allocated.get();
|
||||
let mut frozen_banks: Vec<_> = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.frozen_banks()
|
||||
.values()
|
||||
.cloned()
|
||||
.into_iter()
|
||||
.filter(|(slot, _)| *slot >= forks_root)
|
||||
.map(|(_, bank)| bank)
|
||||
.collect();
|
||||
let newly_computed_slot_stats = Self::compute_bank_stats(
|
||||
&my_pubkey,
|
||||
@@ -309,7 +314,10 @@ impl ReplayStage {
|
||||
let start = allocated.get();
|
||||
if !is_locked_out && vote_threshold {
|
||||
info!("voting: {} {}", bank.slot(), fork_weight);
|
||||
subscriptions.notify_subscribers(bank.slot(), &bank_forks);
|
||||
subscriptions.notify_subscribers(
|
||||
block_commitment_cache.read().unwrap().slot(),
|
||||
&bank_forks,
|
||||
);
|
||||
if let Some(votable_leader) =
|
||||
leader_schedule_cache.slot_leader_at(bank.slot(), Some(&bank))
|
||||
{
|
||||
@@ -337,6 +345,7 @@ impl ReplayStage {
|
||||
&accounts_hash_sender,
|
||||
&latest_root_senders,
|
||||
&subscriptions,
|
||||
&block_commitment_cache,
|
||||
)?;
|
||||
}
|
||||
datapoint_debug!(
|
||||
@@ -548,6 +557,7 @@ impl ReplayStage {
|
||||
bank_progress: &mut ForkProgress,
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
verify_recyclers: &VerifyRecyclers,
|
||||
genesis_config: &GenesisConfig,
|
||||
) -> result::Result<usize, BlockstoreProcessorError> {
|
||||
let tx_count_before = bank_progress.replay_progress.num_txs;
|
||||
let confirm_result = blockstore_processor::confirm_slot(
|
||||
@@ -559,6 +569,7 @@ impl ReplayStage {
|
||||
transaction_status_sender,
|
||||
None,
|
||||
verify_recyclers,
|
||||
Some(genesis_config),
|
||||
);
|
||||
let tx_count_after = bank_progress.replay_progress.num_txs;
|
||||
let tx_count = tx_count_after - tx_count_before;
|
||||
@@ -609,6 +620,7 @@ impl ReplayStage {
|
||||
accounts_hash_sender: &Option<SnapshotPackageSender>,
|
||||
latest_root_senders: &[Sender<Slot>],
|
||||
subscriptions: &Arc<RpcSubscriptions>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> Result<()> {
|
||||
if bank.is_empty() {
|
||||
inc_new_counter_info!("replay_stage-voted_empty_bank", 1);
|
||||
@@ -634,7 +646,20 @@ impl ReplayStage {
|
||||
blockstore
|
||||
.set_roots(&rooted_slots)
|
||||
.expect("Ledger set roots failed");
|
||||
Self::handle_new_root(new_root, &bank_forks, progress, accounts_hash_sender);
|
||||
let largest_confirmed_root = Some(
|
||||
block_commitment_cache
|
||||
.read()
|
||||
.unwrap()
|
||||
.largest_confirmed_root(),
|
||||
);
|
||||
|
||||
Self::handle_new_root(
|
||||
new_root,
|
||||
&bank_forks,
|
||||
progress,
|
||||
accounts_hash_sender,
|
||||
largest_confirmed_root,
|
||||
);
|
||||
subscriptions.notify_roots(rooted_slots);
|
||||
latest_root_senders.iter().for_each(|s| {
|
||||
if let Err(e) = s.send(new_root) {
|
||||
@@ -647,7 +672,13 @@ impl ReplayStage {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
Self::update_commitment_cache(bank.clone(), total_staked, lockouts_sender);
|
||||
|
||||
Self::update_commitment_cache(
|
||||
bank.clone(),
|
||||
bank_forks.read().unwrap().root(),
|
||||
total_staked,
|
||||
lockouts_sender,
|
||||
);
|
||||
|
||||
if let Some(ref voting_keypair) = voting_keypair {
|
||||
let node_keypair = cluster_info.read().unwrap().keypair.clone();
|
||||
@@ -675,10 +706,13 @@ impl ReplayStage {
|
||||
|
||||
fn update_commitment_cache(
|
||||
bank: Arc<Bank>,
|
||||
root: Slot,
|
||||
total_staked: u64,
|
||||
lockouts_sender: &Sender<CommitmentAggregationData>,
|
||||
) {
|
||||
if let Err(e) = lockouts_sender.send(CommitmentAggregationData::new(bank, total_staked)) {
|
||||
if let Err(e) =
|
||||
lockouts_sender.send(CommitmentAggregationData::new(bank, root, total_staked))
|
||||
{
|
||||
trace!("lockouts_sender failed: {:?}", e);
|
||||
}
|
||||
}
|
||||
@@ -725,6 +759,7 @@ impl ReplayStage {
|
||||
slot_full_senders: &[Sender<(u64, Pubkey)>],
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
verify_recyclers: &VerifyRecyclers,
|
||||
genesis_config: &GenesisConfig,
|
||||
) -> bool {
|
||||
let mut did_complete_bank = false;
|
||||
let mut tx_count = 0;
|
||||
@@ -753,6 +788,7 @@ impl ReplayStage {
|
||||
bank_progress,
|
||||
transaction_status_sender.clone(),
|
||||
verify_recyclers,
|
||||
genesis_config,
|
||||
);
|
||||
match replay_result {
|
||||
Ok(replay_tx_count) => tx_count += replay_tx_count,
|
||||
@@ -959,15 +995,17 @@ impl ReplayStage {
|
||||
}
|
||||
|
||||
pub(crate) fn handle_new_root(
|
||||
new_root: u64,
|
||||
new_root: Slot,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
progress: &mut HashMap<u64, ForkProgress>,
|
||||
accounts_hash_sender: &Option<SnapshotPackageSender>,
|
||||
largest_confirmed_root: Option<Slot>,
|
||||
) {
|
||||
bank_forks
|
||||
.write()
|
||||
.unwrap()
|
||||
.set_root(new_root, accounts_hash_sender);
|
||||
bank_forks.write().unwrap().set_root(
|
||||
new_root,
|
||||
accounts_hash_sender,
|
||||
largest_confirmed_root,
|
||||
);
|
||||
let r_bank_forks = bank_forks.read().unwrap();
|
||||
progress.retain(|k, _| r_bank_forks.get(*k).is_some());
|
||||
}
|
||||
@@ -996,7 +1034,11 @@ impl ReplayStage {
|
||||
// Find the next slot that chains to the old slot
|
||||
let forks = forks_lock.read().unwrap();
|
||||
let frozen_banks = forks.frozen_banks();
|
||||
let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect();
|
||||
let frozen_bank_slots: Vec<u64> = frozen_banks
|
||||
.keys()
|
||||
.cloned()
|
||||
.filter(|s| *s >= forks.root())
|
||||
.collect();
|
||||
let next_slots = blockstore
|
||||
.get_slots_since(&frozen_bank_slots)
|
||||
.expect("Db error");
|
||||
@@ -1397,7 +1439,12 @@ pub(crate) mod tests {
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
))),
|
||||
));
|
||||
let bank_forks = BankForks::new(0, bank0);
|
||||
bank_forks.working_bank().freeze();
|
||||
|
||||
@@ -1449,12 +1496,58 @@ pub(crate) mod tests {
|
||||
for i in 0..=root {
|
||||
progress.insert(i, ForkProgress::new(Hash::default()));
|
||||
}
|
||||
ReplayStage::handle_new_root(root, &bank_forks, &mut progress, &None);
|
||||
ReplayStage::handle_new_root(root, &bank_forks, &mut progress, &None, None);
|
||||
assert_eq!(bank_forks.read().unwrap().root(), root);
|
||||
assert_eq!(progress.len(), 1);
|
||||
assert!(progress.get(&root).is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_handle_new_root_ahead_of_largest_confirmed_root() {
|
||||
let genesis_config = create_genesis_config(10_000).genesis_config;
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank0)));
|
||||
let confirmed_root = 1;
|
||||
let fork = 2;
|
||||
let bank1 = Bank::new_from_parent(
|
||||
bank_forks.read().unwrap().get(0).unwrap(),
|
||||
&Pubkey::default(),
|
||||
confirmed_root,
|
||||
);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
let bank2 = Bank::new_from_parent(
|
||||
bank_forks.read().unwrap().get(confirmed_root).unwrap(),
|
||||
&Pubkey::default(),
|
||||
fork,
|
||||
);
|
||||
bank_forks.write().unwrap().insert(bank2);
|
||||
let root = 3;
|
||||
let root_bank = Bank::new_from_parent(
|
||||
bank_forks.read().unwrap().get(confirmed_root).unwrap(),
|
||||
&Pubkey::default(),
|
||||
root,
|
||||
);
|
||||
bank_forks.write().unwrap().insert(root_bank);
|
||||
let mut progress = HashMap::new();
|
||||
for i in 0..=root {
|
||||
progress.insert(i, ForkProgress::new(Hash::default()));
|
||||
}
|
||||
ReplayStage::handle_new_root(
|
||||
root,
|
||||
&bank_forks,
|
||||
&mut progress,
|
||||
&None,
|
||||
Some(confirmed_root),
|
||||
);
|
||||
assert_eq!(bank_forks.read().unwrap().root(), root);
|
||||
assert!(bank_forks.read().unwrap().get(confirmed_root).is_some());
|
||||
assert!(bank_forks.read().unwrap().get(fork).is_none());
|
||||
assert_eq!(progress.len(), 2);
|
||||
assert!(progress.get(&root).is_some());
|
||||
assert!(progress.get(&confirmed_root).is_some());
|
||||
assert!(progress.get(&fork).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dead_fork_transaction_error() {
|
||||
let keypair1 = Keypair::new();
|
||||
@@ -1694,6 +1787,7 @@ pub(crate) mod tests {
|
||||
&mut bank0_progress,
|
||||
None,
|
||||
&VerifyRecyclers::default(),
|
||||
&genesis_config,
|
||||
);
|
||||
|
||||
// Check that the erroring bank was marked as dead in the progress map
|
||||
@@ -1721,7 +1815,11 @@ pub(crate) mod tests {
|
||||
bank.store_account(&pubkey, &leader_vote_account);
|
||||
}
|
||||
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
let (lockouts_sender, _) = AggregateCommitmentService::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
block_commitment_cache.clone(),
|
||||
@@ -1766,7 +1864,12 @@ pub(crate) mod tests {
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
let arc_bank1 = bank_forks.read().unwrap().get(1).unwrap().clone();
|
||||
leader_vote(&arc_bank1, &leader_voting_pubkey);
|
||||
ReplayStage::update_commitment_cache(arc_bank1.clone(), leader_lamports, &lockouts_sender);
|
||||
ReplayStage::update_commitment_cache(
|
||||
arc_bank1.clone(),
|
||||
0,
|
||||
leader_lamports,
|
||||
&lockouts_sender,
|
||||
);
|
||||
|
||||
let bank2 = Bank::new_from_parent(&arc_bank1, &Pubkey::default(), arc_bank1.slot() + 1);
|
||||
let _res = bank2.transfer(10, &genesis_config_info.mint_keypair, &Pubkey::new_rand());
|
||||
@@ -1777,7 +1880,12 @@ pub(crate) mod tests {
|
||||
bank_forks.write().unwrap().insert(bank2);
|
||||
let arc_bank2 = bank_forks.read().unwrap().get(2).unwrap().clone();
|
||||
leader_vote(&arc_bank2, &leader_voting_pubkey);
|
||||
ReplayStage::update_commitment_cache(arc_bank2.clone(), leader_lamports, &lockouts_sender);
|
||||
ReplayStage::update_commitment_cache(
|
||||
arc_bank2.clone(),
|
||||
0,
|
||||
leader_lamports,
|
||||
&lockouts_sender,
|
||||
);
|
||||
thread::sleep(Duration::from_millis(200));
|
||||
|
||||
let mut expected0 = BlockCommitment::default();
|
||||
@@ -1907,13 +2015,23 @@ pub(crate) mod tests {
|
||||
{
|
||||
if let EncodedTransaction::Json(transaction) = transaction {
|
||||
if transaction.signatures[0] == signatures[0].to_string() {
|
||||
assert_eq!(meta.unwrap().status, Ok(()));
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(meta.err, None);
|
||||
assert_eq!(meta.status, Ok(()));
|
||||
} else if transaction.signatures[0] == signatures[1].to_string() {
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(
|
||||
meta.unwrap().status,
|
||||
meta.err,
|
||||
Some(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::Custom(1)
|
||||
))
|
||||
);
|
||||
assert_eq!(
|
||||
meta.status,
|
||||
Err(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::CustomError(1)
|
||||
InstructionError::Custom(1)
|
||||
))
|
||||
);
|
||||
} else {
|
||||
|
877
core/src/rpc.rs
877
core/src/rpc.rs
File diff suppressed because it is too large
Load Diff
@@ -4,9 +4,16 @@ use crate::rpc_subscriptions::{Confirmations, RpcSubscriptions, SlotInfo};
|
||||
use jsonrpc_core::{Error, ErrorCode, Result};
|
||||
use jsonrpc_derive::rpc;
|
||||
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
|
||||
use solana_client::rpc_response::{Response as RpcResponse, RpcAccount, RpcKeyedAccount};
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature, transaction};
|
||||
use std::sync::{atomic, Arc};
|
||||
use solana_client::rpc_response::{
|
||||
Response as RpcResponse, RpcAccount, RpcKeyedAccount, RpcSignatureResult,
|
||||
};
|
||||
#[cfg(test)]
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
|
||||
use std::{
|
||||
str::FromStr,
|
||||
sync::{atomic, Arc},
|
||||
};
|
||||
|
||||
// Suppress needless_return due to
|
||||
// https://github.com/paritytech/jsonrpc/blob/2d38e6424d8461cdf72e78425ce67d51af9c6586/derive/src/lib.rs#L204
|
||||
@@ -74,7 +81,7 @@ pub trait RpcSolPubSub {
|
||||
fn signature_subscribe(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<transaction::Result<()>>>,
|
||||
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
|
||||
signature_str: String,
|
||||
confirmations: Option<Confirmations>,
|
||||
);
|
||||
@@ -116,7 +123,6 @@ pub trait RpcSolPubSub {
|
||||
fn root_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct RpcSolPubSubImpl {
|
||||
uid: Arc<atomic::AtomicUsize>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
@@ -127,9 +133,14 @@ impl RpcSolPubSubImpl {
|
||||
let uid = Arc::new(atomic::AtomicUsize::default());
|
||||
Self { uid, subscriptions }
|
||||
}
|
||||
}
|
||||
|
||||
use std::str::FromStr;
|
||||
#[cfg(test)]
|
||||
fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
|
||||
let uid = Arc::new(atomic::AtomicUsize::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::default_with_blockstore(blockstore));
|
||||
Self { uid, subscriptions }
|
||||
}
|
||||
}
|
||||
|
||||
fn param<T: FromStr>(param_str: &str, thing: &str) -> Result<T> {
|
||||
param_str.parse::<T>().map_err(|_e| Error {
|
||||
@@ -225,7 +236,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
fn signature_subscribe(
|
||||
&self,
|
||||
_meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<transaction::Result<()>>>,
|
||||
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
|
||||
signature_str: String,
|
||||
confirmations: Option<Confirmations>,
|
||||
) {
|
||||
@@ -312,12 +323,16 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use crate::rpc_subscriptions::tests::robust_poll_or_panic;
|
||||
use crate::{
|
||||
commitment::{BlockCommitment, BlockCommitmentCache},
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
rpc_subscriptions::tests::robust_poll_or_panic,
|
||||
};
|
||||
use jsonrpc_core::{futures::sync::mpsc, Response};
|
||||
use jsonrpc_pubsub::{PubSubHandler, Session};
|
||||
use serial_test_derive::serial;
|
||||
use solana_budget_program::{self, budget_instruction};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_ledger::{bank_forks::BankForks, get_tmp_ledger_path};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
@@ -325,7 +340,12 @@ mod tests {
|
||||
system_program, system_transaction,
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use std::{sync::RwLock, thread::sleep, time::Duration};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{atomic::AtomicBool, RwLock},
|
||||
thread::sleep,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
fn process_transaction_and_notify(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
@@ -347,6 +367,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_signature_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@@ -358,8 +379,17 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rpc = RpcSolPubSubImpl {
|
||||
subscriptions: Arc::new(RpcSubscriptions::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
)),
|
||||
uid: Arc::new(atomic::AtomicUsize::default()),
|
||||
};
|
||||
|
||||
// Test signature subscriptions
|
||||
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
|
||||
@@ -372,7 +402,7 @@ mod tests {
|
||||
|
||||
// Test signature confirmation notification
|
||||
let (response, _) = robust_poll_or_panic(receiver);
|
||||
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
|
||||
let expected_res = RpcSignatureResult { err: None };
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
@@ -388,6 +418,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_signature_unsubscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@@ -398,11 +429,13 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let arc_bank = Arc::new(bank);
|
||||
let blockhash = arc_bank.last_blockhash();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
|
||||
let session = create_session();
|
||||
|
||||
let mut io = PubSubHandler::default();
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
|
||||
io.extend_with(rpc.to_delegate());
|
||||
|
||||
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
|
||||
@@ -436,6 +469,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_account_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
mut genesis_config,
|
||||
@@ -456,8 +490,18 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let rpc = RpcSolPubSubImpl {
|
||||
subscriptions: Arc::new(RpcSubscriptions::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
)),
|
||||
uid: Arc::new(atomic::AtomicUsize::default()),
|
||||
};
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
|
||||
rpc.account_subscribe(
|
||||
@@ -540,12 +584,15 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_account_unsubscribe() {
|
||||
let bob_pubkey = Pubkey::new_rand();
|
||||
let session = create_session();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
|
||||
let mut io = PubSubHandler::default();
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
|
||||
|
||||
io.extend_with(rpc.to_delegate());
|
||||
|
||||
@@ -589,9 +636,19 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let bob = Keypair::new();
|
||||
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let mut rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore.clone());
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
);
|
||||
rpc.subscriptions = Arc::new(subscriptions);
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
|
||||
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
|
||||
@@ -620,9 +677,18 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let bob = Keypair::new();
|
||||
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let mut rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore.clone());
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
|
||||
let subscriptions = RpcSubscriptions::new(&exit, block_commitment_cache.clone());
|
||||
rpc.subscriptions = Arc::new(subscriptions);
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
|
||||
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
|
||||
@@ -640,10 +706,39 @@ mod tests {
|
||||
let bank0 = bank_forks.read().unwrap()[0].clone();
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
rpc.subscriptions.notify_subscribers(1, &bank_forks);
|
||||
let bank1 = bank_forks.read().unwrap()[1].clone();
|
||||
|
||||
let mut cache0 = BlockCommitment::default();
|
||||
cache0.increase_confirmation_stake(1, 10);
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
0,
|
||||
10,
|
||||
bank1.clone(),
|
||||
blockstore.clone(),
|
||||
0,
|
||||
);
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
drop(w_block_commitment_cache);
|
||||
|
||||
rpc.subscriptions.notify_subscribers(1, &bank_forks);
|
||||
let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2);
|
||||
bank_forks.write().unwrap().insert(bank2);
|
||||
let bank2 = bank_forks.read().unwrap()[2].clone();
|
||||
|
||||
let mut cache0 = BlockCommitment::default();
|
||||
cache0.increase_confirmation_stake(2, 10);
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
let mut new_block_commitment =
|
||||
BlockCommitmentCache::new(block_commitment, 0, 10, bank2, blockstore.clone(), 0);
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
drop(w_block_commitment_cache);
|
||||
|
||||
rpc.subscriptions.notify_subscribers(2, &bank_forks);
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
@@ -667,8 +762,11 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_slot_subscribe() {
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
|
||||
rpc.slot_subscribe(session, subscriber);
|
||||
@@ -691,8 +789,11 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_slot_unsubscribe() {
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
|
||||
rpc.slot_subscribe(session, subscriber);
|
||||
|
@@ -1,14 +1,20 @@
|
||||
//! The `pubsub` module implements a threaded subscription service on client RPC request
|
||||
|
||||
use crate::rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl};
|
||||
use crate::rpc_subscriptions::RpcSubscriptions;
|
||||
use crate::{
|
||||
rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
};
|
||||
use jsonrpc_pubsub::{PubSubHandler, Session};
|
||||
use jsonrpc_ws_server::{RequestContext, ServerBuilder};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{self, sleep, Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread::{self, sleep, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
pub struct PubSubService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
@@ -66,13 +72,25 @@ impl PubSubService {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use crate::commitment::BlockCommitmentCache;
|
||||
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr},
|
||||
sync::RwLock,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_pubsub_new() {
|
||||
let pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
));
|
||||
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
|
||||
let thread = pubsub_service.thread_hdl.thread();
|
||||
assert_eq!(thread.name().unwrap(), "solana-pubsub");
|
||||
|
@@ -15,8 +15,9 @@ use solana_ledger::{
|
||||
blockstore::Blockstore,
|
||||
snapshot_utils,
|
||||
};
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::{hash::Hash, pubkey::Pubkey};
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
net::SocketAddr,
|
||||
path::{Path, PathBuf},
|
||||
sync::{mpsc::channel, Arc, RwLock},
|
||||
@@ -24,6 +25,10 @@ use std::{
|
||||
};
|
||||
use tokio::prelude::Future;
|
||||
|
||||
// If trusted validators are specified, consider this validator healthy if its latest account hash
|
||||
// is no further behind than this distance from the latest trusted validator account hash
|
||||
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;
|
||||
|
||||
pub struct JsonRpcService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
|
||||
@@ -37,15 +42,24 @@ struct RpcRequestMiddleware {
|
||||
ledger_path: PathBuf,
|
||||
snapshot_archive_path_regex: Regex,
|
||||
snapshot_config: Option<SnapshotConfig>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
}
|
||||
|
||||
impl RpcRequestMiddleware {
|
||||
pub fn new(ledger_path: PathBuf, snapshot_config: Option<SnapshotConfig>) -> Self {
|
||||
pub fn new(
|
||||
ledger_path: PathBuf,
|
||||
snapshot_config: Option<SnapshotConfig>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
ledger_path,
|
||||
snapshot_archive_path_regex: Regex::new(r"/snapshot-\d+-[[:alnum:]]+\.tar\.bz2$")
|
||||
.unwrap(),
|
||||
snapshot_config,
|
||||
cluster_info,
|
||||
trusted_validators,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -104,6 +118,58 @@ impl RpcRequestMiddleware {
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
fn health_check(&self) -> &'static str {
|
||||
let response = if let Some(trusted_validators) = &self.trusted_validators {
|
||||
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
|
||||
let cluster_info = self.cluster_info.read().unwrap();
|
||||
(
|
||||
cluster_info
|
||||
.get_accounts_hash_for_node(&cluster_info.id())
|
||||
.map(|hashes| hashes.iter().max_by(|a, b| a.0.cmp(&b.0)))
|
||||
.flatten()
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
.unwrap_or(0),
|
||||
trusted_validators
|
||||
.iter()
|
||||
.map(|trusted_validator| {
|
||||
cluster_info
|
||||
.get_accounts_hash_for_node(&trusted_validator)
|
||||
.map(|hashes| hashes.iter().max_by(|a, b| a.0.cmp(&b.0)))
|
||||
.flatten()
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
.unwrap_or(0)
|
||||
})
|
||||
.max()
|
||||
.unwrap_or(0),
|
||||
)
|
||||
};
|
||||
|
||||
// This validator is considered healthy if its latest account hash slot is within
|
||||
// `HEALTH_CHECK_SLOT_DISTANCE` of the latest trusted validator's account hash slot
|
||||
if latest_account_hash_slot > 0
|
||||
&& latest_trusted_validator_account_hash_slot > 0
|
||||
&& latest_account_hash_slot
|
||||
> latest_trusted_validator_account_hash_slot
|
||||
.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
|
||||
{
|
||||
"ok"
|
||||
} else {
|
||||
warn!(
|
||||
"health check: me={}, latest trusted_validator={}",
|
||||
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
|
||||
);
|
||||
"behind"
|
||||
}
|
||||
} else {
|
||||
// No trusted validator point of reference available, so this validator is healthy
|
||||
// because it's running
|
||||
"ok"
|
||||
};
|
||||
|
||||
info!("health check: {}", response);
|
||||
response
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestMiddleware for RpcRequestMiddleware {
|
||||
@@ -138,6 +204,16 @@ impl RequestMiddleware for RpcRequestMiddleware {
|
||||
}
|
||||
if self.is_get_path(request.uri().path()) {
|
||||
self.get(request.uri().path())
|
||||
} else if request.uri().path() == "/health" {
|
||||
RequestMiddlewareAction::Respond {
|
||||
should_validate_hosts: true,
|
||||
response: Box::new(jsonrpc_core::futures::future::ok(
|
||||
hyper::Response::builder()
|
||||
.status(hyper::StatusCode::OK)
|
||||
.body(hyper::Body::from(self.health_check()))
|
||||
.unwrap(),
|
||||
)),
|
||||
}
|
||||
} else {
|
||||
RequestMiddlewareAction::Proceed {
|
||||
should_continue_on_invalid_cors: false,
|
||||
@@ -161,6 +237,7 @@ impl JsonRpcService {
|
||||
ledger_path: &Path,
|
||||
storage_state: StorageState,
|
||||
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
) -> Self {
|
||||
info!("rpc bound to {:?}", rpc_addr);
|
||||
info!("rpc configuration: {:?}", config);
|
||||
@@ -186,20 +263,35 @@ impl JsonRpcService {
|
||||
let rpc = RpcSolImpl;
|
||||
io.extend_with(rpc.to_delegate());
|
||||
|
||||
let server =
|
||||
ServerBuilder::with_meta_extractor(io, move |_req: &hyper::Request<hyper::Body>| Meta {
|
||||
let request_middleware = RpcRequestMiddleware::new(
|
||||
ledger_path,
|
||||
snapshot_config,
|
||||
cluster_info.clone(),
|
||||
trusted_validators,
|
||||
);
|
||||
let server = ServerBuilder::with_meta_extractor(
|
||||
io,
|
||||
move |_req: &hyper::Request<hyper::Body>| Meta {
|
||||
request_processor: request_processor.clone(),
|
||||
cluster_info: cluster_info.clone(),
|
||||
genesis_hash
|
||||
}).threads(4)
|
||||
.cors(DomainsValidation::AllowOnly(vec![
|
||||
AccessControlAllowOrigin::Any,
|
||||
]))
|
||||
.cors_max_age(86400)
|
||||
.request_middleware(RpcRequestMiddleware::new(ledger_path, snapshot_config))
|
||||
.start_http(&rpc_addr);
|
||||
genesis_hash,
|
||||
},
|
||||
)
|
||||
.threads(4)
|
||||
.cors(DomainsValidation::AllowOnly(vec![
|
||||
AccessControlAllowOrigin::Any,
|
||||
]))
|
||||
.cors_max_age(86400)
|
||||
.request_middleware(request_middleware)
|
||||
.start_http(&rpc_addr);
|
||||
|
||||
if let Err(e) = server {
|
||||
warn!("JSON RPC service unavailable error: {:?}. \nAlso, check that port {} is not already in use by another application", e, rpc_addr.port());
|
||||
warn!(
|
||||
"JSON RPC service unavailable error: {:?}. \n\
|
||||
Also, check that port {} is not already in use by another application",
|
||||
e,
|
||||
rpc_addr.port()
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -240,6 +332,7 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
contact_info::ContactInfo,
|
||||
crds_value::{CrdsData, CrdsValue, SnapshotHash},
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
rpc::tests::create_validator_exit,
|
||||
};
|
||||
@@ -268,21 +361,24 @@ mod tests {
|
||||
solana_net_utils::find_available_port_in_range(ip_addr, (10000, 65535)).unwrap(),
|
||||
);
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&ledger_path).unwrap();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
let mut rpc_service = JsonRpcService::new(
|
||||
rpc_addr,
|
||||
JsonRpcConfig::default(),
|
||||
None,
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
Arc::new(blockstore),
|
||||
blockstore,
|
||||
cluster_info,
|
||||
Hash::default(),
|
||||
&PathBuf::from("farf"),
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
None,
|
||||
);
|
||||
let thread = rpc_service.thread_hdl.thread();
|
||||
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
|
||||
@@ -303,7 +399,11 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_is_get_path() {
|
||||
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None);
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::default(),
|
||||
)));
|
||||
|
||||
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
|
||||
let rrm_with_snapshot_config = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
Some(SnapshotConfig {
|
||||
@@ -311,6 +411,8 @@ mod tests {
|
||||
snapshot_package_output_path: PathBuf::from("/"),
|
||||
snapshot_path: PathBuf::from("/"),
|
||||
}),
|
||||
cluster_info,
|
||||
None,
|
||||
);
|
||||
|
||||
assert!(rrm.is_get_path("/genesis.tar.bz2"));
|
||||
@@ -332,4 +434,95 @@ mod tests {
|
||||
assert!(!rrm.is_get_path(".."));
|
||||
assert!(!rrm.is_get_path("🎣"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_health_check_with_no_trusted_validators() {
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::default(),
|
||||
)));
|
||||
|
||||
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_health_check_with_trusted_validators() {
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::default(),
|
||||
)));
|
||||
|
||||
let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];
|
||||
let rm = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
None,
|
||||
cluster_info.clone(),
|
||||
Some(trusted_validators.clone().into_iter().collect()),
|
||||
);
|
||||
|
||||
// No account hashes for this node or any trusted validators == "behind"
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
|
||||
// No account hashes for any trusted validators == "behind"
|
||||
{
|
||||
let mut cluster_info = cluster_info.write().unwrap();
|
||||
cluster_info
|
||||
.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]);
|
||||
}
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
|
||||
// This node is ahead of the trusted validators == "ok"
|
||||
{
|
||||
let mut cluster_info = cluster_info.write().unwrap();
|
||||
cluster_info
|
||||
.gossip
|
||||
.crds
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[0].clone(),
|
||||
vec![
|
||||
(1, Hash::default()),
|
||||
(1001, Hash::default()),
|
||||
(2, Hash::default()),
|
||||
],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
|
||||
// Node is slightly behind the trusted validators == "ok"
|
||||
{
|
||||
let mut cluster_info = cluster_info.write().unwrap();
|
||||
cluster_info
|
||||
.gossip
|
||||
.crds
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[1].clone(),
|
||||
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE - 1, Hash::default())],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
|
||||
// Node is far behind the trusted validators == "behind"
|
||||
{
|
||||
let mut cluster_info = cluster_info.write().unwrap();
|
||||
cluster_info
|
||||
.gossip
|
||||
.crds
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[2].clone(),
|
||||
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE, Hash::default())],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
}
|
||||
}
|
||||
|
@@ -1,5 +1,6 @@
|
||||
//! The `pubsub` module implements a threaded subscription service on client RPC request
|
||||
|
||||
use crate::commitment::BlockCommitmentCache;
|
||||
use core::hash::Hash;
|
||||
use jsonrpc_core::futures::Future;
|
||||
use jsonrpc_pubsub::{
|
||||
@@ -7,18 +8,23 @@ use jsonrpc_pubsub::{
|
||||
SubscriptionId,
|
||||
};
|
||||
use serde::Serialize;
|
||||
use solana_client::rpc_response::{Response, RpcAccount, RpcKeyedAccount, RpcResponseContext};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_client::rpc_response::{
|
||||
Response, RpcAccount, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult,
|
||||
};
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
account::Account, clock::Slot, pubkey::Pubkey, signature::Signature, transaction,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{Receiver, RecvTimeoutError, SendError, Sender};
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::{Receiver, RecvTimeoutError, SendError, Sender},
|
||||
};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use std::{
|
||||
cmp::min,
|
||||
collections::{HashMap, HashSet},
|
||||
iter,
|
||||
sync::{Arc, Mutex, RwLock},
|
||||
@@ -62,7 +68,7 @@ type RpcProgramSubscriptions = RwLock<
|
||||
type RpcSignatureSubscriptions = RwLock<
|
||||
HashMap<
|
||||
Signature,
|
||||
HashMap<SubscriptionId, (Sink<Response<transaction::Result<()>>>, Confirmations)>,
|
||||
HashMap<SubscriptionId, (Sink<Response<RpcSignatureResult>>, Confirmations)>,
|
||||
>,
|
||||
>;
|
||||
type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>;
|
||||
@@ -80,11 +86,7 @@ fn add_subscription<K, S>(
|
||||
{
|
||||
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
|
||||
let confirmations = confirmations.unwrap_or(0);
|
||||
let confirmations = if confirmations > MAX_LOCKOUT_HISTORY {
|
||||
MAX_LOCKOUT_HISTORY
|
||||
} else {
|
||||
confirmations
|
||||
};
|
||||
let confirmations = min(confirmations, MAX_LOCKOUT_HISTORY + 1);
|
||||
if let Some(current_hashmap) = subscriptions.get_mut(&hashmap_key) {
|
||||
current_hashmap.insert(sub_id, (sink, confirmations));
|
||||
return;
|
||||
@@ -120,8 +122,8 @@ where
|
||||
fn check_confirmations_and_notify<K, S, B, F, X>(
|
||||
subscriptions: &HashMap<K, HashMap<SubscriptionId, (Sink<Response<S>>, Confirmations)>>,
|
||||
hashmap_key: &K,
|
||||
current_slot: Slot,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
bank_method: B,
|
||||
filter_results: F,
|
||||
notifier: &RpcNotifier,
|
||||
@@ -133,6 +135,10 @@ where
|
||||
F: Fn(X, u64) -> Box<dyn Iterator<Item = S>>,
|
||||
X: Clone + Serialize,
|
||||
{
|
||||
let mut confirmation_slots: HashMap<usize, Slot> = HashMap::new();
|
||||
let r_block_commitment_cache = block_commitment_cache.read().unwrap();
|
||||
let current_slot = r_block_commitment_cache.slot();
|
||||
let root = r_block_commitment_cache.root();
|
||||
let current_ancestors = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
@@ -140,27 +146,29 @@ where
|
||||
.unwrap()
|
||||
.ancestors
|
||||
.clone();
|
||||
for (slot, _) in current_ancestors.iter() {
|
||||
if let Some(confirmations) = r_block_commitment_cache.get_confirmation_count(*slot) {
|
||||
confirmation_slots.entry(confirmations).or_insert(*slot);
|
||||
}
|
||||
}
|
||||
drop(r_block_commitment_cache);
|
||||
|
||||
let mut notified_set: HashSet<SubscriptionId> = HashSet::new();
|
||||
if let Some(hashmap) = subscriptions.get(hashmap_key) {
|
||||
for (sub_id, (sink, confirmations)) in hashmap.iter() {
|
||||
let desired_slot: Vec<u64> = current_ancestors
|
||||
.iter()
|
||||
.filter(|(_, &v)| v == *confirmations)
|
||||
.map(|(k, _)| k)
|
||||
.cloned()
|
||||
.collect();
|
||||
let root: Vec<u64> = current_ancestors
|
||||
.iter()
|
||||
.filter(|(_, &v)| v == 32)
|
||||
.map(|(k, _)| k)
|
||||
.cloned()
|
||||
.collect();
|
||||
let root = if root.len() == 1 { root[0] } else { 0 };
|
||||
if desired_slot.len() == 1 {
|
||||
let slot = desired_slot[0];
|
||||
let desired_bank = bank_forks.read().unwrap().get(slot).unwrap().clone();
|
||||
let results = bank_method(&desired_bank, hashmap_key);
|
||||
let desired_slot = if *confirmations == 0 {
|
||||
Some(¤t_slot)
|
||||
} else if *confirmations == MAX_LOCKOUT_HISTORY + 1 {
|
||||
Some(&root)
|
||||
} else {
|
||||
confirmation_slots.get(confirmations)
|
||||
};
|
||||
if let Some(&slot) = desired_slot {
|
||||
let results = {
|
||||
let bank_forks = bank_forks.read().unwrap();
|
||||
let desired_bank = bank_forks.get(slot).unwrap();
|
||||
bank_method(&desired_bank, hashmap_key)
|
||||
};
|
||||
for result in filter_results(results, root) {
|
||||
notifier.notify(
|
||||
Response {
|
||||
@@ -201,11 +209,15 @@ fn filter_account_result(
|
||||
Box::new(iter::empty())
|
||||
}
|
||||
|
||||
fn filter_signature_result<S>(result: Option<S>, _root: Slot) -> Box<dyn Iterator<Item = S>>
|
||||
where
|
||||
S: 'static + Clone + Serialize,
|
||||
{
|
||||
Box::new(result.into_iter())
|
||||
fn filter_signature_result(
|
||||
result: Option<transaction::Result<()>>,
|
||||
_root: Slot,
|
||||
) -> Box<dyn Iterator<Item = RpcSignatureResult>> {
|
||||
Box::new(
|
||||
result
|
||||
.into_iter()
|
||||
.map(|result| RpcSignatureResult { err: result.err() }),
|
||||
)
|
||||
}
|
||||
|
||||
fn filter_program_results(
|
||||
@@ -234,12 +246,6 @@ pub struct RpcSubscriptions {
|
||||
exit: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl Default for RpcSubscriptions {
|
||||
fn default() -> Self {
|
||||
Self::new(&Arc::new(AtomicBool::new(false)))
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for RpcSubscriptions {
|
||||
fn drop(&mut self) {
|
||||
self.shutdown().unwrap_or_else(|err| {
|
||||
@@ -249,7 +255,10 @@ impl Drop for RpcSubscriptions {
|
||||
}
|
||||
|
||||
impl RpcSubscriptions {
|
||||
pub fn new(exit: &Arc<AtomicBool>) -> Self {
|
||||
pub fn new(
|
||||
exit: &Arc<AtomicBool>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> Self {
|
||||
let (notification_sender, notification_receiver): (
|
||||
Sender<NotificationEntry>,
|
||||
Receiver<NotificationEntry>,
|
||||
@@ -288,6 +297,7 @@ impl RpcSubscriptions {
|
||||
signature_subscriptions_clone,
|
||||
slot_subscriptions_clone,
|
||||
root_subscriptions_clone,
|
||||
block_commitment_cache,
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
@@ -305,10 +315,19 @@ impl RpcSubscriptions {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
|
||||
Self::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore,
|
||||
))),
|
||||
)
|
||||
}
|
||||
|
||||
fn check_account(
|
||||
pubkey: &Pubkey,
|
||||
current_slot: Slot,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
account_subscriptions: Arc<RpcAccountSubscriptions>,
|
||||
notifier: &RpcNotifier,
|
||||
) {
|
||||
@@ -316,8 +335,8 @@ impl RpcSubscriptions {
|
||||
check_confirmations_and_notify(
|
||||
&subscriptions,
|
||||
pubkey,
|
||||
current_slot,
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
Bank::get_account_modified_since_parent,
|
||||
filter_account_result,
|
||||
notifier,
|
||||
@@ -326,8 +345,8 @@ impl RpcSubscriptions {
|
||||
|
||||
fn check_program(
|
||||
program_id: &Pubkey,
|
||||
current_slot: Slot,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
program_subscriptions: Arc<RpcProgramSubscriptions>,
|
||||
notifier: &RpcNotifier,
|
||||
) {
|
||||
@@ -335,8 +354,8 @@ impl RpcSubscriptions {
|
||||
check_confirmations_and_notify(
|
||||
&subscriptions,
|
||||
program_id,
|
||||
current_slot,
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
Bank::get_program_accounts_modified_since_parent,
|
||||
filter_program_results,
|
||||
notifier,
|
||||
@@ -345,8 +364,8 @@ impl RpcSubscriptions {
|
||||
|
||||
fn check_signature(
|
||||
signature: &Signature,
|
||||
current_slot: Slot,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
|
||||
notifier: &RpcNotifier,
|
||||
) {
|
||||
@@ -354,8 +373,8 @@ impl RpcSubscriptions {
|
||||
let notified_ids = check_confirmations_and_notify(
|
||||
&subscriptions,
|
||||
signature,
|
||||
current_slot,
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
Bank::get_signature_status_processed_since_parent,
|
||||
filter_signature_result,
|
||||
notifier,
|
||||
@@ -417,7 +436,7 @@ impl RpcSubscriptions {
|
||||
signature: Signature,
|
||||
confirmations: Option<Confirmations>,
|
||||
sub_id: SubscriptionId,
|
||||
subscriber: Subscriber<Response<transaction::Result<()>>>,
|
||||
subscriber: Subscriber<Response<RpcSignatureResult>>,
|
||||
) {
|
||||
let mut subscriptions = self.signature_subscriptions.write().unwrap();
|
||||
add_subscription(
|
||||
@@ -499,6 +518,7 @@ impl RpcSubscriptions {
|
||||
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
|
||||
slot_subscriptions: Arc<RpcSlotSubscriptions>,
|
||||
root_subscriptions: Arc<RpcRootSubscriptions>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
) {
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
@@ -518,7 +538,7 @@ impl RpcSubscriptions {
|
||||
notifier.notify(root, sink);
|
||||
}
|
||||
}
|
||||
NotificationEntry::Bank((current_slot, bank_forks)) => {
|
||||
NotificationEntry::Bank((_current_slot, bank_forks)) => {
|
||||
let pubkeys: Vec<_> = {
|
||||
let subs = account_subscriptions.read().unwrap();
|
||||
subs.keys().cloned().collect()
|
||||
@@ -526,8 +546,8 @@ impl RpcSubscriptions {
|
||||
for pubkey in &pubkeys {
|
||||
Self::check_account(
|
||||
pubkey,
|
||||
current_slot,
|
||||
&bank_forks,
|
||||
&block_commitment_cache,
|
||||
account_subscriptions.clone(),
|
||||
¬ifier,
|
||||
);
|
||||
@@ -540,8 +560,8 @@ impl RpcSubscriptions {
|
||||
for program_id in &programs {
|
||||
Self::check_program(
|
||||
program_id,
|
||||
current_slot,
|
||||
&bank_forks,
|
||||
&block_commitment_cache,
|
||||
program_subscriptions.clone(),
|
||||
¬ifier,
|
||||
);
|
||||
@@ -554,8 +574,8 @@ impl RpcSubscriptions {
|
||||
for signature in &signatures {
|
||||
Self::check_signature(
|
||||
signature,
|
||||
current_slot,
|
||||
&bank_forks,
|
||||
&block_commitment_cache,
|
||||
signature_subscriptions.clone(),
|
||||
¬ifier,
|
||||
);
|
||||
@@ -596,10 +616,15 @@ impl RpcSubscriptions {
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use crate::{
|
||||
commitment::BlockCommitment,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
};
|
||||
use jsonrpc_core::futures::{self, stream::Stream};
|
||||
use jsonrpc_pubsub::typed::Subscriber;
|
||||
use serial_test_derive::serial;
|
||||
use solana_budget_program;
|
||||
use solana_ledger::get_tmp_ledger_path;
|
||||
use solana_sdk::{
|
||||
signature::{Keypair, Signer},
|
||||
system_transaction,
|
||||
@@ -633,12 +658,15 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_account_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair,
|
||||
..
|
||||
} = create_genesis_config(100);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
@@ -663,7 +691,12 @@ pub(crate) mod tests {
|
||||
Subscriber::new_test("accountNotification");
|
||||
let sub_id = SubscriptionId::Number(0 as u64);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(&exit);
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
);
|
||||
subscriptions.add_account_subscription(alice.pubkey(), None, sub_id.clone(), subscriber);
|
||||
|
||||
assert!(subscriptions
|
||||
@@ -702,12 +735,15 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_program_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair,
|
||||
..
|
||||
} = create_genesis_config(100);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
@@ -732,7 +768,12 @@ pub(crate) mod tests {
|
||||
Subscriber::new_test("programNotification");
|
||||
let sub_id = SubscriptionId::Number(0 as u64);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(&exit);
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
);
|
||||
subscriptions.add_program_subscription(
|
||||
solana_budget_program::id(),
|
||||
None,
|
||||
@@ -779,12 +820,15 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_signature_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair,
|
||||
..
|
||||
} = create_genesis_config(100);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let mut bank_forks = BankForks::new(0, bank);
|
||||
@@ -812,27 +856,42 @@ pub(crate) mod tests {
|
||||
.unwrap()
|
||||
.process_transaction(&processed_tx)
|
||||
.unwrap();
|
||||
let bank1 = bank_forks[1].clone();
|
||||
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(&exit);
|
||||
let mut cache0 = BlockCommitment::default();
|
||||
cache0.increase_confirmation_stake(1, 10);
|
||||
let cache1 = BlockCommitment::default();
|
||||
|
||||
let (past_bank_sub, _id_receiver, past_bank_recv) =
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
block_commitment.entry(1).or_insert(cache1.clone());
|
||||
let block_commitment_cache =
|
||||
BlockCommitmentCache::new(block_commitment, 0, 10, bank1, blockstore, 0);
|
||||
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions =
|
||||
RpcSubscriptions::new(&exit, Arc::new(RwLock::new(block_commitment_cache)));
|
||||
|
||||
let (past_bank_sub1, _id_receiver, past_bank_recv1) =
|
||||
Subscriber::new_test("signatureNotification");
|
||||
let (past_bank_sub2, _id_receiver, past_bank_recv2) =
|
||||
Subscriber::new_test("signatureNotification");
|
||||
let (processed_sub, _id_receiver, processed_recv) =
|
||||
Subscriber::new_test("signatureNotification");
|
||||
|
||||
subscriptions.add_signature_subscription(
|
||||
past_bank_tx.signatures[0],
|
||||
Some(0),
|
||||
SubscriptionId::Number(1 as u64),
|
||||
Subscriber::new_test("signatureNotification").0,
|
||||
past_bank_sub1,
|
||||
);
|
||||
subscriptions.add_signature_subscription(
|
||||
past_bank_tx.signatures[0],
|
||||
Some(1),
|
||||
SubscriptionId::Number(2 as u64),
|
||||
past_bank_sub,
|
||||
past_bank_sub2,
|
||||
);
|
||||
subscriptions.add_signature_subscription(
|
||||
processed_tx.signatures[0],
|
||||
@@ -855,43 +914,48 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
subscriptions.notify_subscribers(1, &bank_forks);
|
||||
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
|
||||
let expected_res = RpcSignatureResult { err: None };
|
||||
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": { "slot": 0 },
|
||||
"value": expected_res,
|
||||
},
|
||||
"subscription": 2,
|
||||
}
|
||||
});
|
||||
let (response, _) = robust_poll_or_panic(past_bank_recv);
|
||||
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
|
||||
struct Notification {
|
||||
slot: Slot,
|
||||
id: u64,
|
||||
}
|
||||
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": { "slot": 1 },
|
||||
"value": expected_res,
|
||||
},
|
||||
"subscription": 3,
|
||||
}
|
||||
});
|
||||
let expected_notification = |exp: Notification| -> String {
|
||||
let json = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": { "slot": exp.slot },
|
||||
"value": &expected_res,
|
||||
},
|
||||
"subscription": exp.id,
|
||||
}
|
||||
});
|
||||
serde_json::to_string(&json).unwrap()
|
||||
};
|
||||
|
||||
// Expect to receive a notification from bank 1 because this subscription is
|
||||
// looking for 0 confirmations and so checks the current bank
|
||||
let expected = expected_notification(Notification { slot: 1, id: 1 });
|
||||
let (response, _) = robust_poll_or_panic(past_bank_recv1);
|
||||
assert_eq!(expected, response);
|
||||
|
||||
// Expect to receive a notification from bank 0 because this subscription is
|
||||
// looking for 1 confirmation and so checks the past bank
|
||||
let expected = expected_notification(Notification { slot: 0, id: 2 });
|
||||
let (response, _) = robust_poll_or_panic(past_bank_recv2);
|
||||
assert_eq!(expected, response);
|
||||
|
||||
let expected = expected_notification(Notification { slot: 1, id: 3 });
|
||||
let (response, _) = robust_poll_or_panic(processed_recv);
|
||||
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
|
||||
|
||||
let sig_subs = subscriptions.signature_subscriptions.read().unwrap();
|
||||
assert_eq!(expected, response);
|
||||
|
||||
// Subscription should be automatically removed after notification
|
||||
let sig_subs = subscriptions.signature_subscriptions.read().unwrap();
|
||||
assert!(!sig_subs.contains_key(&processed_tx.signatures[0]));
|
||||
|
||||
// Only one notification is expected for signature processed in previous bank
|
||||
assert_eq!(sig_subs.get(&past_bank_tx.signatures[0]).unwrap().len(), 1);
|
||||
assert!(!sig_subs.contains_key(&past_bank_tx.signatures[0]));
|
||||
|
||||
// Unprocessed signature subscription should not be removed
|
||||
assert_eq!(
|
||||
@@ -901,12 +965,20 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_slot_subscribe() {
|
||||
let (subscriber, _id_receiver, transport_receiver) =
|
||||
Subscriber::new_test("slotNotification");
|
||||
let sub_id = SubscriptionId::Number(0 as u64);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(&exit);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
);
|
||||
subscriptions.add_slot_subscription(sub_id.clone(), subscriber);
|
||||
|
||||
assert!(subscriptions
|
||||
@@ -939,12 +1011,20 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_root_subscribe() {
|
||||
let (subscriber, _id_receiver, mut transport_receiver) =
|
||||
Subscriber::new_test("rootNotification");
|
||||
let sub_id = SubscriptionId::Number(0 as u64);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(&exit);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
);
|
||||
subscriptions.add_root_subscription(sub_id.clone(), subscriber);
|
||||
|
||||
assert!(subscriptions
|
||||
@@ -976,6 +1056,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_add_and_remove_subscription() {
|
||||
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, (Sink<()>, Confirmations)>> =
|
||||
HashMap::new();
|
||||
|
@@ -9,6 +9,7 @@ use crate::{
|
||||
use bincode::serialize;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_measure::thread_mem_usage;
|
||||
use solana_metrics::{datapoint_debug, inc_new_counter_debug};
|
||||
use solana_perf::packet::{Packets, PacketsRecycler};
|
||||
@@ -184,12 +185,33 @@ impl ServeRepair {
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
requests_receiver: &PacketReceiver,
|
||||
response_sender: &PacketSender,
|
||||
max_packets: &mut usize,
|
||||
) -> Result<()> {
|
||||
//TODO cache connections
|
||||
let timeout = Duration::new(1, 0);
|
||||
let reqs = requests_receiver.recv_timeout(timeout)?;
|
||||
let mut reqs_v = vec![requests_receiver.recv_timeout(timeout)?];
|
||||
let mut total_packets = reqs_v[0].packets.len();
|
||||
|
||||
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
|
||||
while let Ok(more) = requests_receiver.try_recv() {
|
||||
total_packets += more.packets.len();
|
||||
if total_packets < *max_packets {
|
||||
// Drop the rest in the channel in case of dos
|
||||
reqs_v.push(more);
|
||||
}
|
||||
}
|
||||
|
||||
let mut time = Measure::start("repair::handle_packets");
|
||||
for reqs in reqs_v {
|
||||
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
|
||||
}
|
||||
time.stop();
|
||||
if total_packets >= *max_packets {
|
||||
if time.as_ms() > 1000 {
|
||||
*max_packets = (*max_packets * 9) / 10;
|
||||
} else {
|
||||
*max_packets = (*max_packets * 10) / 9;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -204,22 +226,26 @@ impl ServeRepair {
|
||||
let recycler = PacketsRecycler::default();
|
||||
Builder::new()
|
||||
.name("solana-repair-listen".to_string())
|
||||
.spawn(move || loop {
|
||||
let result = Self::run_listen(
|
||||
&me,
|
||||
&recycler,
|
||||
blockstore.as_ref(),
|
||||
&requests_receiver,
|
||||
&response_sender,
|
||||
);
|
||||
match result {
|
||||
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
|
||||
Err(err) => info!("repair listener error: {:?}", err),
|
||||
};
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
.spawn(move || {
|
||||
let mut max_packets = 1024;
|
||||
loop {
|
||||
let result = Self::run_listen(
|
||||
&me,
|
||||
&recycler,
|
||||
blockstore.as_ref(),
|
||||
&requests_receiver,
|
||||
&response_sender,
|
||||
&mut max_packets,
|
||||
);
|
||||
match result {
|
||||
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
|
||||
Err(err) => info!("repair listener error: {:?}", err),
|
||||
};
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
thread_mem_usage::datapoint("solana-repair-listen");
|
||||
}
|
||||
thread_mem_usage::datapoint("solana-repair-listen");
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
@@ -4,6 +4,7 @@
|
||||
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
commitment::BlockCommitmentCache,
|
||||
contact_info::ContactInfo,
|
||||
result::{Error, Result},
|
||||
};
|
||||
@@ -11,9 +12,7 @@ use rand::{Rng, SeedableRng};
|
||||
use rand_chacha::ChaChaRng;
|
||||
use solana_chacha_cuda::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
use solana_runtime::{
|
||||
bank::Bank, status_cache::SignatureConfirmationStatus, storage_utils::archiver_accounts,
|
||||
};
|
||||
use solana_runtime::{bank::Bank, storage_utils::archiver_accounts};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
account_utils::StateMut,
|
||||
@@ -30,6 +29,7 @@ use solana_storage_program::{
|
||||
storage_instruction,
|
||||
storage_instruction::proof_validation,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
cmp,
|
||||
collections::HashMap,
|
||||
@@ -185,6 +185,7 @@ impl StorageStage {
|
||||
exit: &Arc<AtomicBool>,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> Self {
|
||||
let (instruction_sender, instruction_receiver) = channel();
|
||||
|
||||
@@ -256,6 +257,7 @@ impl StorageStage {
|
||||
&keypair,
|
||||
&storage_keypair,
|
||||
&transactions_socket,
|
||||
&block_commitment_cache,
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
info!("failed to send storage transaction: {:?}", err)
|
||||
@@ -289,6 +291,7 @@ impl StorageStage {
|
||||
keypair: &Arc<Keypair>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
transactions_socket: &UdpSocket,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> io::Result<()> {
|
||||
let working_bank = bank_forks.read().unwrap().working_bank();
|
||||
let blockhash = working_bank.confirmed_last_blockhash().0;
|
||||
@@ -323,8 +326,13 @@ impl StorageStage {
|
||||
cluster_info.read().unwrap().my_data().tpu,
|
||||
)?;
|
||||
sleep(Duration::from_millis(100));
|
||||
if Self::poll_for_signature_confirmation(bank_forks, &transaction.signatures[0], 0)
|
||||
.is_ok()
|
||||
if Self::poll_for_signature_confirmation(
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
&transaction.signatures[0],
|
||||
0,
|
||||
)
|
||||
.is_ok()
|
||||
{
|
||||
break;
|
||||
};
|
||||
@@ -334,23 +342,24 @@ impl StorageStage {
|
||||
|
||||
fn poll_for_signature_confirmation(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
signature: &Signature,
|
||||
min_confirmed_blocks: usize,
|
||||
) -> Result<()> {
|
||||
let mut now = Instant::now();
|
||||
let mut confirmed_blocks = 0;
|
||||
loop {
|
||||
let response = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.working_bank()
|
||||
.get_signature_confirmation_status(signature);
|
||||
if let Some(SignatureConfirmationStatus {
|
||||
confirmations,
|
||||
status,
|
||||
..
|
||||
}) = response
|
||||
{
|
||||
let working_bank = bank_forks.read().unwrap().working_bank();
|
||||
let response = working_bank.get_signature_status_slot(signature);
|
||||
if let Some((slot, status)) = response {
|
||||
let confirmations = if working_bank.src.roots().contains(&slot) {
|
||||
MAX_LOCKOUT_HISTORY + 1
|
||||
} else {
|
||||
let r_block_commitment_cache = block_commitment_cache.read().unwrap();
|
||||
r_block_commitment_cache
|
||||
.get_confirmation_count(slot)
|
||||
.unwrap_or(0)
|
||||
};
|
||||
if status.is_ok() {
|
||||
if confirmed_blocks != confirmations {
|
||||
now = Instant::now();
|
||||
@@ -654,13 +663,20 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use rayon::prelude::*;
|
||||
use solana_ledger::get_tmp_ledger_path;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::hash::Hasher;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use std::cmp::{max, min};
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use solana_sdk::{
|
||||
hash::Hasher,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use std::{
|
||||
cmp::{max, min},
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicUsize, Ordering},
|
||||
mpsc::channel,
|
||||
Arc, RwLock,
|
||||
},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_storage_stage_none_ledger() {
|
||||
@@ -675,6 +691,11 @@ mod tests {
|
||||
&[bank.clone()],
|
||||
vec![0],
|
||||
)));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore),
|
||||
));
|
||||
let (_slot_sender, slot_receiver) = channel();
|
||||
let storage_state = StorageState::new(
|
||||
&bank.last_blockhash(),
|
||||
@@ -690,6 +711,7 @@ mod tests {
|
||||
&exit.clone(),
|
||||
&bank_forks,
|
||||
&cluster_info,
|
||||
block_commitment_cache,
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
storage_stage.join().unwrap();
|
||||
|
@@ -70,9 +70,14 @@ impl TransactionStatusService {
|
||||
}
|
||||
.expect("FeeCalculator must exist");
|
||||
let fee = fee_calculator.calculate_fee(transaction.message());
|
||||
let (writable_keys, readonly_keys) =
|
||||
transaction.message.get_account_keys_by_lock_type();
|
||||
blockstore
|
||||
.write_transaction_status(
|
||||
(slot, transaction.signatures[0]),
|
||||
slot,
|
||||
transaction.signatures[0],
|
||||
writable_keys,
|
||||
readonly_keys,
|
||||
&TransactionStatusMeta {
|
||||
status,
|
||||
fee,
|
||||
|
@@ -26,6 +26,7 @@ use solana_ledger::{
|
||||
snapshot_package::SnapshotPackageSender,
|
||||
};
|
||||
use solana_sdk::{
|
||||
genesis_config::GenesisConfig,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
@@ -67,6 +68,7 @@ pub struct TvuConfig {
|
||||
pub halt_on_trusted_validators_accounts_hash_mismatch: bool,
|
||||
pub trusted_validators: Option<HashSet<Pubkey>>,
|
||||
pub accounts_hash_fault_injection_slots: u64,
|
||||
pub genesis_config: GenesisConfig,
|
||||
}
|
||||
|
||||
impl Tvu {
|
||||
@@ -182,9 +184,10 @@ impl Tvu {
|
||||
slot_full_senders: vec![blockstream_slot_sender],
|
||||
latest_root_senders: vec![ledger_cleanup_slot_sender],
|
||||
accounts_hash_sender: Some(accounts_hash_sender),
|
||||
block_commitment_cache,
|
||||
block_commitment_cache: block_commitment_cache.clone(),
|
||||
transaction_status_sender,
|
||||
rewards_recorder_sender,
|
||||
genesis_config: tvu_config.genesis_config,
|
||||
};
|
||||
|
||||
let (replay_stage, root_bank_receiver) = ReplayStage::new(
|
||||
@@ -226,6 +229,7 @@ impl Tvu {
|
||||
&exit,
|
||||
&bank_forks,
|
||||
&cluster_info,
|
||||
block_commitment_cache,
|
||||
);
|
||||
|
||||
Tvu {
|
||||
@@ -263,11 +267,13 @@ pub mod tests {
|
||||
use crate::banking_stage::create_test_recorder;
|
||||
use crate::cluster_info::{ClusterInfo, Node};
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use serial_test_derive::serial;
|
||||
use solana_ledger::create_new_tmp_ledger;
|
||||
use solana_runtime::bank::Bank;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_tvu_exit() {
|
||||
solana_logger::setup();
|
||||
let leader = Node::new_localhost();
|
||||
@@ -295,7 +301,9 @@ pub mod tests {
|
||||
let voting_keypair = Keypair::new();
|
||||
let storage_keypair = Arc::new(Keypair::new());
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
let tvu = Tvu::new(
|
||||
&voting_keypair.pubkey(),
|
||||
Some(Arc::new(voting_keypair)),
|
||||
@@ -314,7 +322,7 @@ pub mod tests {
|
||||
&StorageState::default(),
|
||||
None,
|
||||
l_receiver,
|
||||
&Arc::new(RpcSubscriptions::new(&exit)),
|
||||
&Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone())),
|
||||
&poh_recorder,
|
||||
&leader_schedule_cache,
|
||||
&exit,
|
||||
|
@@ -202,7 +202,6 @@ impl Validator {
|
||||
}
|
||||
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
|
||||
let mut validator_exit = ValidatorExit::default();
|
||||
let exit_ = exit.clone();
|
||||
@@ -238,8 +237,11 @@ impl Validator {
|
||||
);
|
||||
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone()));
|
||||
|
||||
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
|
||||
if ContactInfo::is_valid_address(&node.info.rpc) {
|
||||
@@ -262,6 +264,7 @@ impl Validator {
|
||||
ledger_path,
|
||||
storage_state.clone(),
|
||||
validator_exit.clone(),
|
||||
config.trusted_validators.clone(),
|
||||
),
|
||||
PubSubService::new(
|
||||
&subscriptions,
|
||||
@@ -316,7 +319,7 @@ impl Validator {
|
||||
std::thread::park();
|
||||
}
|
||||
|
||||
let poh_config = Arc::new(genesis_config.poh_config);
|
||||
let poh_config = Arc::new(genesis_config.poh_config.clone());
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new_with_clear_signal(
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
@@ -443,6 +446,7 @@ impl Validator {
|
||||
shred_version: node.info.shred_version,
|
||||
trusted_validators: config.trusted_validators.clone(),
|
||||
accounts_hash_fault_injection_slots: config.accounts_hash_fault_injection_slots,
|
||||
genesis_config,
|
||||
},
|
||||
);
|
||||
|
||||
|
@@ -137,7 +137,7 @@ mod tests {
|
||||
// and to allow snapshotting of bank and the purging logic on status_cache to
|
||||
// kick in
|
||||
if slot % set_root_interval == 0 || slot == last_slot - 1 {
|
||||
bank_forks.set_root(bank.slot(), &sender);
|
||||
bank_forks.set_root(bank.slot(), &sender, None);
|
||||
}
|
||||
}
|
||||
// Generate a snapshot package for last bank
|
||||
@@ -377,9 +377,11 @@ mod tests {
|
||||
snapshot_test_config.bank_forks.insert(new_bank);
|
||||
current_bank = snapshot_test_config.bank_forks[new_slot].clone();
|
||||
}
|
||||
snapshot_test_config
|
||||
.bank_forks
|
||||
.set_root(current_bank.slot(), &snapshot_sender);
|
||||
snapshot_test_config.bank_forks.set_root(
|
||||
current_bank.slot(),
|
||||
&snapshot_sender,
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
let num_old_slots = num_set_roots * *add_root_interval - MAX_CACHE_ENTRIES + 1;
|
||||
|
@@ -3,9 +3,10 @@ use solana_client::{
|
||||
rpc_client::RpcClient,
|
||||
};
|
||||
use solana_core::{
|
||||
rpc_pubsub_service::PubSubService, rpc_subscriptions::RpcSubscriptions,
|
||||
validator::TestValidator,
|
||||
commitment::BlockCommitmentCache, rpc_pubsub_service::PubSubService,
|
||||
rpc_subscriptions::RpcSubscriptions, validator::TestValidator,
|
||||
};
|
||||
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig, pubkey::Pubkey, rpc_port, signature::Signer,
|
||||
system_transaction,
|
||||
@@ -15,7 +16,7 @@ use std::{
|
||||
net::{IpAddr, SocketAddr},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
@@ -58,7 +59,7 @@ fn test_rpc_client() {
|
||||
let now = Instant::now();
|
||||
while now.elapsed().as_secs() <= 20 {
|
||||
let response = client
|
||||
.confirm_transaction_with_commitment(signature.as_str(), CommitmentConfig::default())
|
||||
.confirm_transaction_with_commitment(&signature, CommitmentConfig::default())
|
||||
.unwrap();
|
||||
|
||||
if response.value {
|
||||
@@ -85,7 +86,14 @@ fn test_slot_subscription() {
|
||||
rpc_port::DEFAULT_RPC_PUBSUB_PORT,
|
||||
);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore,
|
||||
))),
|
||||
));
|
||||
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
|
||||
std::thread::sleep(Duration::from_millis(400));
|
||||
|
||||
|
@@ -9,15 +9,12 @@ use reqwest::{self, header::CONTENT_TYPE};
|
||||
use serde_json::{json, Value};
|
||||
use solana_client::{
|
||||
rpc_client::{get_rpc_request_str, RpcClient},
|
||||
rpc_response::Response,
|
||||
rpc_response::{Response, RpcSignatureResult},
|
||||
};
|
||||
use solana_core::{rpc_pubsub::gen_client::Client as PubsubClient, validator::TestValidator};
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
system_transaction,
|
||||
transaction::{self, Transaction},
|
||||
commitment_config::CommitmentConfig, hash::Hash, pubkey::Pubkey, system_transaction,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
@@ -224,7 +221,7 @@ fn test_rpc_subscriptions() {
|
||||
let mut rt = Runtime::new().unwrap();
|
||||
let rpc_pubsub_url = format!("ws://{}/", leader_data.rpc_pubsub);
|
||||
|
||||
let (status_sender, status_receiver) = channel::<(String, Response<transaction::Result<()>>)>();
|
||||
let (status_sender, status_receiver) = channel::<(String, Response<RpcSignatureResult>)>();
|
||||
let status_sender = Arc::new(Mutex::new(status_sender));
|
||||
let (sent_sender, sent_receiver) = channel::<()>();
|
||||
let sent_sender = Arc::new(Mutex::new(sent_sender));
|
||||
@@ -296,7 +293,7 @@ fn test_rpc_subscriptions() {
|
||||
let timeout = deadline.saturating_duration_since(Instant::now());
|
||||
match status_receiver.recv_timeout(timeout) {
|
||||
Ok((sig, result)) => {
|
||||
assert!(result.value.is_ok());
|
||||
assert!(result.value.err.is_none());
|
||||
assert!(signature_set.remove(&sig));
|
||||
}
|
||||
Err(_err) => {
|
||||
|
@@ -3,28 +3,35 @@
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use log::*;
|
||||
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_core::storage_stage::{test_cluster_info, SLOTS_PER_TURN_TEST};
|
||||
use solana_core::storage_stage::{StorageStage, StorageState};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_ledger::blockstore_processor;
|
||||
use solana_ledger::entry;
|
||||
use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger};
|
||||
use solana_core::{
|
||||
commitment::BlockCommitmentCache,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
storage_stage::{test_cluster_info, StorageStage, StorageState, SLOTS_PER_TURN_TEST},
|
||||
};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks, blockstore::Blockstore, blockstore_processor, create_new_tmp_ledger,
|
||||
entry,
|
||||
};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::message::Message;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::transaction::Transaction;
|
||||
use solana_storage_program::storage_instruction;
|
||||
use solana_storage_program::storage_instruction::StorageAccountType;
|
||||
use std::fs::remove_dir_all;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use solana_sdk::{
|
||||
clock::DEFAULT_TICKS_PER_SLOT,
|
||||
hash::Hash,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_storage_program::storage_instruction::{self, StorageAccountType};
|
||||
use std::{
|
||||
fs::remove_dir_all,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::channel,
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::sleep,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_storage_stage_process_account_proofs() {
|
||||
@@ -52,6 +59,9 @@ mod tests {
|
||||
&[bank.clone()],
|
||||
vec![0],
|
||||
)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
let cluster_info = test_cluster_info(&keypair.pubkey());
|
||||
|
||||
let (bank_sender, bank_receiver) = channel();
|
||||
@@ -69,6 +79,7 @@ mod tests {
|
||||
&exit.clone(),
|
||||
&bank_forks,
|
||||
&cluster_info,
|
||||
block_commitment_cache,
|
||||
);
|
||||
bank_sender.send(vec![bank.clone()]).unwrap();
|
||||
|
||||
@@ -171,6 +182,9 @@ mod tests {
|
||||
&[bank.clone()],
|
||||
vec![0],
|
||||
)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
|
||||
let cluster_info = test_cluster_info(&keypair.pubkey());
|
||||
let (bank_sender, bank_receiver) = channel();
|
||||
@@ -188,6 +202,7 @@ mod tests {
|
||||
&exit.clone(),
|
||||
&bank_forks,
|
||||
&cluster_info,
|
||||
block_commitment_cache,
|
||||
);
|
||||
bank_sender.send(vec![bank.clone()]).unwrap();
|
||||
|
||||
|
@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.0.11"
version = "1.0.22"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,17 +12,20 @@ edition = "2018"
backtrace = { version = "0.3.33", features = ["serialize-serde"] }
bytes = { version = "0.4.12", features = ["either"] }
cc = { version = "1.0.45", features = ["jobserver", "num_cpus", "parallel"]}
curve25519-dalek = { version = "1.1.3" }
curve25519-dalek = { version = "2" }
either= { version = "1.5.2" }
failure = { version = "0.1.5" }
lazy_static = { version = "1.4.0", features = ["spin", "spin_no_std"] }
libc = { version = "0.2.62", features = ["extra_traits"] }
rand_chacha = { version = "0.1.1" }
rand_chacha = { version = "0.2.2" }
regex-syntax = { version = "0.6.12" }
reqwest = { version = "0.9.20", default-features = false, features = ["rustls-tls"] }
serde = { version = "1.0.100", features = ["rc"] }
ed25519-dalek = { version = "=1.0.0-pre.1", features = ["serde"] }
ed25519-dalek = { version = "=1.0.0-pre.3", features = ["serde"] }
syn_0_15 = { package = "syn", version = "0.15.42", features = ["extra-traits", "fold", "full"] }
syn_1_0 = { package = "syn", version = "1.0.3", features = ["extra-traits", "fold", "full"] }
tokio = { version = "0.1.22",features=["bytes", "codec", "default", "fs", "io", "mio", "num_cpus", "reactor", "rt-full", "sync", "tcp", "timer", "tokio-codec", "tokio-current-thread", "tokio-executor", "tokio-io", "tokio-io", "tokio-reactor", "tokio-tcp", "tokio-tcp", "tokio-threadpool", "tokio-timer", "tokio-udp", "tokio-uds", "udp", "uds"] }
winapi = { version = "0.3.8", features=["basetsd", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "libloaderapi", "memoryapi", "minwinbase", "minwindef", "ntdef", "ntsecapi", "ntstatus", "objbase", "processenv", "processthreadsapi", "profileapi", "shlobj", "std", "synchapi", "sysinfoapi", "timezoneapi", "utilapiset", "winbase", "wincon", "windef", "winerror", "winnls", "winnt", "winreg", "winsock2", "winuser", "ws2def", "ws2ipdef", "ws2tcpip", "wtypesbase"] }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
|
@@ -3,7 +3,9 @@ set -e

cd "$(dirname "$0")"

usage=$(cargo -q run -p solana-cli -- -C ~/.foo --help | sed 's|'"$HOME"'|~|g')
: "${rust_stable:=}" # Pacify shellcheck

usage=$(cargo +"$rust_stable" -q run -p solana-cli -- -C ~/.foo --help | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')

out=${1:-src/cli/usage.md}

@@ -29,6 +31,6 @@ in_subcommands=0
while read -r subcommand rest; do
[[ $subcommand == "SUBCOMMANDS:" ]] && in_subcommands=1 && continue
if ((in_subcommands)); then
section "$(cargo -q run -p solana-cli -- help "$subcommand" | sed 's|'"$HOME"'|~|g')" "####" >> "$out"
section "$(cargo +"$rust_stable" -q run -p solana-cli -- help "$subcommand" | sed 's|'"$HOME"'|~|g')" "####" >> "$out"
fi
done <<<"$usage">>"$out"
BIN docs/src/.gitbook/assets/ledger-live-enable-developer-mode.png (new binary file, 184 KiB, not shown)
BIN docs/src/.gitbook/assets/ledger-live-install-solana-app.png (new binary file, 195 KiB, not shown)
@@ -6,10 +6,11 @@
* [Trust Wallet](wallet/trust-wallet.md)
* [Ledger Live](wallet/ledger-live.md)
* [Command-line Wallets](wallet/cli-wallets.md)
* [Hardware Wallets](remote-wallet/README.md)
* [Ledger Hardware Wallet](remote-wallet/ledger.md)
* [Paper Wallet](paper-wallet/README.md)
* [Paper Wallet Usage](paper-wallet/paper-wallet-usage.md)
* [Hardware Wallets](remote-wallet/README.md)
* [Ledger Hardware Wallet](remote-wallet/ledger.md)
* [Support / Troubleshooting](wallet/support.md)
* [Command Line Guide](cli/README.md)
* [Install the Solana Command Line Tool Suite](cli/install-solana-cli-tools.md)
* [Generate Keys](cli/generate-keys.md)

@@ -14,7 +14,6 @@ To interact with a Solana node inside a JavaScript application, use the [solana-

## Methods

* [confirmTransaction](jsonrpc-api.md#confirmtransaction)
* [getAccountInfo](jsonrpc-api.md#getaccountinfo)
* [getBalance](jsonrpc-api.md#getbalance)
* [getBlockCommitment](jsonrpc-api.md#getblockcommitment)
@@ -22,26 +21,30 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getClusterNodes](jsonrpc-api.md#getclusternodes)
* [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock)
* [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks)
* [getConfirmedSignaturesForAddress](jsonrpc-api.md#getconfirmedsignaturesforaddress)
* [getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransaction)
* [getEpochInfo](jsonrpc-api.md#getepochinfo)
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
* [getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash)
* [getFeeRateGovernor](jsonrpc-api.md#getfeerategovernor)
* [getFirstAvailableBlock](jsonrpc-api.md#getfirstavailableblock)
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
* [getIdentity](jsonrpc-api.md#getidentity)
* [getInflation](jsonrpc-api.md#getinflation)
* [getLargestAccounts](jsonrpc-api.md#getlargestaccounts)
* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
* [getProgramAccounts](jsonrpc-api.md#getprogramaccounts)
* [getRecentBlockhash](jsonrpc-api.md#getrecentblockhash)
* [getSignatureStatus](jsonrpc-api.md#getsignaturestatus)
* [getSignatureStatuses](jsonrpc-api.md#getsignaturestatuses)
* [getSlot](jsonrpc-api.md#getslot)
* [getSlotLeader](jsonrpc-api.md#getslotleader)
* [getSlotsPerSegment](jsonrpc-api.md#getslotspersegment)
* [getStoragePubkeysForSlot](jsonrpc-api.md#getstoragepubkeysforslot)
* [getStorageTurn](jsonrpc-api.md#getstorageturn)
* [getStorageTurnRate](jsonrpc-api.md#getstorageturnrate)
* [getSupply](jsonrpc-api.md#getsupply)
* [getTransactionCount](jsonrpc-api.md#gettransactioncount)
* [getTotalSupply](jsonrpc-api.md#gettotalsupply)
* [getVersion](jsonrpc-api.md#getversion)
* [getVoteAccounts](jsonrpc-api.md#getvoteaccounts)
* [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot)
@@ -93,7 +96,8 @@ Requests can be sent in batches by sending an array of JSON-RPC request objects

Solana nodes choose which bank state to query based on a commitment requirement
set by the client. Clients may specify either:

* `{"commitment":"max"}` - the node will query the most recent bank having reached `MAX_LOCKOUT_HISTORY` confirmations
* `{"commitment":"max"}` - the node will query the most recent bank confirmed by the cluster as having reached `MAX_LOCKOUT_HISTORY` confirmations
* `{"commitment":"root"}` - the node will query the most recent bank having reached `MAX_LOCKOUT_HISTORY` confirmations on this node
* `{"commitment":"recent"}` - the node will query its most recent bank state

The commitment parameter should be included as the last element in the `params` array:
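For illustration only (not part of this diff), a request that appends the commitment object as the last element of `params` might look like the following sketch; the pubkey is a placeholder reused from examples elsewhere in these docs:

```bash
// Request (illustrative sketch)
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC", {"commitment":"recent"}]}' http://localhost:8899
```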
@@ -114,31 +118,18 @@ Many methods that take a commitment parameter return an RpcResponse JSON object

* `value` : The value returned by the operation itself.

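As a sketch (not part of this diff), the RpcResponse envelope pairs a `context` slot with the method-specific `value`, as in the `confirmTransaction` result shown later in this reference:

```bash
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":true},"id":1}
```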
## Health Check

Although not a JSON RPC API, a `GET /health` request at the RPC HTTP Endpoint provides a
health-check mechanism for use by load balancers or other network
infrastructure. This request will always return an HTTP 200 OK response with a body of
"ok" or "behind" based on the following conditions:

1. If one or more `--trusted-validator` arguments are provided to `solana-validator`, "ok" is returned
   when the node is within `HEALTH_CHECK_SLOT_DISTANCE` slots of the highest trusted validator,
   otherwise "behind" is returned.
2. "ok" is always returned if no trusted validators are provided.

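For illustration only (not part of this diff), a minimal probe against a local node, assuming the default RPC port 8899:

```bash
// Request
curl http://localhost:8899/health
// Result: plain-text body "ok" or "behind"
```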
## JSON RPC API Reference

### confirmTransaction

Returns a transaction receipt

#### Parameters:

* `<string>` - Signature of Transaction to confirm, as base-58 encoded string
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `RpcResponse<bool>` - RpcResponse JSON object with `value` field set to Transaction status, boolean true if Transaction is confirmed

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"confirmTransaction", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":true},"id":1}
```

### getAccountInfo

Returns all information associated with the account of provided Pubkey
@@ -206,7 +197,7 @@ The result field will be a JSON object containing:

* `commitment` - commitment, comprising either:
  * `<null>` - Unknown block
  * `<array>` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
  * `<array>` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY` + 1
* `totalStake` - total active stake, in lamports, of the current epoch

#### Example:

@@ -216,7 +207,7 @@ The result field will be a JSON object containing:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockCommitment","params":[5]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32],"totalStake": 42},"id":1}
{"jsonrpc":"2.0","result":{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32],"totalStake": 42},"id":1}
```

### getBlockTime
@@ -292,21 +283,24 @@ Returns identity and transaction information about a confirmed block in the ledg

The result field will be an object with the following fields:

* `blockhash: <string>` - the blockhash of this block, as base-58 encoded string
* `previousBlockhash: <string>` - the blockhash of this block's parent, as base-58 encoded string
* `parentSlot: <u64>` - the slot index of this block's parent
* `transactions: <array>` - an array of JSON objects containing:
  * `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
  * `meta: <object>` - transaction status metadata object, containing `null` or:
    * `status: <object>` - Transaction status:
      * `"Ok": null` - Transaction was successful
      * `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L18)
    * `fee: <u64>` - fee this transaction was charged, as u64 integer
    * `preBalances: <array>` - array of u64 account balances from before the transaction was processed
    * `postBalances: <array>` - array of u64 account balances after the transaction was processed
* `rewards: <array>` - an array of JSON objects containing:
  * `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
  * `lamports: <i64>` - number of reward lamports credited or debited by the account, as an i64
* `<null>` - if specified block is not confirmed
* `<object>` - if block is confirmed, an object with the following fields:
  * `blockhash: <string>` - the blockhash of this block, as base-58 encoded string
  * `previousBlockhash: <string>` - the blockhash of this block's parent, as base-58 encoded string; if the parent block is not available due to ledger cleanup, this field will return "11111111111111111111111111111111"
  * `parentSlot: <u64>` - the slot index of this block's parent
  * `transactions: <array>` - an array of JSON objects containing:
    * `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
    * `meta: <object>` - transaction status metadata object, containing `null` or:
      * `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
      * `fee: <u64>` - fee this transaction was charged, as u64 integer
      * `preBalances: <array>` - array of u64 account balances from before the transaction was processed
      * `postBalances: <array>` - array of u64 account balances after the transaction was processed
      * DEPRECATED: `status: <object>` - Transaction status
        * `"Ok": <null>` - Transaction was successful
        * `"Err": <ERR>` - Transaction failed with TransactionError
  * `rewards: <array>` - an array of JSON objects containing:
    * `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
    * `lamports: <i64>` - number of reward lamports credited or debited by the account, as an i64

#### Example:

@@ -315,18 +309,18 @@ The result field will be an object with the following fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "json"]}' localhost:8899

// Result
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"fee":1000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}

// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "binary"]}' localhost:8899

// Result
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"fee":1000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
```

#### Transaction Structure

Transactions are quite different from those on other blockchains. Be sure to review [Anatomy of a Transaction](transaction.md) to learn about transactions on Solana.
Transactions are quite different from those on other blockchains. Be sure to review [Anatomy of a Transaction](../transaction.md) to learn about transactions on Solana.

The JSON structure of a transaction is defined as follows:
@@ -368,6 +362,72 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m
{"jsonrpc":"2.0","result":[5,6,7,8,9,10],"id":1}
```

### getConfirmedSignaturesForAddress

Returns a list of all the confirmed signatures for transactions involving an address, within a specified Slot range. Max range allowed is 10_000 Slots.

#### Parameters:

* `<string>` - account address as base-58 encoded string
* `<u64>` - start slot, inclusive
* `<u64>` - end slot, inclusive

#### Results:

The result field will be an array of:
* `<string>` - transaction signature as base-58 encoded string

The signatures will be ordered based on the Slot in which they were confirmed in, from lowest to highest Slot

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedSignaturesForAddress","params":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC", 0, 100]}' localhost:8899

// Result
{"jsonrpc":"2.0","result":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4bJdGN8Tt2kLWZ3Fa1dpwPSEkXWWTSszPSf1rRVsCwNjxbbUdwTeiWtmi8soA26YmwnKD4aAxNp8ci1Gjpdv4gsr","4LQ14a7BYY27578Uj8LPCaVhSdJGLn9DJqnUJHpy95FMqdKf9acAhUhecPQNjNUy6VoNFUbvwYkPociFSf87cWbG"],"id":1}
```

### getConfirmedTransaction

Returns transaction details for a confirmed transaction

#### Parameters:

* `<string>` - transaction signature as base-58 encoded string
* `<string>` - (optional) encoding for the returned Transaction, either "json" or "binary". If not provided, the default encoding is JSON.

#### Results:

The result field will be an object with the following fields:
* `slot: <u64>` - the slot this transaction was processed in
* `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
* `meta: <object>` - transaction status metadata object, containing `null` or:
  * `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
  * `fee: <u64>` - fee this transaction was charged, as u64 integer
  * `preBalances: <array>` - array of u64 account balances from before the transaction was processed
  * `postBalances: <array>` - array of u64 account balances after the transaction was processed
  * DEPRECATED: `status: <object>` - Transaction status
    * `"Ok": <null>` - Transaction was successful
    * `"Err": <ERR>` - Transaction failed with TransactionError

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby", "json"]}' localhost:8899

// Result
{"jsonrpc":"2.0","result":{"slot":430,"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}},"id":1}

// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby", "binary"]}' localhost:8899

// Result
{"jsonrpc":"2.0","result":{"slot":430,"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}},"id":1}
```

### getEpochInfo

Returns information about the current epoch

@@ -475,6 +535,28 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"context":{"slot":54},"value":{"feeRateGovernor":{"burnPercent":50,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
```

### getFirstAvailableBlock

Returns the slot of the lowest confirmed block that has not been purged from the ledger

#### Parameters:

None

#### Results:

* `<u64>` - Slot

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getFirstAvailableBlock"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":250000,"id":1}
```

### getGenesisHash

Returns the genesis hash

@@ -549,6 +631,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"storage":0.1,"taper":0.15,"terminal":0.015},"id":1}
```

### getLargestAccounts

Returns the 20 largest accounts, by lamport balance

#### Parameters:

* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result will be an RpcResponse JSON object with `value` equal to an array of:

* `<object>` - a JSON object containing:
  * `address: <string>`, base-58 encoded address of the account
  * `lamports: <u64>`, number of lamports in the account, as a u64

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLargestAccounts"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"context":{"slot":54},"value":[{"lamports":999974,"address":"99P8ZgtJYe1buSK8JXkvpLh8xPsCFuLYhz9hQFNw93WJ"},{"lamports":42,"address":"uPwWLo16MVehpyWqsLkK3Ka8nLowWvAHbBChqv2FZeL"},{"lamports":42,"address":"aYJCgU7REfu3XF8b3QhkqgqQvLizx8zxuLBHA25PzDS"},{"lamports":42,"address":"CTvHVtQ4gd4gUcw3bdVgZJJqApXE9nCbbbP4VTS5wE1D"},{"lamports":20,"address":"4fq3xJ6kfrh9RkJQsmVd5gNMvJbuSHfErywvEjNQDPxu"},{"lamports":4,"address":"AXJADheGVp9cruP8WYu46oNkRbeASngN5fPCMVGQqNHa"},{"lamports":2,"address":"8NT8yS6LiwNprgW4yM1jPPow7CwRUotddBVkrkWgYp24"},{"lamports":1,"address":"SysvarEpochSchedu1e111111111111111111111111"},{"lamports":1,"address":"11111111111111111111111111111111"},{"lamports":1,"address":"Stake11111111111111111111111111111111111111"},{"lamports":1,"address":"SysvarC1ock11111111111111111111111111111111"},{"lamports":1,"address":"StakeConfig11111111111111111111111111111111"},{"lamports":1,"address":"SysvarRent111111111111111111111111111111111"},{"lamports":1,"address":"Config1111111111111111111111111111111111111"},{"lamports":1,"address":"SysvarStakeHistory1111111111111111111111111"},{"lamports":1,"address":"SysvarRecentB1ockHashes11111111111111111111"},{"lamports":1,"address":"SysvarFees111111111111111111111111111111111"},{"lamports":1,"address":"Vote111111111111111111111111111111111111111"}]},"id":1}
```

### getLeaderSchedule

Returns the leader schedule for an epoch
@@ -654,16 +762,18 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"burnPercent":50,"lamportsPerSignature":5000,"maxLamportsPerSignature":10000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":1000,"targetSignaturesPerSlot":20000}}},"id":1}
```

### getSignatureStatus
### getSignatureStatuses

Returns the status of a given signature. This method is similar to [confirmTransaction](jsonrpc-api.md#confirmtransaction) but provides more resolution for error events.
Returns the statuses of a list of signatures. Unless the
`searchTransactionHistory` configuration parameter is included, this method only
searches the recent status cache of signatures, which retains statuses for all
active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots.

#### Parameters:

* `<array>` - An array of transaction signatures to confirm, as base-58 encoded strings
* `<object>` - (optional) Extended Rpc configuration, containing the following optional fields:
  * `commitment: <string>` - [Commitment](jsonrpc-api.md#configuring-state-commitment)
  * `searchTransactionHistory: <bool>` - whether to search the ledger transaction status cache, which may be expensive
* `<object>` - (optional) Configuration object containing the following field:
  * `searchTransactionHistory: <bool>` - if true, a Solana node will search its ledger cache for any signatures not found in the recent status cache

#### Results:

@@ -676,22 +786,26 @@ An array of:

* `<null>` - Unknown transaction
* `<object>`
  * `slot: <u64>` - The slot the transaction was processed
  * `confirmations: <usize | null>` - Number of blocks since signature confirmation, null if rooted
  * `status: <object>` - Transaction status
  * `confirmations: <usize | null>` - Number of blocks since signature confirmation, null if rooted, as well as finalized by a supermajority of the cluster
  * `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
  * DEPRECATED: `status: <object>` - Transaction status
    * `"Ok": <null>` - Transaction was successful
    * `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
    * `"Err": <ERR>` - Transaction failed with TransactionError

#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":[["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW", "5j7s6NiJS3JAkvgkoc18WVAsiSaci2pxB2A6ueCJP4tprA2TFg9wSyTLeYouxPBJEMzJinENTkpA52YStRW5Dia7"]]]}' http://localhost:8899
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatuses", "params":[["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW", "5j7s6NiJS3JAkvgkoc18WVAsiSaci2pxB2A6ueCJP4tprA2TFg9wSyTLeYouxPBJEMzJinENTkpA52YStRW5Dia7"]]}' http://localhost:8899
|
||||
|
||||
// Request with configuration
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatuses", "params":[["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"], {"searchTransactionHistory": true}]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 72, "confirmations": 10, "status": {"Ok": null}}, null]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 72, "confirmations": 10, "err": null, "status": {"Ok": null}}, null]},"id":1}
|
||||
|
||||
// Result, first transaction rooted
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 48, "confirmations": null, "status": {"Ok": null}}, null]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 48, "confirmations": null, "err": null, "status": {"Ok": null}}, null]},"id":1}
|
||||
```
|
||||
|
||||
### getSlot

@@ -825,6 +939,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":1024,"id":1}
```

### getSupply

Returns information about the current supply.

#### Parameters:

* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result will be an RpcResponse JSON object with `value` equal to a JSON object containing:

* `total: <u64>` - Total supply in lamports
* `circulating: <u64>` - Circulating supply in lamports
* `nonCirculating: <u64>` - Non-circulating supply in lamports
* `nonCirculatingAccounts: <array>` - an array of account addresses of non-circulating accounts, as strings

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSupply"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":{"circulating":16000,"nonCirculating":1000000,"nonCirculatingAccounts":["FEy8pTbP5fEoqMV1GdTz83byuA8EKByqYat1PKDgVAq5","9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA","3mi1GmwEE3zo2jmfDuzvjSX9ovRXsDUKHvsntpkhuLJ9","BYxEJTDerkaRWBem3XgnVcdhppktBXa2HbkHPKj2Ui4Z"],"total":1016000}},"id":1}
```

### getTransactionCount

Returns the current Transaction count from the ledger

@@ -847,28 +987,6 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":268,"id":1}
```

### getTotalSupply

Returns the current total supply in lamports

#### Parameters:

* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `<u64>` - Total supply

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTotalSupply"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":10126,"id":1}
```

### getVersion

Returns the current solana versions running on the node

@@ -889,7 +1007,7 @@ The result field will be a JSON object with the following fields:
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"solana-core": "1.0.11"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "1.0.22"},"id":1}
```

### getVoteAccounts

@@ -1200,7 +1318,7 @@ Subscribe to a transaction signature to receive notification when the transactio

#### Notification Format:

```bash
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": "Confirmed","subscription":0}}
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": {"err": null}, "subscription":0}}
```

### signatureUnsubscribe

@@ -33,11 +33,13 @@ want to perform an action on the stake account you create next.

Now, create a stake account:

```bash
solana create-stake-account --from <KEYPAIR> stake-account.json <AMOUNT> --stake-authority <KEYPAIR> --withdraw-authority <KEYPAIR>
solana create-stake-account --from <KEYPAIR> stake-account.json <AMOUNT> \
  --stake-authority <KEYPAIR> --withdraw-authority <KEYPAIR> \
  --fee-payer <KEYPAIR>
```

`<AMOUNT>` tokens are transferred from the account at `<KEYPAIR>` to a new
stake account at the public key of stake-account.json.
`<AMOUNT>` tokens are transferred from the account at the "from" `<KEYPAIR>` to
a new stake account at the public key of stake-account.json.

The stake-account.json file can now be discarded. To authorize additional
actions, you will use the `--stake-authority` or `withdraw-authority` keypair,

@@ -72,7 +74,9 @@ Stake and withdraw authorities can be set when creating an account via the
run:

```bash
solana stake-authorize <STAKE_ACCOUNT_ADDRESS> --stake-authority <KEYPAIR> --new-stake-authority <PUBKEY>
solana stake-authorize <STAKE_ACCOUNT_ADDRESS> \
  --stake-authority <KEYPAIR> --new-stake-authority <PUBKEY> \
  --fee-payer <KEYPAIR>
```

This will use the existing stake authority `<KEYPAIR>` to authorize a new stake

@@ -87,7 +91,8 @@ addresses can be cumbersome. Fortunately, you can derive stake addresses using
the `--seed` option:

```bash
solana create-stake-account --from <KEYPAIR> <STAKE_ACCOUNT_KEYPAIR> --seed <STRING> <AMOUNT> --stake-authority <PUBKEY> --withdraw-authority <PUBKEY>
solana create-stake-account --from <KEYPAIR> <STAKE_ACCOUNT_KEYPAIR> --seed <STRING> <AMOUNT> \
  --stake-authority <PUBKEY> --withdraw-authority <PUBKEY> --fee-payer <KEYPAIR>
```

`<STRING>` is an arbitrary string up to 32 bytes, but will typically be a

@@ -122,12 +127,13 @@ is the vote account address. Choose a validator and use its vote account
address in `solana delegate-stake`:

```bash
solana delegate-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <VOTE_ACCOUNT_ADDRESS>
solana delegate-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <VOTE_ACCOUNT_ADDRESS> \
  --fee-payer <KEYPAIR>
```

`<KEYPAIR>` authorizes the operation on the account with address
`<STAKE_ACCOUNT_ADDRESS>`. The stake is delegated to the vote account with
address `<VOTE_ACCOUNT_ADDRESS>`.
The stake authority `<KEYPAIR>` authorizes the operation on the account with
address `<STAKE_ACCOUNT_ADDRESS>`. The stake is delegated to the vote account
with address `<VOTE_ACCOUNT_ADDRESS>`.

After delegating stake, use `solana stake-account` to observe the changes
to the stake account:
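The example output that follows in the original page is elided by this hunk; as a minimal sketch (not part of the diff), the command that produces it takes the stake account address:

```bash
solana stake-account <STAKE_ACCOUNT_ADDRESS>
```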
@@ -155,11 +161,12 @@ Once delegated, you can undelegate stake with the `solana deactivate-stake`
command:

```bash
solana deactivate-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS>
solana deactivate-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> \
  --fee-payer <KEYPAIR>
```

`<KEYPAIR>` authorizes the operation on the account with address
`<STAKE_ACCOUNT_ADDRESS>`.
The stake authority `<KEYPAIR>` authorizes the operation on the account
with address `<STAKE_ACCOUNT_ADDRESS>`.

Note that stake takes several epochs to "cool down". Attempts to delegate stake
in the cool down period will fail.
@@ -169,12 +176,13 @@ in the cool down period will fail.

Transfer tokens out of a stake account with the `solana withdraw-stake` command:

```bash
solana withdraw-stake --withdraw-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <RECIPIENT_ADDRESS> <AMOUNT>
solana withdraw-stake --withdraw-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <RECIPIENT_ADDRESS> <AMOUNT> \
  --fee-payer <KEYPAIR>
```

`<STAKE_ACCOUNT_ADDRESS>` is the existing stake account, `<KEYPAIR>` is the
withdraw authority, and `<AMOUNT>` is the number of tokens to transfer to
`<RECIPIENT_ADDRESS>`.
`<STAKE_ACCOUNT_ADDRESS>` is the existing stake account, the withdraw authority
`<KEYPAIR>` authorizes the withdrawal, and `<AMOUNT>` is the number of tokens
to transfer to `<RECIPIENT_ADDRESS>`.

## Split Stake

@@ -184,12 +192,14 @@ currently staked, cooling down, or locked up. To transfer tokens from an
existing stake account to a new one, use the `solana split-stake` command:

```bash
solana split-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <NEW_STAKE_ACCOUNT_KEYPAIR> <AMOUNT>
solana split-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <NEW_STAKE_ACCOUNT_KEYPAIR> <AMOUNT> \
  --fee-payer <KEYPAIR>
```

`<STAKE_ACCOUNT_ADDRESS>` is the existing stake account, `<KEYPAIR>` is the
stake authority, `<NEW_STAKE_ACCOUNT_KEYPAIR>` is the keypair for the new account,
and `<AMOUNT>` is the number of tokens to transfer to the new account.
`<STAKE_ACCOUNT_ADDRESS>` is the existing stake account, the stake authority
`<KEYPAIR>` authorizes the operation, `<NEW_STAKE_ACCOUNT_KEYPAIR>` is the
keypair for the new account, and `<AMOUNT>` is the number of tokens to transfer
to the new account.

To split a stake account into a derived account address, use the `--seed`
option. See
@@ -84,10 +84,10 @@ pubkey: GKvqsuNcnwWqPzzuhLmGi4rzzh55FhJtGizkhHaEJqiV
```

```bash
solana transfer --from <SENDER_KEYPAIR> <RECIPIENT_ACCOUNT_ADDRESS> 5 --url http://devnet.solana.com
solana transfer --from <KEYPAIR> <RECIPIENT_ACCOUNT_ADDRESS> 5 --url http://devnet.solana.com --fee-payer <KEYPAIR>
```

where you replace `<SENDER_KEYPAIR>` with the path to a keypair in your wallet,
where you replace `<KEYPAIR>` with the path to a keypair in your wallet,
and replace `<RECIPIENT_ACCOUNT_ADDRESS>` with the output of `solana-keygen new` above.

Confirm the updated balances with `solana balance`:
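The balance check itself is elided by this hunk; as a sketch (not part of the diff), it would look like:

```bash
solana balance <RECIPIENT_ACCOUNT_ADDRESS> --url http://devnet.solana.com
```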
@@ -107,7 +107,7 @@ tokens to transfer. Once you have that collected, you can transfer tokens
with the `solana transfer` command:

```bash
solana transfer --from <SENDER_KEYPAIR> <RECIPIENT_ACCOUNT_ADDRESS> <AMOUNT>
solana transfer --from <KEYPAIR> <RECIPIENT_ACCOUNT_ADDRESS> <AMOUNT> --fee-payer <KEYPAIR>
```

Confirm the updated balances with `solana balance`:
@@ -171,7 +171,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00

## Usage
### solana-cli
```text
solana-cli 1.0.11 [channel=unknown commit=unknown]
solana-cli 1.0.22 [channel=unknown commit=unknown]
Blockchain, Rebuilt for Scale

USAGE:

@@ -289,3 +289,7 @@ Refer to the following page for a comprehensive guide on delegating stake:
---

{% page-ref page="../api-reference/cli.md" %}

## Support

Check out our [Wallet Support Page](../wallet/support.md) for ways to get help.
@@ -6,64 +6,8 @@ secure transaction signing.
|
||||
|
||||
## Before You Begin
|
||||
|
||||
- [Set up a Ledger Nano S with the Solana App](../wallet/ledger-live.md)
|
||||
- [Install the Solana command-line tools](../cli/install-solana-cli-tools.md)
|
||||
- [Initialize your Ledger Nano S](https://support.ledger.com/hc/en-us/articles/360000613793)
|
||||
- [Install the latest device firmware](https://support.ledgerwallet.com/hc/en-us/articles/360002731113-Update-Ledger-Nano-S-firmware)
|
||||
|
||||
## Install the Solana App on Ledger Nano S
|
||||
|
||||
The Solana Ledger app is not yet available on Ledger Live. Until it is, you
|
||||
can install a development version of the app from the command-line. Note that
|
||||
because the app is not installed via Ledger Live, you will need to approve
|
||||
installation from an "unsafe" manager, as well as see the message, "This app
|
||||
is not genuine" each time you open the app. Once the app is available on
|
||||
Ledger Live, you can reinstall the app from there, and the message will no
|
||||
longer be displayed.
|
||||
|
||||
1. Connect your Ledger device via USB and enter your pin to unlock it
|
||||
2. Download and run the Solana Ledger app installer:
|
||||
```text
|
||||
curl -sSLf https://github.com/solana-labs/ledger-app-solana/releases/download/v0.1.1/install.sh | sh
|
||||
```
|
||||
3. When prompted, approve the "unsafe" manager on your device
|
||||
4. When prompted, approve the installation on your device
|
||||
5. An installation window appears and your device will display Processing…
|
||||
6. The app installation is confirmed
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
If you encounter the following error:
|
||||
|
||||
```text
|
||||
Traceback (most recent call last):
|
||||
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.7/lib/python3.7/runpy.py", line 193, in _run_module_as_main
|
||||
"__main__", mod_spec)
|
||||
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.7/lib/python3.7/runpy.py", line 85, in _run_code
|
||||
exec(code, run_globals)
|
||||
File "ledger-env/lib/python3.7/site-packages/ledgerblue/loadApp.py", line 197, in <module>
|
||||
dongle = getDongle(args.apdu)
|
||||
File "ledger-env/lib/python3.7/site-packages/ledgerblue/comm.py", line 216, in getDongle
|
||||
dev.open_path(hidDevicePath)
|
||||
File "hid.pyx", line 72, in hid.device.open_path
|
||||
OSError: open failed
|
||||
```
|
||||
|
||||
To fix, check the following:
|
||||
|
||||
1. Ensure your Ledger device is connected to USB
|
||||
2. Ensure your Ledger device is unlocked and not waiting for you to enter your pin
|
||||
3. Ensure the Ledger Live application is not open
|
||||
|
||||
### Future: Installation once the Solana app is on Ledger Live
|
||||
|
||||
- [Install Ledger Live](https://support.ledger.com/hc/en-us/articles/360006395553/) software on your computer
|
||||
|
||||
1. Open the Manager in Ledger Live
|
||||
2. Connect your Ledger device via USB and enter your pin to unlock it
|
||||
3. When prompted, approve the manager on your device
|
||||
4. Find Solana in the app catalog and click Install
|
||||
5. An installation window appears and your device will display Processing…
|
||||
6. The app installation is confirmed
|
||||
|
||||
## Use Ledger Device with Solana CLI
|
||||
|
||||
@@ -98,4 +42,4 @@ anywhere you see an option or argument that accepts a `<KEYPAIR>`.
|
||||
|
||||
## Support
|
||||
|
||||
Email maintainers@solana.com
|
||||
Check out our [Wallet Support Page](../wallet/support.md) for ways to get help.
|
||||
|
@@ -16,6 +16,7 @@ line tools](../remote-wallet/ledger.md).
- Order a [Nano S from Ledger](https://shop.ledger.com/products/ledger-nano-s)
- Follow the instructions for device setup included in the package,
  or [Ledger's Start page](https://www.ledger.com/start/)
- [Install the latest device firmware](https://support.ledgerwallet.com/hc/en-us/articles/360002731113-Update-Ledger-Nano-S-firmware)

## Install Ledger Live
- Install [Ledger Live desktop software](https://www.ledger.com/ledger-live/),

@@ -30,13 +31,20 @@ line tools](../remote-wallet/ledger.md).
- Open Ledger Live
- Currently Ledger Live needs to be in "Developer Mode"
  (Settings > Experimental Features > Developer Mode) to see our app.
[image: ledger-live-enable-developer-mode.png]

- Go to Manager in the app and find "Solana" in the App Catalog and
  click Install
- Make sure your device is plugged in via USB and is unlocked with its PIN
- You may be prompted on the Nano S to confirm the install of Solana App
- "Solana" should now show as "Installed" in the Ledger Live Manager
[image: ledger-live-install-solana-app.png]

## Interact with Solana network
- To interact with your Ledger wallet on our live network, please see our
instructions on how to [use a Ledger Nano S with the Solana command
line tools](../remote-wallet/ledger.md).
instructions on how to
[use a Ledger Nano S with the Solana command line tools](../remote-wallet/ledger.md).

## Support

Check out our [Wallet Support Page](../wallet/support.md) for ways to get help.
docs/src/wallet/support.md (new file, +13)
@@ -0,0 +1,13 @@
# Support / Troubleshooting

If you have questions or are having trouble setting up or using your wallet
of choice, please make sure you've read through all the relevant pages in our
[Wallet Guide](README.md). The Solana team is working hard to support new
features on popular wallets, and we do our best to keep our documents up to date
with the latest available features.

If you have questions after reading the docs, feel free to reach out to us on
our [Telegram](https://t.me/solanaio).

For **technical support**, reach out to us on
[Discord](https://discordapp.com/invite/pquxPsq), using the #wallet-support
channel in our Community section.
@@ -1,16 +1,24 @@
#Trust Wallet
# Trust Wallet

Trust Wallet is an app for your smartphone or tablet and is the fastest and
simplest way for most users to get started with a Solana wallet.

**NOTE: Trust Wallet currently only supports Solana on the iOS version of its
app. Support for Android is coming very very soon!**

## Install Trust Wallet

##Set Up Trust Wallet
- Open the App Store or Play Store
#### iOS

- Open the App Store
- Download “Trust: Crypto & Bitcoin Wallet” from Six Days LLC
- Requires iOS 13.0 or higher
- Open Trust Wallet and follow the app prompts to get started

#### Android

- Open the Play Store
- Download “Trust Crypto Wallet” from Six Days LLC
- Requires Android 6.0 or higher
- Open Trust Wallet and follow the app prompts to get started

##Add Solana (SOL) tokens to your wallet
## Add Solana (SOL) tokens to your wallet
- From the main page, go to the “Tokens” tab at the top of the screen
- Tap the “+” icon at the top right corner
- Search for “Solana” in the search page, and when the “Solana SOL” token is

@@ -19,7 +27,7 @@ shown, slide the slider to enable this token.

[Trust Wallet Official Docs: How to Add or Remove a Coin](https://community.trustwallet.com/t/how-to-add-or-remove-a-coin/896)

##Receiving SOL tokens
## Receiving SOL tokens
- To receive SOL tokens that you’ve purchased or earned, you need to send your
Receive Address to whoever is sending you tokens.
- Tap “Receive” to view a QR code and your text address, which is a long string

@@ -32,6 +40,10 @@ to that address, **those tokens will be lost forever**.

[Trust Wallet Official Docs: How to Find my Receiving Address](https://community.trustwallet.com/t/how-to-find-my-receiving-address/2006)

##Troubleshooting
## Troubleshooting
If you are having trouble setting up your Trust Wallet app, check out their
[Community Help Center](https://community.trustwallet.com/c/helpcenter)

## Support

Check out our [Wallet Support Page](../wallet/support.md) for ways to get help.
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-faucet"
|
||||
version = "1.0.11"
|
||||
version = "1.0.22"
|
||||
description = "Solana Faucet"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -19,10 +19,10 @@ clap = "2.33"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.11" }
|
||||
solana-logger = { path = "../logger", version = "1.0.11" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.11" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.11" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
|
||||
solana-logger = { path = "../logger", version = "1.0.22" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.22" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.22" }
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
|
||||
@@ -33,3 +33,6 @@ name = "solana_faucet"
|
||||
[[bin]]
|
||||
name = "solana-faucet"
|
||||
path = "src/bin/faucet.rs"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -21,10 +21,20 @@ if [[ ! -f target/perf-libs/.$VERSION ]]; then
  (
    set -x
    cd target/perf-libs
    curl -L --retry 5 --retry-delay 2 --retry-connrefused -o solana-perf.tgz \
      https://github.com/solana-labs/solana-perf-libs/releases/download/$PERF_LIBS_VERSION/solana-perf.tgz

    if [[ -r ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz ]]; then
      cp ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz solana-perf.tgz
    else
      curl -L --retry 5 --retry-delay 2 --retry-connrefused -o solana-perf.tgz \
        https://github.com/solana-labs/solana-perf-libs/releases/download/$PERF_LIBS_VERSION/solana-perf.tgz
    fi
    tar zxvf solana-perf.tgz
    rm -f solana-perf.tgz

    if [[ ! -r ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz ]]; then
      # Save it for next time
      mkdir -p ~/.cache
      mv solana-perf.tgz ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz
    fi
    touch .$VERSION
  )
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-genesis-programs"
|
||||
version = "1.0.11"
|
||||
version = "1.0.22"
|
||||
description = "Solana genesis programs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -10,17 +10,20 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = { version = "0.4.8" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.0.11" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.0.11" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.0.11" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.0.11" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.11" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.11" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.0.11" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.11" }
|
||||
solana-vest-program = { path = "../programs/vest", version = "1.0.11" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.11" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.0.22" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.0.22" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.0.22" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.0.22" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.22" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.22" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
|
||||
solana-vest-program = { path = "../programs/vest", version = "1.0.22" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
name = "solana_genesis_programs"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-genesis"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.0.11"
|
||||
version = "1.0.22"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -15,13 +15,13 @@ chrono = "0.4"
|
||||
serde = "1.0.104"
|
||||
serde_json = "1.0.46"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.11" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.0.11" }
|
||||
solana-ledger = { path = "../ledger", version = "1.0.11" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.11" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.0.11" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.11" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.11" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.0.22" }
|
||||
solana-ledger = { path = "../ledger", version = "1.0.22" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.22" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
@@ -31,3 +31,6 @@ path = "src/main.rs"
|
||||
|
||||
[lib]
|
||||
name = "solana_genesis"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -3,19 +3,22 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-gossip"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.0.11"
|
||||
version = "1.0.22"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.11" }
|
||||
solana-core = { path = "../core", version = "1.0.11" }
|
||||
solana-client = { path = "../client", version = "1.0.11" }
|
||||
solana-logger = { path = "../logger", version = "1.0.11" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.11" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.11" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
|
||||
solana-core = { path = "../core", version = "1.0.22" }
|
||||
solana-client = { path = "../client", version = "1.0.22" }
|
||||
solana-logger = { path = "../logger", version = "1.0.22" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.22" }
|
||||
|
||||
|
||||
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-install"
|
||||
description = "The solana cluster software installer"
|
||||
version = "1.0.11"
|
||||
version = "1.0.22"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -24,11 +24,11 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.11" }
|
||||
solana-client = { path = "../client", version = "1.0.11" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.0.11" }
|
||||
solana-logger = { path = "../logger", version = "1.0.11" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.11" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
|
||||
solana-client = { path = "../client", version = "1.0.22" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.0.22" }
|
||||
solana-logger = { path = "../logger", version = "1.0.22" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.22" }
|
||||
semver = "0.9.0"
|
||||
tar = "0.4.26"
|
||||
tempdir = "0.3.7"
|
||||
@@ -45,3 +45,6 @@ path = "src/main-install.rs"
|
||||
[[bin]]
|
||||
name = "solana-install-init"
|
||||
path = "src/main-install-init.rs"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -581,7 +581,7 @@ pub fn info(config_file: &str, local_info_only: bool) -> Result<Option<UpdateMan
    if let Some(explicit_release) = &config.explicit_release {
        match explicit_release {
            ExplicitRelease::Semver(release_semver) => {
                println_name_value(&format!("v{}Release version:", BULLET), &release_semver);
                println_name_value(&format!("{}Release version:", BULLET), &release_semver);
                println_name_value(
                    &format!("{}Release URL:", BULLET),
                    &github_release_download_url(release_semver),
Some files were not shown because too many files have changed in this diff.