Compare commits

86 Commits

73dad25d74
a895ce51ee
3f95e7f055
a54042fc11
68525a961f
45093c8092
c3227ab671
967c178f5d
310aa1a63f
d5ae850169
89f5153316
677008b6cc
7936f34df8
65f0187324
8dc5d10f9c
58d8c3ad70
7df45cf58a
3379a8470d
0969e87b08
7a0dcdd1a4
34893d2449
ec8d1c5e2b
e1dbed25b6
3b08a2a116
7e42eca4b0
580304add4
b58ce6c740
0b27d0b363
6ea74c3d29
15631f8194
b87a1d2bc5
eae98ad8ab
3a6c23e995
2e3db6aba8
f1e635d088
cc07c86aab
543b6016ea
f4e05909f7
5da1466d08
7a8528793e
4a0338c902
11b4da4146
33c19130b5
0c7689206c
756bc3b5bb
571b2eb807
9819fe6684
ec7e44659d
40d0f8da2d
47ddb84078
4649378f95
3f6027055c
d61a46476a
c112f51f97
c1351d6b12
c1acfe4843
68a4288078
c4c96e1460
32ab57fa83
a33e8cc164
c8b4f616b0
380c3b0080
2d6847c27b
d5b9899ac9
9817cd769a
ec3d2fdbdc
1f794fb1da
89e1d7300d
d239550e68
3dc336e1f1
220a369efa
b079564a13
e8935aa99e
016a342de0
47c6dfe1aa
c66d528e85
8ba8deb933
587342d5e3
f31d2d9cc4
bc761c2c02
6f4bc3aaff
070664ff94
61c2883de6
e32f7dbe49
c0b178db45
1027b0681b
Cargo.lock — generated, 768 lines changed
File diff suppressed because it is too large
@@ -52,6 +52,7 @@ members = [
    "sdk",
    "sdk-c",
    "scripts",
    "stake-accounts",
    "stake-monitor",
    "sys-tuner",
    "transaction-status",
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.1.0"
version = "1.1.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-measure = { path = "../measure", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-runtime = { path = "../runtime", version = "1.1.3" }
solana-measure = { path = "../measure", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
rand = "0.6.5"
clap = "2.33.0"
crossbeam-channel = "0.4"
@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "1.1.0"
version = "1.1.3"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,23 +15,23 @@ ed25519-dalek = "=1.0.0-pre.1"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-client = { path = "../client", version = "1.1.0" }
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.3" }
solana-storage-program = { path = "../programs/storage", version = "1.1.3" }
thiserror = "1.0"
serde = "1.0.105"
serde_json = "1.0.48"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-chacha = { path = "../chacha", version = "1.1.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-streamer = { path = "../streamer", version = "1.1.0" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
solana-chacha = { path = "../chacha", version = "1.1.3" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.3" }
solana-ledger = { path = "../ledger", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-perf = { path = "../perf", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
solana-core = { path = "../core", version = "1.1.3" }
solana-streamer = { path = "../streamer", version = "1.1.3" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.3" }
solana-metrics = { path = "../metrics", version = "1.1.3" }

[dev-dependencies]
hex = "0.4.2"
@@ -14,7 +14,7 @@ use solana_core::{
    contact_info::ContactInfo,
    gossip_service::GossipService,
    repair_service,
    repair_service::{RepairService, RepairSlotRange, RepairStrategy},
    repair_service::{RepairService, RepairSlotRange, RepairStats, RepairStrategy},
    serve_repair::ServeRepair,
    shred_fetch_stage::ShredFetchStage,
    sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
@@ -844,13 +844,14 @@ impl Archiver {
            repair_service::MAX_REPAIR_LENGTH,
            &repair_slot_range,
        );
        let mut repair_stats = RepairStats::default();
        //iter over the repairs and send them
        if let Ok(repairs) = repairs {
            let reqs: Vec<_> = repairs
                .into_iter()
                .filter_map(|repair_request| {
                    serve_repair
                        .map_repair_request(&repair_request)
                        .map_repair_request(&repair_request, &mut repair_stats)
                        .map(|result| ((archiver_info.gossip, result), repair_request))
                        .ok()
                })
@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "1.1.0"
version = "1.1.3"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,12 +11,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
solana-chacha = { path = "../chacha", version = "1.1.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-chacha = { path = "../chacha", version = "1.1.3" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.3" }
solana-ledger = { path = "../ledger", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-perf = { path = "../perf", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }

[dev-dependencies]
hex = "0.4.2"
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.1.0"
version = "1.1.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.10.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
solana-core = { path = "../core", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-metrics = { path = "../metrics", version = "1.1.3" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.3" }
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.1.0"
version = "1.1.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,13 +10,13 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.0" }
solana-streamer = { path = "../streamer", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-measure = { path = "../measure", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.3" }
solana-streamer = { path = "../streamer", version = "1.1.3" }
solana-perf = { path = "../perf", version = "1.1.3" }
solana-ledger = { path = "../ledger", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-runtime = { path = "../runtime", version = "1.1.3" }
solana-measure = { path = "../measure", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
rand = "0.6.5"
crossbeam-channel = "0.4"
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.1.0"
version = "1.1.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,17 +18,17 @@ rand = "0.6.5"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-genesis = { path = "../genesis", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.0" }
solana-faucet = { path = "../faucet", version = "1.1.0" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
solana-core = { path = "../core", version = "1.1.3" }
solana-genesis = { path = "../genesis", version = "1.1.3" }
solana-client = { path = "../client", version = "1.1.3" }
solana-faucet = { path = "../faucet", version = "1.1.3" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-metrics = { path = "../metrics", version = "1.1.3" }
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
solana-runtime = { path = "../runtime", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.1.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.1.3" }
@@ -2,14 +2,14 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.1.0"
version = "1.1.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-streamer = { path = "../streamer", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
solana-streamer = { path = "../streamer", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.1.0"
version = "1.1.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,24 +14,24 @@ log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-genesis = { path = "../genesis", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.0" }
solana-faucet = { path = "../faucet", version = "1.1.0" }
solana-librapay = { path = "../programs/librapay", version = "1.1.0", optional = true }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-measure = { path = "../measure", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.0", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
solana-core = { path = "../core", version = "1.1.3" }
solana-genesis = { path = "../genesis", version = "1.1.3" }
solana-client = { path = "../client", version = "1.1.3" }
solana-faucet = { path = "../faucet", version = "1.1.3" }
solana-librapay = { path = "../programs/librapay", version = "1.1.3", optional = true }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-metrics = { path = "../metrics", version = "1.1.3" }
solana-measure = { path = "../measure", version = "1.1.3" }
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
solana-runtime = { path = "../runtime", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.3", optional = true }

[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.1.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.1.3" }

[features]
move = ["solana-librapay", "solana-move-loader-program"]
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "1.1.0"
version = "1.1.3"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,12 +10,12 @@ edition = "2018"

[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.0" }
solana-chacha = { path = "../chacha", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.3" }
solana-chacha = { path = "../chacha", version = "1.1.3" }
solana-ledger = { path = "../ledger", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-perf = { path = "../perf", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }

[dev-dependencies]
hex-literal = "0.2.1"
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "1.1.0"
version = "1.1.3"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "1.1.0"
version = "1.1.3"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2018"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.3" }
solana-ledger = { path = "../ledger", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-perf = { path = "../perf", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }

[dev-dependencies]
hex-literal = "0.2.1"
ci/buildkite-tests.yml — new file, 37 lines

@@ -0,0 +1,37 @@
# These steps are conditionally triggered by ci/buildkite.yml when files
# other than those in docs/ are modified

steps:
  - command: "ci/shellcheck.sh"
    name: "shellcheck"
    timeout_in_minutes: 5

  - wait

  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 40
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"
  - command: "ci/test-bench.sh"
    name: "bench"
    timeout_in_minutes: 30
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
    name: "stable"
    timeout_in_minutes: 60
    artifact_paths: "log-*.txt"
    agents:
      - "queue=rpc-test-capable"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
    name: "move"
    timeout_in_minutes: 20
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
    name: "local-cluster"
    timeout_in_minutes: 45
    artifact_paths: "log-*.txt"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
    name: "coverage"
    timeout_in_minutes: 30
    agents:
      - "queue=rpc-test-capable"
@@ -1,42 +1,22 @@
# Build steps that run on pushes and pull requests.
# If files other than those in docs/ were modified, this will be followed up by
# ci/buildkite-tests.yml
#
# Release tags use buildkite-release.yml instead

steps:
  - command: "ci/shellcheck.sh"
    name: "shellcheck"
    timeout_in_minutes: 5
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
    name: "checks"
    timeout_in_minutes: 20

  - wait
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 40
    artifact_paths: "log-*.txt"
    agents:
      - "queue=cuda"
  - command: "ci/test-bench.sh"
    name: "bench"
    timeout_in_minutes: 30
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
    name: "stable"
    timeout_in_minutes: 60
    artifact_paths: "log-*.txt"
    agents:
      - "queue=rpc-test-capable"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
    name: "move"
    timeout_in_minutes: 20
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
    name: "local-cluster"
    timeout_in_minutes: 45
    artifact_paths: "log-*.txt"
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
    name: "coverage"
    timeout_in_minutes: 30
    agents:
      - "queue=rpc-test-capable"

  - command: "ci/maybe-trigger-tests.sh"
    name: "maybe-trigger-tests"
    timeout_in_minutes: 2

  - wait

  - trigger: "solana-secondary"
    branches: "!pull/*"
    async: true
ci/maybe-trigger-tests.sh — new executable file, 21 lines

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."

annotate() {
  ${BUILDKITE:-false} && {
    buildkite-agent annotate "$@"
  }
}

# Skip if only the docs have been modified
ci/affects-files.sh \
  \!^docs/ \
|| {
  annotate --style info \
    "Skipping all further tests as only docs/ files were modified"
  exit 0
}

annotate --style info "Triggering tests"
buildkite-agent pipeline upload ci/buildkite-tests.yml
@@ -22,7 +22,7 @@ _ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings

_ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0006
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ docs/build.sh
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.1.0"
version = "1.1.3"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"
@@ -62,6 +62,21 @@ pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option<Keypair> {
    }
}

pub fn keypairs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Keypair>> {
    matches.values_of(name).map(|values| {
        values
            .filter_map(|value| {
                if value == ASK_KEYWORD {
                    let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
                    keypair_from_seed_phrase(name, skip_validation, true).ok()
                } else {
                    read_keypair_file(value).ok()
                }
            })
            .collect()
    })
}

// Return a pubkey for an argument that can itself be parsed into a pubkey,
// or is a filename that can be read as a keypair
pub fn pubkey_of(matches: &ArgMatches<'_>, name: &str) -> Option<Pubkey> {
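The new `keypairs_of` helper mirrors `keypair_of` but collects every occurrence of a multi-value argument, prompting for a seed phrase when the ASK keyword is given and otherwise reading a keypair file. A hypothetical usage sketch follows: the binary name and the `authorized_voter` argument are invented for illustration, and the helper is assumed to live alongside `keypair_of` in `solana_clap_utils::input_parsers`.

```rust
use clap::{App, Arg};
use solana_clap_utils::input_parsers::keypairs_of;

fn main() {
    // Hypothetical clap setup: one repeatable --authorized-voter argument.
    let matches = App::new("demo")
        .arg(
            Arg::with_name("authorized_voter")
                .long("authorized-voter")
                .takes_value(true)
                .multiple(true),
        )
        .get_matches_from(vec![
            "demo",
            "--authorized-voter",
            "voter-1.json",
            "--authorized-voter",
            "voter-2.json",
        ]);

    // Each value may be a keypair file path or the ASK keyword (seed-phrase
    // prompt); values that cannot be resolved are silently dropped.
    let keypairs = keypairs_of(&matches, "authorized_voter").unwrap_or_default();
    println!("loaded {} keypair(s)", keypairs.len());
}
```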
@@ -47,6 +47,13 @@ pub fn parse_keypair_path(path: &str) -> KeypairUrl {
    }
}

pub fn check_for_usb<S>(mut items: impl Iterator<Item = S>) -> bool
where
    S: Into<String>,
{
    items.any(|arg| matches!(parse_keypair_path(&arg.into()), KeypairUrl::Usb(_)))
}

pub fn presigner_from_pubkey_sigs(
    pubkey: &Pubkey,
    signers: &[(Pubkey, Signature)],
@@ -256,4 +263,20 @@ mod tests {
            sanitize_seed_phrase(seed_phrase)
        );
    }

    #[test]
    fn test_check_for_usb() {
        let args: Vec<&str> = vec![];
        assert_eq!(check_for_usb(args.into_iter()), false);
        let args = vec!["usb://"];
        assert_eq!(check_for_usb(args.into_iter()), true);
        let args = vec!["other"];
        assert_eq!(check_for_usb(args.into_iter()), false);
        let args = vec!["other", "usb://", "another"];
        assert_eq!(check_for_usb(args.into_iter()), true);
        let args = vec!["other", "another"];
        assert_eq!(check_for_usb(args.into_iter()), false);
        let args = vec!["usb://", "usb://"];
        assert_eq!(check_for_usb(args.into_iter()), true);
    }
}
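`check_for_usb` lets callers decide up front whether any argument is a `usb://` keypair URL. A minimal sketch of the gating pattern it enables, mirroring the `main.rs` hunk later in this comparison; the wallet-manager call is stubbed out here so the example stays self-contained.

```rust
use solana_clap_utils::keypair::check_for_usb;

fn main() {
    // Only probe for a hardware wallet when some CLI argument is a usb:// URL.
    let need_wallet_manager = check_for_usb(std::env::args());

    // In the real CLI this branch calls maybe_wallet_manager()?; a stub stands
    // in for it here.
    let wallet_manager: Option<()> = if need_wallet_manager { Some(()) } else { None };
    println!("USB wallet support initialized: {}", wallet_manager.is_some());
}
```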
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.0"
version = "1.1.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,7 +26,7 @@ impl Default for Config {
            keypair_path.extend(&[".config", "solana", "id.json"]);
            keypair_path.to_str().unwrap().to_string()
        };
        let json_rpc_url = "http://127.0.0.1:8899".to_string();
        let json_rpc_url = "https://api.mainnet-beta.solana.com".to_string();

        // Empty websocket_url string indicates the client should
        // `Config::compute_websocket_url(&json_rpc_url)`
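The default configuration now points at mainnet-beta instead of a local validator, while `websocket_url` stays empty so the client can derive it from the RPC URL. A minimal sketch of what that default looks like; it assumes the `solana-cli-config` crate from this workspace as a dependency and that `json_rpc_url` is a public field, as its use in this hunk suggests.

```rust
fn main() {
    let config = solana_cli_config::Config::default();

    // After this change the default RPC endpoint is mainnet-beta rather than
    // http://127.0.0.1:8899.
    assert_eq!(config.json_rpc_url, "https://api.mainnet-beta.solana.com");
    println!("default RPC URL: {}", config.json_rpc_url);
}
```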
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.0"
version = "1.1.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,28 +26,28 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-cli-config = { path = "../cli-config", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.0" }
solana-config-program = { path = "../programs/config", version = "1.1.0" }
solana-faucet = { path = "../faucet", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-stake-program = { path = "../programs/stake", version = "1.1.0" }
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.0" }
solana-budget-program = { path = "../programs/budget", version = "1.1.3" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
solana-cli-config = { path = "../cli-config", version = "1.1.3" }
solana-client = { path = "../client", version = "1.1.3" }
solana-config-program = { path = "../programs/config", version = "1.1.3" }
solana-faucet = { path = "../faucet", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.3" }
solana-runtime = { path = "../runtime", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
solana-stake-program = { path = "../programs/stake", version = "1.1.3" }
solana-storage-program = { path = "../programs/storage", version = "1.1.3" }
solana-vote-program = { path = "../programs/vote", version = "1.1.3" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.3" }
titlecase = "1.1.0"
thiserror = "1.0.13"
url = "2.1.1"

[dev-dependencies]
solana-core = { path = "../core", version = "1.1.0" }
solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.3" }
solana-budget-program = { path = "../programs/budget", version = "1.1.3" }
tempfile = "3.1.0"

[[bin]]
cli/src/cli.rs — 139 lines changed
@ -173,6 +173,8 @@ pub enum CliCommand {
|
||||
Catchup {
|
||||
node_pubkey: Pubkey,
|
||||
node_json_rpc_url: Option<String>,
|
||||
commitment_config: CommitmentConfig,
|
||||
follow: bool,
|
||||
},
|
||||
ClusterVersion,
|
||||
CreateAddressWithSeed {
|
||||
@ -188,6 +190,9 @@ pub enum CliCommand {
|
||||
commitment_config: CommitmentConfig,
|
||||
},
|
||||
GetGenesisHash,
|
||||
GetEpoch {
|
||||
commitment_config: CommitmentConfig,
|
||||
},
|
||||
GetSlot {
|
||||
commitment_config: CommitmentConfig,
|
||||
},
|
||||
@ -405,6 +410,7 @@ pub enum CliCommand {
|
||||
to: Pubkey,
|
||||
from: SignerIndex,
|
||||
sign_only: bool,
|
||||
no_wait: bool,
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
@ -586,6 +592,7 @@ pub fn parse_command(
|
||||
command: CliCommand::GetGenesisHash,
|
||||
signers: vec![],
|
||||
}),
|
||||
("epoch", Some(matches)) => parse_get_epoch(matches),
|
||||
("slot", Some(matches)) => parse_get_slot(matches),
|
||||
("total-supply", Some(matches)) => parse_total_supply(matches),
|
||||
("transaction-count", Some(matches)) => parse_get_transaction_count(matches),
|
||||
@ -902,6 +909,7 @@ pub fn parse_command(
|
||||
let lamports = lamports_of_sol(matches, "amount").unwrap();
|
||||
let to = pubkey_of_signer(matches, "to", wallet_manager)?.unwrap();
|
||||
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
|
||||
let no_wait = matches.is_present("no_wait");
|
||||
let blockhash_query = BlockhashQuery::new_from_matches(matches);
|
||||
let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?;
|
||||
let (nonce_authority, nonce_authority_pubkey) =
|
||||
@ -927,6 +935,7 @@ pub fn parse_command(
|
||||
lamports,
|
||||
to,
|
||||
sign_only,
|
||||
no_wait,
|
||||
blockhash_query,
|
||||
nonce_account,
|
||||
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
|
||||
@ -1488,6 +1497,7 @@ fn process_transfer(
|
||||
to: &Pubkey,
|
||||
from: SignerIndex,
|
||||
sign_only: bool,
|
||||
no_wait: bool,
|
||||
blockhash_query: &BlockhashQuery,
|
||||
nonce_account: Option<&Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
@ -1534,7 +1544,11 @@ fn process_transfer(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
|
||||
let result = if no_wait {
|
||||
rpc_client.send_transaction(&tx)
|
||||
} else {
|
||||
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers)
|
||||
};
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
}
|
||||
}
|
||||
@ -1589,7 +1603,15 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::Catchup {
|
||||
node_pubkey,
|
||||
node_json_rpc_url,
|
||||
} => process_catchup(&rpc_client, node_pubkey, node_json_rpc_url),
|
||||
commitment_config,
|
||||
follow,
|
||||
} => process_catchup(
|
||||
&rpc_client,
|
||||
node_pubkey,
|
||||
node_json_rpc_url,
|
||||
*commitment_config,
|
||||
*follow,
|
||||
),
|
||||
CliCommand::ClusterVersion => process_cluster_version(&rpc_client),
|
||||
CliCommand::CreateAddressWithSeed {
|
||||
from_pubkey,
|
||||
@ -1602,6 +1624,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::GetEpochInfo { commitment_config } => {
|
||||
process_get_epoch_info(&rpc_client, *commitment_config)
|
||||
}
|
||||
CliCommand::GetEpoch { commitment_config } => {
|
||||
process_get_epoch(&rpc_client, *commitment_config)
|
||||
}
|
||||
CliCommand::GetSlot { commitment_config } => {
|
||||
process_get_slot(&rpc_client, *commitment_config)
|
||||
}
|
||||
@ -2080,6 +2105,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
to,
|
||||
from,
|
||||
sign_only,
|
||||
no_wait,
|
||||
ref blockhash_query,
|
||||
ref nonce_account,
|
||||
nonce_authority,
|
||||
@ -2091,6 +2117,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
to,
|
||||
*from,
|
||||
*sign_only,
|
||||
*no_wait,
|
||||
blockhash_query,
|
||||
nonce_account.as_ref(),
|
||||
*nonce_authority,
|
||||
@ -2497,6 +2524,12 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
.validator(is_valid_signer)
|
||||
.help("Source account of funds (if different from client local account)"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("no_wait")
|
||||
.long("no-wait")
|
||||
.takes_value(false)
|
||||
.help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"),
|
||||
)
|
||||
.offline_args()
|
||||
.arg(nonce_arg())
|
||||
.arg(nonce_authority_arg())
|
||||
@ -3183,7 +3216,7 @@ mod tests {
|
||||
|
||||
let process_id = Pubkey::new_rand();
|
||||
config.command = CliCommand::Cancel(process_id);
|
||||
assert_eq!(process_command(&config).unwrap(), SIGNATURE);
|
||||
assert!(process_command(&config).is_ok());
|
||||
|
||||
let good_signature = Signature::new(&bs58::decode(SIGNATURE).into_vec().unwrap());
|
||||
config.command = CliCommand::Confirm(good_signature);
|
||||
@ -3200,8 +3233,8 @@ mod tests {
|
||||
commission: 0,
|
||||
};
|
||||
config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let new_authorized_pubkey = Pubkey::new_rand();
|
||||
config.signers = vec![&bob_keypair];
|
||||
@ -3210,8 +3243,8 @@ mod tests {
|
||||
new_authorized_pubkey,
|
||||
vote_authorize: VoteAuthorize::Voter,
|
||||
};
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let new_identity_keypair = Keypair::new();
|
||||
config.signers = vec![&keypair, &bob_keypair, &new_identity_keypair];
|
||||
@ -3219,8 +3252,8 @@ mod tests {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_account: 2,
|
||||
};
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let bob_keypair = Keypair::new();
|
||||
let bob_pubkey = bob_keypair.pubkey();
|
||||
@ -3244,8 +3277,8 @@ mod tests {
|
||||
from: 0,
|
||||
};
|
||||
config.signers = vec![&keypair, &bob_keypair];
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let stake_pubkey = Pubkey::new_rand();
|
||||
let to_pubkey = Pubkey::new_rand();
|
||||
@ -3261,8 +3294,8 @@ mod tests {
|
||||
fee_payer: 0,
|
||||
};
|
||||
config.signers = vec![&keypair];
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let stake_pubkey = Pubkey::new_rand();
|
||||
config.command = CliCommand::DeactivateStake {
|
||||
@ -3274,8 +3307,8 @@ mod tests {
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let stake_pubkey = Pubkey::new_rand();
|
||||
let split_stake_account = Keypair::new();
|
||||
@ -3292,8 +3325,8 @@ mod tests {
|
||||
fee_payer: 0,
|
||||
};
|
||||
config.signers = vec![&keypair, &split_stake_account];
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
config.command = CliCommand::GetSlot {
|
||||
commitment_config: CommitmentConfig::default(),
|
||||
@ -3311,8 +3344,8 @@ mod tests {
|
||||
to: bob_pubkey,
|
||||
..PayCommand::default()
|
||||
});
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let date_string = "\"2018-09-19T17:30:59Z\"";
|
||||
let dt: DateTime<Utc> = serde_json::from_str(&date_string).unwrap();
|
||||
@ -3324,16 +3357,7 @@ mod tests {
|
||||
..PayCommand::default()
|
||||
});
|
||||
let result = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
|
||||
assert_eq!(
|
||||
json.as_object()
|
||||
.unwrap()
|
||||
.get("signature")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap(),
|
||||
SIGNATURE.to_string()
|
||||
);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let witness = Pubkey::new_rand();
|
||||
config.command = CliCommand::Pay(PayCommand {
|
||||
@ -3344,27 +3368,18 @@ mod tests {
|
||||
..PayCommand::default()
|
||||
});
|
||||
let result = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
|
||||
assert_eq!(
|
||||
json.as_object()
|
||||
.unwrap()
|
||||
.get("signature")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap(),
|
||||
SIGNATURE.to_string()
|
||||
);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let process_id = Pubkey::new_rand();
|
||||
config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt);
|
||||
config.signers = vec![&keypair];
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let witness = Pubkey::new_rand();
|
||||
config.command = CliCommand::Witness(bob_pubkey, witness);
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
// CreateAddressWithSeed
|
||||
let from_pubkey = Pubkey::new_rand();
|
||||
@ -3391,13 +3406,13 @@ mod tests {
|
||||
assert!(process_command(&config).is_ok());
|
||||
|
||||
config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt);
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let witness = Pubkey::new_rand();
|
||||
config.command = CliCommand::Witness(bob_pubkey, witness);
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
|
||||
// sig_not_found case
|
||||
config.rpc_client = Some(RpcClient::new_mock("sig_not_found".to_string()));
|
||||
@ -3557,6 +3572,33 @@ mod tests {
|
||||
to: to_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
|
||||
}
|
||||
);
|
||||
|
||||
// Test Transfer no-wait
|
||||
let test_transfer = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"transfer",
|
||||
"--no-wait",
|
||||
&to_string,
|
||||
"42",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Transfer {
|
||||
lamports: 42_000_000_000,
|
||||
to: to_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: true,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -3586,6 +3628,7 @@ mod tests {
|
||||
to: to_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -3620,6 +3663,7 @@ mod tests {
|
||||
to: to_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::Cluster,
|
||||
blockhash
|
||||
@ -3658,6 +3702,7 @@ mod tests {
|
||||
to: to_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_address),
|
||||
blockhash
|
||||
|
@ -69,6 +69,20 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.takes_value(true)
|
||||
.validator(is_url)
|
||||
.help("JSON RPC URL for validator, which is useful for validators with a private RPC service")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return information at maximum-lockout commitment level",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("follow")
|
||||
.long("follow")
|
||||
.takes_value(false)
|
||||
.help("Continue reporting progress even after the validator has caught up"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
@ -119,6 +133,17 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("epoch").about("Get current epoch")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return epoch at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("total-supply").about("Get total number of SOL")
|
||||
.arg(
|
||||
@ -262,10 +287,18 @@ pub fn parse_catchup(
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let node_pubkey = pubkey_of_signer(matches, "node_pubkey", wallet_manager)?.unwrap();
|
||||
let node_json_rpc_url = value_t!(matches, "node_json_rpc_url", String).ok();
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let follow = matches.is_present("follow");
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::Catchup {
|
||||
node_pubkey,
|
||||
node_json_rpc_url,
|
||||
commitment_config,
|
||||
follow,
|
||||
},
|
||||
signers: vec![],
|
||||
})
|
||||
@ -338,6 +371,18 @@ pub fn parse_get_slot(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliErr
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_get_epoch(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetEpoch { commitment_config },
|
||||
signers: vec![],
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_total_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
@ -409,20 +454,37 @@ pub fn process_catchup(
|
||||
rpc_client: &RpcClient,
|
||||
node_pubkey: &Pubkey,
|
||||
node_json_rpc_url: &Option<String>,
|
||||
commitment_config: CommitmentConfig,
|
||||
follow: bool,
|
||||
) -> ProcessResult {
|
||||
let cluster_nodes = rpc_client.get_cluster_nodes()?;
|
||||
let sleep_interval = 5;
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message("Connecting...");
|
||||
|
||||
let node_client = if let Some(node_json_rpc_url) = node_json_rpc_url {
|
||||
RpcClient::new(node_json_rpc_url.to_string())
|
||||
} else {
|
||||
RpcClient::new_socket(
|
||||
cluster_nodes
|
||||
let rpc_addr = loop {
|
||||
let cluster_nodes = rpc_client.get_cluster_nodes()?;
|
||||
if let Some(contact_info) = cluster_nodes
|
||||
.iter()
|
||||
.find(|contact_info| contact_info.pubkey == node_pubkey.to_string())
|
||||
.ok_or_else(|| format!("Contact information not found for {}", node_pubkey))?
|
||||
.rpc
|
||||
.ok_or_else(|| format!("RPC service not found for {}", node_pubkey))?,
|
||||
)
|
||||
{
|
||||
if let Some(rpc_addr) = contact_info.rpc {
|
||||
break rpc_addr;
|
||||
}
|
||||
progress_bar.set_message(&format!("RPC service not found for {}", node_pubkey));
|
||||
} else {
|
||||
progress_bar.set_message(&format!(
|
||||
"Contact information not found for {}",
|
||||
node_pubkey
|
||||
));
|
||||
}
|
||||
sleep(Duration::from_secs(sleep_interval as u64));
|
||||
};
|
||||
|
||||
RpcClient::new_socket(rpc_addr)
|
||||
};
|
||||
|
||||
let reported_node_pubkey = node_client.get_identity()?;
|
||||
@ -438,16 +500,12 @@ pub fn process_catchup(
|
||||
return Err("Both RPC URLs reference the same node, unable to monitor for catchup. Try a different --url".into());
|
||||
}
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message("Connecting...");
|
||||
|
||||
let mut previous_rpc_slot = std::u64::MAX;
|
||||
let mut previous_slot_distance = 0;
|
||||
let sleep_interval = 5;
|
||||
loop {
|
||||
let rpc_slot = rpc_client.get_slot_with_commitment(CommitmentConfig::recent())?;
|
||||
let node_slot = node_client.get_slot_with_commitment(CommitmentConfig::recent())?;
|
||||
if node_slot > std::cmp::min(previous_rpc_slot, rpc_slot) {
|
||||
let rpc_slot = rpc_client.get_slot_with_commitment(commitment_config)?;
|
||||
let node_slot = node_client.get_slot_with_commitment(commitment_config)?;
|
||||
if !follow && node_slot > std::cmp::min(previous_rpc_slot, rpc_slot) {
|
||||
progress_bar.finish_and_clear();
|
||||
return Ok(format!(
|
||||
"{} has caught up (us:{} them:{})",
|
||||
@ -461,7 +519,7 @@ pub fn process_catchup(
|
||||
slot_distance,
|
||||
node_slot,
|
||||
rpc_slot,
|
||||
if previous_rpc_slot == std::u64::MAX {
|
||||
if slot_distance == 0 || previous_rpc_slot == std::u64::MAX {
|
||||
"".to_string()
|
||||
} else {
|
||||
let slots_per_second =
|
||||
@ -606,6 +664,14 @@ pub fn process_get_slot(
|
||||
Ok(slot.to_string())
|
||||
}
|
||||
|
||||
pub fn process_get_epoch(
|
||||
rpc_client: &RpcClient,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ProcessResult {
|
||||
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
|
||||
Ok(epoch_info.epoch.to_string())
|
||||
}
|
||||
|
||||
pub fn parse_show_block_production(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let epoch = value_t!(matches, "epoch", Epoch).ok();
|
||||
let slot_limit = value_t!(matches, "slot_limit", u64).ok();
|
||||
@ -1210,7 +1276,9 @@ pub fn process_show_validators(
|
||||
);
|
||||
}
|
||||
|
||||
for vote_account in vote_accounts.current.into_iter() {
|
||||
let mut current = vote_accounts.current;
|
||||
current.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
|
||||
for vote_account in current.into_iter() {
|
||||
print_vote_account(
|
||||
vote_account,
|
||||
epoch_info.epoch,
|
||||
@ -1219,7 +1287,9 @@ pub fn process_show_validators(
|
||||
false,
|
||||
);
|
||||
}
|
||||
for vote_account in vote_accounts.delinquent.into_iter() {
|
||||
let mut delinquent = vote_accounts.delinquent;
|
||||
delinquent.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
|
||||
for vote_account in delinquent.into_iter() {
|
||||
print_vote_account(
|
||||
vote_account,
|
||||
epoch_info.epoch,
|
||||
@ -1319,6 +1389,19 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
let test_get_epoch = test_commands
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "epoch"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_get_epoch, &default_keypair_file, None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::GetEpoch {
|
||||
commitment_config: CommitmentConfig::recent(),
|
||||
},
|
||||
signers: vec![],
|
||||
}
|
||||
);
|
||||
|
||||
let test_total_supply = test_commands
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "total-supply"]);
|
||||
|
@ -2,7 +2,9 @@ use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches
|
||||
use console::style;
|
||||
|
||||
use solana_clap_utils::{
|
||||
input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, offline::SIGN_ONLY_ARG,
|
||||
input_validators::is_url,
|
||||
keypair::{check_for_usb, SKIP_SEED_PHRASE_VALIDATION_ARG},
|
||||
offline::SIGN_ONLY_ARG,
|
||||
DisplayError,
|
||||
};
|
||||
use solana_cli::{
|
||||
@ -233,12 +235,20 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
do_main(&matches).map_err(|err| DisplayError::new_as_boxed(err).into())
|
||||
do_main(&matches, check_for_usb(std::env::args()))
|
||||
.map_err(|err| DisplayError::new_as_boxed(err).into())
|
||||
}
|
||||
|
||||
fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
|
||||
fn do_main(
|
||||
matches: &ArgMatches<'_>,
|
||||
need_wallet_manager: bool,
|
||||
) -> Result<(), Box<dyn error::Error>> {
|
||||
if parse_settings(&matches)? {
|
||||
let wallet_manager = maybe_wallet_manager()?;
|
||||
let wallet_manager = if need_wallet_manager {
|
||||
maybe_wallet_manager()?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let (mut config, signers) = parse_args(&matches, wallet_manager)?;
|
||||
config.signers = signers.iter().map(|s| s.as_ref()).collect();
|
||||
|
@ -1,7 +1,7 @@
|
||||
use crate::cli::{
|
||||
build_balance_message, check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, CliSignerInfo,
|
||||
ProcessResult, SignerIndex,
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
|
||||
SignerIndex,
|
||||
};
|
||||
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*};
|
||||
@ -89,8 +89,16 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.help("Vote account in which to set the authorized voter"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_authorized_pubkey")
|
||||
Arg::with_name("authorized")
|
||||
.index(2)
|
||||
.value_name("AUTHORIZED_KEYPAIR")
|
||||
.required(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("Current authorized vote signer"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_authorized_pubkey")
|
||||
.index(3)
|
||||
.value_name("AUTHORIZED_PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
@ -111,8 +119,16 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.help("Vote account in which to set the authorized withdrawer"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_authorized_pubkey")
|
||||
Arg::with_name("authorized")
|
||||
.index(2)
|
||||
.value_name("AUTHORIZED_KEYPAIR")
|
||||
.required(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("Current authorized withdrawer"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_authorized_pubkey")
|
||||
.index(3)
|
||||
.value_name("AUTHORIZED_PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
@ -264,10 +280,11 @@ pub fn parse_vote_authorize(
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let new_authorized_pubkey =
|
||||
pubkey_of_signer(matches, "new_authorized_pubkey", wallet_manager)?.unwrap();
|
||||
let (authorized, _) = signer_of(matches, "authorized", wallet_manager)?;
|
||||
|
||||
let authorized_voter_provided = None;
|
||||
let CliSignerInfo { signers } = generate_unique_signers(
|
||||
vec![authorized_voter_provided],
|
||||
let payer_provided = None;
|
||||
let signer_info = generate_unique_signers(
|
||||
vec![payer_provided, authorized],
|
||||
matches,
|
||||
default_signer_path,
|
||||
wallet_manager,
|
||||
@ -279,7 +296,7 @@ pub fn parse_vote_authorize(
|
||||
new_authorized_pubkey,
|
||||
vote_authorize,
|
||||
},
|
||||
signers,
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
}
|
||||
|
||||
@ -455,16 +472,24 @@ pub fn process_vote_authorize(
|
||||
new_authorized_pubkey: &Pubkey,
|
||||
vote_authorize: VoteAuthorize,
|
||||
) -> ProcessResult {
|
||||
// If the `authorized_account` is also the fee payer, `config.signers` will only have one
|
||||
// keypair in it
|
||||
let authorized = if config.signers.len() == 2 {
|
||||
config.signers[1]
|
||||
} else {
|
||||
config.signers[0]
|
||||
};
|
||||
|
||||
check_unique_pubkeys(
|
||||
(vote_account_pubkey, "vote_account_pubkey".to_string()),
|
||||
(&authorized.pubkey(), "authorized_account".to_string()),
|
||||
(new_authorized_pubkey, "new_authorized_pubkey".to_string()),
|
||||
)?;
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let ixs = vec![vote_instruction::authorize(
|
||||
vote_account_pubkey, // vote account to update
|
||||
&config.signers[0].pubkey(), // current authorized voter
|
||||
new_authorized_pubkey, // new vote signer/withdrawer
|
||||
vote_authorize, // vote or withdraw
|
||||
vote_account_pubkey, // vote account to update
|
||||
&authorized.pubkey(), // current authorized
|
||||
new_authorized_pubkey, // new vote signer/withdrawer
|
||||
vote_authorize, // vote or withdraw
|
||||
)];
|
||||
|
||||
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
|
||||
@ -658,6 +683,7 @@ mod tests {
|
||||
"test",
|
||||
"vote-authorize-voter",
|
||||
&pubkey_string,
|
||||
&default_keypair_file,
|
||||
&pubkey2_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
@ -672,6 +698,32 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
let authorized_keypair = Keypair::new();
|
||||
let (authorized_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&authorized_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
||||
let test_authorize_voter = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"vote-authorize-voter",
|
||||
&pubkey_string,
|
||||
&authorized_keypair_file,
|
||||
&pubkey2_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize_voter, &default_keypair_file, None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey: pubkey,
|
||||
new_authorized_pubkey: pubkey2,
|
||||
vote_authorize: VoteAuthorize::Voter
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
read_keypair_file(&authorized_keypair_file).unwrap().into(),
|
||||
],
|
||||
}
|
||||
);
|
||||
|
||||
let (keypair_file, mut tmp_file) = make_tmp_file();
|
||||
let keypair = Keypair::new();
|
||||
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
@ -265,6 +265,7 @@ fn test_create_account_with_seed() {
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
@ -337,6 +338,7 @@ fn test_create_account_with_seed() {
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_address),
|
||||
nonce_authority: 0,
|
||||
@ -357,6 +359,7 @@ fn test_create_account_with_seed() {
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_address),
|
||||
sign_only.blockhash,
|
||||
|
@ -811,6 +811,7 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: SIG_FEE,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
@ -938,6 +939,7 @@ fn test_stake_split() {
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
@ -1085,6 +1087,7 @@ fn test_stake_set_lockup() {
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
|
@ -40,6 +40,7 @@ fn test_transfer() {
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
@ -68,6 +69,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -95,6 +97,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -110,6 +113,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -147,6 +151,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
nonce_hash,
|
||||
@ -187,6 +192,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
@ -202,6 +208,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
sign_only.blockhash,
|
||||
@ -229,6 +236,7 @@ fn test_transfer_multisession_signing() {
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
@ -269,6 +277,7 @@ fn test_transfer_multisession_signing() {
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -293,6 +302,7 @@ fn test_transfer_multisession_signing() {
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -314,6 +324,7 @@ fn test_transfer_multisession_signing() {
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.1.0"
|
||||
version = "1.1.3"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,10 +19,10 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.48"
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.3" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.3" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.3" }
|
||||
thiserror = "1.0"
|
||||
tungstenite = "0.10.1"
|
||||
url = "2.1.1"
|
||||
@ -31,4 +31,4 @@ url = "2.1.1"
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-core = "14.0.5"
|
||||
jsonrpc-http-server = "14.0.6"
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.3" }
|
||||
|
@ -8,7 +8,8 @@ use serde_json::{Number, Value};
|
||||
use solana_sdk::{
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
instruction::InstructionError,
|
||||
transaction::{self, TransactionError},
|
||||
signature::Signature,
|
||||
transaction::{self, Transaction, TransactionError},
|
||||
};
|
||||
use solana_transaction_status::TransactionStatus;
|
||||
use std::{collections::HashMap, sync::RwLock};
|
||||
@ -50,17 +51,6 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
return Ok(Value::Null);
|
||||
}
|
||||
let val = match request {
|
||||
RpcRequest::ConfirmTransaction => {
|
||||
if let Some(params_array) = params.as_array() {
|
||||
if let Value::String(param_string) = ¶ms_array[0] {
|
||||
Value::Bool(param_string == SIGNATURE)
|
||||
} else {
|
||||
Value::Null
|
||||
}
|
||||
} else {
|
||||
Value::Null
|
||||
}
|
||||
}
|
||||
RpcRequest::GetBalance => serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: Value::Number(Number::from(50)),
|
||||
@ -87,7 +77,7 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: serde_json::to_value(FeeRateGovernor::default()).unwrap(),
|
||||
})?,
|
||||
RpcRequest::GetSignatureStatus => {
|
||||
RpcRequest::GetSignatureStatuses => {
|
||||
let status: transaction::Result<()> = if self.url == "account_in_use" {
|
||||
Err(TransactionError::AccountInUse)
|
||||
} else if self.url == "instruction_error" {
|
||||
@ -101,10 +91,12 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
let status = if self.url == "sig_not_found" {
|
||||
None
|
||||
} else {
|
||||
let err = status.clone().err();
|
||||
Some(TransactionStatus {
|
||||
status,
|
||||
slot: 1,
|
||||
confirmations: Some(0),
|
||||
confirmations: None,
|
||||
err,
|
||||
})
|
||||
};
|
||||
serde_json::to_value(Response {
|
||||
@ -114,7 +106,17 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
}
|
||||
RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)),
|
||||
RpcRequest::GetSlot => Value::Number(Number::from(0)),
|
||||
RpcRequest::SendTransaction => Value::String(SIGNATURE.to_string()),
|
||||
RpcRequest::SendTransaction => {
|
||||
let signature = if self.url == "malicious" {
|
||||
Signature::new(&[8; 64]).to_string()
|
||||
} else {
|
||||
let tx_str = params.as_array().unwrap()[0].as_str().unwrap().to_string();
|
||||
let data = bs58::decode(tx_str).into_vec().unwrap();
|
||||
let tx: Transaction = bincode::deserialize(&data).unwrap();
|
||||
tx.signatures[0].to_string()
|
||||
};
|
||||
Value::String(signature)
|
||||
}
|
||||
RpcRequest::GetMinimumBalanceForRentExemption => Value::Number(Number::from(1234)),
|
||||
_ => Value::Null,
|
||||
};
|
||||
|
@ -77,17 +77,16 @@ impl RpcClient {
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<bool> {
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::ConfirmTransaction,
|
||||
json!([signature.to_string(), commitment_config]),
|
||||
0,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("ConfirmTransaction"))?;
|
||||
let Response { context, value } = self.get_signature_statuses(&[*signature])?;
|
||||
|
||||
serde_json::from_value::<Response<bool>>(response)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "ConfirmTransaction"))
|
||||
Ok(Response {
|
||||
context,
|
||||
value: value[0]
|
||||
.as_ref()
|
||||
.filter(|result| result.satisfies_commitment(commitment_config))
|
||||
.map(|result| result.status.is_ok())
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
|
||||
@ -100,9 +99,24 @@ impl RpcClient {
|
||||
None => {
|
||||
Err(RpcError::ForUser("Received result of an unexpected type".to_string()).into())
|
||||
}
|
||||
Some(signature_base58_str) => signature_base58_str
|
||||
.parse::<Signature>()
|
||||
.map_err(|err| RpcError::ParseError(err.to_string()).into()),
|
||||
Some(signature_base58_str) => {
|
||||
let signature = signature_base58_str.parse::<Signature>().map_err(|err| {
|
||||
Into::<ClientError>::into(RpcError::ParseError(err.to_string()))
|
||||
})?;
|
||||
// A mismatching RPC response signature indicates an issue with the RPC node, and
|
||||
// should not be passed along to confirmation methods. The transaction may or may
|
||||
// not have been submitted to the cluster, so callers should verify the success of
|
||||
// the correct transaction signature independently.
|
||||
if signature != transaction.signatures[0] {
|
||||
Err(RpcError::RpcRequestError(format!(
|
||||
"RPC node returned mismatched signature {:?}, expected {:?}",
|
||||
signature, transaction.signatures[0]
|
||||
))
|
||||
.into())
|
||||
} else {
|
||||
Ok(transaction.signatures[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -113,20 +127,34 @@ impl RpcClient {
|
||||
self.get_signature_status_with_commitment(signature, CommitmentConfig::default())
|
||||
}
|
||||
|
||||
pub fn get_signature_statuses(
|
||||
&self,
|
||||
signatures: &[Signature],
|
||||
) -> RpcResult<Vec<Option<TransactionStatus>>> {
|
||||
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
|
||||
let signature_status =
|
||||
self.client
|
||||
.send(&RpcRequest::GetSignatureStatuses, json!([signatures]), 5)?;
|
||||
Ok(serde_json::from_value(signature_status)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?)
|
||||
}
|
||||
|
||||
pub fn get_signature_status_with_commitment(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<Option<transaction::Result<()>>> {
|
||||
let signature_status = self.client.send(
|
||||
&RpcRequest::GetSignatureStatus,
|
||||
json!([[signature.to_string()], commitment_config]),
|
||||
&RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()]]),
|
||||
5,
|
||||
)?;
|
||||
let result: Response<Vec<Option<TransactionStatus>>> =
|
||||
serde_json::from_value(signature_status).unwrap();
|
||||
serde_json::from_value(signature_status)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
|
||||
Ok(result.value[0]
|
||||
.clone()
|
||||
.filter(|result| result.satisfies_commitment(commitment_config))
|
||||
.map(|status_meta| status_meta.status))
|
||||
}
|
||||
|
||||
@ -855,14 +883,13 @@ impl RpcClient {
|
||||
trace!("check_signature: {:?}", signature);
|
||||
|
||||
for _ in 0..30 {
|
||||
let response = self.client.send(
|
||||
&RpcRequest::ConfirmTransaction,
|
||||
json!([signature.to_string(), CommitmentConfig::recent()]),
|
||||
0,
|
||||
);
|
||||
|
||||
let response =
|
||||
self.confirm_transaction_with_commitment(signature, CommitmentConfig::recent());
|
||||
match response {
|
||||
Ok(Value::Bool(signature_status)) => {
|
||||
Ok(Response {
|
||||
value: signature_status,
|
||||
..
|
||||
}) => {
|
||||
if signature_status {
|
||||
trace!("Response found signature");
|
||||
} else {
|
||||
@ -871,12 +898,6 @@ impl RpcClient {
|
||||
|
||||
return signature_status;
|
||||
}
|
||||
Ok(other) => {
|
||||
debug!(
|
||||
"check_signature request failed, expected bool, got: {:?}",
|
||||
other
|
||||
);
|
||||
}
|
||||
Err(err) => {
|
||||
debug!("check_signature request failed: {:?}", err);
|
||||
}
|
||||
@ -948,20 +969,20 @@ impl RpcClient {
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::GetSignatureStatus,
|
||||
json!([[signature.to_string()], CommitmentConfig::recent().ok()]),
|
||||
1,
|
||||
&RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()]]),
|
||||
5,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("GetSignatureStatus"))?;
|
||||
let result: Response<Vec<Option<TransactionStatus>>> =
|
||||
serde_json::from_value(response).unwrap();
|
||||
.map_err(|err| err.into_with_command("GetSignatureStatuses"))?;
|
||||
let result: Response<Vec<Option<TransactionStatus>>> = serde_json::from_value(response)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
|
||||
|
||||
let confirmations = result.value[0]
|
||||
.clone()
|
||||
.ok_or_else(|| {
|
||||
ClientError::new_with_command(
|
||||
ClientErrorKind::Custom("signature not found".to_string()),
|
||||
"GetSignatureStatus",
|
||||
"GetSignatureStatuses",
|
||||
)
|
||||
})?
|
||||
.confirmations
|
||||
@ -1088,10 +1109,7 @@ pub fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
client_error::ClientErrorKind,
|
||||
mock_rpc_client_request::{PUBKEY, SIGNATURE},
|
||||
};
|
||||
use crate::{client_error::ClientErrorKind, mock_rpc_client_request::PUBKEY};
|
||||
use assert_matches::assert_matches;
|
||||
use jsonrpc_core::{Error, IoHandler, Params};
|
||||
use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder};
|
||||
@ -1204,12 +1222,17 @@ mod tests {
|
||||
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
|
||||
|
||||
let signature = rpc_client.send_transaction(&tx);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.parse().unwrap());
|
||||
assert_eq!(signature.unwrap(), tx.signatures[0]);
|
||||
|
||||
let rpc_client = RpcClient::new_mock("fails".to_string());
|
||||
|
||||
let signature = rpc_client.send_transaction(&tx);
|
||||
assert!(signature.is_err());
|
||||
|
||||
// Test bad signature returned from rpc node
|
||||
let rpc_client = RpcClient::new_mock("malicious".to_string());
|
||||
let signature = rpc_client.send_transaction(&tx);
|
||||
assert!(signature.is_err());
|
||||
}
|
||||
#[test]
|
||||
fn test_get_recent_blockhash() {
|
||||
|
@ -3,7 +3,6 @@ use thiserror::Error;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub enum RpcRequest {
|
||||
ConfirmTransaction,
|
||||
DeregisterNode,
|
||||
ValidatorExit,
|
||||
GetAccountInfo,
|
||||
@ -22,7 +21,7 @@ pub enum RpcRequest {
|
||||
GetRecentBlockhash,
|
||||
GetFeeCalculatorForBlockhash,
|
||||
GetFeeRateGovernor,
|
||||
GetSignatureStatus,
|
||||
GetSignatureStatuses,
|
||||
GetSlot,
|
||||
GetSlotLeader,
|
||||
GetStorageTurn,
|
||||
@ -45,7 +44,6 @@ impl RpcRequest {
|
||||
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
|
||||
let jsonrpc = "2.0";
|
||||
let method = match self {
|
||||
RpcRequest::ConfirmTransaction => "confirmTransaction",
|
||||
RpcRequest::DeregisterNode => "deregisterNode",
|
||||
RpcRequest::ValidatorExit => "validatorExit",
|
||||
RpcRequest::GetAccountInfo => "getAccountInfo",
|
||||
@ -64,7 +62,7 @@ impl RpcRequest {
|
||||
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
|
||||
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
|
||||
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
|
||||
RpcRequest::GetSignatureStatus => "getSignatureStatus",
|
||||
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
|
||||
RpcRequest::GetSlot => "getSlot",
|
||||
RpcRequest::GetSlotLeader => "getSlotLeader",
|
||||
RpcRequest::GetStorageTurn => "getStorageTurn",
|
||||
|
@ -4,7 +4,7 @@ use solana_sdk::{
|
||||
clock::{Epoch, Slot},
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
pubkey::Pubkey,
|
||||
transaction::Result,
|
||||
transaction::{Result, TransactionError},
|
||||
};
|
||||
use std::{collections::HashMap, net::SocketAddr, str::FromStr};
|
||||
|
||||
@ -54,6 +54,12 @@ pub struct RpcKeyedAccount {
|
||||
pub account: RpcAccount,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcSignatureResult {
|
||||
pub err: Option<TransactionError>,
|
||||
}
|
||||
|
||||
/// A duplicate representation of a Message for pretty JSON serialization
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.1.0"
|
||||
version = "1.1.3"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@ -41,35 +41,35 @@ regex = "1.3.6"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.48"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
|
||||
solana-client = { path = "../client", version = "1.1.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.0" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.3" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
|
||||
solana-client = { path = "../client", version = "1.1.3" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.3" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.3" }
|
||||
ed25519-dalek = "=1.0.0-pre.1"
|
||||
solana-ledger = { path = "../ledger", version = "1.1.0" }
|
||||
solana-logger = { path = "../logger", version = "1.1.0" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.0" }
|
||||
solana-measure = { path = "../measure", version = "1.1.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.0" }
|
||||
solana-perf = { path = "../perf", version = "1.1.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.0" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.1.0" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.0" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.3" }
|
||||
solana-logger = { path = "../logger", version = "1.1.3" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.3" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.3" }
|
||||
solana-measure = { path = "../measure", version = "1.1.3" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.3" }
|
||||
solana-perf = { path = "../perf", version = "1.1.3" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.3" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.3" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.3" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.3" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.3" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.3" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.1.3" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.3" }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.0" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.3" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@ -3,13 +3,14 @@
|
||||
extern crate test;
|
||||
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
|
||||
use solana_core::cluster_info::{ClusterInfo, Node};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_ledger::shred::Shred;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::collections::HashMap;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::Arc;
|
||||
use std::sync::RwLock;
|
||||
use std::{collections::HashMap, net::UdpSocket, sync::Arc, time::Instant};
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
@ -20,10 +21,8 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
|
||||
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
|
||||
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
|
||||
const SHRED_SIZE: usize = 1024;
|
||||
const NUM_SHREDS: usize = 32;
|
||||
let shreds = vec![vec![0; SHRED_SIZE]; NUM_SHREDS];
|
||||
let seeds = vec![[0u8; 32]; NUM_SHREDS];
|
||||
let shreds = vec![Shred::new_empty_data_shred(); NUM_SHREDS];
|
||||
let mut stakes = HashMap::new();
|
||||
const NUM_PEERS: usize = 200;
|
||||
for _ in 0..NUM_PEERS {
|
||||
@ -33,10 +32,19 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
|
||||
stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);
|
||||
}
|
||||
let stakes = Arc::new(stakes);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes.clone()));
|
||||
let shreds = Arc::new(shreds);
|
||||
bencher.iter(move || {
|
||||
let shreds = shreds.clone();
|
||||
cluster_info
|
||||
.broadcast_shreds(&socket, shreds, &seeds, Some(stakes.clone()))
|
||||
.unwrap();
|
||||
broadcast_shreds(
|
||||
&socket,
|
||||
&shreds,
|
||||
&peers_and_stakes,
|
||||
&peers,
|
||||
&mut Instant::now(),
|
||||
&mut 0,
|
||||
)
|
||||
.unwrap();
|
||||
});
|
||||
}
|
||||
|
@ -10,29 +10,36 @@ use std::sync::{
|
||||
use std::thread::{self, sleep, Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
pub struct AccountsCleanupService {
|
||||
t_cleanup: JoinHandle<()>,
|
||||
pub struct AccountsBackgroundService {
|
||||
t_background: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl AccountsCleanupService {
|
||||
const INTERVAL_MS: u64 = 100;
|
||||
|
||||
impl AccountsBackgroundService {
|
||||
pub fn new(bank_forks: Arc<RwLock<BankForks>>, exit: &Arc<AtomicBool>) -> Self {
|
||||
info!("AccountsCleanupService active");
|
||||
info!("AccountsBackgroundService active");
|
||||
let exit = exit.clone();
|
||||
let t_cleanup = Builder::new()
|
||||
.name("solana-accounts-cleanup".to_string())
|
||||
let t_background = Builder::new()
|
||||
.name("solana-accounts-background".to_string())
|
||||
.spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
bank.clean_dead_slots();
|
||||
sleep(Duration::from_millis(100));
|
||||
|
||||
bank.process_dead_slots();
|
||||
|
||||
// Currently, given INTERVAL_MS, we process 1 slot/100 ms
|
||||
bank.process_stale_slot();
|
||||
|
||||
sleep(Duration::from_millis(INTERVAL_MS));
|
||||
})
|
||||
.unwrap();
|
||||
Self { t_cleanup }
|
||||
Self { t_background }
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_cleanup.join()
|
||||
self.t_background.join()
|
||||
}
|
||||
}
|
@ -4,7 +4,7 @@
|
||||
// hash on gossip. Monitor gossip for messages from validators in the --trusted-validators
|
||||
// set and halt the node if a mismatch is detected.
|
||||
|
||||
use crate::cluster_info::ClusterInfo;
|
||||
use crate::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES};
|
||||
use solana_ledger::{
|
||||
snapshot_package::SnapshotPackage, snapshot_package::SnapshotPackageReceiver,
|
||||
snapshot_package::SnapshotPackageSender,
|
||||
@ -94,6 +94,10 @@ impl AccountsHashVerifier {
|
||||
hashes.push((snapshot_package.root, snapshot_package.hash));
|
||||
}
|
||||
|
||||
while hashes.len() > MAX_SNAPSHOT_HASHES {
|
||||
hashes.remove(0);
|
||||
}
|
||||
|
||||
if halt_on_trusted_validator_accounts_hash_mismatch {
|
||||
let mut slot_to_hash = HashMap::new();
|
||||
for (slot, hash) in hashes.iter() {
|
||||
@ -119,6 +123,7 @@ impl AccountsHashVerifier {
|
||||
slot_to_hash: &mut HashMap<Slot, Hash>,
|
||||
) -> bool {
|
||||
let mut verified_count = 0;
|
||||
let mut highest_slot = 0;
|
||||
if let Some(trusted_validators) = trusted_validators.as_ref() {
|
||||
for trusted_validator in trusted_validators {
|
||||
let cluster_info_r = cluster_info.read().unwrap();
|
||||
@ -140,6 +145,7 @@ impl AccountsHashVerifier {
|
||||
verified_count += 1;
|
||||
}
|
||||
} else {
|
||||
highest_slot = std::cmp::max(*slot, highest_slot);
|
||||
slot_to_hash.insert(*slot, *hash);
|
||||
}
|
||||
}
|
||||
@ -147,6 +153,10 @@ impl AccountsHashVerifier {
|
||||
}
|
||||
}
|
||||
inc_new_counter_info!("accounts_hash_verifier-hashes_verified", verified_count);
|
||||
datapoint_info!(
|
||||
"accounts_hash_verifier",
|
||||
("highest_slot_verified", highest_slot, i64),
|
||||
);
|
||||
false
|
||||
}
|
||||
|
||||
@ -197,4 +207,57 @@ mod tests {
|
||||
&mut slot_to_hash,
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_hashes() {
|
||||
solana_logger::setup();
|
||||
use std::path::PathBuf;
|
||||
use tempfile::TempDir;
|
||||
let keypair = Keypair::new();
|
||||
|
||||
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
|
||||
let trusted_validators = HashSet::new();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let mut hashes = vec![];
|
||||
for i in 0..MAX_SNAPSHOT_HASHES + 1 {
|
||||
let snapshot_links = TempDir::new().unwrap();
|
||||
let snapshot_package = SnapshotPackage {
|
||||
hash: hash(&[i as u8]),
|
||||
root: 100 + i as u64,
|
||||
slot_deltas: vec![],
|
||||
snapshot_links,
|
||||
tar_output_file: PathBuf::from("."),
|
||||
storages: vec![],
|
||||
};
|
||||
|
||||
AccountsHashVerifier::process_snapshot(
|
||||
snapshot_package,
|
||||
&cluster_info,
|
||||
&Some(trusted_validators.clone()),
|
||||
false,
|
||||
&None,
|
||||
&mut hashes,
|
||||
&exit,
|
||||
0,
|
||||
);
|
||||
}
|
||||
let cluster_info_r = cluster_info.read().unwrap();
|
||||
let cluster_hashes = cluster_info_r
|
||||
.get_accounts_hash_for_node(&keypair.pubkey())
|
||||
.unwrap();
|
||||
info!("{:?}", cluster_hashes);
|
||||
assert_eq!(hashes.len(), MAX_SNAPSHOT_HASHES);
|
||||
assert_eq!(cluster_hashes.len(), MAX_SNAPSHOT_HASHES);
|
||||
assert_eq!(cluster_hashes[0], (101, hash(&[1])));
|
||||
assert_eq!(
|
||||
cluster_hashes[MAX_SNAPSHOT_HASHES - 1],
|
||||
(
|
||||
100 + MAX_SNAPSHOT_HASHES as u64,
|
||||
hash(&[MAX_SNAPSHOT_HASHES as u8])
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -1982,10 +1982,20 @@ mod tests {
|
||||
{
|
||||
if let EncodedTransaction::Json(transaction) = transaction {
|
||||
if transaction.signatures[0] == success_signature.to_string() {
|
||||
assert_eq!(meta.unwrap().status, Ok(()));
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(meta.err, None);
|
||||
assert_eq!(meta.status, Ok(()));
|
||||
} else if transaction.signatures[0] == ix_error_signature.to_string() {
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(
|
||||
meta.unwrap().status,
|
||||
meta.err,
|
||||
Some(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::CustomError(1)
|
||||
))
|
||||
);
|
||||
assert_eq!(
|
||||
meta.status,
|
||||
Err(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::CustomError(1)
|
||||
|
@ -4,6 +4,9 @@ use self::{
|
||||
fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
|
||||
standard_broadcast_run::StandardBroadcastRun,
|
||||
};
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
|
||||
use crate::weighted_shuffle::weighted_best;
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, ClusterInfoError},
|
||||
poh_recorder::WorkingBankEntry,
|
||||
@ -14,9 +17,13 @@ use crossbeam_channel::{
|
||||
Sender as CrossbeamSender,
|
||||
};
|
||||
use solana_ledger::{blockstore::Blockstore, shred::Shred, staking_utils};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::timing::duration_as_s;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey};
|
||||
use solana_streamer::sendmmsg::send_mmsg;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
net::UdpSocket,
|
||||
@ -104,7 +111,7 @@ trait BroadcastRun {
|
||||
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
|
||||
) -> Result<()>;
|
||||
fn transmit(
|
||||
&self,
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
sock: &UdpSocket,
|
||||
@ -226,7 +233,7 @@ impl BroadcastStage {
|
||||
let socket_receiver = Arc::new(Mutex::new(socket_receiver));
|
||||
for sock in socks.into_iter() {
|
||||
let socket_receiver = socket_receiver.clone();
|
||||
let bs_transmit = broadcast_stage_run.clone();
|
||||
let mut bs_transmit = broadcast_stage_run.clone();
|
||||
let cluster_info = cluster_info.clone();
|
||||
let t = Builder::new()
|
||||
.name("solana-broadcaster-transmit".to_string())
|
||||
@ -328,6 +335,84 @@ impl BroadcastStage {
|
||||
}
|
||||
}
|
||||
|
||||
fn update_peer_stats(num_live_peers: i64, broadcast_len: i64, last_datapoint_submit: &mut Instant) {
|
||||
if duration_as_s(&Instant::now().duration_since(*last_datapoint_submit)) >= 1.0 {
|
||||
datapoint_info!(
|
||||
"cluster_info-num_nodes",
|
||||
("live_count", num_live_peers, i64),
|
||||
("broadcast_count", broadcast_len, i64)
|
||||
);
|
||||
*last_datapoint_submit = Instant::now();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_broadcast_peers<S: std::hash::BuildHasher>(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
stakes: Option<Arc<HashMap<Pubkey, u64, S>>>,
|
||||
) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
|
||||
use crate::cluster_info;
|
||||
let mut peers = cluster_info.read().unwrap().tvu_peers();
|
||||
let peers_and_stakes = cluster_info::stake_weight_peers(&mut peers, stakes);
|
||||
(peers, peers_and_stakes)
|
||||
}
|
||||
|
||||
/// broadcast messages from the leader to layer 1 nodes
|
||||
/// # Remarks
|
||||
pub fn broadcast_shreds(
|
||||
s: &UdpSocket,
|
||||
shreds: &Arc<Vec<Shred>>,
|
||||
peers_and_stakes: &[(u64, usize)],
|
||||
peers: &[ContactInfo],
|
||||
last_datapoint_submit: &mut Instant,
|
||||
send_mmsg_total: &mut u64,
|
||||
) -> Result<()> {
|
||||
let broadcast_len = peers_and_stakes.len();
|
||||
if broadcast_len == 0 {
|
||||
update_peer_stats(1, 1, last_datapoint_submit);
|
||||
return Ok(());
|
||||
}
|
||||
let packets: Vec<_> = shreds
|
||||
.iter()
|
||||
.map(|shred| {
|
||||
let broadcast_index = weighted_best(&peers_and_stakes, shred.seed());
|
||||
|
||||
(&shred.payload, &peers[broadcast_index].tvu)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut sent = 0;
|
||||
let mut send_mmsg_time = Measure::start("send_mmsg");
|
||||
while sent < packets.len() {
|
||||
match send_mmsg(s, &packets[sent..]) {
|
||||
Ok(n) => sent += n,
|
||||
Err(e) => {
|
||||
return Err(Error::IO(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
send_mmsg_time.stop();
|
||||
*send_mmsg_total += send_mmsg_time.as_us();
|
||||
|
||||
let num_live_peers = num_live_peers(&peers);
|
||||
update_peer_stats(
|
||||
num_live_peers,
|
||||
broadcast_len as i64 + 1,
|
||||
last_datapoint_submit,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn num_live_peers(peers: &[ContactInfo]) -> i64 {
|
||||
let mut num_live_peers = 1i64;
|
||||
peers.iter().for_each(|p| {
|
||||
// A peer is considered live if they generated their contact info recently
|
||||
if timestamp() - p.wallclock <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
|
||||
num_live_peers += 1;
|
||||
}
|
||||
});
|
||||
num_live_peers
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
use super::*;
|
||||
|
@ -96,7 +96,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
|
||||
Ok(())
|
||||
}
|
||||
fn transmit(
|
||||
&self,
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
sock: &UdpSocket,
|
||||
|
@ -72,19 +72,25 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
||||
Ok(())
|
||||
}
|
||||
fn transmit(
|
||||
&self,
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
sock: &UdpSocket,
|
||||
) -> Result<()> {
|
||||
let (stakes, shreds) = receiver.lock().unwrap().recv()?;
|
||||
let all_seeds: Vec<[u8; 32]> = shreds.iter().map(|s| s.seed()).collect();
|
||||
// Broadcast data
|
||||
let all_shred_bufs: Vec<Vec<u8>> = shreds.to_vec().into_iter().map(|s| s.payload).collect();
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.broadcast_shreds(sock, all_shred_bufs, &all_seeds, stakes)?;
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
|
||||
|
||||
let mut send_mmsg_total = 0;
|
||||
broadcast_shreds(
|
||||
sock,
|
||||
&shreds,
|
||||
&peers_and_stakes,
|
||||
&peers,
|
||||
&mut Instant::now(),
|
||||
&mut send_mmsg_total,
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
fn record(
|
||||
|
@ -17,26 +17,24 @@ struct BroadcastStats {
|
||||
broadcast_elapsed: u64,
|
||||
receive_elapsed: u64,
|
||||
seed_elapsed: u64,
|
||||
send_mmsg_elapsed: u64,
|
||||
}
|
||||
|
||||
impl BroadcastStats {
|
||||
fn reset(&mut self) {
|
||||
self.insert_shreds_elapsed = 0;
|
||||
self.shredding_elapsed = 0;
|
||||
self.broadcast_elapsed = 0;
|
||||
self.receive_elapsed = 0;
|
||||
self.seed_elapsed = 0;
|
||||
*self = Self::default();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(super) struct StandardBroadcastRun {
|
||||
pub struct StandardBroadcastRun {
|
||||
stats: Arc<RwLock<BroadcastStats>>,
|
||||
unfinished_slot: Option<UnfinishedSlotInfo>,
|
||||
current_slot_and_parent: Option<(u64, u64)>,
|
||||
slot_broadcast_start: Option<Instant>,
|
||||
keypair: Arc<Keypair>,
|
||||
shred_version: u16,
|
||||
last_datapoint_submit: Instant,
|
||||
}
|
||||
|
||||
impl StandardBroadcastRun {
|
||||
@ -48,6 +46,7 @@ impl StandardBroadcastRun {
|
||||
slot_broadcast_start: None,
|
||||
keypair,
|
||||
shred_version,
|
||||
last_datapoint_submit: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -249,31 +248,37 @@ impl StandardBroadcastRun {
|
||||
}
|
||||
|
||||
fn broadcast(
|
||||
&self,
|
||||
&mut self,
|
||||
sock: &UdpSocket,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
|
||||
shreds: Arc<Vec<Shred>>,
|
||||
) -> Result<()> {
|
||||
let seed_start = Instant::now();
|
||||
let seeds: Vec<[u8; 32]> = shreds.iter().map(|s| s.seed()).collect();
|
||||
let seed_elapsed = seed_start.elapsed();
|
||||
|
||||
// Broadcast the shreds
|
||||
let broadcast_start = Instant::now();
|
||||
let shred_bufs: Vec<Vec<u8>> = shreds.to_vec().into_iter().map(|s| s.payload).collect();
|
||||
trace!("Broadcasting {:?} shreds", shred_bufs.len());
|
||||
trace!("Broadcasting {:?} shreds", shreds.len());
|
||||
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.broadcast_shreds(sock, shred_bufs, &seeds, stakes)?;
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
|
||||
|
||||
let mut send_mmsg_total = 0;
|
||||
broadcast_shreds(
|
||||
sock,
|
||||
&shreds,
|
||||
&peers_and_stakes,
|
||||
&peers,
|
||||
&mut self.last_datapoint_submit,
|
||||
&mut send_mmsg_total,
|
||||
)?;
|
||||
|
||||
let broadcast_elapsed = broadcast_start.elapsed();
|
||||
|
||||
self.update_broadcast_stats(BroadcastStats {
|
||||
broadcast_elapsed: duration_as_us(&broadcast_elapsed),
|
||||
seed_elapsed: duration_as_us(&seed_elapsed),
|
||||
send_mmsg_elapsed: send_mmsg_total,
|
||||
..BroadcastStats::default()
|
||||
});
|
||||
Ok(())
|
||||
@ -286,6 +291,7 @@ impl StandardBroadcastRun {
|
||||
wstats.insert_shreds_elapsed += stats.insert_shreds_elapsed;
|
||||
wstats.broadcast_elapsed += stats.broadcast_elapsed;
|
||||
wstats.seed_elapsed += stats.seed_elapsed;
|
||||
wstats.send_mmsg_elapsed += stats.send_mmsg_elapsed;
|
||||
}
|
||||
|
||||
fn report_and_reset_stats(&mut self) {
|
||||
@ -298,6 +304,7 @@ impl StandardBroadcastRun {
|
||||
("insertion_time", stats.insert_shreds_elapsed as i64, i64),
|
||||
("broadcast_time", stats.broadcast_elapsed as i64, i64),
|
||||
("receive_time", stats.receive_elapsed as i64, i64),
|
||||
("send_mmsg", stats.send_mmsg_elapsed as i64, i64),
|
||||
("seed", stats.seed_elapsed as i64, i64),
|
||||
(
|
||||
"num_shreds",
|
||||
@ -332,7 +339,7 @@ impl BroadcastRun for StandardBroadcastRun {
|
||||
)
|
||||
}
|
||||
fn transmit(
|
||||
&self,
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
sock: &UdpSocket,
|
||||
@ -469,11 +476,9 @@ mod test {
|
||||
.receive_elapsed = 10;
|
||||
|
||||
// Try to fetch ticks from blockstore, nothing should break
|
||||
assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
|
||||
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), ticks0);
|
||||
assert_eq!(
|
||||
blockstore
|
||||
.get_slot_entries(0, num_shreds_per_slot, None)
|
||||
.unwrap(),
|
||||
blockstore.get_slot_entries(0, num_shreds_per_slot).unwrap(),
|
||||
vec![],
|
||||
);
|
||||
|
||||
@ -509,11 +514,9 @@ mod test {
|
||||
);
|
||||
|
||||
// Try to fetch the incomplete ticks from blockstore, should succeed
|
||||
assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
|
||||
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), ticks0);
|
||||
assert_eq!(
|
||||
blockstore
|
||||
.get_slot_entries(0, num_shreds_per_slot, None)
|
||||
.unwrap(),
|
||||
blockstore.get_slot_entries(0, num_shreds_per_slot).unwrap(),
|
||||
vec![],
|
||||
);
|
||||
}
|
||||
|
@ -22,7 +22,7 @@ use crate::{
|
||||
},
|
||||
epoch_slots::EpochSlots,
|
||||
result::{Error, Result},
|
||||
weighted_shuffle::{weighted_best, weighted_shuffle},
|
||||
weighted_shuffle::weighted_shuffle,
|
||||
};
|
||||
use bincode::{serialize, serialized_size};
|
||||
use core::cmp;
|
||||
@ -43,7 +43,6 @@ use solana_perf::packet::{
|
||||
};
|
||||
use solana_rayon_threadlimit::get_thread_count;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::timing::duration_as_s;
|
||||
use solana_sdk::{
|
||||
clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_SLOTS_PER_EPOCH},
|
||||
pubkey::Pubkey,
|
||||
@ -51,7 +50,7 @@ use solana_sdk::{
|
||||
timing::{duration_as_ms, timestamp},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_streamer::sendmmsg::{multicast, send_mmsg};
|
||||
use solana_streamer::sendmmsg::multicast;
|
||||
use solana_streamer::streamer::{PacketReceiver, PacketSender};
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
@ -101,7 +100,6 @@ pub struct ClusterInfo {
|
||||
pub(crate) keypair: Arc<Keypair>,
|
||||
/// The network entrypoint
|
||||
entrypoint: Option<ContactInfo>,
|
||||
last_datapoint_submit: Instant,
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
@ -209,7 +207,6 @@ impl ClusterInfo {
|
||||
gossip: CrdsGossip::default(),
|
||||
keypair,
|
||||
entrypoint: None,
|
||||
last_datapoint_submit: Instant::now(),
|
||||
};
|
||||
let id = contact_info.id;
|
||||
me.gossip.set_self(&id);
|
||||
@ -936,76 +933,6 @@ impl ClusterInfo {
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn sorted_tvu_peers_and_stakes(
|
||||
&self,
|
||||
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
|
||||
) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
|
||||
let mut peers = self.tvu_peers();
|
||||
peers.dedup();
|
||||
let stakes_and_index = ClusterInfo::sorted_stakes_with_index(&peers, stakes);
|
||||
(peers, stakes_and_index)
|
||||
}
|
||||
|
||||
/// broadcast messages from the leader to layer 1 nodes
|
||||
/// # Remarks
|
||||
pub fn broadcast_shreds(
|
||||
&mut self,
|
||||
s: &UdpSocket,
|
||||
shreds: Vec<Vec<u8>>,
|
||||
seeds: &[[u8; 32]],
|
||||
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
|
||||
) -> Result<()> {
|
||||
let (peers, peers_and_stakes) = self.sorted_tvu_peers_and_stakes(stakes);
|
||||
let broadcast_len = peers_and_stakes.len();
|
||||
if broadcast_len == 0 {
|
||||
if duration_as_s(&Instant::now().duration_since(self.last_datapoint_submit)) >= 1.0 {
|
||||
datapoint_info!(
|
||||
"cluster_info-num_nodes",
|
||||
("live_count", 1, i64),
|
||||
("broadcast_count", 1, i64)
|
||||
);
|
||||
self.last_datapoint_submit = Instant::now();
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
let mut packets: Vec<_> = shreds
|
||||
.into_iter()
|
||||
.zip(seeds)
|
||||
.map(|(shred, seed)| {
|
||||
let broadcast_index = weighted_best(&peers_and_stakes, *seed);
|
||||
|
||||
(shred, &peers[broadcast_index].tvu)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut sent = 0;
|
||||
while sent < packets.len() {
|
||||
match send_mmsg(s, &mut packets[sent..]) {
|
||||
Ok(n) => sent += n,
|
||||
Err(e) => {
|
||||
return Err(Error::IO(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut num_live_peers = 1i64;
|
||||
peers.iter().for_each(|p| {
|
||||
// A peer is considered live if they generated their contact info recently
|
||||
if timestamp() - p.wallclock <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
|
||||
num_live_peers += 1;
|
||||
}
|
||||
});
|
||||
if duration_as_s(&Instant::now().duration_since(self.last_datapoint_submit)) >= 1.0 {
|
||||
datapoint_info!(
|
||||
"cluster_info-num_nodes",
|
||||
("live_count", num_live_peers, i64),
|
||||
("broadcast_count", broadcast_len + 1, i64)
|
||||
);
|
||||
self.last_datapoint_submit = Instant::now();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// retransmit messages to a list of nodes
|
||||
/// # Remarks
|
||||
/// We need to avoid having obj locked while doing a io, such as the `send_to`
|
||||
@ -1313,7 +1240,7 @@ impl ClusterInfo {
|
||||
Protocol::PullRequest(filter, caller) => {
|
||||
let start = allocated.get();
|
||||
if !caller.verify() {
|
||||
inc_new_counter_error!(
|
||||
inc_new_counter_info!(
|
||||
"cluster_info-gossip_pull_request_verify_fail",
|
||||
1
|
||||
);
|
||||
@ -1339,7 +1266,7 @@ impl ClusterInfo {
|
||||
data.retain(|v| {
|
||||
let ret = v.verify();
|
||||
if !ret {
|
||||
inc_new_counter_error!(
|
||||
inc_new_counter_info!(
|
||||
"cluster_info-gossip_pull_response_verify_fail",
|
||||
1
|
||||
);
|
||||
@ -1357,7 +1284,7 @@ impl ClusterInfo {
|
||||
data.retain(|v| {
|
||||
let ret = v.verify();
|
||||
if !ret {
|
||||
inc_new_counter_error!(
|
||||
inc_new_counter_info!(
|
||||
"cluster_info-gossip_push_msg_verify_fail",
|
||||
1
|
||||
);
|
||||
@ -1937,12 +1864,20 @@ impl Node {
|
||||
}
|
||||
|
||||
fn report_time_spent(label: &str, time: &Duration, extra: &str) {
|
||||
let count = duration_as_ms(time);
|
||||
if count > 5 {
|
||||
info!("{} took: {} ms {}", label, count, extra);
|
||||
let time_ms = duration_as_ms(time);
|
||||
if time_ms > 50 {
|
||||
info!("{} took: {} ms {}", label, time_ms, extra);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn stake_weight_peers<S: std::hash::BuildHasher>(
|
||||
peers: &mut Vec<ContactInfo>,
|
||||
stakes: Option<Arc<HashMap<Pubkey, u64, S>>>,
|
||||
) -> Vec<(u64, usize)> {
|
||||
peers.dedup();
|
||||
ClusterInfo::sorted_stakes_with_index(peers, stakes)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@ -2063,40 +1998,39 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn new_with_external_ip_test_gossip() {
|
||||
let ip = IpAddr::V4(Ipv4Addr::from(0));
|
||||
let port = {
|
||||
bind_in_range(ip, VALIDATOR_PORT_RANGE)
|
||||
.expect("Failed to bind")
|
||||
.0
|
||||
};
|
||||
let node = Node::new_with_external_ip(
|
||||
&Pubkey::new_rand(),
|
||||
&socketaddr!(0, port),
|
||||
VALIDATOR_PORT_RANGE,
|
||||
ip,
|
||||
);
|
||||
// Can't use VALIDATOR_PORT_RANGE because if this test runs in parallel with others, the
|
||||
// port returned by `bind_in_range()` might be snatched up before `Node::new_with_external_ip()` runs
|
||||
let port_range = (VALIDATOR_PORT_RANGE.1 + 10, VALIDATOR_PORT_RANGE.1 + 20);
|
||||
|
||||
check_node_sockets(&node, ip, VALIDATOR_PORT_RANGE);
|
||||
let ip = IpAddr::V4(Ipv4Addr::from(0));
|
||||
let port = bind_in_range(ip, port_range).expect("Failed to bind").0;
|
||||
let node =
|
||||
Node::new_with_external_ip(&Pubkey::new_rand(), &socketaddr!(0, port), port_range, ip);
|
||||
|
||||
check_node_sockets(&node, ip, port_range);
|
||||
|
||||
assert_eq!(node.sockets.gossip.local_addr().unwrap().port(), port);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn new_archiver_external_ip_test() {
|
||||
// Can't use VALIDATOR_PORT_RANGE because if this test runs in parallel with others, the
|
||||
// port returned by `bind_in_range()` might be snatched up before `Node::new_with_external_ip()` runs
|
||||
let port_range = (VALIDATOR_PORT_RANGE.1 + 20, VALIDATOR_PORT_RANGE.1 + 30);
|
||||
let ip = Ipv4Addr::from(0);
|
||||
let node = Node::new_archiver_with_external_ip(
|
||||
&Pubkey::new_rand(),
|
||||
&socketaddr!(ip, 0),
|
||||
VALIDATOR_PORT_RANGE,
|
||||
port_range,
|
||||
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
|
||||
);
|
||||
|
||||
let ip = IpAddr::V4(ip);
|
||||
check_socket(&node.sockets.storage.unwrap(), ip, VALIDATOR_PORT_RANGE);
|
||||
check_socket(&node.sockets.gossip, ip, VALIDATOR_PORT_RANGE);
|
||||
check_socket(&node.sockets.repair, ip, VALIDATOR_PORT_RANGE);
|
||||
check_socket(&node.sockets.storage.unwrap(), ip, port_range);
|
||||
check_socket(&node.sockets.gossip, ip, port_range);
|
||||
check_socket(&node.sockets.repair, ip, port_range);
|
||||
|
||||
check_sockets(&node.sockets.tvu, ip, VALIDATOR_PORT_RANGE);
|
||||
check_sockets(&node.sockets.tvu, ip, port_range);
|
||||
}
|
||||
|
||||
//test that all cluster_info objects only generate signed messages
|
||||
@ -2481,7 +2415,8 @@ mod tests {
|
||||
stakes.insert(id4, 10);
|
||||
|
||||
let stakes = Arc::new(stakes);
|
||||
let (peers, peers_and_stakes) = cluster_info.sorted_tvu_peers_and_stakes(Some(stakes));
|
||||
let mut peers = cluster_info.tvu_peers();
|
||||
let peers_and_stakes = stake_weight_peers(&mut peers, Some(stakes));
|
||||
assert_eq!(peers.len(), 2);
|
||||
assert_eq!(peers[0].id, id);
|
||||
assert_eq!(peers[1].id, id2);
|
||||
|
@ -1,3 +1,6 @@
|
||||
use crate::consensus::VOTE_THRESHOLD_SIZE;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::inc_new_counter_info;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
|
||||
@ -31,17 +34,40 @@ impl BlockCommitment {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
#[derive(Default)]
|
||||
pub struct BlockCommitmentCache {
|
||||
block_commitment: HashMap<Slot, BlockCommitment>,
|
||||
total_stake: u64,
|
||||
bank: Arc<Bank>,
|
||||
root: Slot,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for BlockCommitmentCache {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("BlockCommitmentCache")
|
||||
.field("block_commitment", &self.block_commitment)
|
||||
.field("total_stake", &self.total_stake)
|
||||
.field(
|
||||
"bank",
|
||||
&format_args!("Bank({{current_slot: {:?}}})", self.bank.slot()),
|
||||
)
|
||||
.field("root", &self.root)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockCommitmentCache {
|
||||
pub fn new(block_commitment: HashMap<Slot, BlockCommitment>, total_stake: u64) -> Self {
|
||||
pub fn new(
|
||||
block_commitment: HashMap<Slot, BlockCommitment>,
|
||||
total_stake: u64,
|
||||
bank: Arc<Bank>,
|
||||
root: Slot,
|
||||
) -> Self {
|
||||
Self {
|
||||
block_commitment,
|
||||
total_stake,
|
||||
bank,
|
||||
root,
|
||||
}
|
||||
}
|
||||
|
||||
@ -53,38 +79,62 @@ impl BlockCommitmentCache {
|
||||
self.total_stake
|
||||
}
|
||||
|
||||
pub fn get_block_with_depth_commitment(
|
||||
&self,
|
||||
minimum_depth: usize,
|
||||
minimum_stake_percentage: f64,
|
||||
) -> Option<Slot> {
|
||||
self.block_commitment
|
||||
.iter()
|
||||
.filter(|&(_, block_commitment)| {
|
||||
let fork_stake_minimum_depth: u64 = block_commitment.commitment[minimum_depth..]
|
||||
.iter()
|
||||
.cloned()
|
||||
.sum();
|
||||
fork_stake_minimum_depth as f64 / self.total_stake as f64
|
||||
>= minimum_stake_percentage
|
||||
})
|
||||
.map(|(slot, _)| *slot)
|
||||
.max()
|
||||
pub fn bank(&self) -> Arc<Bank> {
|
||||
self.bank.clone()
|
||||
}
|
||||
|
||||
pub fn get_rooted_block_with_commitment(&self, minimum_stake_percentage: f64) -> Option<u64> {
|
||||
self.get_block_with_depth_commitment(MAX_LOCKOUT_HISTORY - 1, minimum_stake_percentage)
|
||||
pub fn slot(&self) -> Slot {
|
||||
self.bank.slot()
|
||||
}
|
||||
|
||||
pub fn root(&self) -> Slot {
|
||||
self.root
|
||||
}
|
||||
|
||||
pub fn get_confirmation_count(&self, slot: Slot) -> Option<usize> {
|
||||
self.get_lockout_count(slot, VOTE_THRESHOLD_SIZE)
|
||||
}
|
||||
|
||||
// Returns the lowest level at which at least `minimum_stake_percentage` of the total epoch
|
||||
// stake is locked out
|
||||
fn get_lockout_count(&self, slot: Slot, minimum_stake_percentage: f64) -> Option<usize> {
|
||||
self.get_block_commitment(slot).map(|block_commitment| {
|
||||
let iterator = block_commitment.commitment.iter().enumerate().rev();
|
||||
let mut sum = 0;
|
||||
for (i, stake) in iterator {
|
||||
sum += stake;
|
||||
if (sum as f64 / self.total_stake as f64) > minimum_stake_percentage {
|
||||
return i + 1;
|
||||
}
|
||||
}
|
||||
0
|
||||
})
|
||||
}
|
||||
#[cfg(test)]
|
||||
pub fn new_for_tests() -> Self {
|
||||
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
|
||||
block_commitment.insert(0, BlockCommitment::default());
|
||||
Self {
|
||||
block_commitment,
|
||||
total_stake: 42,
|
||||
..Self::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CommitmentAggregationData {
|
||||
bank: Arc<Bank>,
|
||||
root: Slot,
|
||||
total_staked: u64,
|
||||
}
|
||||
|
||||
impl CommitmentAggregationData {
|
||||
pub fn new(bank: Arc<Bank>, total_staked: u64) -> Self {
|
||||
Self { bank, total_staked }
|
||||
pub fn new(bank: Arc<Bank>, root: Slot, total_staked: u64) -> Self {
|
||||
Self {
|
||||
bank,
|
||||
root,
|
||||
total_staked,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -144,14 +194,24 @@ impl AggregateCommitmentService {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
|
||||
let block_commitment = Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
|
||||
|
||||
let mut new_block_commitment =
|
||||
BlockCommitmentCache::new(block_commitment, aggregation_data.total_staked);
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
aggregation_data.total_staked,
|
||||
aggregation_data.bank,
|
||||
aggregation_data.root,
|
||||
);
|
||||
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
aggregate_commitment_time.stop();
|
||||
inc_new_counter_info!(
|
||||
"aggregate-commitment-ms",
|
||||
aggregate_commitment_time.as_ms() as usize
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -246,84 +306,31 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_block_with_depth_commitment() {
|
||||
fn test_get_confirmations() {
|
||||
let bank = Arc::new(Bank::default());
|
||||
// Build BlockCommitmentCache with votes at depths 0 and 1 for 2 slots
|
||||
let mut cache0 = BlockCommitment::default();
|
||||
cache0.increase_confirmation_stake(1, 15);
|
||||
cache0.increase_confirmation_stake(2, 25);
|
||||
cache0.increase_confirmation_stake(1, 5);
|
||||
cache0.increase_confirmation_stake(2, 40);
|
||||
|
||||
let mut cache1 = BlockCommitment::default();
|
||||
cache1.increase_confirmation_stake(1, 10);
|
||||
cache1.increase_confirmation_stake(2, 20);
|
||||
cache1.increase_confirmation_stake(1, 40);
|
||||
cache1.increase_confirmation_stake(2, 5);
|
||||
|
||||
let mut cache2 = BlockCommitment::default();
|
||||
cache2.increase_confirmation_stake(1, 20);
|
||||
cache2.increase_confirmation_stake(2, 5);
|
||||
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
block_commitment.entry(1).or_insert(cache1.clone());
|
||||
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
|
||||
block_commitment.entry(2).or_insert(cache2.clone());
|
||||
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50, bank, 0);
|
||||
|
||||
// Neither slot has rooted votes
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.1),
|
||||
None
|
||||
);
|
||||
// Neither slot meets the minimum level of commitment 0.6 at depth 1
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(1, 0.6),
|
||||
None
|
||||
);
|
||||
// Only slot 0 meets the minimum level of commitment 0.5 at depth 1
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(1, 0.5),
|
||||
Some(0)
|
||||
);
|
||||
// If multiple slots meet the minimum level of commitment, method should return the most recent
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(1, 0.4),
|
||||
Some(1)
|
||||
);
|
||||
// If multiple slots meet the minimum level of commitment, method should return the most recent
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(0, 0.6),
|
||||
Some(1)
|
||||
);
|
||||
// Neither slot meets the minimum level of commitment 0.9 at depth 0
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_block_with_depth_commitment(0, 0.9),
|
||||
None
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_rooted_block_with_commitment() {
|
||||
// Build BlockCommitmentCache with rooted votes
|
||||
let mut cache0 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
|
||||
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 40);
|
||||
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
|
||||
let mut cache1 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
|
||||
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 30);
|
||||
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
|
||||
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 2, 10);
|
||||
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
block_commitment.entry(1).or_insert(cache1.clone());
|
||||
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
|
||||
|
||||
// Only slot 0 meets the minimum level of commitment 0.66 at root
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.66),
|
||||
Some(0)
|
||||
);
|
||||
// If multiple slots meet the minimum level of commitment, method should return the most recent
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.6),
|
||||
Some(1)
|
||||
);
|
||||
// Neither slot meets the minimum level of commitment 0.9 at root
|
||||
assert_eq!(
|
||||
block_commitment_cache.get_rooted_block_with_commitment(0.9),
|
||||
None
|
||||
);
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(0), Some(2));
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(1), Some(1));
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(2), Some(0),);
|
||||
assert_eq!(block_commitment_cache.get_confirmation_count(3), None,);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -321,7 +321,7 @@ impl Tower {
    }
    pub fn check_vote_stake_threshold(
        &self,
        slot: u64,
        slot: Slot,
        stake_lockouts: &HashMap<u64, StakeLockout>,
        total_staked: u64,
    ) -> bool {
@ -332,11 +332,8 @@ impl Tower {
        if let Some(fork_stake) = stake_lockouts.get(&vote.slot) {
            let lockout = fork_stake.stake as f64 / total_staked as f64;
            trace!(
                "fork_stake slot: {} lockout: {} fork_stake: {} total_stake: {}",
                slot,
                lockout,
                fork_stake.stake,
                total_staked
                "fork_stake slot: {}, vote slot: {}, lockout: {} fork_stake: {} total_stake: {}",
                slot, vote.slot, lockout, fork_stake.stake, total_staked
            );
            if vote.confirmation_count as usize > self.threshold_depth {
                for old_vote in &self.lockouts.votes {
@ -358,11 +355,12 @@ impl Tower {

    pub(crate) fn check_switch_threshold(
        &self,
        _slot: u64,
        _slot: Slot,
        _ancestors: &HashMap<Slot, HashSet<u64>>,
        _descendants: &HashMap<Slot, HashSet<u64>>,
        _progress: &ProgressMap,
        _total_stake: u64,
        _total_epoch_stake: u64,
        _epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
    ) -> bool {
        true
    }
@ -482,6 +480,7 @@ pub mod test {
|
||||
use super::*;
|
||||
use crate::{
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_slots::ClusterSlots,
|
||||
progress_map::ForkProgress,
|
||||
replay_stage::{HeaviestForkFailures, ReplayStage},
|
||||
};
|
||||
@ -497,107 +496,96 @@ pub mod test {
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_vote_program::{
|
||||
vote_instruction,
|
||||
vote_state::{Vote, VoteStateVersions},
|
||||
vote_transaction,
|
||||
};
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::RwLock;
|
||||
use std::{thread::sleep, time::Duration};
|
||||
use trees::{tr, Node, Tree};
|
||||
use trees::{tr, Tree, TreeWalk};
|
||||
|
||||
pub(crate) struct VoteSimulator<'a> {
|
||||
searchable_nodes: HashMap<u64, &'a Node<u64>>,
|
||||
pub(crate) struct VoteSimulator {
|
||||
pub validator_keypairs: HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
pub node_pubkeys: Vec<Pubkey>,
|
||||
pub vote_pubkeys: Vec<Pubkey>,
|
||||
pub bank_forks: RwLock<BankForks>,
|
||||
pub progress: ProgressMap,
|
||||
}
|
||||
|
||||
impl<'a> VoteSimulator<'a> {
|
||||
pub(crate) fn new(forks: &'a Tree<u64>) -> Self {
|
||||
let mut searchable_nodes = HashMap::new();
|
||||
let root = forks.root();
|
||||
searchable_nodes.insert(root.data, root);
|
||||
Self { searchable_nodes }
|
||||
impl VoteSimulator {
|
||||
pub(crate) fn new(num_keypairs: usize) -> Self {
|
||||
let (validator_keypairs, node_pubkeys, vote_pubkeys, bank_forks, progress) =
|
||||
Self::init_state(num_keypairs);
|
||||
Self {
|
||||
validator_keypairs,
|
||||
node_pubkeys,
|
||||
vote_pubkeys,
|
||||
bank_forks: RwLock::new(bank_forks),
|
||||
progress,
|
||||
}
|
||||
}
|
||||
pub(crate) fn fill_bank_forks(
|
||||
&mut self,
|
||||
forks: Tree<u64>,
|
||||
cluster_votes: &HashMap<Pubkey, Vec<u64>>,
|
||||
) {
|
||||
let root = forks.root().data;
|
||||
assert!(self.bank_forks.read().unwrap().get(root).is_some());
|
||||
|
||||
let mut walk = TreeWalk::from(forks);
|
||||
loop {
|
||||
if let Some(visit) = walk.get() {
|
||||
let slot = visit.node().data;
|
||||
self.progress
|
||||
.entry(slot)
|
||||
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0));
|
||||
if self.bank_forks.read().unwrap().get(slot).is_some() {
|
||||
walk.forward();
|
||||
continue;
|
||||
}
|
||||
let parent = walk.get_parent().unwrap().data;
|
||||
let parent_bank = self.bank_forks.read().unwrap().get(parent).unwrap().clone();
|
||||
let new_bank = Bank::new_from_parent(&parent_bank, &Pubkey::default(), slot);
|
||||
for (pubkey, vote) in cluster_votes.iter() {
|
||||
if vote.contains(&parent) {
|
||||
let keypairs = self.validator_keypairs.get(pubkey).unwrap();
|
||||
let last_blockhash = parent_bank.last_blockhash();
|
||||
let vote_tx = vote_transaction::new_vote_transaction(
|
||||
// Must vote > root to be processed
|
||||
vec![parent],
|
||||
parent_bank.hash(),
|
||||
last_blockhash,
|
||||
&keypairs.node_keypair,
|
||||
&keypairs.vote_keypair,
|
||||
&keypairs.vote_keypair,
|
||||
);
|
||||
info!("voting {} {}", parent_bank.slot(), parent_bank.hash());
|
||||
new_bank.process_transaction(&vote_tx).unwrap();
|
||||
}
|
||||
}
|
||||
new_bank.freeze();
|
||||
self.bank_forks.write().unwrap().insert(new_bank);
|
||||
walk.forward();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn simulate_vote(
|
||||
&mut self,
|
||||
vote_slot: Slot,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
|
||||
validator_keypairs: &HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
my_keypairs: &ValidatorVoteKeypairs,
|
||||
progress: &mut ProgressMap,
|
||||
my_pubkey: &Pubkey,
|
||||
tower: &mut Tower,
|
||||
) -> Vec<HeaviestForkFailures> {
|
||||
let node = self
|
||||
.find_node_and_update_simulation(vote_slot)
|
||||
.expect("Vote to simulate must be for a slot in the tree");
|
||||
|
||||
let mut missing_nodes = VecDeque::new();
|
||||
let mut current = node;
|
||||
loop {
|
||||
let current_slot = current.data;
|
||||
if bank_forks.read().unwrap().get(current_slot).is_some()
|
||||
|| tower.root().map(|r| current_slot < r).unwrap_or(false)
|
||||
{
|
||||
break;
|
||||
} else {
|
||||
missing_nodes.push_front(current);
|
||||
}
|
||||
|
||||
if let Some(parent) = current.parent() {
|
||||
current = parent;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Create any missing banks along the path
|
||||
for missing_node in missing_nodes {
|
||||
let missing_slot = missing_node.data;
|
||||
let parent = missing_node.parent().unwrap().data;
|
||||
let parent_bank = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(parent)
|
||||
.expect("parent bank must exist")
|
||||
.clone();
|
||||
info!("parent of {} is {}", missing_slot, parent_bank.slot(),);
|
||||
progress
|
||||
.entry(missing_slot)
|
||||
.or_insert_with(|| ForkProgress::new(parent_bank.last_blockhash(), None, None));
|
||||
|
||||
// Create the missing bank
|
||||
let new_bank =
|
||||
Bank::new_from_parent(&parent_bank, &Pubkey::default(), missing_slot);
|
||||
|
||||
// Simulate ingesting the cluster's votes for the parent into this bank
|
||||
for (pubkey, vote) in cluster_votes.iter() {
|
||||
if vote.contains(&parent_bank.slot()) {
|
||||
let keypairs = validator_keypairs.get(pubkey).unwrap();
|
||||
let node_pubkey = keypairs.node_keypair.pubkey();
|
||||
let vote_pubkey = keypairs.vote_keypair.pubkey();
|
||||
let last_blockhash = parent_bank.last_blockhash();
|
||||
let votes = Vote::new(vec![parent_bank.slot()], parent_bank.hash());
|
||||
info!("voting {} {}", parent_bank.slot(), parent_bank.hash());
|
||||
let vote_ix = vote_instruction::vote(&vote_pubkey, &vote_pubkey, votes);
|
||||
let mut vote_tx =
|
||||
Transaction::new_with_payer(vec![vote_ix], Some(&node_pubkey));
|
||||
vote_tx.partial_sign(&[&keypairs.node_keypair], last_blockhash);
|
||||
vote_tx.partial_sign(&[&keypairs.vote_keypair], last_blockhash);
|
||||
new_bank.process_transaction(&vote_tx).unwrap();
|
||||
}
|
||||
}
|
||||
new_bank.freeze();
|
||||
bank_forks.write().unwrap().insert(new_bank);
|
||||
}
|
||||
|
||||
// Now try to simulate the vote
|
||||
let my_pubkey = my_keypairs.node_keypair.pubkey();
|
||||
// Try to simulate the vote
|
||||
let my_keypairs = self.validator_keypairs.get(&my_pubkey).unwrap();
|
||||
let my_vote_pubkey = my_keypairs.vote_keypair.pubkey();
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
let mut frozen_banks: Vec<_> = bank_forks
|
||||
let ancestors = self.bank_forks.read().unwrap().ancestors();
|
||||
let mut frozen_banks: Vec<_> = self
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.frozen_banks()
|
||||
@ -610,90 +598,118 @@ pub mod test {
|
||||
&ancestors,
|
||||
&mut frozen_banks,
|
||||
tower,
|
||||
progress,
|
||||
&mut self.progress,
|
||||
&VoteTracker::default(),
|
||||
bank_forks,
|
||||
&ClusterSlots::default(),
|
||||
&self.bank_forks,
|
||||
&mut HashSet::new(),
|
||||
);
|
||||
|
||||
let bank = bank_forks
|
||||
let vote_bank = self
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(vote_slot)
|
||||
.expect("Bank must have been created before vote simulation")
|
||||
.clone();
|
||||
|
||||
// Try to vote on the given slot
|
||||
let descendants = self.bank_forks.read().unwrap().descendants();
|
||||
let (_, _, failure_reasons) = ReplayStage::select_vote_and_reset_forks(
|
||||
&Some(vote_bank.clone()),
|
||||
&None,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&self.progress,
|
||||
&tower,
|
||||
);
|
||||
|
||||
// Make sure this slot isn't locked out or failing threshold
|
||||
let fork_progress = progress
|
||||
.get(&vote_slot)
|
||||
.expect("Slot for vote must exist in progress map");
|
||||
info!("Checking vote: {}", vote_slot);
|
||||
info!("lockouts: {:?}", fork_progress.fork_stats.stake_lockouts);
|
||||
let mut failures = vec![];
|
||||
if fork_progress.fork_stats.is_locked_out {
|
||||
failures.push(HeaviestForkFailures::LockedOut(vote_slot));
|
||||
info!("Checking vote: {}", vote_bank.slot());
|
||||
if !failure_reasons.is_empty() {
|
||||
return failure_reasons;
|
||||
}
|
||||
if !fork_progress.fork_stats.vote_threshold {
|
||||
failures.push(HeaviestForkFailures::FailedThreshold(vote_slot));
|
||||
}
|
||||
if !failures.is_empty() {
|
||||
return failures;
|
||||
}
|
||||
let vote = tower.new_vote_from_bank(&bank, &my_vote_pubkey).0;
|
||||
let vote = tower.new_vote_from_bank(&vote_bank, &my_vote_pubkey).0;
|
||||
if let Some(new_root) = tower.record_bank_vote(vote) {
|
||||
ReplayStage::handle_new_root(
|
||||
new_root,
|
||||
bank_forks,
|
||||
progress,
|
||||
&self.bank_forks,
|
||||
&mut self.progress,
|
||||
&None,
|
||||
&mut 0,
|
||||
&mut HashSet::new(),
|
||||
);
|
||||
}
|
||||
|
||||
// Mark the vote for this bank under this node's pubkey so it will be
|
||||
// integrated into any future child banks
|
||||
cluster_votes.entry(my_pubkey).or_default().push(vote_slot);
|
||||
vec![]
|
||||
}
|
||||
|
||||
// Find a node representing the given slot
|
||||
fn find_node_and_update_simulation(&mut self, slot: u64) -> Option<&'a Node<u64>> {
|
||||
let mut successful_search_node: Option<&'a Node<u64>> = None;
|
||||
let mut found_node = None;
|
||||
for search_node in self.searchable_nodes.values() {
|
||||
if let Some((target, new_searchable_nodes)) = Self::find_node(search_node, slot) {
|
||||
successful_search_node = Some(search_node);
|
||||
found_node = Some(target);
|
||||
for node in new_searchable_nodes {
|
||||
self.searchable_nodes.insert(node.data, node);
|
||||
}
|
||||
break;
|
||||
fn can_progress_on_fork(
|
||||
&mut self,
|
||||
my_pubkey: &Pubkey,
|
||||
tower: &mut Tower,
|
||||
start_slot: u64,
|
||||
num_slots: u64,
|
||||
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
|
||||
) -> bool {
|
||||
// Check that within some reasonable time, validator can make a new
|
||||
// root on this fork
|
||||
let old_root = tower.root();
|
||||
|
||||
for i in 1..num_slots {
|
||||
// The parent of the tip of the fork
|
||||
let mut fork_tip_parent = tr(start_slot + i - 1);
|
||||
// The tip of the fork
|
||||
fork_tip_parent.push_front(tr(start_slot + i));
|
||||
self.fill_bank_forks(fork_tip_parent, cluster_votes);
|
||||
if self
|
||||
.simulate_vote(i + start_slot, &my_pubkey, tower)
|
||||
.is_empty()
|
||||
{
|
||||
cluster_votes
|
||||
.entry(*my_pubkey)
|
||||
.or_default()
|
||||
.push(start_slot + i);
|
||||
}
|
||||
if old_root != tower.root() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
successful_search_node.map(|node| {
|
||||
self.searchable_nodes.remove(&node.data);
|
||||
});
|
||||
found_node
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
fn find_node(
|
||||
node: &'a Node<u64>,
|
||||
slot: u64,
|
||||
) -> Option<(&'a Node<u64>, Vec<&'a Node<u64>>)> {
|
||||
if node.data == slot {
|
||||
Some((node, node.iter().collect()))
|
||||
} else {
|
||||
let mut search_result: Option<(&'a Node<u64>, Vec<&'a Node<u64>>)> = None;
|
||||
for child in node.iter() {
|
||||
if let Some((_, ref mut new_searchable_nodes)) = search_result {
|
||||
new_searchable_nodes.push(child);
|
||||
continue;
|
||||
}
|
||||
search_result = Self::find_node(child, slot);
|
||||
}
|
||||
fn init_state(
|
||||
num_keypairs: usize,
|
||||
) -> (
|
||||
HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
Vec<Pubkey>,
|
||||
Vec<Pubkey>,
|
||||
BankForks,
|
||||
ProgressMap,
|
||||
) {
|
||||
let keypairs: HashMap<_, _> = std::iter::repeat_with(|| {
|
||||
let node_keypair = Keypair::new();
|
||||
let vote_keypair = Keypair::new();
|
||||
let stake_keypair = Keypair::new();
|
||||
let node_pubkey = node_keypair.pubkey();
|
||||
(
|
||||
node_pubkey,
|
||||
ValidatorVoteKeypairs::new(node_keypair, vote_keypair, stake_keypair),
|
||||
)
|
||||
})
|
||||
.take(num_keypairs)
|
||||
.collect();
|
||||
let node_pubkeys: Vec<_> = keypairs
|
||||
.values()
|
||||
.map(|keys| keys.node_keypair.pubkey())
|
||||
.collect();
|
||||
let vote_pubkeys: Vec<_> = keypairs
|
||||
.values()
|
||||
.map(|keys| keys.vote_keypair.pubkey())
|
||||
.collect();
|
||||
|
||||
search_result
|
||||
}
|
||||
let (bank_forks, progress) = initialize_state(&keypairs, 10_000);
|
||||
(keypairs, node_pubkeys, vote_pubkeys, bank_forks, progress)
|
||||
}
|
||||
}
|
||||
|
||||
@ -717,7 +733,10 @@ pub mod test {
|
||||
|
||||
bank0.freeze();
|
||||
let mut progress = ProgressMap::default();
|
||||
progress.insert(0, ForkProgress::new(bank0.last_blockhash(), None, None));
|
||||
progress.insert(
|
||||
0,
|
||||
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
|
||||
);
|
||||
(BankForks::new(0, bank0), progress)
|
||||
}
|
||||
|
||||
@ -741,84 +760,26 @@ pub mod test {
|
||||
stakes
|
||||
}
|
||||
|
||||
fn can_progress_on_fork(
|
||||
my_pubkey: &Pubkey,
|
||||
tower: &mut Tower,
|
||||
start_slot: u64,
|
||||
num_slots: u64,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
|
||||
keypairs: &HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
progress: &mut ProgressMap,
|
||||
) -> bool {
|
||||
// Check that within some reasonable time, validator can make a new
|
||||
// root on this fork
|
||||
let old_root = tower.root();
|
||||
let mut main_fork = tr(start_slot);
|
||||
let mut tip = main_fork.root_mut();
|
||||
|
||||
for i in 1..num_slots {
|
||||
tip.push_front(tr(start_slot + i));
|
||||
tip = tip.first_mut().unwrap();
|
||||
}
|
||||
let mut voting_simulator = VoteSimulator::new(&main_fork);
|
||||
for i in 1..num_slots {
|
||||
voting_simulator.simulate_vote(
|
||||
i + start_slot,
|
||||
&bank_forks,
|
||||
cluster_votes,
|
||||
&keypairs,
|
||||
keypairs.get(&my_pubkey).unwrap(),
|
||||
progress,
|
||||
tower,
|
||||
);
|
||||
if old_root != tower.root() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_votes() {
|
||||
let node_keypair = Keypair::new();
|
||||
let vote_keypair = Keypair::new();
|
||||
let stake_keypair = Keypair::new();
|
||||
let node_pubkey = node_keypair.pubkey();
|
||||
|
||||
let mut keypairs = HashMap::new();
|
||||
keypairs.insert(
|
||||
node_pubkey,
|
||||
ValidatorVoteKeypairs::new(node_keypair, vote_keypair, stake_keypair),
|
||||
);
|
||||
|
||||
// Initialize BankForks
|
||||
let (bank_forks, mut progress) = initialize_state(&keypairs, 10_000);
|
||||
let bank_forks = RwLock::new(bank_forks);
|
||||
// Init state
|
||||
let mut vote_simulator = VoteSimulator::new(1);
|
||||
let node_pubkey = vote_simulator.node_pubkeys[0];
|
||||
let mut tower = Tower::new_with_key(&node_pubkey);
|
||||
|
||||
// Create the tree of banks
|
||||
let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
|
||||
|
||||
// Set the voting behavior
|
||||
let mut voting_simulator = VoteSimulator::new(&forks);
|
||||
let mut cluster_votes = HashMap::new();
|
||||
let votes = vec![0, 1, 2, 3, 4, 5];
|
||||
cluster_votes.insert(node_pubkey, votes.clone());
|
||||
vote_simulator.fill_bank_forks(forks, &cluster_votes);
|
||||
|
||||
// Simulate the votes
|
||||
let mut tower = Tower::new_with_key(&node_pubkey);
|
||||
|
||||
let mut cluster_votes = HashMap::new();
|
||||
for vote in votes {
|
||||
assert!(voting_simulator
|
||||
.simulate_vote(
|
||||
vote,
|
||||
&bank_forks,
|
||||
&mut cluster_votes,
|
||||
&keypairs,
|
||||
keypairs.get(&node_pubkey).unwrap(),
|
||||
&mut progress,
|
||||
&mut tower,
|
||||
)
|
||||
assert!(vote_simulator
|
||||
.simulate_vote(vote, &node_pubkey, &mut tower,)
|
||||
.is_empty());
|
||||
}
|
||||
|
||||
@ -830,21 +791,14 @@ pub mod test {
|
||||
|
||||
#[test]
|
||||
fn test_double_partition() {
|
||||
solana_logger::setup();
|
||||
let node_keypair = Keypair::new();
|
||||
let vote_keypair = Keypair::new();
|
||||
let stake_keypair = Keypair::new();
|
||||
let node_pubkey = node_keypair.pubkey();
|
||||
let vote_pubkey = vote_keypair.pubkey();
|
||||
// Init state
|
||||
let mut vote_simulator = VoteSimulator::new(2);
|
||||
let node_pubkey = vote_simulator.node_pubkeys[0];
|
||||
let vote_pubkey = vote_simulator.vote_pubkeys[0];
|
||||
let mut tower = Tower::new_with_key(&node_pubkey);
|
||||
|
||||
let mut keypairs = HashMap::new();
|
||||
info!("my_pubkey: {}", node_pubkey);
|
||||
keypairs.insert(
|
||||
node_pubkey,
|
||||
ValidatorVoteKeypairs::new(node_keypair, vote_keypair, stake_keypair),
|
||||
);
|
||||
|
||||
// Create the tree of banks in a BankForks object
|
||||
let num_slots_to_try = 200;
|
||||
// Create the tree of banks
|
||||
let forks = tr(0)
|
||||
/ (tr(1)
|
||||
/ (tr(2)
|
||||
@ -861,56 +815,37 @@ pub mod test {
|
||||
/ (tr(44)
|
||||
// Minor fork 2
|
||||
/ (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
|
||||
/ (tr(110)))))))))))));
|
||||
/ (tr(110) / (tr(110 + 2 * num_slots_to_try))))))))))))));
|
||||
|
||||
// Set the voting behavior
|
||||
let mut voting_simulator = VoteSimulator::new(&forks);
|
||||
let mut votes: Vec<Slot> = vec![];
|
||||
// Set the successful voting behavior
|
||||
let mut cluster_votes = HashMap::new();
|
||||
let mut my_votes: Vec<Slot> = vec![];
|
||||
let next_unlocked_slot = 110;
|
||||
// Vote on the first minor fork
|
||||
votes.extend((0..=14).into_iter());
|
||||
my_votes.extend((0..=14).into_iter());
|
||||
// Come back to the main fork
|
||||
votes.extend((43..=44).into_iter());
|
||||
my_votes.extend((43..=44).into_iter());
|
||||
// Vote on the second minor fork
|
||||
votes.extend((45..=50).into_iter());
|
||||
my_votes.extend((45..=50).into_iter());
|
||||
// Vote to come back to main fork
|
||||
my_votes.push(next_unlocked_slot);
|
||||
cluster_votes.insert(node_pubkey, my_votes.clone());
|
||||
// Make the other validator vote fork to pass the threshold checks
|
||||
let other_votes = my_votes.clone();
|
||||
cluster_votes.insert(vote_simulator.node_pubkeys[1], other_votes);
|
||||
vote_simulator.fill_bank_forks(forks, &cluster_votes);
|
||||
|
||||
let mut cluster_votes: HashMap<Pubkey, Vec<Slot>> = HashMap::new();
|
||||
let (bank_forks, mut progress) = initialize_state(&keypairs, 10_000);
|
||||
let bank_forks = RwLock::new(bank_forks);
|
||||
|
||||
// Simulate the votes. Should fail on trying to come back to the main fork
|
||||
// at 106 exclusively due to threshold failure
|
||||
let mut tower = Tower::new_with_key(&node_pubkey);
|
||||
for vote in &votes {
|
||||
// Simulate the votes.
|
||||
for vote in &my_votes {
|
||||
// All these votes should be ok
|
||||
assert!(voting_simulator
|
||||
.simulate_vote(
|
||||
*vote,
|
||||
&bank_forks,
|
||||
&mut cluster_votes,
|
||||
&keypairs,
|
||||
keypairs.get(&node_pubkey).unwrap(),
|
||||
&mut progress,
|
||||
&mut tower,
|
||||
)
|
||||
assert!(vote_simulator
|
||||
.simulate_vote(*vote, &node_pubkey, &mut tower,)
|
||||
.is_empty());
|
||||
}
|
||||
|
||||
// Try to come back to main fork
|
||||
let next_unlocked_slot = 110;
|
||||
assert!(voting_simulator
|
||||
.simulate_vote(
|
||||
next_unlocked_slot,
|
||||
&bank_forks,
|
||||
&mut cluster_votes,
|
||||
&keypairs,
|
||||
keypairs.get(&node_pubkey).unwrap(),
|
||||
&mut progress,
|
||||
&mut tower,
|
||||
)
|
||||
.is_empty());
|
||||
|
||||
info!("local tower: {:#?}", tower.lockouts.votes);
|
||||
let vote_accounts = bank_forks
|
||||
let vote_accounts = vote_simulator
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(next_unlocked_slot)
|
||||
@ -920,15 +855,17 @@ pub mod test {
|
||||
let state = VoteState::from(&observed.1).unwrap();
|
||||
info!("observed tower: {:#?}", state.votes);
|
||||
|
||||
assert!(can_progress_on_fork(
|
||||
let num_slots_to_try = 200;
|
||||
cluster_votes
|
||||
.get_mut(&vote_simulator.node_pubkeys[1])
|
||||
.unwrap()
|
||||
.extend(next_unlocked_slot + 1..next_unlocked_slot + num_slots_to_try);
|
||||
assert!(vote_simulator.can_progress_on_fork(
|
||||
&node_pubkey,
|
||||
&mut tower,
|
||||
next_unlocked_slot,
|
||||
200,
|
||||
&bank_forks,
|
||||
num_slots_to_try,
|
||||
&mut cluster_votes,
|
||||
&keypairs,
|
||||
&mut progress
|
||||
));
|
||||
}
|
||||
|
||||
|
@ -69,8 +69,8 @@ pub enum CrdsData {
    Vote(VoteIndex, Vote),
    LowestSlot(u8, LowestSlot),
    SnapshotHashes(SnapshotHash),
    EpochSlots(EpochSlotsIndex, EpochSlots),
    AccountsHashes(SnapshotHash),
    EpochSlots(EpochSlotsIndex, EpochSlots),
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@ -189,8 +189,8 @@ impl CrdsValue {
            CrdsData::Vote(_, vote) => vote.wallclock,
            CrdsData::LowestSlot(_, obj) => obj.wallclock,
            CrdsData::SnapshotHashes(hash) => hash.wallclock,
            CrdsData::EpochSlots(_, p) => p.wallclock,
            CrdsData::AccountsHashes(hash) => hash.wallclock,
            CrdsData::EpochSlots(_, p) => p.wallclock,
        }
    }
    pub fn pubkey(&self) -> Pubkey {
@ -199,8 +199,8 @@ impl CrdsValue {
            CrdsData::Vote(_, vote) => vote.from,
            CrdsData::LowestSlot(_, slots) => slots.from,
            CrdsData::SnapshotHashes(hash) => hash.from,
            CrdsData::EpochSlots(_, p) => p.from,
            CrdsData::AccountsHashes(hash) => hash.from,
            CrdsData::EpochSlots(_, p) => p.from,
        }
    }
    pub fn label(&self) -> CrdsValueLabel {
@ -209,8 +209,8 @@ impl CrdsValue {
            CrdsData::Vote(ix, _) => CrdsValueLabel::Vote(*ix, self.pubkey()),
            CrdsData::LowestSlot(_, _) => CrdsValueLabel::LowestSlot(self.pubkey()),
            CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()),
            CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
            CrdsData::AccountsHashes(_) => CrdsValueLabel::AccountsHashes(self.pubkey()),
            CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
        }
    }
    pub fn contact_info(&self) -> Option<&ContactInfo> {
@ -1,6 +1,8 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage

use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
@ -11,13 +13,25 @@ use std::thread;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;

// - To try and keep the RocksDB size under 400GB:
// Seeing about 1600b/shred, using 2000b/shred for margin, so 200m shreds can be stored in 400gb.
// at 5k shreds/slot at 50k tps, this is 500k slots (~5 hours).
// At idle, 60 shreds/slot this is about 4m slots (18 days)
// This is chosen to allow enough time for
// - To try and keep the RocksDB size under 512GB at 50k tps (100 slots take ~2GB).
// - A validator to download a snapshot from a peer and boot from it
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
// to catch back up to where it was when it stopped
pub const DEFAULT_MAX_LEDGER_SLOTS: u64 = 270_000;
// Remove a fixed number of slots at a time, it's more efficient than doing it one-by-one
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 200_000_000;

// Allow down to 50m, or 3.5 days at idle, 1hr at 50k load, around ~100GB
pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;

// Check for removing slots at this interval so we don't purge too often
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;

// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;

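A minimal illustrative sketch (not part of this diff): the shred budget above can be read as a disk budget divided by a per-shred size estimate with safety margin. The numbers below are assumptions taken from the comment, not values read from the code.

// Hypothetical example: derive a max-shred budget from a disk budget.
fn max_shreds_for_disk_budget(disk_budget_bytes: u64, bytes_per_shred_with_margin: u64) -> u64 {
    disk_budget_bytes / bytes_per_shred_with_margin
}

fn main() {
    // ~1600 bytes/shred observed, 2000 bytes/shred with margin, 400 GB disk budget.
    let max_shreds = max_shreds_for_disk_budget(400_000_000_000, 2_000);
    // 400e9 / 2e3 = 200_000_000, matching DEFAULT_MAX_LEDGER_SHREDS above.
    assert_eq!(max_shreds, 200_000_000);
    println!("max shreds: {}", max_shreds);
}
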
pub struct LedgerCleanupService {
|
||||
@ -36,7 +50,7 @@ impl LedgerCleanupService {
|
||||
max_ledger_slots
|
||||
);
|
||||
let exit = exit.clone();
|
||||
let mut next_purge_batch = max_ledger_slots;
|
||||
let mut last_purge_slot = 0;
|
||||
let t_cleanup = Builder::new()
|
||||
.name("solana-ledger-cleanup".to_string())
|
||||
.spawn(move || loop {
|
||||
@ -47,7 +61,8 @@ impl LedgerCleanupService {
|
||||
&new_root_receiver,
|
||||
&blockstore,
|
||||
max_ledger_slots,
|
||||
&mut next_purge_batch,
|
||||
&mut last_purge_slot,
|
||||
DEFAULT_PURGE_SLOT_INTERVAL,
|
||||
) {
|
||||
match e {
|
||||
RecvTimeoutError::Disconnected => break,
|
||||
@ -59,45 +74,123 @@ impl LedgerCleanupService {
|
||||
Self { t_cleanup }
|
||||
}
|
||||
|
||||
    fn find_slots_to_clean(
        blockstore: &Arc<Blockstore>,
        root: Slot,
        max_ledger_shreds: u64,
    ) -> (u64, Slot, Slot) {
        let mut shreds = Vec::new();
        let mut iterate_time = Measure::start("iterate_time");
        let mut total_shreds = 0;
        let mut first_slot = 0;
        for (i, (slot, meta)) in blockstore.slot_meta_iterator(0).unwrap().enumerate() {
            if i == 0 {
                first_slot = slot;
                debug!("purge: searching from slot: {}", slot);
            }
            // Not exact since non-full slots will have holes
            total_shreds += meta.received;
            shreds.push((slot, meta.received));
            if slot > root {
                break;
            }
        }
        iterate_time.stop();
        info!(
            "checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
            max_ledger_shreds,
            shreds.len(),
            total_shreds,
            iterate_time
        );
        if (total_shreds as u64) < max_ledger_shreds {
            return (0, 0, 0);
        }
        let mut cur_shreds = 0;
        let mut lowest_slot_to_clean = shreds[0].0;
        for (slot, num_shreds) in shreds.iter().rev() {
            cur_shreds += *num_shreds as u64;
            if cur_shreds > max_ledger_shreds {
                lowest_slot_to_clean = *slot;
                break;
            }
        }

        (cur_shreds, lowest_slot_to_clean, first_slot)
    }

fn cleanup_ledger(
|
||||
new_root_receiver: &Receiver<Slot>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
max_ledger_slots: u64,
|
||||
next_purge_batch: &mut u64,
|
||||
max_ledger_shreds: u64,
|
||||
last_purge_slot: &mut u64,
|
||||
purge_interval: u64,
|
||||
) -> Result<(), RecvTimeoutError> {
|
||||
let disk_utilization_pre = blockstore.storage_size();
|
||||
|
||||
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
|
||||
// Notify blockstore of impending purge
|
||||
if root > *next_purge_batch {
|
||||
//cleanup
|
||||
let lowest_slot = root - max_ledger_slots;
|
||||
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_slot;
|
||||
blockstore.purge_slots(0, Some(lowest_slot));
|
||||
*next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
|
||||
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
// Get the newest root
|
||||
while let Ok(new_root) = new_root_receiver.try_recv() {
|
||||
root = new_root;
|
||||
}
|
||||
|
||||
let disk_utilization_post = blockstore.storage_size();
|
||||
|
||||
if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) =
|
||||
(disk_utilization_pre, disk_utilization_post)
|
||||
{
|
||||
datapoint_debug!(
|
||||
"ledger_disk_utilization",
|
||||
("disk_utilization_pre", disk_utilization_pre as i64, i64),
|
||||
("disk_utilization_post", disk_utilization_post as i64, i64),
|
||||
(
|
||||
"disk_utilization_delta",
|
||||
(disk_utilization_pre as i64 - disk_utilization_post as i64),
|
||||
i64
|
||||
)
|
||||
if root - *last_purge_slot > purge_interval {
|
||||
let disk_utilization_pre = blockstore.storage_size();
|
||||
info!(
|
||||
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
|
||||
root, last_purge_slot, purge_interval, disk_utilization_pre
|
||||
);
|
||||
*last_purge_slot = root;
|
||||
|
||||
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
|
||||
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
|
||||
|
||||
if num_shreds_to_clean > 0 {
|
||||
debug!(
|
||||
"cleaning up to: {} shreds: {} first: {}",
|
||||
lowest_slot_to_clean, num_shreds_to_clean, first_slot
|
||||
);
|
||||
loop {
|
||||
let current_lowest =
|
||||
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
|
||||
|
||||
let mut slot_update_time = Measure::start("slot_update");
|
||||
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
|
||||
slot_update_time.stop();
|
||||
|
||||
let mut clean_time = Measure::start("ledger_clean");
|
||||
blockstore.purge_slots(first_slot, Some(current_lowest));
|
||||
clean_time.stop();
|
||||
|
||||
debug!(
|
||||
"ledger purge {} -> {}: {} {}",
|
||||
first_slot, current_lowest, slot_update_time, clean_time
|
||||
);
|
||||
first_slot += DEFAULT_PURGE_BATCH_SIZE;
|
||||
if current_lowest == lowest_slot_to_clean {
|
||||
break;
|
||||
}
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
}
|
||||
}
|
||||
|
||||
let disk_utilization_post = blockstore.storage_size();
|
||||
|
||||
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
|
||||
if let (Ok(pre), Ok(post)) = (pre, post) {
|
||||
datapoint_debug!(
|
||||
"ledger_disk_utilization",
|
||||
("disk_utilization_pre", pre as i64, i64),
|
||||
("disk_utilization_post", post as i64, i64),
|
||||
("disk_utilization_delta", (pre as i64 - post as i64), i64)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_cleanup.join()
|
||||
}
|
||||
@ -111,6 +204,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_cleanup() {
|
||||
solana_logger::setup();
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&blockstore_path).unwrap();
|
||||
let (shreds, _) = make_many_slot_entries(0, 50, 5);
|
||||
@ -118,10 +212,10 @@ mod tests {
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let (sender, receiver) = channel();
|
||||
|
||||
//send a signal to kill slots 0-40
|
||||
let mut next_purge_slot = 0;
|
||||
//send a signal to kill all but 5 shreds, which will be in the newest slots
|
||||
let mut last_purge_slot = 0;
|
||||
sender.send(50).unwrap();
|
||||
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot)
|
||||
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
|
||||
.unwrap();
|
||||
|
||||
//check that 0-40 don't exist
|
||||
@ -134,6 +228,62 @@ mod tests {
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cleanup_speed() {
|
||||
solana_logger::setup();
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
let mut blockstore = Blockstore::open(&blockstore_path).unwrap();
|
||||
blockstore.set_no_compaction(true);
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let (sender, receiver) = channel();
|
||||
|
||||
let mut first_insert = Measure::start("first_insert");
|
||||
let initial_slots = 50;
|
||||
let initial_entries = 5;
|
||||
let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
first_insert.stop();
|
||||
info!("{}", first_insert);
|
||||
|
||||
let mut last_purge_slot = 0;
|
||||
let mut slot = initial_slots;
|
||||
let mut num_slots = 6;
|
||||
for _ in 0..5 {
|
||||
let mut insert_time = Measure::start("insert time");
|
||||
let batch_size = 2;
|
||||
let batches = num_slots / batch_size;
|
||||
for i in 0..batches {
|
||||
let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
if i % 100 == 0 {
|
||||
info!("inserting..{} of {}", i, batches);
|
||||
}
|
||||
}
|
||||
insert_time.stop();
|
||||
|
||||
let mut time = Measure::start("purge time");
|
||||
sender.send(slot + num_slots).unwrap();
|
||||
LedgerCleanupService::cleanup_ledger(
|
||||
&receiver,
|
||||
&blockstore,
|
||||
initial_slots,
|
||||
&mut last_purge_slot,
|
||||
10,
|
||||
)
|
||||
.unwrap();
|
||||
time.stop();
|
||||
info!(
|
||||
"slot: {} size: {} {} {}",
|
||||
slot, num_slots, insert_time, time
|
||||
);
|
||||
slot += num_slots;
|
||||
num_slots *= 2;
|
||||
}
|
||||
|
||||
drop(blockstore);
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compaction() {
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
@ -142,7 +292,7 @@ mod tests {
|
||||
let n = 10_000;
|
||||
let batch_size = 100;
|
||||
let batches = n / batch_size;
|
||||
let max_ledger_slots = 100;
|
||||
let max_ledger_shreds = 100;
|
||||
|
||||
for i in 0..batches {
|
||||
let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1);
|
||||
@ -158,8 +308,9 @@ mod tests {
|
||||
LedgerCleanupService::cleanup_ledger(
|
||||
&receiver,
|
||||
&blockstore,
|
||||
max_ledger_slots,
|
||||
max_ledger_shreds,
|
||||
&mut next_purge_batch,
|
||||
10,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@ -170,7 +321,7 @@ mod tests {
|
||||
assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,);
|
||||
|
||||
// check that early slots don't exist
|
||||
let max_slot = n - max_ledger_slots;
|
||||
let max_slot = n - max_ledger_shreds - 1;
|
||||
blockstore
|
||||
.slot_meta_iterator(0)
|
||||
.unwrap()
|
||||
|
@ -5,7 +5,7 @@
//! command-line tools to spin up validators and a Rust library
//!

pub mod accounts_cleanup_service;
pub mod accounts_background_service;
pub mod accounts_hash_verifier;
pub mod banking_stage;
pub mod broadcast_stage;
@ -1,13 +1,13 @@
|
||||
use crate::{
|
||||
cluster_info_vote_listener::SlotVoteTracker, consensus::StakeLockout,
|
||||
replay_stage::SUPERMINORITY_THRESHOLD,
|
||||
cluster_info_vote_listener::SlotVoteTracker, cluster_slots::SlotPubkeys,
|
||||
consensus::StakeLockout, replay_stage::SUPERMINORITY_THRESHOLD,
|
||||
};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore_processor::{ConfirmationProgress, ConfirmationTiming},
|
||||
};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey};
|
||||
use solana_sdk::{account::Account, clock::Slot, hash::Hash, pubkey::Pubkey};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
rc::Rc,
|
||||
@ -85,6 +85,12 @@ pub(crate) struct ForkProgress {
|
||||
pub(crate) propagated_stats: PropagatedStats,
|
||||
pub(crate) replay_stats: ReplaySlotStats,
|
||||
pub(crate) replay_progress: ConfirmationProgress,
|
||||
// Note `num_blocks_on_fork` and `num_dropped_blocks_on_fork` only
|
||||
// count new blocks replayed since last restart, which won't include
|
||||
// blocks already existing in the ledger/before snapshot at start,
|
||||
// so these stats do not span all of time
|
||||
pub(crate) num_blocks_on_fork: u64,
|
||||
pub(crate) num_dropped_blocks_on_fork: u64,
|
||||
}
|
||||
|
||||
impl ForkProgress {
|
||||
@ -92,6 +98,8 @@ impl ForkProgress {
|
||||
last_entry: Hash,
|
||||
prev_leader_slot: Option<Slot>,
|
||||
validator_stake_info: Option<ValidatorStakeInfo>,
|
||||
num_blocks_on_fork: u64,
|
||||
num_dropped_blocks_on_fork: u64,
|
||||
) -> Self {
|
||||
let (
|
||||
is_leader_slot,
|
||||
@ -124,6 +132,8 @@ impl ForkProgress {
|
||||
fork_stats: ForkStats::default(),
|
||||
replay_stats: ReplaySlotStats::default(),
|
||||
replay_progress: ConfirmationProgress::new(last_entry),
|
||||
num_blocks_on_fork,
|
||||
num_dropped_blocks_on_fork,
|
||||
propagated_stats: PropagatedStats {
|
||||
prev_leader_slot,
|
||||
is_leader_slot,
|
||||
@ -141,6 +151,8 @@ impl ForkProgress {
|
||||
my_pubkey: &Pubkey,
|
||||
voting_pubkey: &Pubkey,
|
||||
prev_leader_slot: Option<Slot>,
|
||||
num_blocks_on_fork: u64,
|
||||
num_dropped_blocks_on_fork: u64,
|
||||
) -> Self {
|
||||
let validator_fork_info = {
|
||||
if bank.collector_id() == my_pubkey {
|
||||
@ -155,7 +167,13 @@ impl ForkProgress {
|
||||
}
|
||||
};
|
||||
|
||||
Self::new(bank.last_blockhash(), prev_leader_slot, validator_fork_info)
|
||||
Self::new(
|
||||
bank.last_blockhash(),
|
||||
prev_leader_slot,
|
||||
validator_fork_info,
|
||||
num_blocks_on_fork,
|
||||
num_dropped_blocks_on_fork,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@ -164,7 +182,6 @@ pub(crate) struct ForkStats {
|
||||
pub(crate) weight: u128,
|
||||
pub(crate) fork_weight: u128,
|
||||
pub(crate) total_staked: u64,
|
||||
pub(crate) slot: Slot,
|
||||
pub(crate) block_height: u64,
|
||||
pub(crate) has_voted: bool,
|
||||
pub(crate) is_recent: bool,
|
||||
@ -179,14 +196,84 @@ pub(crate) struct ForkStats {
|
||||
#[derive(Clone, Default)]
|
||||
pub(crate) struct PropagatedStats {
|
||||
pub(crate) propagated_validators: HashSet<Rc<Pubkey>>,
|
||||
pub(crate) propagated_node_ids: HashSet<Rc<Pubkey>>,
|
||||
pub(crate) propagated_validators_stake: u64,
|
||||
pub(crate) is_propagated: bool,
|
||||
pub(crate) is_leader_slot: bool,
|
||||
pub(crate) prev_leader_slot: Option<Slot>,
|
||||
pub(crate) slot_vote_tracker: Option<Arc<RwLock<SlotVoteTracker>>>,
|
||||
pub(crate) cluster_slot_pubkeys: Option<Arc<RwLock<SlotPubkeys>>>,
|
||||
pub(crate) total_epoch_stake: u64,
|
||||
}
|
||||
|
||||
impl PropagatedStats {
|
||||
pub fn add_vote_pubkey(
|
||||
&mut self,
|
||||
vote_pubkey: &Pubkey,
|
||||
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
|
||||
stake: u64,
|
||||
) {
|
||||
if !self.propagated_validators.contains(vote_pubkey) {
|
||||
let mut cached_pubkey: Option<Rc<Pubkey>> = all_pubkeys.get(vote_pubkey).cloned();
|
||||
if cached_pubkey.is_none() {
|
||||
let new_pubkey = Rc::new(*vote_pubkey);
|
||||
all_pubkeys.insert(new_pubkey.clone());
|
||||
cached_pubkey = Some(new_pubkey);
|
||||
}
|
||||
let vote_pubkey = cached_pubkey.unwrap();
|
||||
self.propagated_validators.insert(vote_pubkey);
|
||||
self.propagated_validators_stake += stake;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_node_pubkey(
|
||||
&mut self,
|
||||
node_pubkey: &Pubkey,
|
||||
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
|
||||
bank: &Bank,
|
||||
) {
|
||||
if !self.propagated_node_ids.contains(node_pubkey) {
|
||||
let node_vote_accounts = bank
|
||||
.epoch_vote_accounts_for_node_id(&node_pubkey)
|
||||
.map(|v| &v.vote_accounts);
|
||||
|
||||
if let Some(node_vote_accounts) = node_vote_accounts {
|
||||
self.add_node_pubkey_internal(
|
||||
node_pubkey,
|
||||
all_pubkeys,
|
||||
node_vote_accounts,
|
||||
bank.epoch_vote_accounts(bank.epoch())
|
||||
.expect("Epoch stakes for bank's own epoch must exist"),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn add_node_pubkey_internal(
|
||||
&mut self,
|
||||
node_pubkey: &Pubkey,
|
||||
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
|
||||
vote_account_pubkeys: &[Pubkey],
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
|
||||
) {
|
||||
let mut cached_pubkey: Option<Rc<Pubkey>> = all_pubkeys.get(node_pubkey).cloned();
|
||||
if cached_pubkey.is_none() {
|
||||
let new_pubkey = Rc::new(*node_pubkey);
|
||||
all_pubkeys.insert(new_pubkey.clone());
|
||||
cached_pubkey = Some(new_pubkey);
|
||||
}
|
||||
let node_pubkey = cached_pubkey.unwrap();
|
||||
self.propagated_node_ids.insert(node_pubkey);
|
||||
for vote_account_pubkey in vote_account_pubkeys.iter() {
|
||||
let stake = epoch_vote_accounts
|
||||
.get(vote_account_pubkey)
|
||||
.map(|(stake, _)| *stake)
|
||||
.unwrap_or(0);
|
||||
self.add_vote_pubkey(vote_account_pubkey, all_pubkeys, stake);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct ProgressMap {
|
||||
progress_map: HashMap<Slot, ForkProgress>,
|
||||
@ -282,17 +369,151 @@ impl ProgressMap {
|
||||
self.progress_map
|
||||
.retain(|k, _| bank_forks.get(*k).is_some());
|
||||
}
|
||||
|
||||
pub fn log_propagated_stats(&self, slot: Slot, bank_forks: &RwLock<BankForks>) {
|
||||
if let Some(stats) = self.get_propagated_stats(slot) {
|
||||
info!(
|
||||
"Propagated stats:
|
||||
total staked: {},
|
||||
observed staked: {},
|
||||
vote pubkeys: {:?},
|
||||
node_pubkeys: {:?},
|
||||
slot: {},
|
||||
epoch: {:?}",
|
||||
stats.total_epoch_stake,
|
||||
stats.propagated_validators_stake,
|
||||
stats.propagated_validators,
|
||||
stats.propagated_node_ids,
|
||||
slot,
|
||||
bank_forks.read().unwrap().get(slot).map(|x| x.epoch()),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_add_vote_pubkey() {
|
||||
let mut stats = PropagatedStats::default();
|
||||
let mut all_pubkeys = HashSet::new();
|
||||
let mut vote_pubkey = Pubkey::new_rand();
|
||||
all_pubkeys.insert(Rc::new(vote_pubkey.clone()));
|
||||
|
||||
// Add a vote pubkey, the number of references in all_pubkeys
|
||||
// should be 2
|
||||
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 1);
|
||||
assert!(stats.propagated_validators.contains(&vote_pubkey));
|
||||
assert_eq!(stats.propagated_validators_stake, 1);
|
||||
assert_eq!(Rc::strong_count(all_pubkeys.get(&vote_pubkey).unwrap()), 2);
|
||||
|
||||
// Adding it again should change no state since the key already existed
|
||||
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 1);
|
||||
assert!(stats.propagated_validators.contains(&vote_pubkey));
|
||||
assert_eq!(stats.propagated_validators_stake, 1);
|
||||
|
||||
// Addding another pubkey should succeed
|
||||
vote_pubkey = Pubkey::new_rand();
|
||||
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
|
||||
assert!(stats.propagated_validators.contains(&vote_pubkey));
|
||||
assert_eq!(stats.propagated_validators_stake, 3);
|
||||
assert_eq!(Rc::strong_count(all_pubkeys.get(&vote_pubkey).unwrap()), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_node_pubkey_internal() {
|
||||
let num_vote_accounts = 10;
|
||||
let staked_vote_accounts = 5;
|
||||
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(|| Pubkey::new_rand())
|
||||
.take(num_vote_accounts)
|
||||
.collect();
|
||||
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
|
||||
.iter()
|
||||
.skip(num_vote_accounts - staked_vote_accounts)
|
||||
.map(|pubkey| (*pubkey, (1, Account::default())))
|
||||
.collect();
|
||||
|
||||
let mut stats = PropagatedStats::default();
|
||||
let mut all_pubkeys = HashSet::new();
|
||||
let mut node_pubkey = Pubkey::new_rand();
|
||||
all_pubkeys.insert(Rc::new(node_pubkey.clone()));
|
||||
|
||||
// Add a vote pubkey, the number of references in all_pubkeys
|
||||
// should be 2
|
||||
stats.add_node_pubkey_internal(
|
||||
&node_pubkey,
|
||||
&mut all_pubkeys,
|
||||
&vote_account_pubkeys,
|
||||
&epoch_vote_accounts,
|
||||
);
|
||||
assert!(stats.propagated_node_ids.contains(&node_pubkey));
|
||||
assert_eq!(
|
||||
stats.propagated_validators_stake,
|
||||
staked_vote_accounts as u64
|
||||
);
|
||||
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
|
||||
|
||||
// Adding it again should not change any state
|
||||
stats.add_node_pubkey_internal(
|
||||
&node_pubkey,
|
||||
&mut all_pubkeys,
|
||||
&vote_account_pubkeys,
|
||||
&epoch_vote_accounts,
|
||||
);
|
||||
assert!(stats.propagated_node_ids.contains(&node_pubkey));
|
||||
assert_eq!(
|
||||
stats.propagated_validators_stake,
|
||||
staked_vote_accounts as u64
|
||||
);
|
||||
|
||||
// Addding another pubkey with same vote accounts should succeed, but stake
|
||||
// shouldn't increase
|
||||
node_pubkey = Pubkey::new_rand();
|
||||
stats.add_node_pubkey_internal(
|
||||
&node_pubkey,
|
||||
&mut all_pubkeys,
|
||||
&vote_account_pubkeys,
|
||||
&epoch_vote_accounts,
|
||||
);
|
||||
assert!(stats.propagated_node_ids.contains(&node_pubkey));
|
||||
assert_eq!(
|
||||
stats.propagated_validators_stake,
|
||||
staked_vote_accounts as u64
|
||||
);
|
||||
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
|
||||
|
||||
// Addding another pubkey with different vote accounts should succeed
|
||||
// and increase stake
|
||||
node_pubkey = Pubkey::new_rand();
|
||||
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(|| Pubkey::new_rand())
|
||||
.take(num_vote_accounts)
|
||||
.collect();
|
||||
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
|
||||
.iter()
|
||||
.skip(num_vote_accounts - staked_vote_accounts)
|
||||
.map(|pubkey| (*pubkey, (1, Account::default())))
|
||||
.collect();
|
||||
stats.add_node_pubkey_internal(
|
||||
&node_pubkey,
|
||||
&mut all_pubkeys,
|
||||
&vote_account_pubkeys,
|
||||
&epoch_vote_accounts,
|
||||
);
|
||||
assert!(stats.propagated_node_ids.contains(&node_pubkey));
|
||||
assert_eq!(
|
||||
stats.propagated_validators_stake,
|
||||
2 * staked_vote_accounts as u64
|
||||
);
|
||||
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_propagated_status_on_construction() {
|
||||
// If the given ValidatorStakeInfo == None, then this is not
|
||||
// a leader slot and is_propagated == false
|
||||
let progress = ForkProgress::new(Hash::default(), Some(9), None);
|
||||
let progress = ForkProgress::new(Hash::default(), Some(9), None, 0, 0);
|
||||
assert!(!progress.propagated_stats.is_propagated);
|
||||
|
||||
// If the stake is zero, then threshold is always achieved
|
||||
@ -303,6 +524,8 @@ mod test {
|
||||
total_epoch_stake: 0,
|
||||
..ValidatorStakeInfo::default()
|
||||
}),
|
||||
0,
|
||||
0,
|
||||
);
|
||||
assert!(progress.propagated_stats.is_propagated);
|
||||
|
||||
@ -315,6 +538,8 @@ mod test {
|
||||
total_epoch_stake: 2,
|
||||
..ValidatorStakeInfo::default()
|
||||
}),
|
||||
0,
|
||||
0,
|
||||
);
|
||||
assert!(!progress.propagated_stats.is_propagated);
|
||||
|
||||
@ -327,6 +552,8 @@ mod test {
|
||||
total_epoch_stake: 2,
|
||||
..ValidatorStakeInfo::default()
|
||||
}),
|
||||
0,
|
||||
0,
|
||||
);
|
||||
assert!(progress.propagated_stats.is_propagated);
|
||||
|
||||
@ -337,6 +564,8 @@ mod test {
|
||||
Hash::default(),
|
||||
Some(9),
|
||||
Some(ValidatorStakeInfo::default()),
|
||||
0,
|
||||
0,
|
||||
);
|
||||
assert!(!progress.propagated_stats.is_propagated);
|
||||
}
|
||||
@ -347,10 +576,16 @@ mod test {
|
||||
|
||||
// Insert new ForkProgress for slot 10 (not a leader slot) and its
|
||||
// previous leader slot 9 (leader slot)
|
||||
progress_map.insert(10, ForkProgress::new(Hash::default(), Some(9), None));
|
||||
progress_map.insert(10, ForkProgress::new(Hash::default(), Some(9), None, 0, 0));
|
||||
progress_map.insert(
|
||||
9,
|
||||
ForkProgress::new(Hash::default(), None, Some(ValidatorStakeInfo::default())),
|
||||
ForkProgress::new(
|
||||
Hash::default(),
|
||||
None,
|
||||
Some(ValidatorStakeInfo::default()),
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
|
||||
// None of these slot have parents which are confirmed
|
||||
@ -361,7 +596,7 @@ mod test {
|
||||
// The previous leader before 8, slot 7, does not exist in
|
||||
// progress map, so is_propagated(8) should return true as
|
||||
// this implies the parent is rooted
|
||||
progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None));
|
||||
progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None, 0, 0));
|
||||
assert!(progress_map.is_propagated(8));
|
||||
|
||||
// If we set the is_propagated = true, is_propagated should return true
|
||||
|
@ -20,9 +20,31 @@ use std::{
    sync::{Arc, RwLock},
    thread::sleep,
    thread::{self, Builder, JoinHandle},
    time::Duration,
    time::{Duration, Instant},
};

#[derive(Default)]
pub struct RepairStatsGroup {
    pub count: u64,
    pub min: u64,
    pub max: u64,
}

impl RepairStatsGroup {
    pub fn update(&mut self, slot: u64) {
        self.count += 1;
        self.min = std::cmp::min(self.min, slot);
        self.max = std::cmp::max(self.max, slot);
    }
}

#[derive(Default)]
pub struct RepairStats {
    pub shred: RepairStatsGroup,
    pub highest_shred: RepairStatsGroup,
    pub orphan: RepairStatsGroup,
}

pub const MAX_REPAIR_LENGTH: usize = 512;
pub const REPAIR_MS: u64 = 100;
pub const MAX_ORPHANS: usize = 5;
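A minimal illustrative sketch (not part of this diff) of the aggregation pattern RepairStats is used for: bump a per-request-type group for every repair sent, then emit one summary per reporting interval and reset. SimpleStats here is a simplified stand-in defined only for the example, not the struct above.

use std::time::{Duration, Instant};

#[derive(Default)]
struct SimpleStats {
    count: u64,
    max_slot: u64,
}

impl SimpleStats {
    fn update(&mut self, slot: u64) {
        self.count += 1;
        self.max_slot = self.max_slot.max(slot);
    }
}

fn main() {
    let mut shred_repairs = SimpleStats::default();
    let mut last_report = Instant::now();
    for &slot in [10u64, 12, 11, 15].iter() {
        shred_repairs.update(slot);
        // Report at most once per interval, mirroring the periodic check in RepairService.
        if last_report.elapsed() > Duration::from_secs(1) {
            println!("repairs: {} highest slot: {}", shred_repairs.count, shred_repairs.max_slot);
            shred_repairs = SimpleStats::default();
            last_report = Instant::now();
        }
    }
    println!("repairs: {} highest slot: {}", shred_repairs.count, shred_repairs.max_slot);
}
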
@ -93,6 +115,8 @@ impl RepairService {
|
||||
if let RepairStrategy::RepairAll { .. } = repair_strategy {
|
||||
Self::initialize_lowest_slot(id, blockstore, cluster_info);
|
||||
}
|
||||
let mut repair_stats = RepairStats::default();
|
||||
let mut last_stats = Instant::now();
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
@ -137,7 +161,12 @@ impl RepairService {
|
||||
.into_iter()
|
||||
.filter_map(|repair_request| {
|
||||
serve_repair
|
||||
.repair_request(&cluster_slots, &repair_request, &mut cache)
|
||||
.repair_request(
|
||||
&cluster_slots,
|
||||
&repair_request,
|
||||
&mut cache,
|
||||
&mut repair_stats,
|
||||
)
|
||||
.map(|result| (result, repair_request))
|
||||
.ok()
|
||||
})
|
||||
@ -150,6 +179,24 @@ impl RepairService {
|
||||
});
|
||||
}
|
||||
}
|
||||
if last_stats.elapsed().as_secs() > 1 {
|
||||
let repair_total = repair_stats.shred.count
|
||||
+ repair_stats.highest_shred.count
|
||||
+ repair_stats.orphan.count;
|
||||
if repair_total > 0 {
|
||||
datapoint_info!(
|
||||
"serve_repair-repair",
|
||||
("repair-total", repair_total, i64),
|
||||
("shred-count", repair_stats.shred.count, i64),
|
||||
("highest-shred-count", repair_stats.highest_shred.count, i64),
|
||||
("orphan-count", repair_stats.orphan.count, i64),
|
||||
("repair-highest-slot", repair_stats.highest_shred.max, i64),
|
||||
("repair-orphan", repair_stats.orphan.max, i64),
|
||||
);
|
||||
}
|
||||
repair_stats = RepairStats::default();
|
||||
last_stats = Instant::now();
|
||||
}
|
||||
sleep(Duration::from_millis(REPAIR_MS));
|
||||
}
|
||||
}
|
||||
|
File diff suppressed because it is too large
Load Diff
441
core/src/rpc.rs
@ -13,7 +13,7 @@ use solana_ledger::{
|
||||
bank_forks::BankForks, blockstore::Blockstore, rooted_slot_iterator::RootedSlotIterator,
|
||||
};
|
||||
use solana_perf::packet::PACKET_DATA_SIZE;
|
||||
use solana_runtime::{bank::Bank, status_cache::SignatureConfirmationStatus};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
clock::{Slot, UnixTimestamp},
|
||||
commitment_config::{CommitmentConfig, CommitmentLevel},
|
||||
@ -23,9 +23,11 @@ use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
timing::slot_duration_from_slots_per_year,
|
||||
transaction::Transaction,
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use solana_transaction_status::{
|
||||
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
|
||||
};
|
||||
use solana_transaction_status::{ConfirmedBlock, TransactionEncoding, TransactionStatus};
|
||||
use solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
@ -36,6 +38,9 @@ use std::{
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
const MAX_QUERY_ITEMS: usize = 256;
|
||||
const MAX_SLOT_RANGE: u64 = 10_000;
|
||||
|
||||
type RpcResponse<T> = Result<Response<T>>;
|
||||
|
||||
fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
|
||||
@ -52,6 +57,15 @@ pub struct JsonRpcConfig {
|
||||
pub faucet_addr: Option<SocketAddr>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcSignatureStatusConfig {
|
||||
pub search_transaction_history: Option<bool>,
|
||||
// DEPRECATED
|
||||
#[serde(flatten)]
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct JsonRpcRequestProcessor {
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
@ -196,11 +210,9 @@ impl JsonRpcRequestProcessor {
|
||||
match signature {
|
||||
Err(e) => Err(e),
|
||||
Ok(sig) => {
|
||||
let status = bank.get_signature_confirmation_status(&sig);
|
||||
let status = bank.get_signature_status(&sig);
|
||||
match status {
|
||||
Some(SignatureConfirmationStatus { status, .. }) => {
|
||||
new_response(bank, status.is_ok())
|
||||
}
|
||||
Some(status) => new_response(bank, status.is_ok()),
|
||||
None => new_response(bank, false),
|
||||
}
|
||||
}
|
||||
@ -399,31 +411,71 @@ impl JsonRpcRequestProcessor {
|
||||
.unwrap_or(None))
|
||||
}
|
||||
|
||||
pub fn get_signature_confirmation_status(
|
||||
&self,
|
||||
signature: Signature,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
) -> Option<RpcSignatureConfirmation> {
|
||||
self.get_transaction_status(signature, &self.bank(commitment))
|
||||
.map(
|
||||
|TransactionStatus {
|
||||
status,
|
||||
confirmations,
|
||||
..
|
||||
}| RpcSignatureConfirmation {
|
||||
confirmations: confirmations.unwrap_or(MAX_LOCKOUT_HISTORY + 1),
|
||||
status,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_signature_status(
|
||||
&self,
|
||||
signatures: Vec<Signature>,
|
||||
signature: Signature,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
) -> Option<transaction::Result<()>> {
|
||||
self.bank(commitment)
|
||||
.get_signature_status_slot(&signature)
|
||||
.map(|(_, status)| status)
|
||||
}
|
||||
|
||||
pub fn get_signature_statuses(
|
||||
&self,
|
||||
signatures: Vec<Signature>,
|
||||
config: Option<RpcSignatureStatusConfig>,
|
||||
) -> RpcResponse<Vec<Option<TransactionStatus>>> {
|
||||
let mut statuses: Vec<Option<TransactionStatus>> = vec![];
|
||||
|
||||
// DEPRECATED
|
||||
let commitment = config
|
||||
.clone()
|
||||
.and_then(|x| x.commitment)
|
||||
.or_else(|| Some(CommitmentConfig::recent()));
|
||||
|
||||
let search_transaction_history = config
|
||||
.and_then(|x| x.search_transaction_history)
|
||||
.unwrap_or(false);
|
||||
let bank = self.bank(commitment);
|
||||
|
||||
for signature in signatures {
|
||||
let status = bank.get_signature_confirmation_status(&signature).map(
|
||||
|SignatureConfirmationStatus {
|
||||
slot,
|
||||
status,
|
||||
confirmations,
|
||||
}| TransactionStatus {
|
||||
slot,
|
||||
status,
|
||||
confirmations: if confirmations <= MAX_LOCKOUT_HISTORY {
|
||||
Some(confirmations)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
);
|
||||
let status = if let Some(status) = self.get_transaction_status(signature, &bank) {
|
||||
Some(status)
|
||||
} else if self.config.enable_rpc_transaction_history && search_transaction_history {
|
||||
self.blockstore
|
||||
.get_transaction_status(signature)
|
||||
.map_err(|_| Error::internal_error())?
|
||||
.map(|(slot, status_meta)| {
|
||||
let err = status_meta.status.clone().err();
|
||||
TransactionStatus {
|
||||
slot,
|
||||
status: status_meta.status,
|
||||
confirmations: None,
|
||||
err,
|
||||
}
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
statuses.push(status);
|
||||
}
|
||||
Ok(Response {
|
||||
@ -431,6 +483,63 @@ impl JsonRpcRequestProcessor {
|
||||
value: statuses,
|
||||
})
|
||||
}
|
||||
|
||||
fn get_transaction_status(
|
||||
&self,
|
||||
signature: Signature,
|
||||
bank: &Arc<Bank>,
|
||||
) -> Option<TransactionStatus> {
|
||||
bank.get_signature_status_slot(&signature)
|
||||
.map(|(slot, status)| {
|
||||
let r_block_commitment_cache = self.block_commitment_cache.read().unwrap();
|
||||
|
||||
let confirmations = if r_block_commitment_cache.root() >= slot {
|
||||
None
|
||||
} else {
|
||||
r_block_commitment_cache
|
||||
.get_confirmation_count(slot)
|
||||
.or(Some(0))
|
||||
};
|
||||
let err = status.clone().err();
|
||||
TransactionStatus {
|
||||
slot,
|
||||
status,
|
||||
confirmations,
|
||||
err,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_confirmed_transaction(
|
||||
&self,
|
||||
signature: Signature,
|
||||
encoding: Option<TransactionEncoding>,
|
||||
) -> Result<Option<ConfirmedTransaction>> {
|
||||
if self.config.enable_rpc_transaction_history {
|
||||
Ok(self
|
||||
.blockstore
|
||||
.get_confirmed_transaction(signature, encoding)
|
||||
.unwrap_or(None))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_confirmed_signatures_for_address(
|
||||
&self,
|
||||
pubkey: Pubkey,
|
||||
start_slot: Slot,
|
||||
end_slot: Slot,
|
||||
) -> Result<Vec<Signature>> {
|
||||
if self.config.enable_rpc_transaction_history {
|
||||
Ok(self
|
||||
.blockstore
|
||||
.get_confirmed_signatures_for_address(pubkey, start_slot, end_slot)
|
||||
.unwrap_or_else(|_| vec![]))
|
||||
} else {
|
||||
Ok(vec![])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_tpu_addr(cluster_info: &Arc<RwLock<ClusterInfo>>) -> Result<SocketAddr> {
|
||||
@ -458,6 +567,7 @@ impl Metadata for Meta {}
|
||||
pub trait RpcSol {
|
||||
type Metadata;
|
||||
|
||||
// DEPRECATED
|
||||
#[rpc(meta, name = "confirmTransaction")]
|
||||
fn confirm_transaction(
|
||||
&self,
|
||||
@ -466,6 +576,24 @@ pub trait RpcSol {
|
||||
commitment: Option<CommitmentConfig>,
|
||||
) -> RpcResponse<bool>;
|
||||
|
||||
// DEPRECATED
|
||||
#[rpc(meta, name = "getSignatureStatus")]
|
||||
fn get_signature_status(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
signature_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
) -> Result<Option<transaction::Result<()>>>;
|
||||
|
||||
// DEPRECATED (used by Trust Wallet)
|
||||
#[rpc(meta, name = "getSignatureConfirmation")]
|
||||
fn get_signature_confirmation(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
signature_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
) -> Result<Option<RpcSignatureConfirmation>>;
|
||||
|
||||
#[rpc(meta, name = "getAccountInfo")]
|
||||
fn get_account_info(
|
||||
&self,
|
||||
@ -553,12 +681,12 @@ pub trait RpcSol {
|
||||
#[rpc(meta, name = "getFeeRateGovernor")]
|
||||
fn get_fee_rate_governor(&self, meta: Self::Metadata) -> RpcResponse<RpcFeeRateGovernor>;
|
||||
|
||||
#[rpc(meta, name = "getSignatureStatus")]
|
||||
fn get_signature_status(
|
||||
#[rpc(meta, name = "getSignatureStatuses")]
|
||||
fn get_signature_statuses(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
signature_strs: Vec<String>,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
config: Option<RpcSignatureStatusConfig>,
|
||||
) -> RpcResponse<Vec<Option<TransactionStatus>>>;
|
||||
|
||||
#[rpc(meta, name = "getSlot")]
|
||||
@ -653,6 +781,23 @@ pub trait RpcSol {
|
||||
start_slot: Slot,
|
||||
end_slot: Option<Slot>,
|
||||
) -> Result<Vec<Slot>>;
|
||||
|
||||
#[rpc(meta, name = "getConfirmedTransaction")]
|
||||
fn get_confirmed_transaction(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
signature_str: String,
|
||||
encoding: Option<TransactionEncoding>,
|
||||
) -> Result<Option<ConfirmedTransaction>>;
|
||||
|
||||
#[rpc(meta, name = "getConfirmedSignaturesForAddress")]
|
||||
fn get_confirmed_signatures_for_address(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
pubkey_str: String,
|
||||
start_slot: Slot,
|
||||
end_slot: Slot,
|
||||
) -> Result<Vec<String>>;
|
||||
}
|
||||
|
||||
pub struct RpcSolImpl;
|
||||
@ -884,12 +1029,50 @@ impl RpcSol for RpcSolImpl {
|
||||
.get_fee_rate_governor()
|
||||
}
|
||||
|
||||
fn get_signature_confirmation(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
signature_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
) -> Result<Option<RpcSignatureConfirmation>> {
|
||||
debug!(
|
||||
"get_signature_confirmation rpc request received: {:?}",
|
||||
signature_str
|
||||
);
|
||||
let signature = verify_signature(&signature_str)?;
|
||||
Ok(meta
|
||||
.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_signature_confirmation_status(signature, commitment))
|
||||
}
|
||||
|
||||
fn get_signature_status(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
signature_strs: Vec<String>,
|
||||
signature_str: String,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
) -> Result<Option<transaction::Result<()>>> {
|
||||
let signature = verify_signature(&signature_str)?;
|
||||
Ok(meta
|
||||
.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_signature_status(signature, commitment))
|
||||
}
|
||||
|
||||
fn get_signature_statuses(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
signature_strs: Vec<String>,
|
||||
config: Option<RpcSignatureStatusConfig>,
|
||||
) -> RpcResponse<Vec<Option<TransactionStatus>>> {
|
||||
if signature_strs.len() > MAX_QUERY_ITEMS {
|
||||
return Err(Error::invalid_params(format!(
|
||||
"Too many inputs provided; max {}",
|
||||
MAX_QUERY_ITEMS
|
||||
)));
|
||||
}
|
||||
let mut signatures: Vec<Signature> = vec![];
|
||||
for signature_str in signature_strs {
|
||||
signatures.push(verify_signature(&signature_str)?);
|
||||
@ -897,7 +1080,7 @@ impl RpcSol for RpcSolImpl {
|
||||
meta.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_signature_status(signatures, commitment)
|
||||
.get_signature_statuses(signatures, config)
|
||||
}
|
||||
|
||||
fn get_slot(&self, meta: Self::Metadata, commitment: Option<CommitmentConfig>) -> Result<u64> {
|
||||
@ -990,9 +1173,10 @@ impl RpcSol for RpcSolImpl {
|
||||
.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_signature_status(vec![signature], commitment.clone())?
|
||||
.get_signature_statuses(vec![signature], None)?
|
||||
.value[0]
|
||||
.clone()
|
||||
.filter(|result| result.satisfies_commitment(commitment.unwrap_or_default()))
|
||||
.map(|x| x.status);
|
||||
|
||||
if signature_status == Some(Ok(())) {
|
||||
@ -1157,6 +1341,51 @@ impl RpcSol for RpcSolImpl {
|
||||
fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result<Option<UnixTimestamp>> {
|
||||
meta.request_processor.read().unwrap().get_block_time(slot)
|
||||
}
|
||||
|
||||
fn get_confirmed_transaction(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
signature_str: String,
|
||||
encoding: Option<TransactionEncoding>,
|
||||
) -> Result<Option<ConfirmedTransaction>> {
|
||||
let signature = verify_signature(&signature_str)?;
|
||||
meta.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_confirmed_transaction(signature, encoding)
|
||||
}
|
||||
|
||||
fn get_confirmed_signatures_for_address(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
pubkey_str: String,
|
||||
start_slot: Slot,
|
||||
end_slot: Slot,
|
||||
) -> Result<Vec<String>> {
|
||||
let pubkey = verify_pubkey(pubkey_str)?;
|
||||
if end_slot <= start_slot {
|
||||
return Err(Error::invalid_params(format!(
|
||||
"start_slot {} must be smaller than end_slot {}",
|
||||
start_slot, end_slot
|
||||
)));
|
||||
}
|
||||
if end_slot - start_slot > MAX_SLOT_RANGE {
|
||||
return Err(Error::invalid_params(format!(
|
||||
"Slot range too large; max {}",
|
||||
MAX_SLOT_RANGE
|
||||
)));
|
||||
}
|
||||
meta.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_confirmed_signatures_for_address(pubkey, start_slot, end_slot)
|
||||
.map(|signatures| {
|
||||
signatures
|
||||
.iter()
|
||||
.map(|signature| signature.to_string())
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -1222,18 +1451,6 @@ pub mod tests {
|
||||
) -> RpcHandler {
|
||||
let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
|
||||
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY]);
|
||||
let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY]);
|
||||
let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new();
|
||||
block_commitment
|
||||
.entry(0)
|
||||
.or_insert(commitment_slot0.clone());
|
||||
block_commitment
|
||||
.entry(1)
|
||||
.or_insert(commitment_slot1.clone());
|
||||
let block_commitment_cache =
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&ledger_path).unwrap();
|
||||
let blockstore = Arc::new(blockstore);
|
||||
@ -1249,6 +1466,24 @@ pub mod tests {
|
||||
blockstore.clone(),
|
||||
);
|
||||
|
||||
let mut commitment_slot0 = BlockCommitment::default();
|
||||
commitment_slot0.increase_confirmation_stake(2, 9);
|
||||
let mut commitment_slot1 = BlockCommitment::default();
|
||||
commitment_slot1.increase_confirmation_stake(1, 9);
|
||||
let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new();
|
||||
block_commitment
|
||||
.entry(0)
|
||||
.or_insert(commitment_slot0.clone());
|
||||
block_commitment
|
||||
.entry(1)
|
||||
.or_insert(commitment_slot1.clone());
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
10,
|
||||
bank.clone(),
|
||||
0,
|
||||
)));
|
||||
|
||||
// Add timestamp vote to blockstore
|
||||
let vote = Vote {
|
||||
slots: vec![1],
|
||||
@ -1750,6 +1985,76 @@ pub mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_rpc_get_signature_status() {
|
||||
let bob_pubkey = Pubkey::new_rand();
|
||||
let RpcHandler {
|
||||
io,
|
||||
meta,
|
||||
blockhash,
|
||||
alice,
|
||||
..
|
||||
} = start_rpc_handler_with_tx(&bob_pubkey);
|
||||
|
||||
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
|
||||
let req = format!(
|
||||
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatus","params":["{}"]}}"#,
|
||||
tx.signatures[0]
|
||||
);
|
||||
let res = io.handle_request_sync(&req, meta.clone());
|
||||
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"result": expected_res,
|
||||
"id": 1
|
||||
});
|
||||
let expected: Response =
|
||||
serde_json::from_value(expected).expect("expected response deserialization");
|
||||
let result: Response = serde_json::from_str(&res.expect("actual response"))
|
||||
.expect("actual response deserialization");
|
||||
assert_eq!(expected, result);
|
||||
|
||||
// Test getSignatureStatus request on unprocessed tx
|
||||
let tx = system_transaction::transfer(&alice, &bob_pubkey, 10, blockhash);
|
||||
let req = format!(
|
||||
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatus","params":["{}"]}}"#,
|
||||
tx.signatures[0]
|
||||
);
|
||||
let res = io.handle_request_sync(&req, meta.clone());
|
||||
let expected_res: Option<String> = None;
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"result": expected_res,
|
||||
"id": 1
|
||||
});
|
||||
let expected: Response =
|
||||
serde_json::from_value(expected).expect("expected response deserialization");
|
||||
let result: Response = serde_json::from_str(&res.expect("actual response"))
|
||||
.expect("actual response deserialization");
|
||||
assert_eq!(expected, result);
|
||||
|
||||
// Test getSignatureStatus request on a TransactionError
|
||||
let tx = system_transaction::transfer(&alice, &bob_pubkey, std::u64::MAX, blockhash);
|
||||
let req = format!(
|
||||
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatus","params":["{}"]}}"#,
|
||||
tx.signatures[0]
|
||||
);
|
||||
let res = io.handle_request_sync(&req, meta);
|
||||
let expected_res: Option<transaction::Result<()>> = Some(Err(
|
||||
TransactionError::InstructionError(0, InstructionError::CustomError(1)),
|
||||
));
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"result": expected_res,
|
||||
"id": 1
|
||||
});
|
||||
let expected: Response =
|
||||
serde_json::from_value(expected).expect("expected response deserialization");
|
||||
let result: Response = serde_json::from_str(&res.expect("actual response"))
|
||||
.expect("actual response deserialization");
|
||||
assert_eq!(expected, result);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rpc_get_signature_statuses() {
|
||||
let bob_pubkey = Pubkey::new_rand();
|
||||
let RpcHandler {
|
||||
io,
|
||||
@ -1761,7 +2066,7 @@ pub mod tests {
|
||||
} = start_rpc_handler_with_tx(&bob_pubkey);
|
||||
|
||||
let req = format!(
|
||||
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatus","params":[["{}"]]}}"#,
|
||||
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#,
|
||||
confirmed_block_signatures[0]
|
||||
);
|
||||
let res = io.handle_request_sync(&req, meta.clone());
|
||||
@ -1770,12 +2075,14 @@ pub mod tests {
|
||||
let result: Option<TransactionStatus> =
|
||||
serde_json::from_value(json["result"]["value"][0].clone())
|
||||
.expect("actual response deserialization");
|
||||
assert_eq!(expected_res, result.as_ref().unwrap().status);
|
||||
let result = result.as_ref().unwrap();
|
||||
assert_eq!(expected_res, result.status);
|
||||
assert_eq!(None, result.confirmations);
|
||||
|
||||
// Test getSignatureStatus request on unprocessed tx
|
||||
let tx = system_transaction::transfer(&alice, &bob_pubkey, 10, blockhash);
|
||||
let req = format!(
|
||||
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatus","params":[["{}"]]}}"#,
|
||||
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#,
|
||||
tx.signatures[0]
|
||||
);
|
||||
let res = io.handle_request_sync(&req, meta.clone());
|
||||
@ -1787,7 +2094,7 @@ pub mod tests {
|
||||
|
||||
// Test getSignatureStatus request on a TransactionError
|
||||
let req = format!(
|
||||
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatus","params":[["{}"]]}}"#,
|
||||
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#,
|
||||
confirmed_block_signatures[1]
|
||||
);
|
||||
let res = io.handle_request_sync(&req, meta.clone());
|
||||
@ -2119,6 +2426,8 @@ pub mod tests {
|
||||
fn test_rpc_processor_get_block_commitment() {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let validator_exit = create_validator_exit(&exit);
|
||||
let bank_forks = new_bank_forks().0;
|
||||
|
||||
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY]);
|
||||
let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY]);
|
||||
let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new();
|
||||
@ -2128,8 +2437,12 @@ pub mod tests {
|
||||
block_commitment
|
||||
.entry(1)
|
||||
.or_insert(commitment_slot1.clone());
|
||||
let block_commitment_cache =
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
42,
|
||||
bank_forks.read().unwrap().working_bank(),
|
||||
0,
|
||||
)));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&ledger_path).unwrap();
|
||||
|
||||
@ -2137,7 +2450,7 @@ pub mod tests {
|
||||
config.enable_validator_exit = true;
|
||||
let request_processor = JsonRpcRequestProcessor::new(
|
||||
config,
|
||||
new_bank_forks().0,
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
Arc::new(blockstore),
|
||||
StorageState::default(),
|
||||
@ -2201,7 +2514,7 @@ pub mod tests {
|
||||
.get_block_commitment(0)
|
||||
.map(|block_commitment| block_commitment.commitment)
|
||||
);
|
||||
assert_eq!(total_stake, 42);
|
||||
assert_eq!(total_stake, 10);
|
||||
|
||||
let req =
|
||||
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[2]}}"#);
|
||||
@ -2219,7 +2532,7 @@ pub mod tests {
|
||||
panic!("Expected single response");
|
||||
};
|
||||
assert_eq!(commitment_response.commitment, None);
|
||||
assert_eq!(commitment_response.total_stake, 42);
|
||||
assert_eq!(commitment_response.total_stake, 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -2248,11 +2561,21 @@ pub mod tests {
|
||||
{
|
||||
if let EncodedTransaction::Json(transaction) = transaction {
|
||||
if transaction.signatures[0] == confirmed_block_signatures[0].to_string() {
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(transaction.message.recent_blockhash, blockhash.to_string());
|
||||
assert_eq!(meta.unwrap().status, Ok(()));
|
||||
assert_eq!(meta.status, Ok(()));
|
||||
assert_eq!(meta.err, None);
|
||||
} else if transaction.signatures[0] == confirmed_block_signatures[1].to_string() {
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(
|
||||
meta.unwrap().status,
|
||||
meta.err,
|
||||
Some(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::CustomError(1)
|
||||
))
|
||||
);
|
||||
assert_eq!(
|
||||
meta.status,
|
||||
Err(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::CustomError(1)
|
||||
@ -2282,11 +2605,21 @@ pub mod tests {
|
||||
let decoded_transaction: Transaction =
|
||||
deserialize(&bs58::decode(&transaction).into_vec().unwrap()).unwrap();
|
||||
if decoded_transaction.signatures[0] == confirmed_block_signatures[0] {
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(decoded_transaction.message.recent_blockhash, blockhash);
|
||||
assert_eq!(meta.unwrap().status, Ok(()));
|
||||
assert_eq!(meta.status, Ok(()));
|
||||
assert_eq!(meta.err, None);
|
||||
} else if decoded_transaction.signatures[0] == confirmed_block_signatures[1] {
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(
|
||||
meta.unwrap().status,
|
||||
meta.err,
|
||||
Some(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::CustomError(1)
|
||||
))
|
||||
);
|
||||
assert_eq!(
|
||||
meta.status,
|
||||
Err(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::CustomError(1)
|
||||
|
@ -4,8 +4,10 @@ use crate::rpc_subscriptions::{Confirmations, RpcSubscriptions, SlotInfo};
|
||||
use jsonrpc_core::{Error, ErrorCode, Result};
|
||||
use jsonrpc_derive::rpc;
|
||||
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
|
||||
use solana_client::rpc_response::{Response as RpcResponse, RpcAccount, RpcKeyedAccount};
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature, transaction};
|
||||
use solana_client::rpc_response::{
|
||||
Response as RpcResponse, RpcAccount, RpcKeyedAccount, RpcSignatureResult,
|
||||
};
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
|
||||
use std::sync::{atomic, Arc};
|
||||
|
||||
// Suppress needless_return due to
|
||||
@ -74,7 +76,7 @@ pub trait RpcSolPubSub {
|
||||
fn signature_subscribe(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<transaction::Result<()>>>,
|
||||
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
|
||||
signature_str: String,
|
||||
confirmations: Option<Confirmations>,
|
||||
);
|
||||
@ -225,7 +227,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
fn signature_subscribe(
|
||||
&self,
|
||||
_meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<transaction::Result<()>>>,
|
||||
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
|
||||
signature_str: String,
|
||||
confirmations: Option<Confirmations>,
|
||||
) {
|
||||
@ -312,7 +314,10 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::rpc_subscriptions::tests::robust_poll_or_panic;
|
||||
use crate::{
|
||||
commitment::{BlockCommitment, BlockCommitmentCache},
|
||||
rpc_subscriptions::tests::robust_poll_or_panic,
|
||||
};
|
||||
use jsonrpc_core::{futures::sync::mpsc, Response};
|
||||
use jsonrpc_pubsub::{PubSubHandler, Session};
|
||||
use solana_budget_program::{self, budget_instruction};
|
||||
@ -325,7 +330,12 @@ mod tests {
|
||||
system_program, system_transaction,
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use std::{sync::RwLock, thread::sleep, time::Duration};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{atomic::AtomicBool, RwLock},
|
||||
thread::sleep,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
fn process_transaction_and_notify(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
@ -358,8 +368,13 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let rpc = RpcSolPubSubImpl {
|
||||
subscriptions: Arc::new(RpcSubscriptions::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
)),
|
||||
..RpcSolPubSubImpl::default()
|
||||
};
|
||||
|
||||
// Test signature subscriptions
|
||||
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
|
||||
@ -372,7 +387,7 @@ mod tests {
|
||||
|
||||
// Test signature confirmation notification
|
||||
let (response, _) = robust_poll_or_panic(receiver);
|
||||
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
|
||||
let expected_res = RpcSignatureResult { err: None };
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
@ -457,7 +472,13 @@ mod tests {
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let rpc = RpcSolPubSubImpl {
|
||||
subscriptions: Arc::new(RpcSubscriptions::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
)),
|
||||
..RpcSolPubSubImpl::default()
|
||||
};
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
|
||||
rpc.account_subscribe(
|
||||
@ -591,7 +612,13 @@ mod tests {
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
let bob = Keypair::new();
|
||||
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let mut rpc = RpcSolPubSubImpl::default();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
);
|
||||
rpc.subscriptions = Arc::new(subscriptions);
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
|
||||
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
|
||||
@ -622,7 +649,12 @@ mod tests {
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
let bob = Keypair::new();
|
||||
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let mut rpc = RpcSolPubSubImpl::default();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests()));
|
||||
|
||||
let subscriptions = RpcSubscriptions::new(&exit, block_commitment_cache.clone());
|
||||
rpc.subscriptions = Arc::new(subscriptions);
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
|
||||
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
|
||||
@ -640,10 +672,32 @@ mod tests {
|
||||
let bank0 = bank_forks.read().unwrap()[0].clone();
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
rpc.subscriptions.notify_subscribers(1, &bank_forks);
|
||||
let bank1 = bank_forks.read().unwrap()[1].clone();
|
||||
|
||||
let mut cache0 = BlockCommitment::default();
|
||||
cache0.increase_confirmation_stake(1, 10);
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
let mut new_block_commitment =
|
||||
BlockCommitmentCache::new(block_commitment, 10, bank1.clone(), 0);
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
drop(w_block_commitment_cache);
|
||||
|
||||
rpc.subscriptions.notify_subscribers(1, &bank_forks);
|
||||
let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2);
|
||||
bank_forks.write().unwrap().insert(bank2);
|
||||
let bank2 = bank_forks.read().unwrap()[2].clone();
|
||||
|
||||
let mut cache0 = BlockCommitment::default();
|
||||
cache0.increase_confirmation_stake(2, 10);
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(block_commitment, 10, bank2, 0);
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
drop(w_block_commitment_cache);
|
||||
|
||||
rpc.subscriptions.notify_subscribers(2, &bank_forks);
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
|
@ -1,14 +1,20 @@
|
||||
//! The `pubsub` module implements a threaded subscription service on client RPC request
|
||||
|
||||
use crate::rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl};
|
||||
use crate::rpc_subscriptions::RpcSubscriptions;
|
||||
use crate::{
|
||||
rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
};
|
||||
use jsonrpc_pubsub::{PubSubHandler, Session};
|
||||
use jsonrpc_ws_server::{RequestContext, ServerBuilder};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{self, sleep, Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread::{self, sleep, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
pub struct PubSubService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
@ -66,13 +72,20 @@ impl PubSubService {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use crate::commitment::BlockCommitmentCache;
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr},
|
||||
sync::RwLock,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_pubsub_new() {
|
||||
let pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
));
|
||||
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
|
||||
let thread = pubsub_service.thread_hdl.thread();
|
||||
assert_eq!(thread.name().unwrap(), "solana-pubsub");
|
||||
|
@ -1,5 +1,6 @@
|
||||
//! The `pubsub` module implements a threaded subscription service on client RPC request
|
||||
|
||||
use crate::commitment::BlockCommitmentCache;
|
||||
use core::hash::Hash;
|
||||
use jsonrpc_core::futures::Future;
|
||||
use jsonrpc_pubsub::{
|
||||
@ -7,18 +8,23 @@ use jsonrpc_pubsub::{
|
||||
SubscriptionId,
|
||||
};
|
||||
use serde::Serialize;
|
||||
use solana_client::rpc_response::{Response, RpcAccount, RpcKeyedAccount, RpcResponseContext};
|
||||
use solana_client::rpc_response::{
|
||||
Response, RpcAccount, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult,
|
||||
};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
account::Account, clock::Slot, pubkey::Pubkey, signature::Signature, transaction,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{Receiver, RecvTimeoutError, SendError, Sender};
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::{Receiver, RecvTimeoutError, SendError, Sender},
|
||||
};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use std::{
|
||||
cmp::min,
|
||||
collections::{HashMap, HashSet},
|
||||
iter,
|
||||
sync::{Arc, Mutex, RwLock},
|
||||
@ -62,7 +68,7 @@ type RpcProgramSubscriptions = RwLock<
|
||||
type RpcSignatureSubscriptions = RwLock<
|
||||
HashMap<
|
||||
Signature,
|
||||
HashMap<SubscriptionId, (Sink<Response<transaction::Result<()>>>, Confirmations)>,
|
||||
HashMap<SubscriptionId, (Sink<Response<RpcSignatureResult>>, Confirmations)>,
|
||||
>,
|
||||
>;
|
||||
type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>;
|
||||
@ -80,11 +86,7 @@ fn add_subscription<K, S>(
|
||||
{
|
||||
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
|
||||
let confirmations = confirmations.unwrap_or(0);
|
||||
let confirmations = if confirmations > MAX_LOCKOUT_HISTORY {
|
||||
MAX_LOCKOUT_HISTORY
|
||||
} else {
|
||||
confirmations
|
||||
};
|
||||
let confirmations = min(confirmations, MAX_LOCKOUT_HISTORY + 1);
|
||||
if let Some(current_hashmap) = subscriptions.get_mut(&hashmap_key) {
|
||||
current_hashmap.insert(sub_id, (sink, confirmations));
|
||||
return;
|
||||
@ -120,8 +122,8 @@ where
|
||||
fn check_confirmations_and_notify<K, S, B, F, X>(
|
||||
subscriptions: &HashMap<K, HashMap<SubscriptionId, (Sink<Response<S>>, Confirmations)>>,
|
||||
hashmap_key: &K,
|
||||
current_slot: Slot,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
bank_method: B,
|
||||
filter_results: F,
|
||||
notifier: &RpcNotifier,
|
||||
@ -133,6 +135,10 @@ where
|
||||
F: Fn(X, u64) -> Box<dyn Iterator<Item = S>>,
|
||||
X: Clone + Serialize,
|
||||
{
|
||||
let mut confirmation_slots: HashMap<usize, Slot> = HashMap::new();
|
||||
let r_block_commitment_cache = block_commitment_cache.read().unwrap();
|
||||
let current_slot = r_block_commitment_cache.slot();
|
||||
let root = r_block_commitment_cache.root();
|
||||
let current_ancestors = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
@ -140,27 +146,29 @@ where
|
||||
.unwrap()
|
||||
.ancestors
|
||||
.clone();
|
||||
for (slot, _) in current_ancestors.iter() {
|
||||
if let Some(confirmations) = r_block_commitment_cache.get_confirmation_count(*slot) {
|
||||
confirmation_slots.entry(confirmations).or_insert(*slot);
|
||||
}
|
||||
}
|
||||
drop(r_block_commitment_cache);
|
||||
|
||||
let mut notified_set: HashSet<SubscriptionId> = HashSet::new();
|
||||
if let Some(hashmap) = subscriptions.get(hashmap_key) {
|
||||
for (sub_id, (sink, confirmations)) in hashmap.iter() {
|
||||
let desired_slot: Vec<u64> = current_ancestors
|
||||
.iter()
|
||||
.filter(|(_, &v)| v == *confirmations)
|
||||
.map(|(k, _)| k)
|
||||
.cloned()
|
||||
.collect();
|
||||
let root: Vec<u64> = current_ancestors
|
||||
.iter()
|
||||
.filter(|(_, &v)| v == 32)
|
||||
.map(|(k, _)| k)
|
||||
.cloned()
|
||||
.collect();
|
||||
let root = if root.len() == 1 { root[0] } else { 0 };
|
||||
if desired_slot.len() == 1 {
|
||||
let slot = desired_slot[0];
|
||||
let desired_bank = bank_forks.read().unwrap().get(slot).unwrap().clone();
|
||||
let results = bank_method(&desired_bank, hashmap_key);
|
||||
let desired_slot = if *confirmations == 0 {
|
||||
Some(¤t_slot)
|
||||
} else if *confirmations == MAX_LOCKOUT_HISTORY + 1 {
|
||||
Some(&root)
|
||||
} else {
|
||||
confirmation_slots.get(confirmations)
|
||||
};
|
||||
if let Some(&slot) = desired_slot {
|
||||
let results = {
|
||||
let bank_forks = bank_forks.read().unwrap();
|
||||
let desired_bank = bank_forks.get(slot).unwrap();
|
||||
bank_method(&desired_bank, hashmap_key)
|
||||
};
|
||||
for result in filter_results(results, root) {
|
||||
notifier.notify(
|
||||
Response {
|
||||
@ -201,11 +209,15 @@ fn filter_account_result(
|
||||
Box::new(iter::empty())
|
||||
}
|
||||
|
||||
fn filter_signature_result<S>(result: Option<S>, _root: Slot) -> Box<dyn Iterator<Item = S>>
|
||||
where
|
||||
S: 'static + Clone + Serialize,
|
||||
{
|
||||
Box::new(result.into_iter())
|
||||
fn filter_signature_result(
|
||||
result: Option<transaction::Result<()>>,
|
||||
_root: Slot,
|
||||
) -> Box<dyn Iterator<Item = RpcSignatureResult>> {
|
||||
Box::new(
|
||||
result
|
||||
.into_iter()
|
||||
.map(|result| RpcSignatureResult { err: result.err() }),
|
||||
)
|
||||
}
|
||||
|
||||
fn filter_program_results(
|
||||
@ -236,7 +248,10 @@ pub struct RpcSubscriptions {
|
||||
|
||||
impl Default for RpcSubscriptions {
|
||||
fn default() -> Self {
|
||||
Self::new(&Arc::new(AtomicBool::new(false)))
|
||||
Self::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@ -249,7 +264,10 @@ impl Drop for RpcSubscriptions {
|
||||
}
|
||||
|
||||
impl RpcSubscriptions {
|
||||
pub fn new(exit: &Arc<AtomicBool>) -> Self {
|
||||
pub fn new(
|
||||
exit: &Arc<AtomicBool>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> Self {
|
||||
let (notification_sender, notification_receiver): (
|
||||
Sender<NotificationEntry>,
|
||||
Receiver<NotificationEntry>,
|
||||
@ -288,6 +306,7 @@ impl RpcSubscriptions {
|
||||
signature_subscriptions_clone,
|
||||
slot_subscriptions_clone,
|
||||
root_subscriptions_clone,
|
||||
block_commitment_cache,
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
@ -307,8 +326,8 @@ impl RpcSubscriptions {
|
||||
|
||||
fn check_account(
|
||||
pubkey: &Pubkey,
|
||||
current_slot: Slot,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
account_subscriptions: Arc<RpcAccountSubscriptions>,
|
||||
notifier: &RpcNotifier,
|
||||
) {
|
||||
@ -316,8 +335,8 @@ impl RpcSubscriptions {
|
||||
check_confirmations_and_notify(
|
||||
&subscriptions,
|
||||
pubkey,
|
||||
current_slot,
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
Bank::get_account_modified_since_parent,
|
||||
filter_account_result,
|
||||
notifier,
|
||||
@ -326,8 +345,8 @@ impl RpcSubscriptions {
|
||||
|
||||
fn check_program(
|
||||
program_id: &Pubkey,
|
||||
current_slot: Slot,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
program_subscriptions: Arc<RpcProgramSubscriptions>,
|
||||
notifier: &RpcNotifier,
|
||||
) {
|
||||
@ -335,8 +354,8 @@ impl RpcSubscriptions {
|
||||
check_confirmations_and_notify(
|
||||
&subscriptions,
|
||||
program_id,
|
||||
current_slot,
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
Bank::get_program_accounts_modified_since_parent,
|
||||
filter_program_results,
|
||||
notifier,
|
||||
@ -345,8 +364,8 @@ impl RpcSubscriptions {
|
||||
|
||||
fn check_signature(
|
||||
signature: &Signature,
|
||||
current_slot: Slot,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
|
||||
notifier: &RpcNotifier,
|
||||
) {
|
||||
@ -354,8 +373,8 @@ impl RpcSubscriptions {
|
||||
let notified_ids = check_confirmations_and_notify(
|
||||
&subscriptions,
|
||||
signature,
|
||||
current_slot,
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
Bank::get_signature_status_processed_since_parent,
|
||||
filter_signature_result,
|
||||
notifier,
|
||||
@ -417,7 +436,7 @@ impl RpcSubscriptions {
|
||||
signature: Signature,
|
||||
confirmations: Option<Confirmations>,
|
||||
sub_id: SubscriptionId,
|
||||
subscriber: Subscriber<Response<transaction::Result<()>>>,
|
||||
subscriber: Subscriber<Response<RpcSignatureResult>>,
|
||||
) {
|
||||
let mut subscriptions = self.signature_subscriptions.write().unwrap();
|
||||
add_subscription(
|
||||
@ -499,6 +518,7 @@ impl RpcSubscriptions {
|
||||
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
|
||||
slot_subscriptions: Arc<RpcSlotSubscriptions>,
|
||||
root_subscriptions: Arc<RpcRootSubscriptions>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
) {
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
@ -518,7 +538,7 @@ impl RpcSubscriptions {
|
||||
notifier.notify(root, sink);
|
||||
}
|
||||
}
|
||||
NotificationEntry::Bank((current_slot, bank_forks)) => {
|
||||
NotificationEntry::Bank((_current_slot, bank_forks)) => {
|
||||
let pubkeys: Vec<_> = {
|
||||
let subs = account_subscriptions.read().unwrap();
|
||||
subs.keys().cloned().collect()
|
||||
@ -526,8 +546,8 @@ impl RpcSubscriptions {
|
||||
for pubkey in &pubkeys {
|
||||
Self::check_account(
|
||||
pubkey,
|
||||
current_slot,
|
||||
&bank_forks,
|
||||
&block_commitment_cache,
|
||||
account_subscriptions.clone(),
|
||||
¬ifier,
|
||||
);
|
||||
@ -540,8 +560,8 @@ impl RpcSubscriptions {
|
||||
for program_id in &programs {
|
||||
Self::check_program(
|
||||
program_id,
|
||||
current_slot,
|
||||
&bank_forks,
|
||||
&block_commitment_cache,
|
||||
program_subscriptions.clone(),
|
||||
¬ifier,
|
||||
);
|
||||
@ -554,8 +574,8 @@ impl RpcSubscriptions {
|
||||
for signature in &signatures {
|
||||
Self::check_signature(
|
||||
signature,
|
||||
current_slot,
|
||||
&bank_forks,
|
||||
&block_commitment_cache,
|
||||
signature_subscriptions.clone(),
|
||||
¬ifier,
|
||||
);
|
||||
@ -596,6 +616,7 @@ impl RpcSubscriptions {
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use super::*;
|
||||
use crate::commitment::BlockCommitment;
|
||||
use jsonrpc_core::futures::{self, stream::Stream};
|
||||
use jsonrpc_pubsub::typed::Subscriber;
|
||||
use solana_budget_program;
|
||||
@ -663,7 +684,10 @@ pub(crate) mod tests {
|
||||
Subscriber::new_test("accountNotification");
|
||||
let sub_id = SubscriptionId::Number(0 as u64);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(&exit);
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
);
|
||||
subscriptions.add_account_subscription(alice.pubkey(), None, sub_id.clone(), subscriber);
|
||||
|
||||
assert!(subscriptions
|
||||
@ -732,7 +756,10 @@ pub(crate) mod tests {
|
||||
Subscriber::new_test("programNotification");
|
||||
let sub_id = SubscriptionId::Number(0 as u64);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(&exit);
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
);
|
||||
subscriptions.add_program_subscription(
|
||||
solana_budget_program::id(),
|
||||
None,
|
||||
@ -812,27 +839,41 @@ pub(crate) mod tests {
|
||||
.unwrap()
|
||||
.process_transaction(&processed_tx)
|
||||
.unwrap();
|
||||
let bank1 = bank_forks[1].clone();
|
||||
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(&exit);
|
||||
let mut cache0 = BlockCommitment::default();
|
||||
cache0.increase_confirmation_stake(1, 10);
|
||||
let cache1 = BlockCommitment::default();
|
||||
|
||||
let (past_bank_sub, _id_receiver, past_bank_recv) =
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
block_commitment.entry(1).or_insert(cache1.clone());
|
||||
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 10, bank1, 0);
|
||||
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions =
|
||||
RpcSubscriptions::new(&exit, Arc::new(RwLock::new(block_commitment_cache)));
|
||||
|
||||
let (past_bank_sub1, _id_receiver, past_bank_recv1) =
|
||||
Subscriber::new_test("signatureNotification");
|
||||
let (past_bank_sub2, _id_receiver, past_bank_recv2) =
|
||||
Subscriber::new_test("signatureNotification");
|
||||
let (processed_sub, _id_receiver, processed_recv) =
|
||||
Subscriber::new_test("signatureNotification");
|
||||
|
||||
subscriptions.add_signature_subscription(
|
||||
past_bank_tx.signatures[0],
|
||||
Some(0),
|
||||
SubscriptionId::Number(1 as u64),
|
||||
Subscriber::new_test("signatureNotification").0,
|
||||
past_bank_sub1,
|
||||
);
|
||||
subscriptions.add_signature_subscription(
|
||||
past_bank_tx.signatures[0],
|
||||
Some(1),
|
||||
SubscriptionId::Number(2 as u64),
|
||||
past_bank_sub,
|
||||
past_bank_sub2,
|
||||
);
|
||||
subscriptions.add_signature_subscription(
|
||||
processed_tx.signatures[0],
|
||||
@ -855,43 +896,48 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
subscriptions.notify_subscribers(1, &bank_forks);
|
||||
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
|
||||
let expected_res = RpcSignatureResult { err: None };
|
||||
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": { "slot": 0 },
|
||||
"value": expected_res,
|
||||
},
|
||||
"subscription": 2,
|
||||
}
|
||||
});
|
||||
let (response, _) = robust_poll_or_panic(past_bank_recv);
|
||||
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
|
||||
struct Notification {
|
||||
slot: Slot,
|
||||
id: u64,
|
||||
}
|
||||
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": { "slot": 1 },
|
||||
"value": expected_res,
|
||||
},
|
||||
"subscription": 3,
|
||||
}
|
||||
});
|
||||
let expected_notification = |exp: Notification| -> String {
|
||||
let json = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
"params": {
|
||||
"result": {
|
||||
"context": { "slot": exp.slot },
|
||||
"value": &expected_res,
|
||||
},
|
||||
"subscription": exp.id,
|
||||
}
|
||||
});
|
||||
serde_json::to_string(&json).unwrap()
|
||||
};
|
||||
|
||||
// Expect to receive a notification from bank 1 because this subscription is
|
||||
// looking for 0 confirmations and so checks the current bank
|
||||
let expected = expected_notification(Notification { slot: 1, id: 1 });
|
||||
let (response, _) = robust_poll_or_panic(past_bank_recv1);
|
||||
assert_eq!(expected, response);
|
||||
|
||||
// Expect to receive a notification from bank 0 because this subscription is
|
||||
// looking for 1 confirmation and so checks the past bank
|
||||
let expected = expected_notification(Notification { slot: 0, id: 2 });
|
||||
let (response, _) = robust_poll_or_panic(past_bank_recv2);
|
||||
assert_eq!(expected, response);
|
||||
|
||||
let expected = expected_notification(Notification { slot: 1, id: 3 });
|
||||
let (response, _) = robust_poll_or_panic(processed_recv);
|
||||
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
|
||||
|
||||
let sig_subs = subscriptions.signature_subscriptions.read().unwrap();
|
||||
assert_eq!(expected, response);
|
||||
|
||||
// Subscription should be automatically removed after notification
|
||||
let sig_subs = subscriptions.signature_subscriptions.read().unwrap();
|
||||
assert!(!sig_subs.contains_key(&processed_tx.signatures[0]));
|
||||
|
||||
// Only one notification is expected for signature processed in previous bank
|
||||
assert_eq!(sig_subs.get(&past_bank_tx.signatures[0]).unwrap().len(), 1);
|
||||
assert!(!sig_subs.contains_key(&past_bank_tx.signatures[0]));
|
||||
|
||||
// Unprocessed signature subscription should not be removed
|
||||
assert_eq!(
|
||||
@ -906,7 +952,10 @@ pub(crate) mod tests {
|
||||
Subscriber::new_test("slotNotification");
|
||||
let sub_id = SubscriptionId::Number(0 as u64);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(&exit);
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
);
|
||||
subscriptions.add_slot_subscription(sub_id.clone(), subscriber);
|
||||
|
||||
assert!(subscriptions
|
||||
@ -944,7 +993,10 @@ pub(crate) mod tests {
|
||||
Subscriber::new_test("rootNotification");
|
||||
let sub_id = SubscriptionId::Number(0 as u64);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(&exit);
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
);
|
||||
subscriptions.add_root_subscription(sub_id.clone(), subscriber);
|
||||
|
||||
assert!(subscriptions
|
||||
|
@ -2,11 +2,13 @@ use crate::{
|
||||
cluster_info::{ClusterInfo, ClusterInfoError},
|
||||
cluster_slots::ClusterSlots,
|
||||
contact_info::ContactInfo,
|
||||
repair_service::RepairStats,
|
||||
result::{Error, Result},
|
||||
weighted_shuffle::weighted_best,
|
||||
};
|
||||
use bincode::serialize;
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_measure::thread_mem_usage;
|
||||
use solana_metrics::{datapoint_debug, inc_new_counter_debug};
|
||||
use solana_perf::packet::{limited_deserialize, Packet, Packets, PacketsRecycler};
|
||||
@ -46,6 +48,17 @@ impl RepairType {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ServeRepairStats {
|
||||
pub total_packets: usize,
|
||||
pub dropped_packets: usize,
|
||||
pub processed: usize,
|
||||
pub self_repair: usize,
|
||||
pub window_index: usize,
|
||||
pub highest_window_index: usize,
|
||||
pub orphan: usize,
|
||||
}
|
||||
|
||||
/// Window protocol messages
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
enum RepairProtocol {
|
||||
@ -106,6 +119,7 @@ impl ServeRepair {
|
||||
from_addr: &SocketAddr,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
request: RepairProtocol,
|
||||
stats: &mut ServeRepairStats,
|
||||
) -> Option<Packets> {
|
||||
let now = Instant::now();
|
||||
|
||||
@ -113,18 +127,14 @@ impl ServeRepair {
|
||||
let my_id = me.read().unwrap().keypair.pubkey();
|
||||
let from = Self::get_repair_sender(&request);
|
||||
if from.id == my_id {
|
||||
warn!(
|
||||
"{}: Ignored received repair request from ME {}",
|
||||
my_id, from.id,
|
||||
);
|
||||
inc_new_counter_debug!("serve_repair-handle-repair--eq", 1);
|
||||
stats.self_repair += 1;
|
||||
return None;
|
||||
}
|
||||
|
||||
let (res, label) = {
|
||||
match &request {
|
||||
RepairProtocol::WindowIndex(from, slot, shred_index) => {
|
||||
inc_new_counter_debug!("serve_repair-request-window-index", 1);
|
||||
stats.window_index += 1;
|
||||
(
|
||||
Self::run_window_request(
|
||||
recycler,
|
||||
@ -140,7 +150,7 @@ impl ServeRepair {
|
||||
}
|
||||
|
||||
RepairProtocol::HighestWindowIndex(_, slot, highest_index) => {
|
||||
inc_new_counter_debug!("serve_repair-request-highest-window-index", 1);
|
||||
stats.highest_window_index += 1;
|
||||
(
|
||||
Self::run_highest_window_request(
|
||||
recycler,
|
||||
@ -153,7 +163,7 @@ impl ServeRepair {
|
||||
)
|
||||
}
|
||||
RepairProtocol::Orphan(_, slot) => {
|
||||
inc_new_counter_debug!("serve_repair-request-orphan", 1);
|
||||
stats.orphan += 1;
|
||||
(
|
||||
Self::run_orphan(
|
||||
recycler,
|
||||
@ -187,15 +197,71 @@ impl ServeRepair {
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
requests_receiver: &PacketReceiver,
|
||||
response_sender: &PacketSender,
|
||||
stats: &mut ServeRepairStats,
|
||||
max_packets: &mut usize,
|
||||
) -> Result<()> {
|
||||
//TODO cache connections
|
||||
let timeout = Duration::new(1, 0);
|
||||
let reqs = requests_receiver.recv_timeout(timeout)?;
|
||||
let mut reqs_v = vec![requests_receiver.recv_timeout(timeout)?];
|
||||
let mut total_packets = reqs_v[0].packets.len();
|
||||
|
||||
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
|
||||
let mut dropped_packets = 0;
|
||||
while let Ok(more) = requests_receiver.try_recv() {
|
||||
total_packets += more.packets.len();
|
||||
if total_packets < *max_packets {
|
||||
// Drop the rest in the channel in case of dos
|
||||
reqs_v.push(more);
|
||||
} else {
|
||||
dropped_packets += more.packets.len();
|
||||
}
|
||||
}
|
||||
|
||||
stats.dropped_packets += dropped_packets;
|
||||
stats.total_packets += total_packets;
|
||||
|
||||
let mut time = Measure::start("repair::handle_packets");
|
||||
for reqs in reqs_v {
|
||||
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats);
|
||||
}
|
||||
time.stop();
|
||||
if total_packets >= *max_packets {
|
||||
if time.as_ms() > 1000 {
|
||||
*max_packets = (*max_packets * 9) / 10;
|
||||
} else {
|
||||
*max_packets = (*max_packets * 10) / 9;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn report_reset_stats(me: &Arc<RwLock<Self>>, stats: &mut ServeRepairStats) {
|
||||
if stats.self_repair > 0 {
|
||||
let my_id = me.read().unwrap().keypair.pubkey();
|
||||
warn!(
|
||||
"{}: Ignored received repair requests from ME: {}",
|
||||
my_id, stats.self_repair,
|
||||
);
|
||||
inc_new_counter_debug!("serve_repair-handle-repair--eq", stats.self_repair);
|
||||
}
|
||||
|
||||
inc_new_counter_info!("serve_repair-total_packets", stats.total_packets);
|
||||
inc_new_counter_info!("serve_repair-dropped_packets", stats.dropped_packets);
|
||||
|
||||
debug!(
|
||||
"repair_listener: total_packets: {} passed: {}",
|
||||
stats.total_packets, stats.processed
|
||||
);
|
||||
|
||||
inc_new_counter_debug!("serve_repair-request-window-index", stats.window_index);
|
||||
inc_new_counter_debug!(
|
||||
"serve_repair-request-highest-window-index",
|
||||
stats.highest_window_index
|
||||
);
|
||||
inc_new_counter_debug!("serve_repair-request-orphan", stats.orphan);
|
||||
|
||||
*stats = ServeRepairStats::default();
|
||||
}
|
||||
|
||||
pub fn listen(
|
||||
me: Arc<RwLock<Self>>,
|
||||
blockstore: Option<Arc<Blockstore>>,
|
||||
@ -207,22 +273,33 @@ impl ServeRepair {
|
||||
let recycler = PacketsRecycler::default();
|
||||
Builder::new()
|
||||
.name("solana-repair-listen".to_string())
|
||||
.spawn(move || loop {
|
||||
let result = Self::run_listen(
|
||||
&me,
|
||||
&recycler,
|
||||
blockstore.as_ref(),
|
||||
&requests_receiver,
|
||||
&response_sender,
|
||||
);
|
||||
match result {
|
||||
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
|
||||
Err(err) => info!("repair listener error: {:?}", err),
|
||||
};
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
.spawn(move || {
|
||||
let mut last_print = Instant::now();
|
||||
let mut stats = ServeRepairStats::default();
|
||||
let mut max_packets = 1024;
|
||||
loop {
|
||||
let result = Self::run_listen(
|
||||
&me,
|
||||
&recycler,
|
||||
blockstore.as_ref(),
|
||||
&requests_receiver,
|
||||
&response_sender,
|
||||
&mut stats,
|
||||
&mut max_packets,
|
||||
);
|
||||
match result {
|
||||
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
|
||||
Err(err) => info!("repair listener error: {:?}", err),
|
||||
};
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
if last_print.elapsed().as_secs() > 2 {
|
||||
Self::report_reset_stats(&me, &mut stats);
|
||||
last_print = Instant::now();
|
||||
}
|
||||
thread_mem_usage::datapoint("solana-repair-listen");
|
||||
}
|
||||
thread_mem_usage::datapoint("solana-repair-listen");
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
@ -233,6 +310,7 @@ impl ServeRepair {
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
packets: Packets,
|
||||
response_sender: &PacketSender,
|
||||
stats: &mut ServeRepairStats,
|
||||
) {
|
||||
// iter over the packets, collect pulls separately and process everything else
|
||||
let allocated = thread_mem_usage::Allocatedp::default();
|
||||
@ -242,7 +320,9 @@ impl ServeRepair {
|
||||
limited_deserialize(&packet.data[..packet.meta.size])
|
||||
.into_iter()
|
||||
.for_each(|request| {
|
||||
let rsp = Self::handle_repair(me, recycler, &from_addr, blockstore, request);
|
||||
stats.processed += 1;
|
||||
let rsp =
|
||||
Self::handle_repair(me, recycler, &from_addr, blockstore, request, stats);
|
||||
if let Some(rsp) = rsp {
|
||||
let _ignore_disconnect = response_sender.send(rsp);
|
||||
}
|
||||
@ -277,6 +357,7 @@ impl ServeRepair {
|
||||
cluster_slots: &ClusterSlots,
|
||||
repair_request: &RepairType,
|
||||
cache: &mut RepairCache,
|
||||
repair_stats: &mut RepairStats,
|
||||
) -> Result<(SocketAddr, Vec<u8>)> {
|
||||
// find a peer that appears to be accepting replication and has the desired slot, as indicated
|
||||
// by a valid tvu port location
|
||||
@ -295,30 +376,26 @@ impl ServeRepair {
|
||||
let (repair_peers, weights) = cache.get(&repair_request.slot()).unwrap();
|
||||
let n = weighted_best(&weights, Pubkey::new_rand().to_bytes());
|
||||
let addr = repair_peers[n].serve_repair; // send the request to the peer's serve_repair port
|
||||
let out = self.map_repair_request(repair_request)?;
|
||||
let out = self.map_repair_request(repair_request, repair_stats)?;
|
||||
Ok((addr, out))
|
||||
}
|
||||
|
||||
pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
|
||||
pub fn map_repair_request(
|
||||
&self,
|
||||
repair_request: &RepairType,
|
||||
repair_stats: &mut RepairStats,
|
||||
) -> Result<Vec<u8>> {
|
||||
match repair_request {
|
||||
RepairType::Shred(slot, shred_index) => {
|
||||
datapoint_debug!(
|
||||
"serve_repair-repair",
|
||||
("repair-slot", *slot, i64),
|
||||
("repair-ix", *shred_index, i64)
|
||||
);
|
||||
repair_stats.shred.update(*slot);
|
||||
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
|
||||
}
|
||||
RepairType::HighestShred(slot, shred_index) => {
|
||||
datapoint_info!(
|
||||
"serve_repair-repair_highest",
|
||||
("repair-highest-slot", *slot, i64),
|
||||
("repair-highest-ix", *shred_index, i64)
|
||||
);
|
||||
repair_stats.highest_shred.update(*slot);
|
||||
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
|
||||
}
|
||||
RepairType::Orphan(slot) => {
|
||||
datapoint_info!("serve_repair-repair_orphan", ("repair-orphan", *slot, i64));
|
||||
repair_stats.orphan.update(*slot);
|
||||
Ok(self.orphan_bytes(*slot)?)
|
||||
}
|
||||
}
|
||||
@ -583,6 +660,7 @@ mod tests {
|
||||
&cluster_slots,
|
||||
&RepairType::Shred(0, 0),
|
||||
&mut HashMap::new(),
|
||||
&mut RepairStats::default(),
|
||||
);
|
||||
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
|
||||
|
||||
@ -608,6 +686,7 @@ mod tests {
|
||||
&cluster_slots,
|
||||
&RepairType::Shred(0, 0),
|
||||
&mut HashMap::new(),
|
||||
&mut RepairStats::default(),
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(nxt.serve_repair, serve_repair_addr);
|
||||
@ -639,6 +718,7 @@ mod tests {
|
||||
&cluster_slots,
|
||||
&RepairType::Shred(0, 0),
|
||||
&mut HashMap::new(),
|
||||
&mut RepairStats::default(),
|
||||
)
|
||||
.unwrap();
|
||||
if rv.0 == serve_repair_addr {
|
||||
|
@ -4,6 +4,7 @@
|
||||
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
commitment::BlockCommitmentCache,
|
||||
contact_info::ContactInfo,
|
||||
result::{Error, Result},
|
||||
};
|
||||
@ -11,9 +12,7 @@ use rand::{Rng, SeedableRng};
|
||||
use rand_chacha::ChaChaRng;
|
||||
use solana_chacha_cuda::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
use solana_runtime::{
|
||||
bank::Bank, status_cache::SignatureConfirmationStatus, storage_utils::archiver_accounts,
|
||||
};
|
||||
use solana_runtime::{bank::Bank, storage_utils::archiver_accounts};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
account_utils::StateMut,
|
||||
@ -30,6 +29,7 @@ use solana_storage_program::{
|
||||
storage_instruction,
|
||||
storage_instruction::proof_validation,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
cmp,
|
||||
collections::HashMap,
|
||||
@ -185,6 +185,7 @@ impl StorageStage {
|
||||
exit: &Arc<AtomicBool>,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> Self {
|
||||
let (instruction_sender, instruction_receiver) = channel();
|
||||
|
||||
@ -256,6 +257,7 @@ impl StorageStage {
|
||||
&keypair,
|
||||
&storage_keypair,
|
||||
&transactions_socket,
|
||||
&block_commitment_cache,
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
info!("failed to send storage transaction: {:?}", err)
|
||||
@ -289,6 +291,7 @@ impl StorageStage {
|
||||
keypair: &Arc<Keypair>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
transactions_socket: &UdpSocket,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> io::Result<()> {
|
||||
let working_bank = bank_forks.read().unwrap().working_bank();
|
||||
let blockhash = working_bank.confirmed_last_blockhash().0;
|
||||
@ -323,8 +326,13 @@ impl StorageStage {
|
||||
cluster_info.read().unwrap().my_data().tpu,
|
||||
)?;
|
||||
sleep(Duration::from_millis(100));
|
||||
if Self::poll_for_signature_confirmation(bank_forks, &transaction.signatures[0], 0)
|
||||
.is_ok()
|
||||
if Self::poll_for_signature_confirmation(
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
&transaction.signatures[0],
|
||||
0,
|
||||
)
|
||||
.is_ok()
|
||||
{
|
||||
break;
|
||||
};
|
||||
@ -334,23 +342,24 @@ impl StorageStage {
|
||||
|
||||
fn poll_for_signature_confirmation(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
signature: &Signature,
|
||||
min_confirmed_blocks: usize,
|
||||
) -> Result<()> {
|
||||
let mut now = Instant::now();
|
||||
let mut confirmed_blocks = 0;
|
||||
loop {
|
||||
let response = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.working_bank()
|
||||
.get_signature_confirmation_status(signature);
|
||||
if let Some(SignatureConfirmationStatus {
|
||||
confirmations,
|
||||
status,
|
||||
..
|
||||
}) = response
|
||||
{
|
||||
let working_bank = bank_forks.read().unwrap().working_bank();
|
||||
let response = working_bank.get_signature_status_slot(signature);
|
||||
if let Some((slot, status)) = response {
|
||||
let confirmations = if working_bank.src.roots().contains(&slot) {
|
||||
MAX_LOCKOUT_HISTORY + 1
|
||||
} else {
|
||||
let r_block_commitment_cache = block_commitment_cache.read().unwrap();
|
||||
r_block_commitment_cache
|
||||
.get_confirmation_count(slot)
|
||||
.unwrap_or(0)
|
||||
};
|
||||
if status.is_ok() {
|
||||
if confirmed_blocks != confirmations {
|
||||
now = Instant::now();
|
||||
@ -655,12 +664,18 @@ mod tests {
|
||||
use rayon::prelude::*;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::hash::Hasher;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use std::cmp::{max, min};
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use solana_sdk::{
|
||||
hash::Hasher,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use std::{
|
||||
cmp::{max, min},
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicUsize, Ordering},
|
||||
mpsc::channel,
|
||||
Arc, RwLock,
|
||||
},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_storage_stage_none_ledger() {
|
||||
@ -675,6 +690,7 @@ mod tests {
|
||||
&[bank.clone()],
|
||||
vec![0],
|
||||
)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let (_slot_sender, slot_receiver) = channel();
|
||||
let storage_state = StorageState::new(
|
||||
&bank.last_blockhash(),
|
||||
@ -690,6 +706,7 @@ mod tests {
|
||||
&exit.clone(),
|
||||
&bank_forks,
|
||||
&cluster_info,
|
||||
block_commitment_cache,
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
storage_stage.join().unwrap();
|
||||
|
@ -70,9 +70,14 @@ impl TransactionStatusService {
|
||||
}
|
||||
.expect("FeeCalculator must exist");
|
||||
let fee = fee_calculator.calculate_fee(transaction.message());
|
||||
let (writable_keys, readonly_keys) =
|
||||
transaction.message.get_account_keys_by_lock_type();
|
||||
blockstore
|
||||
.write_transaction_status(
|
||||
(slot, transaction.signatures[0]),
|
||||
slot,
|
||||
transaction.signatures[0],
|
||||
writable_keys,
|
||||
readonly_keys,
|
||||
&TransactionStatusMeta {
|
||||
status,
|
||||
fee,
|
||||
|
@ -2,7 +2,7 @@
|
||||
//! validation pipeline in software.
|
||||
|
||||
use crate::{
|
||||
accounts_cleanup_service::AccountsCleanupService,
|
||||
accounts_background_service::AccountsBackgroundService,
|
||||
accounts_hash_verifier::AccountsHashVerifier,
|
||||
broadcast_stage::RetransmitSlotsSender,
|
||||
cluster_info::ClusterInfo,
|
||||
@ -49,7 +49,7 @@ pub struct Tvu {
|
||||
retransmit_stage: RetransmitStage,
|
||||
replay_stage: ReplayStage,
|
||||
ledger_cleanup_service: Option<LedgerCleanupService>,
|
||||
accounts_cleanup_service: AccountsCleanupService,
|
||||
accounts_background_service: AccountsBackgroundService,
|
||||
storage_stage: StorageStage,
|
||||
accounts_hash_verifier: AccountsHashVerifier,
|
||||
}
|
||||
@ -81,7 +81,7 @@ impl Tvu {
|
||||
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
vote_account: &Pubkey,
|
||||
voting_keypair: Option<Arc<Keypair>>,
|
||||
authorized_voter_keypairs: Vec<Arc<Keypair>>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
@ -160,7 +160,7 @@ impl Tvu {
|
||||
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
|
||||
cfg,
|
||||
tvu_config.shred_version,
|
||||
cluster_slots,
|
||||
cluster_slots.clone(),
|
||||
);
|
||||
|
||||
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();
|
||||
@ -179,13 +179,13 @@ impl Tvu {
|
||||
let replay_stage_config = ReplayStageConfig {
|
||||
my_pubkey: keypair.pubkey(),
|
||||
vote_account: *vote_account,
|
||||
voting_keypair,
|
||||
authorized_voter_keypairs,
|
||||
exit: exit.clone(),
|
||||
subscriptions: subscriptions.clone(),
|
||||
leader_schedule_cache: leader_schedule_cache.clone(),
|
||||
latest_root_senders: vec![ledger_cleanup_slot_sender],
|
||||
accounts_hash_sender: Some(accounts_hash_sender),
|
||||
block_commitment_cache,
|
||||
block_commitment_cache: block_commitment_cache.clone(),
|
||||
transaction_status_sender,
|
||||
rewards_recorder_sender,
|
||||
};
|
||||
@ -198,6 +198,7 @@ impl Tvu {
|
||||
ledger_signal_receiver,
|
||||
poh_recorder.clone(),
|
||||
vote_tracker,
|
||||
cluster_slots,
|
||||
retransmit_slots_sender,
|
||||
);
|
||||
|
||||
@ -210,7 +211,7 @@ impl Tvu {
|
||||
)
|
||||
});
|
||||
|
||||
let accounts_cleanup_service = AccountsCleanupService::new(bank_forks.clone(), &exit);
|
||||
let accounts_background_service = AccountsBackgroundService::new(bank_forks.clone(), &exit);
|
||||
|
||||
let storage_stage = StorageStage::new(
|
||||
storage_state,
|
||||
@ -221,6 +222,7 @@ impl Tvu {
|
||||
&exit,
|
||||
&bank_forks,
|
||||
&cluster_info,
|
||||
block_commitment_cache,
|
||||
);
|
||||
|
||||
Tvu {
|
||||
@ -229,7 +231,7 @@ impl Tvu {
|
||||
retransmit_stage,
|
||||
replay_stage,
|
||||
ledger_cleanup_service,
|
||||
accounts_cleanup_service,
|
||||
accounts_background_service,
|
||||
storage_stage,
|
||||
accounts_hash_verifier,
|
||||
}
|
||||
@ -243,7 +245,7 @@ impl Tvu {
|
||||
if self.ledger_cleanup_service.is_some() {
|
||||
self.ledger_cleanup_service.unwrap().join()?;
|
||||
}
|
||||
self.accounts_cleanup_service.join()?;
|
||||
self.accounts_background_service.join()?;
|
||||
self.replay_stage.join()?;
|
||||
self.accounts_hash_verifier.join()?;
|
||||
Ok(())
|
||||
@ -285,14 +287,14 @@ pub mod tests {
|
||||
let bank = bank_forks.working_bank();
|
||||
let (exit, poh_recorder, poh_service, _entry_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, None);
|
||||
let voting_keypair = Keypair::new();
|
||||
let vote_keypair = Keypair::new();
|
||||
let storage_keypair = Arc::new(Keypair::new());
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let (retransmit_slots_sender, _retransmit_slots_receiver) = unbounded();
|
||||
let tvu = Tvu::new(
|
||||
&voting_keypair.pubkey(),
|
||||
Some(Arc::new(voting_keypair)),
|
||||
&vote_keypair.pubkey(),
|
||||
vec![Arc::new(vote_keypair)],
|
||||
&storage_keypair,
|
||||
&Arc::new(RwLock::new(bank_forks)),
|
||||
&cref1,
|
||||
@ -307,7 +309,10 @@ pub mod tests {
|
||||
blockstore,
|
||||
&StorageState::default(),
|
||||
l_receiver,
|
||||
&Arc::new(RpcSubscriptions::new(&exit)),
|
||||
&Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
)),
|
||||
&poh_recorder,
|
||||
&leader_schedule_cache,
|
||||
&exit,
|
||||
|
@ -151,7 +151,7 @@ impl Validator {
|
||||
keypair: &Arc<Keypair>,
|
||||
ledger_path: &Path,
|
||||
vote_account: &Pubkey,
|
||||
authorized_voter: &Arc<Keypair>,
|
||||
mut authorized_voter_keypairs: Vec<Arc<Keypair>>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
entrypoint_info_option: Option<&ContactInfo>,
|
||||
poh_verify: bool,
|
||||
@ -162,7 +162,15 @@ impl Validator {
|
||||
|
||||
warn!("identity: {}", id);
|
||||
warn!("vote account: {}", vote_account);
|
||||
warn!("authorized voter: {}", authorized_voter.pubkey());
|
||||
|
||||
if config.voting_disabled {
|
||||
warn!("voting disabled");
|
||||
authorized_voter_keypairs.clear();
|
||||
} else {
|
||||
for authorized_voter_keypair in &authorized_voter_keypairs {
|
||||
warn!("authorized voter: {}", authorized_voter_keypair.pubkey());
|
||||
}
|
||||
}
|
||||
report_target_features();
|
||||
|
||||
info!("entrypoint: {:?}", entrypoint_info_option);
|
||||
@ -234,7 +242,7 @@ impl Validator {
|
||||
|
||||
let blockstore = Arc::new(blockstore);
|
||||
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone()));
|
||||
|
||||
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
|
||||
if ContactInfo::is_valid_address(&node.info.rpc) {
|
||||
@ -386,11 +394,7 @@ impl Validator {
|
||||
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
|
||||
let tvu = Tvu::new(
|
||||
vote_account,
|
||||
if config.voting_disabled {
|
||||
None
|
||||
} else {
|
||||
Some(authorized_voter.clone())
|
||||
},
|
||||
authorized_voter_keypairs,
|
||||
storage_keypair,
|
||||
&bank_forks,
|
||||
&cluster_info,
|
||||
@ -666,6 +670,7 @@ pub struct TestValidator {
|
||||
pub struct TestValidatorOptions {
|
||||
pub fees: u64,
|
||||
pub bootstrap_validator_lamports: u64,
|
||||
pub mint_lamports: u64,
|
||||
}
|
||||
|
||||
impl Default for TestValidatorOptions {
|
||||
@ -674,6 +679,7 @@ impl Default for TestValidatorOptions {
|
||||
TestValidatorOptions {
|
||||
fees: 0,
|
||||
bootstrap_validator_lamports: BOOTSTRAP_VALIDATOR_LAMPORTS,
|
||||
mint_lamports: 1_000_000,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -692,6 +698,7 @@ impl TestValidator {
|
||||
let TestValidatorOptions {
|
||||
fees,
|
||||
bootstrap_validator_lamports,
|
||||
mint_lamports,
|
||||
} = options;
|
||||
let node_keypair = Arc::new(Keypair::new());
|
||||
let node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
|
||||
@ -702,7 +709,7 @@ impl TestValidator {
|
||||
mint_keypair,
|
||||
voting_keypair,
|
||||
} = create_genesis_config_with_leader_ex(
|
||||
1_000_000,
|
||||
mint_lamports,
|
||||
&contact_info.id,
|
||||
42,
|
||||
bootstrap_validator_lamports,
|
||||
@ -728,7 +735,7 @@ impl TestValidator {
|
||||
&node_keypair,
|
||||
&ledger_path,
|
||||
&leader_voting_keypair.pubkey(),
|
||||
&leader_voting_keypair,
|
||||
vec![leader_voting_keypair.clone()],
|
||||
&storage_keypair,
|
||||
None,
|
||||
true,
|
||||
@ -836,7 +843,7 @@ mod tests {
|
||||
&Arc::new(validator_keypair),
|
||||
&validator_ledger_path,
|
||||
&voting_keypair.pubkey(),
|
||||
&voting_keypair,
|
||||
vec![voting_keypair.clone()],
|
||||
&storage_keypair,
|
||||
Some(&leader_node.info),
|
||||
true,
|
||||
@ -861,7 +868,7 @@ mod tests {
|
||||
.genesis_config;
|
||||
let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
|
||||
ledger_paths.push(validator_ledger_path.clone());
|
||||
let voting_keypair = Arc::new(Keypair::new());
|
||||
let vote_account_keypair = Arc::new(Keypair::new());
|
||||
let storage_keypair = Arc::new(Keypair::new());
|
||||
let config = ValidatorConfig {
|
||||
rpc_ports: Some((
|
||||
@ -874,8 +881,8 @@ mod tests {
|
||||
validator_node,
|
||||
&Arc::new(validator_keypair),
|
||||
&validator_ledger_path,
|
||||
&voting_keypair.pubkey(),
|
||||
&voting_keypair,
|
||||
&vote_account_keypair.pubkey(),
|
||||
vec![vote_account_keypair.clone()],
|
||||
&storage_keypair,
|
||||
Some(&leader_node.info),
|
||||
true,
|
||||
|
@ -542,10 +542,7 @@ mod test {
|
||||
.insert_shreds(shreds, None, false)
|
||||
.expect("Expect successful processing of shred");
|
||||
|
||||
assert_eq!(
|
||||
blockstore.get_slot_entries(0, 0, None).unwrap(),
|
||||
original_entries
|
||||
);
|
||||
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), original_entries);
|
||||
|
||||
drop(blockstore);
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
|
@ -3,8 +3,8 @@ use solana_client::{
|
||||
rpc_client::RpcClient,
|
||||
};
|
||||
use solana_core::{
|
||||
rpc_pubsub_service::PubSubService, rpc_subscriptions::RpcSubscriptions,
|
||||
validator::TestValidator,
|
||||
commitment::BlockCommitmentCache, rpc_pubsub_service::PubSubService,
|
||||
rpc_subscriptions::RpcSubscriptions, validator::TestValidator,
|
||||
};
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig, pubkey::Pubkey, rpc_port, signature::Signer,
|
||||
@ -15,7 +15,7 @@ use std::{
|
||||
net::{IpAddr, SocketAddr},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
@ -85,7 +85,10 @@ fn test_slot_subscription() {
|
||||
rpc_port::DEFAULT_RPC_PUBSUB_PORT,
|
||||
);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
));
|
||||
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
|
||||
std::thread::sleep(Duration::from_millis(400));
|
||||
|
||||
|
@ -9,22 +9,18 @@ use reqwest::{self, header::CONTENT_TYPE};
|
||||
use serde_json::{json, Value};
|
||||
use solana_client::{
|
||||
rpc_client::{get_rpc_request_str, RpcClient},
|
||||
rpc_response::Response,
|
||||
rpc_response::{Response, RpcSignatureResult},
|
||||
};
|
||||
use solana_core::{rpc_pubsub::gen_client::Client as PubsubClient, validator::TestValidator};
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
system_transaction,
|
||||
transaction::{self, Transaction},
|
||||
commitment_config::CommitmentConfig, hash::Hash, pubkey::Pubkey, signature::Signer,
|
||||
system_transaction, transaction::Transaction,
|
||||
};
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
fs::remove_dir_all,
|
||||
net::UdpSocket,
|
||||
sync::mpsc::channel,
|
||||
sync::{Arc, Mutex},
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
@ -210,9 +206,11 @@ fn test_rpc_subscriptions() {
|
||||
..
|
||||
} = TestValidator::run();
|
||||
|
||||
// Create transaction signatures to subscribe to
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let transactions: Vec<Transaction> = (0..100)
|
||||
transactions_socket.connect(leader_data.tpu).unwrap();
|
||||
|
||||
// Create transaction signatures to subscribe to
|
||||
let transactions: Vec<Transaction> = (0..1000)
|
||||
.map(|_| system_transaction::transfer(&alice, &Pubkey::new_rand(), 1, genesis_hash))
|
||||
.collect();
|
||||
let mut signature_set: HashSet<String> = transactions
|
||||
@ -220,15 +218,15 @@ fn test_rpc_subscriptions() {
|
||||
.map(|tx| tx.signatures[0].to_string())
|
||||
.collect();
|
||||
|
||||
// Track when subscriptions are ready
|
||||
let (ready_sender, ready_receiver) = channel::<()>();
|
||||
// Track when status notifications are received
|
||||
let (status_sender, status_receiver) = channel::<(String, Response<RpcSignatureResult>)>();
|
||||
|
||||
// Create the pub sub runtime
|
||||
let mut rt = Runtime::new().unwrap();
|
||||
let rpc_pubsub_url = format!("ws://{}/", leader_data.rpc_pubsub);
|
||||
|
||||
let (status_sender, status_receiver) = channel::<(String, Response<transaction::Result<()>>)>();
|
||||
let status_sender = Arc::new(Mutex::new(status_sender));
|
||||
let (sent_sender, sent_receiver) = channel::<()>();
|
||||
let sent_sender = Arc::new(Mutex::new(sent_sender));
|
||||
|
||||
// Subscribe to all signatures
|
||||
rt.spawn({
|
||||
let connect = ws::try_connect::<PubsubClient>(&rpc_pubsub_url).unwrap();
|
||||
@ -237,18 +235,12 @@ fn test_rpc_subscriptions() {
|
||||
.and_then(move |client| {
|
||||
for sig in signature_set {
|
||||
let status_sender = status_sender.clone();
|
||||
let sent_sender = sent_sender.clone();
|
||||
tokio::spawn(
|
||||
client
|
||||
.signature_subscribe(sig.clone(), None)
|
||||
.and_then(move |sig_stream| {
|
||||
sent_sender.lock().unwrap().send(()).unwrap();
|
||||
sig_stream.for_each(move |result| {
|
||||
status_sender
|
||||
.lock()
|
||||
.unwrap()
|
||||
.send((sig.clone(), result))
|
||||
.unwrap();
|
||||
status_sender.send((sig.clone(), result)).unwrap();
|
||||
future::ok(())
|
||||
})
|
||||
})
|
||||
@ -257,37 +249,50 @@ fn test_rpc_subscriptions() {
|
||||
}),
|
||||
);
|
||||
}
|
||||
tokio::spawn(
|
||||
client
|
||||
.slot_subscribe()
|
||||
.and_then(move |slot_stream| {
|
||||
slot_stream.for_each(move |_| {
|
||||
ready_sender.send(()).unwrap();
|
||||
future::ok(())
|
||||
})
|
||||
})
|
||||
.map_err(|err| {
|
||||
eprintln!("slot sub err: {:#?}", err);
|
||||
}),
|
||||
);
|
||||
future::ok(())
|
||||
})
|
||||
.map_err(|_| ())
|
||||
});
|
||||
|
||||
// Wait for signature subscriptions
|
||||
let deadline = Instant::now() + Duration::from_secs(2);
|
||||
(0..transactions.len()).for_each(|_| {
|
||||
sent_receiver
|
||||
.recv_timeout(deadline.saturating_duration_since(Instant::now()))
|
||||
.unwrap();
|
||||
});
|
||||
ready_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
let mut transaction_count = rpc_client
|
||||
.get_transaction_count_with_commitment(CommitmentConfig::recent())
|
||||
.unwrap();
|
||||
let mut mint_balance = rpc_client
|
||||
.get_balance_with_commitment(&alice.pubkey(), CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value;
|
||||
assert!(mint_balance >= transactions.len() as u64);
|
||||
|
||||
// Send all transactions to tpu socket for processing
|
||||
transactions.iter().for_each(|tx| {
|
||||
transactions_socket
|
||||
.send_to(&bincode::serialize(&tx).unwrap(), leader_data.tpu)
|
||||
.send(&bincode::serialize(&tx).unwrap())
|
||||
.unwrap();
|
||||
});
|
||||
|
||||
// Track mint balance to know when transactions have completed
|
||||
let now = Instant::now();
|
||||
let expected_transaction_count = transaction_count + transactions.len() as u64;
|
||||
while transaction_count < expected_transaction_count && now.elapsed() < Duration::from_secs(5) {
|
||||
transaction_count = rpc_client
|
||||
.get_transaction_count_with_commitment(CommitmentConfig::recent())
|
||||
.unwrap();
|
||||
sleep(Duration::from_millis(200));
|
||||
let expected_mint_balance = mint_balance - transactions.len() as u64;
|
||||
while mint_balance != expected_mint_balance && now.elapsed() < Duration::from_secs(5) {
|
||||
mint_balance = rpc_client
|
||||
.get_balance_with_commitment(&alice.pubkey(), CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value;
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
|
||||
// Wait for all signature subscriptions
|
||||
@ -296,16 +301,16 @@ fn test_rpc_subscriptions() {
|
||||
let timeout = deadline.saturating_duration_since(Instant::now());
|
||||
match status_receiver.recv_timeout(timeout) {
|
||||
Ok((sig, result)) => {
|
||||
assert!(result.value.is_ok());
|
||||
assert!(result.value.err.is_none());
|
||||
assert!(signature_set.remove(&sig));
|
||||
}
|
||||
Err(_err) => {
|
||||
eprintln!(
|
||||
assert!(
|
||||
false,
|
||||
"recv_timeout, {}/{} signatures remaining",
|
||||
signature_set.len(),
|
||||
transactions.len()
|
||||
);
|
||||
assert!(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -3,28 +3,36 @@
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use log::*;
|
||||
use solana_core::storage_stage::{test_cluster_info, SLOTS_PER_TURN_TEST};
|
||||
use solana_core::storage_stage::{StorageStage, StorageState};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_ledger::blockstore_processor;
|
||||
use solana_ledger::entry;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger};
|
||||
use solana_core::{
|
||||
commitment::BlockCommitmentCache,
|
||||
storage_stage::{test_cluster_info, StorageStage, StorageState, SLOTS_PER_TURN_TEST},
|
||||
};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore::Blockstore,
|
||||
blockstore_processor, create_new_tmp_ledger, entry,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::message::Message;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::transaction::Transaction;
|
||||
use solana_storage_program::storage_instruction;
|
||||
use solana_storage_program::storage_instruction::StorageAccountType;
|
||||
use std::fs::remove_dir_all;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use solana_sdk::{
|
||||
clock::DEFAULT_TICKS_PER_SLOT,
|
||||
hash::Hash,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_storage_program::storage_instruction::{self, StorageAccountType};
|
||||
use std::{
|
||||
fs::remove_dir_all,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::channel,
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::sleep,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_storage_stage_process_account_proofs() {
|
||||
@ -52,6 +60,7 @@ mod tests {
|
||||
&[bank.clone()],
|
||||
vec![0],
|
||||
)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let cluster_info = test_cluster_info(&keypair.pubkey());
|
||||
|
||||
let (bank_sender, bank_receiver) = channel();
|
||||
@ -69,6 +78,7 @@ mod tests {
|
||||
&exit.clone(),
|
||||
&bank_forks,
|
||||
&cluster_info,
|
||||
block_commitment_cache,
|
||||
);
|
||||
bank_sender.send(vec![bank.clone()]).unwrap();
|
||||
|
||||
@ -171,6 +181,7 @@ mod tests {
|
||||
&[bank.clone()],
|
||||
vec![0],
|
||||
)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
|
||||
let cluster_info = test_cluster_info(&keypair.pubkey());
|
||||
let (bank_sender, bank_receiver) = channel();
|
||||
@ -188,6 +199,7 @@ mod tests {
|
||||
&exit.clone(),
|
||||
&bank_forks,
|
||||
&cluster_info,
|
||||
block_commitment_cache,
|
||||
);
|
||||
bank_sender.send(vec![bank.clone()]).unwrap();
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-crate-features"
|
||||
version = "1.1.0"
|
||||
version = "1.1.3"
|
||||
description = "Solana Crate Features"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -14,7 +14,7 @@ else
|
||||
fi
|
||||
|
||||
set -x
|
||||
find html/ -name \*.html -exec sed -i '' "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \;
|
||||
find html/ -name \*.html -exec sed -i "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \;
|
||||
if [[ -n $CI ]]; then
|
||||
find src/ -name \*.md -exec sed -i '' "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \;
|
||||
find src/ -name \*.md -exec sed -i "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \;
|
||||
fi
|
||||
|
BIN
docs/src/.gitbook/assets/ledger-live-enable-developer-mode.png
Normal file
Binary file not shown.
Size: 184 KiB
BIN
docs/src/.gitbook/assets/ledger-live-install-solana-app.png
Normal file
Binary file not shown.
Size: 195 KiB
@ -6,15 +6,19 @@
|
||||
* [Trust Wallet](wallet/trust-wallet.md)
|
||||
* [Ledger Live](wallet/ledger-live.md)
|
||||
* [Command-line Wallets](wallet/cli-wallets.md)
|
||||
* [Hardware Wallets](remote-wallet/README.md)
|
||||
* [Ledger Hardware Wallet](remote-wallet/ledger.md)
|
||||
* [Paper Wallet](paper-wallet/README.md)
|
||||
* [Paper Wallet Usage](paper-wallet/paper-wallet-usage.md)
|
||||
* [Hardware Wallets](remote-wallet/README.md)
|
||||
* [Ledger Hardware Wallet](remote-wallet/ledger.md)
|
||||
* [File System Wallet](file-system-wallet/README.md)
|
||||
* [Support / Troubleshooting](wallet/support.md)
|
||||
* [Command Line Guide](cli/README.md)
|
||||
* [Install the Solana Command Line Tool Suite](cli/install-solana-cli-tools.md)
|
||||
* [Generate Keys](cli/generate-keys.md)
|
||||
* [Command Line Conventions](cli/conventions.md)
|
||||
* [Choose a Cluster](cli/choose-a-cluster.md)
|
||||
* [Send and Receive Tokens](cli/transfer-tokens.md)
|
||||
* [Delegate Stake](cli/delegate-stake.md)
|
||||
* [Manage Stake Accounts](cli/manage-stake-accounts.md)
|
||||
* [Offline Signing](offline-signing/README.md)
|
||||
* [Durable Transaction Nonces](offline-signing/durable-nonce.md)
|
||||
* [Command-line Reference](cli/usage.md)
|
||||
|
@ -14,7 +14,6 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
|
||||
## Methods
|
||||
|
||||
* [confirmTransaction](jsonrpc-api.md#confirmtransaction)
|
||||
* [getAccountInfo](jsonrpc-api.md#getaccountinfo)
|
||||
* [getBalance](jsonrpc-api.md#getbalance)
|
||||
* [getBlockCommitment](jsonrpc-api.md#getblockcommitment)
|
||||
@ -22,6 +21,8 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
* [getClusterNodes](jsonrpc-api.md#getclusternodes)
|
||||
* [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock)
|
||||
* [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks)
|
||||
* [getConfirmedSignaturesForAddress](jsonrpc-api.md#getconfirmedsignaturesforaddress)
|
||||
* [getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransaction)
|
||||
* [getEpochInfo](jsonrpc-api.md#getepochinfo)
|
||||
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
|
||||
* [getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash)
|
||||
@ -33,7 +34,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
|
||||
* [getProgramAccounts](jsonrpc-api.md#getprogramaccounts)
|
||||
* [getRecentBlockhash](jsonrpc-api.md#getrecentblockhash)
|
||||
* [getSignatureStatus](jsonrpc-api.md#getsignaturestatus)
|
||||
* [getSignatureStatuses](jsonrpc-api.md#getsignaturestatuses)
|
||||
* [getSlot](jsonrpc-api.md#getslot)
|
||||
* [getSlotLeader](jsonrpc-api.md#getslotleader)
|
||||
* [getSlotsPerSegment](jsonrpc-api.md#getslotspersegment)
|
||||
@ -116,29 +117,6 @@ Many methods that take a commitment parameter return an RpcResponse JSON object
|
||||
|
||||
## JSON RPC API Reference
|
||||
|
||||
### confirmTransaction
|
||||
|
||||
Returns a transaction receipt
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `RpcResponse<bool>` - RpcResponse JSON object with `value` field set to Transaction status, boolean true if Transaction is confirmed
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"confirmTransaction", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":true},"id":1}
|
||||
```
|
||||
|
||||
### getAccountInfo
|
||||
|
||||
Returns all information associated with the account of provided Pubkey
|
||||
@ -298,12 +276,13 @@ The result field will be an object with the following fields:
|
||||
* `transactions: <array>` - an array of JSON objects containing:
|
||||
* `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta: <object>` - transaction status metadata object, containing `null` or:
|
||||
* `status: <object>` - Transaction status:
|
||||
* `"Ok": null` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L18)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError
|
||||
* `rewards: <array>` - an array of JSON objects containing:
|
||||
* `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
|
||||
* `lamports: <i64>`- number of reward lamports credited or debited by the account, as a i64
|
||||
@ -315,13 +294,13 @@ The result field will be an object with the following fields:
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "json"]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
|
||||
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "binary"]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
|
||||
```
|
||||
|
||||
#### Transaction Structure
|
||||
@ -368,6 +347,72 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m
|
||||
{"jsonrpc":"2.0","result":[5,6,7,8,9,10],"id":1}
|
||||
```
|
||||
|
||||
### getConfirmedSignaturesForAddress
|
||||
|
||||
Returns a list of all the confirmed signatures for transactions involving an address, within a specified Slot range. The maximum range allowed is 10,000 Slots.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - account address as base-58 encoded string
|
||||
* `<u64>` - start slot, inclusive
|
||||
* `<u64>` - end slot, inclusive
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be an array of:
|
||||
* `<string>` - transaction signature as base-58 encoded string
|
||||
|
||||
The signatures will be ordered based on the Slot in which they were confirmed, from lowest to highest Slot
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedSignaturesForAddress","params":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC", 0, 100]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4bJdGN8Tt2kLWZ3Fa1dpwPSEkXWWTSszPSf1rRVsCwNjxbbUdwTeiWtmi8soA26YmwnKD4aAxNp8ci1Gjpdv4gsr","4LQ14a7BYY27578Uj8LPCaVhSdJGLn9DJqnUJHpy95FMqdKf9acAhUhecPQNjNUy6VoNFUbvwYkPociFSf87cWbG"]},"id":1}
|
||||
```
|
||||
|
||||
### getConfirmedTransaction
|
||||
|
||||
Returns transaction details for a confirmed transaction
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - transaction signature as base-58 encoded string
|
||||
* `<string>` - (optional) encoding for the returned Transaction, either "json" or "binary". If not provided, the default encoding is JSON.
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be an object with the following fields:
|
||||
* `slot: <u64>` - the slot this transaction was processed in
|
||||
* `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta: <object>` - transaction status metadata object, containing `null` or:
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby", "json"]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"slot":430,"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}},"id":1}
|
||||
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby", "binary"]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"slot":430,"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}},"id":1}
|
||||
```
|
||||
|
||||
### getEpochInfo
|
||||
|
||||
Returns information about the current epoch
|
||||
@ -654,16 +699,18 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"burnPercent":50,"lamportsPerSignature":5000,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
|
||||
```
|
||||
|
||||
### getSignatureStatus
|
||||
### getSignatureStatuses
|
||||
|
||||
Returns the status of a given signature. This method is similar to [confirmTransaction](jsonrpc-api.md#confirmtransaction) but provides more resolution for error events.
|
||||
Returns the statuses of a list of signatures. Unless the
|
||||
`searchTransactionHistory` configuration parameter is included, this method only
|
||||
searches the recent status cache of signatures, which retains statuses for all
|
||||
active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<array>` - An array of transaction signatures to confirm, as base-58 encoded strings
|
||||
* `<object>` - (optional) Extended Rpc configuration, containing the following optional fields:
|
||||
* `commitment: <string>` - [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `searchTransactionHistory: <bool>` - whether to search the ledger transaction status cache, which may be expensive
|
||||
* `<object>` - (optional) Configuration object containing the following field:
|
||||
* `searchTransactionHistory: <bool>` - if true, a Solana node will search its ledger cache for any signatures not found in the recent status cache
|
||||
|
||||
#### Results:
|
||||
|
||||
@ -677,21 +724,25 @@ An array of:
|
||||
* `<object>`
|
||||
* `slot: <u64>` - The slot the transaction was processed
|
||||
* `confirmations: <usize | null>` - Number of blocks since signature confirmation, null if rooted
|
||||
* `status: <object>` - Transaction status
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":[["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW", "5j7s6NiJS3JAkvgkoc18WVAsiSaci2pxB2A6ueCJP4tprA2TFg9wSyTLeYouxPBJEMzJinENTkpA52YStRW5Dia7"]]]}' http://localhost:8899
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatuses", "params":[["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW", "5j7s6NiJS3JAkvgkoc18WVAsiSaci2pxB2A6ueCJP4tprA2TFg9wSyTLeYouxPBJEMzJinENTkpA52YStRW5Dia7"]]}' http://localhost:8899
|
||||
|
||||
// Request with configuration
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatuses", "params":[["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"], {"searchTransactionHistory": true}]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 72, "confirmations": 10, "status": {"Ok": null}}, null]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 72, "confirmations": 10, "err": null, "status": {"Ok": null}}, null]},"id":1}
|
||||
|
||||
// Result, first transaction rooted
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 48, "confirmations": null, "status": {"Ok": null}}, null]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 48, "confirmations": null, "err": null, "status": {"Ok": null}}, null]},"id":1}
|
||||
```
|
||||
|
||||
### getSlot
|
||||
@ -889,7 +940,7 @@ The result field will be a JSON object with the following fields:
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.1.0"},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.1.3"},"id":1}
|
||||
```
|
||||
|
||||
### getVoteAccounts
|
||||
@ -1200,7 +1251,7 @@ Subscribe to a transaction signature to receive notification when the transactio
|
||||
#### Notification Format:
|
||||
|
||||
```bash
|
||||
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": "Confirmed","subscription":0}}
|
||||
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": {"err": null}, "subscription":0}}
|
||||
```
|
||||
|
||||
### signatureUnsubscribe
|
||||
|
@ -9,3 +9,10 @@ known as the CLI. We use the command-line because it is the first place the
|
||||
Solana core team deploys new functionality. The command-line interface is not
|
||||
necessarily the easiest to use, but it provides the most direct, flexible, and
|
||||
secure access to your Solana accounts.
|
||||
|
||||
## Getting Started
|
||||
To get started using the Solana Command Line (CLI) tools:
|
||||
- [Install the Solana Tools](install-solana-cli-tools.md)
|
||||
- [Choose a Cluster](choose-a-cluster.md)
|
||||
- [Create a Wallet](../wallet/cli-wallets.md)
|
||||
- [Check out our CLI conventions](conventions.md)
|
||||
|
99
docs/src/cli/choose-a-cluster.md
Normal file
@ -0,0 +1,99 @@
|
||||
# Connecting to a Cluster
|
||||
|
||||
Solana maintains several different clusters with different purposes. Each
|
||||
cluster features a Solana-run gossip node that serves as an entrypoint to the
|
||||
cluster.
|
||||
|
||||
## Before you Begin
|
||||
|
||||
- Make sure you have first
|
||||
[installed the Solana command line tools](install-solana-cli-tools.md)
|
||||
|
||||
## Choose a Cluster
|
||||
|
||||
#### Mainnet Beta
|
||||
A permissionless, persistent cluster for early token holders and launch partners.
|
||||
No smart contracts or inflation.
|
||||
* Tokens that are issued on Mainnet Beta are **real** SOL
|
||||
* If you have paid money to purchase/be issued tokens, such as through our
|
||||
CoinList auction, these tokens will be transferred on Mainnet Beta.
|
||||
* Note: If you are using a non-command-line wallet such as
|
||||
[Trust Wallet](../wallet/trust-wallet.md),
|
||||
the wallet will always connect to Mainnet Beta.
|
||||
* Gossip entrypoint for Mainnet Beta: `mainnet-beta.solana.com:8001`
|
||||
* RPC URL for Mainnet Beta: `https://api.mainnet-beta.solana.com`
|
||||
|
||||
#### Devnet
|
||||
* Devnet serves as a playground for anyone who wants to take Solana for a
|
||||
test drive, as a user, token holder, app developer, or validator.
|
||||
* Application developers should target Devnet.
|
||||
* Potential validators should first target Devnet.
|
||||
* Key differences between Devnet and Mainnet Beta:
|
||||
* Devnet tokens are **not real**
|
||||
* Devnet includes a token faucet for airdrops for application testing
|
||||
* Devnet may be subject to ledger resets
|
||||
* Devnet typically runs a newer software version than Mainnet Beta
|
||||
* Devnet may be maintained by different validators than Mainnet Beta
|
||||
* Gossip entrypoint for Devnet: `devnet.solana.com:8001`
|
||||
* RPC URL for Devnet: `https://devnet.solana.com`
|
||||
|
||||
#### Testnet (Tour de SOL Cluster)
|
||||
* Testnet is where we stress test recent release features on a live
|
||||
cluster, with a particular focus on network performance, stability, and validator
|
||||
behavior.
|
||||
* The [Tour de SOL](../tour-de-sol/README.md) initiative runs on Testnet, where we
|
||||
encourage malicious behavior and attacks on the network to help us find and
|
||||
squash bugs or network vulnerabilities.
|
||||
* Testnet tokens are **not real**
|
||||
* Testnet may be subject to ledger resets.
|
||||
* Testnet typically runs a newer software release than both Devnet and
|
||||
Mainnet Beta
|
||||
* Testnet may be maintained by different validators than Mainnet Beta
|
||||
* Gossip entrypoint for Testnet: `testnet.solana.com:8001`
|
||||
* RPC URL for Testnet: `https://testnet.solana.com`
|
||||
|
||||
## Configure the Command-line
|
||||
|
||||
You can check what cluster the Solana CLI is currently targeting by
|
||||
running the following command:
|
||||
|
||||
```bash
|
||||
solana config get
|
||||
```
|
||||
|
||||
Use the `solana config set` command to target a particular cluster. After setting
|
||||
a cluster target, any future subcommands will send/receive information from that
|
||||
cluster.
|
||||
|
||||
##### Targeting Mainnet Beta
|
||||
```bash
|
||||
solana config set --url https://api.mainnet-beta.solana.com
|
||||
```
|
||||
|
||||
##### Targeting Devnet
|
||||
```bash
|
||||
solana config set --url https://devnet.solana.com
|
||||
```
|
||||
|
||||
##### Targeting Testnet
|
||||
```bash
|
||||
solana config set --url https://testnet.solana.com
|
||||
```
|
||||
|
||||
## Ensure Versions Match
|
||||
|
||||
Though not strictly necessary, the CLI will generally work best when its version
|
||||
matches the software version running on the cluster. To get the locally-installed
|
||||
CLI version, run:
|
||||
|
||||
```bash
|
||||
solana --version
|
||||
```
|
||||
|
||||
To get the cluster version, run:
|
||||
|
||||
```bash
|
||||
solana cluster-version
|
||||
```
|
||||
|
||||
Ensure the local CLI version is greater than or equal to the cluster version.
|
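As a convenience, you might print both versions side by side for a quick manual comparison. This is only a sketch that combines the two commands shown above; the exact output format of each command may vary by release:

```bash
# Print the local CLI version and the cluster version together
echo "local:   $(solana --version)"
echo "cluster: $(solana cluster-version)"
```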
73
docs/src/cli/conventions.md
Normal file
@ -0,0 +1,73 @@
|
||||
# Using Solana CLI
|
||||
|
||||
Before running any Solana CLI commands, let's go over some conventions that
|
||||
you will see across all commands. First, the Solana CLI is actually a collection
|
||||
of different commands for each action you might want to take. You can view the list
|
||||
of all possible commands by running:
|
||||
|
||||
```bash
|
||||
solana --help
|
||||
```
|
||||
|
||||
To zoom in on how to use a particular command, run:
|
||||
|
||||
```bash
|
||||
solana <COMMAND> --help
|
||||
```
|
||||
|
||||
where you replace the text `<COMMAND>` with the name of the command you want
|
||||
to learn more about.
|
||||
|
||||
The command's usage message will typically contain words such as `<AMOUNT>`,
|
||||
`<ACCOUNT_ADDRESS>` or `<KEYPAIR>`. Each word is a placeholder for the *type* of
|
||||
value you should supply when you run the command. For example, you can replace `<AMOUNT>`
|
||||
with a number such as `42` or `100.42`. You can replace `<ACCOUNT_ADDRESS>` with
|
||||
the base58 encoding of your public key, such as
|
||||
`9grmKMwTiZwUHSExjtbFzHLPTdWoXgcg1bZkhvwTrTww`.
|
||||
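As an illustration, here is a hypothetical invocation with the placeholder filled in. The `solana balance` subcommand is used here only as an assumed example of a command that takes an `<ACCOUNT_ADDRESS>`:

```bash
# Query the balance of the example address shown above
solana balance 9grmKMwTiZwUHSExjtbFzHLPTdWoXgcg1bZkhvwTrTww
```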
|
||||
## Keypair conventions
|
||||
|
||||
Many commands using the CLI tools require a value for a `<KEYPAIR>`. The value
|
||||
you should use for the keypair depends on what type of
|
||||
[command line wallet you created](../wallet/cli-wallets.md).
|
||||
|
||||
For example, to display any wallet's address
|
||||
(also known as the keypair's pubkey), the CLI help document shows:
|
||||
```bash
|
||||
solana-keygen pubkey <KEYPAIR>
|
||||
```
|
||||
|
||||
Below, we show what you should put in `<KEYPAIR>` depending
|
||||
on your wallet type.
|
||||
|
||||
#### Paper Wallet
|
||||
|
||||
In a paper wallet, the keypair is securely derived from the seed words and
|
||||
optional passphrase you entered when the wallet was created. To use a paper wallet
|
||||
keypair anywhere the `<KEYPAIR>` text is shown in examples or help documents,
|
||||
enter the word `ASK` and the program will prompt you to enter your seed words
|
||||
when you run the command.
|
||||
|
||||
To display the wallet address of a Paper Wallet:
|
||||
```bash
|
||||
solana-keygen pubkey ASK
|
||||
```
|
||||
#### File System Wallet
|
||||
|
||||
With a file system wallet, the keypair is stored in a file on your computer.
|
||||
Replace `<KEYPAIR>` with the complete file path to the keypair file.
|
||||
|
||||
For example, if the file system keypair file location is
|
||||
`/home/solana/my_wallet.json`, to display the address, do:
|
||||
```bash
|
||||
solana-keygen pubkey /home/solana/my_wallet.json
|
||||
```
|
||||
|
||||
#### Hardware Wallet
|
||||
|
||||
If you chose a hardware wallet, use your
|
||||
[keypair URL](../remote-wallet/README.md#specify-a-hardware-wallet-key),
|
||||
such as `usb://ledger?key=0`.
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger?key=0
|
||||
```
|
@ -33,11 +33,13 @@ want to perform an action on the stake account you create next.
|
||||
Now, create a stake account:
|
||||
|
||||
```bash
|
||||
solana create-stake-account --from <KEYPAIR> stake-account.json <AMOUNT> --stake-authority <KEYPAIR> --withdraw-authority <KEYPAIR>
|
||||
solana create-stake-account --from <KEYPAIR> stake-account.json <AMOUNT> \
|
||||
--stake-authority <KEYPAIR> --withdraw-authority <KEYPAIR> \
|
||||
--fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
`<AMOUNT>` tokens are transferred from the account at `<KEYPAIR>` to a new
|
||||
stake account at the public key of stake-account.json.
|
||||
`<AMOUNT>` tokens are transferred from the account at the "from" `<KEYPAIR>` to
|
||||
a new stake account at the public key of stake-account.json.
|
||||
|
||||
The stake-account.json file can now be discarded. To authorize additional
|
||||
actions, you will use the `--stake-authority` or `withdraw-authority` keypair,
|
||||
@ -72,7 +74,9 @@ Stake and withdraw authorities can be set when creating an account via the
|
||||
run:
|
||||
|
||||
```bash
|
||||
solana stake-authorize <STAKE_ACCOUNT_ADDRESS> --stake-authority <KEYPAIR> --new-stake-authority <PUBKEY>
|
||||
solana stake-authorize <STAKE_ACCOUNT_ADDRESS> \
|
||||
--stake-authority <KEYPAIR> --new-stake-authority <PUBKEY> \
|
||||
--fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
This will use the existing stake authority `<KEYPAIR>` to authorize a new stake
|
||||
@ -87,7 +91,8 @@ addresses can be cumbersome. Fortunately, you can derive stake addresses using
|
||||
the `--seed` option:
|
||||
|
||||
```bash
|
||||
solana create-stake-account --from <KEYPAIR> <STAKE_ACCOUNT_KEYPAIR> --seed <STRING> <AMOUNT> --stake-authority <PUBKEY> --withdraw-authority <PUBKEY>
|
||||
solana create-stake-account --from <KEYPAIR> <STAKE_ACCOUNT_KEYPAIR> --seed <STRING> <AMOUNT> \
|
||||
--stake-authority <PUBKEY> --withdraw-authority <PUBKEY> --fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
`<STRING>` is an arbitrary string up to 32 bytes, but will typically be a
|
||||
@ -122,12 +127,13 @@ is the vote account address. Choose a validator and use its vote account
|
||||
address in `solana delegate-stake`:
|
||||
|
||||
```bash
|
||||
solana delegate-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <VOTE_ACCOUNT_ADDRESS>
|
||||
solana delegate-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <VOTE_ACCOUNT_ADDRESS> \
|
||||
--fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
`<KEYPAIR>` authorizes the operation on the account with address
|
||||
`<STAKE_ACCOUNT_ADDRESS>`. The stake is delegated to the vote account with
|
||||
address `<VOTE_ACCOUNT_ADDRESS>`.
|
||||
The stake authority `<KEYPAIR>` authorizes the operation on the account with
|
||||
address `<STAKE_ACCOUNT_ADDRESS>`. The stake is delegated to the vote account
|
||||
with address `<VOTE_ACCOUNT_ADDRESS>`.
|
||||
|
||||
After delegating stake, use `solana stake-account` to observe the changes
|
||||
to the stake account:
|
||||
@ -155,11 +161,12 @@ Once delegated, you can undelegate stake with the `solana deactivate-stake`
|
||||
command:
|
||||
|
||||
```bash
|
||||
solana deactivate-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS>
|
||||
solana deactivate-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> \
|
||||
--fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
`<KEYPAIR>` authorizes the operation on the account with address
|
||||
`<STAKE_ACCOUNT_ADDRESS>`.
|
||||
The stake authority `<KEYPAIR>` authorizes the operation on the account
|
||||
with address `<STAKE_ACCOUNT_ADDRESS>`.
|
||||
|
||||
Note that stake takes several epochs to "cool down". Attempts to delegate stake
|
||||
in the cool down period will fail.
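To watch warmup and cooldown progress across recent epochs, the cluster-wide stake history can be inspected; this assumes the `solana stake-history` subcommand referenced elsewhere in these docs:

```bash
solana stake-history
```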
|
||||
@ -169,12 +176,13 @@ in the cool down period will fail.
|
||||
Transfer tokens out of a stake account with the `solana withdraw-stake` command:
|
||||
|
||||
```bash
|
||||
solana withdraw-stake --withdraw-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <RECIPIENT_ADDRESS> <AMOUNT>
|
||||
solana withdraw-stake --withdraw-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <RECIPIENT_ADDRESS> <AMOUNT> \
|
||||
--fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
`<STAKE_ACCOUNT_ADDRESS>` is the existing stake account, `<KEYPAIR>` is the
|
||||
withdraw authority, and `<AMOUNT>` is the number of tokens to transfer to
|
||||
`<RECIPIENT_ADDRESS>`.
|
||||
`<STAKE_ACCOUNT_ADDRESS>` is the existing stake account, the withdraw authority
|
||||
`<KEYPAIR>` authorizes the withdrawal, and `<AMOUNT>` is the number of tokens
|
||||
to transfer to `<RECIPIENT_ADDRESS>`.
|
||||
|
||||
## Split Stake
|
||||
|
||||
@ -184,12 +192,14 @@ currently staked, cooling down, or locked up. To transfer tokens from an
|
||||
existing stake account to a new one, use the `solana split-stake` command:
|
||||
|
||||
```bash
|
||||
solana split-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <NEW_STAKE_ACCOUNT_KEYPAIR> <AMOUNT>
|
||||
solana split-stake --stake-authority <KEYPAIR> <STAKE_ACCOUNT_ADDRESS> <NEW_STAKE_ACCOUNT_KEYPAIR> <AMOUNT> \
|
||||
--fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
`<STAKE_ACCOUNT_ADDRESS>` is the existing stake account, `<KEYPAIR>` is the
|
||||
stake authority, `<NEW_STAKE_ACCOUNT_KEYPAIR>` is the keypair for the new account,
|
||||
and `<AMOUNT>` is the number of tokens to transfer to the new account.
|
||||
`<STAKE_ACCOUNT_ADDRESS>` is the existing stake account, the stake authority
|
||||
`<KEYPAIR>` authorizes the operation, `<NEW_STAKE_ACCOUNT_KEYPAIR>` is the
|
||||
keypair for the new account, and `<AMOUNT>` is the number of tokens to transfer
|
||||
to the new account.
|
||||
|
||||
To split a stake account into a derived account address, use the `--seed`
|
||||
option. See
|
||||
|
@ -1,90 +0,0 @@
|
||||
# Generate a Keypair and its Public Key
|
||||
|
||||
In this section, we will generate a keypair, query it for its public key,
|
||||
and verify you control its private key. Before you begin, you will need
|
||||
to:
|
||||
|
||||
* [Install the Solana Tool Suite](install-solana-cli-tools.md)
|
||||
* [Choose a Command-line wallet](../wallet/cli-wallets.md)
|
||||
|
||||
## Generate an FS Wallet Keypair
|
||||
|
||||
Use Solana's command-line tool `solana-keygen` to generate keypair files. For
|
||||
example, run the following from a command-line shell:
|
||||
|
||||
```bash
|
||||
mkdir ~/my-solana-wallet
|
||||
solana-keygen new -o ~/my-solana-wallet/my-keypair.json
|
||||
```
|
||||
|
||||
If you view the file, you will see a long list of numbers, such as:
|
||||
|
||||
```text
|
||||
[42,200,155,187,52,228,32,9,179,129,192,196,149,41,177,47,87,228,5,19,70,82,170,6,142,114,68,85,124,34,165,216,110,186,177,254,198,143,235,59,173,59,17,250,142,32,66,162,130,62,53,252,48,33,148,38,149,17,81,154,95,178,163,164]
|
||||
```
|
||||
|
||||
This file contains your **unencrypted** keypair. In fact, even if you specify
|
||||
a password, that password applies to the recovery seed phrase, not the file. Do
|
||||
not share this file with others. Anyone with access to this file will have access
|
||||
to all tokens sent to its public key. Instead, you should share only its public
|
||||
key. To display its public key, run:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey ~/my-solana-wallet/my-keypair.json
|
||||
```
|
||||
|
||||
It will output a string of characters, such as:
|
||||
|
||||
```text
|
||||
ErRr1caKzK8L8nn4xmEWtimYRiTCAZXjBtVphuZ5vMKy
|
||||
```
|
||||
|
||||
This is the public key corresponding to the keypair in `~/my-solana-wallet/my-keypair.json`.
|
||||
To verify you hold the private key for a given public key, use `solana-keygen verify`:
|
||||
|
||||
```bash
|
||||
solana-keygen verify <PUBKEY> ~/my-solana-wallet/my-keypair.json
|
||||
```
|
||||
|
||||
where `<PUBKEY>` is the public key output from the previous command.
|
||||
The command will output "Success" if the given public key matches the
|
||||
one in your keypair file, and "Failed" otherwise.
|
||||
|
||||
## Generate a Paper Wallet Seed Phrase
|
||||
|
||||
See [Creating a Paper Wallet](../paper-wallet/paper-wallet-usage.md#creating-a-paper-wallet).
|
||||
|
||||
To verify you control the private key of that public key, use `solana-keygen verify`:
|
||||
|
||||
```bash
|
||||
solana-keygen verify <PUBKEY> ASK
|
||||
```
|
||||
|
||||
where `<PUBKEY>` is the keypair's public key and the keyword `ASK` tells the
|
||||
command to prompt you for the keypair's seed phrase. Note that for security
|
||||
reasons, your seed phrase will not be displayed as you type. After entering your
|
||||
seed phrase, the command will output "Success" if the given public key matches the
|
||||
keypair generated from your seed phrase, and "Failed" otherwise.
|
||||
|
||||
## Generate a Hardware Wallet Keypair
|
||||
|
||||
Keypairs are automatically derived when you query a hardware wallet with a
|
||||
[keypair URL](../remote-wallet/README.md#specify-a-hardware-wallet-key).
|
||||
|
||||
Once you have your keypair URL, use `solana-keygen pubkey` to query the hardware
|
||||
wallet for the keypair's public key:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey <KEYPAIR>
|
||||
```
|
||||
|
||||
where `<KEYPAIR>` is the keypair URL.
|
||||
|
||||
To verify you control the private key of that public key, use `solana-keygen verify`:
|
||||
|
||||
```bash
|
||||
solana-keygen verify <PUBKEY> <KEYPAIR>
|
||||
```
|
||||
|
||||
The command will output "Success" if the given public key matches the
|
||||
one at your keypair URL, and "Failed" otherwise.
|
@ -77,55 +77,3 @@ prebuilt binaries:
|
||||
```bash
|
||||
solana-install init
|
||||
```
|
||||
|
||||
# Choosing a Cluster
|
||||
|
||||
Solana maintains several clusters, each featuring a Solana-owned validator
|
||||
that serves as an entrypoint to the cluster.
|
||||
|
||||
Current cluster entrypoints:
|
||||
|
||||
* Devnet: devnet.solana.com
|
||||
* Tour de SOL: tds.solana.com
|
||||
|
||||
Application developers should target Devnet. Key differences
|
||||
between Devnet and what will be Mainnet:
|
||||
|
||||
* Devnet tokens are not real
|
||||
* Devnet includes a token faucet for application testing
|
||||
* Devnet may be subject to ledger resets
|
||||
* Devnet typically runs a newer software version than mainnet
|
||||
* Devnet may be maintained by different validators than mainnet
|
||||
|
||||
## Configure the Command-line
|
||||
|
||||
You can check what cluster the Solana CLI is currently targeting by
|
||||
running the following command:
|
||||
|
||||
```bash
|
||||
solana config get
|
||||
```
|
||||
|
||||
Use the `solana config set` command to target a different cluster.
|
||||
For example, for Devnet, use:
|
||||
|
||||
```bash
|
||||
solana config set --url http://devnet.solana.com
|
||||
```
|
||||
|
||||
## Ensure Versions Match
|
||||
|
||||
Though not strictly necessary, the CLI will generally work best when its version
|
||||
matches the software version running on the cluster. To get the CLI version, run:
|
||||
|
||||
```bash
|
||||
solana --version
|
||||
```
|
||||
|
||||
To get the cluster version, run:
|
||||
|
||||
```bash
|
||||
solana cluster-version
|
||||
```
|
||||
|
||||
Ensure the CLI version is greater than or equal to the cluster version.
|
||||
|
76
docs/src/cli/manage-stake-accounts.md
Normal file
@ -0,0 +1,76 @@
|
||||
# Manage Stake Accounts
|
||||
|
||||
If you want to delegate stake to many different validators, you will need
|
||||
to create a separate stake account for each. If you follow the convention
|
||||
of creating the first stake account at seed "0", the second at "1", the
|
||||
third at "2", and so on, then the `solana-stake-accounts` tool will allow
|
||||
you to operate on all accounts with single invocations. You can use it to
|
||||
sum up the balances of all accounts, move accounts to a new wallet, or set
|
||||
new authorities.
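For example, once a few derived accounts exist, single invocations like the following (detailed under Usage below; the base public key and account count are placeholders) operate on all of them at once:

```bash
# List the derived stake addresses, then sum their balances
solana-stake-accounts addresses <BASE_PUBKEY> --num-accounts 3
solana-stake-accounts balance <BASE_PUBKEY> --num-accounts 3
```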
|
||||
|
||||
## Usage
|
||||
|
||||
### Create a stake account
|
||||
|
||||
Create and fund a derived stake account at the stake authority public key:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts new <FUNDING_KEYPAIR> <BASE_KEYPAIR> <AMOUNT> \
|
||||
--stake-authority <PUBKEY> --withdraw-authority <PUBKEY> \
|
||||
--fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
### Count accounts
|
||||
|
||||
Count the number of derived accounts:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts count <BASE_PUBKEY>
|
||||
```
|
||||
|
||||
### Get stake account balances
|
||||
|
||||
Sum the balance of derived stake accounts:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts balance <BASE_PUBKEY> --num-accounts <NUMBER>
|
||||
```
|
||||
|
||||
### Get stake account addresses
|
||||
|
||||
List the address of each stake account derived from the given public key:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts addresses <BASE_PUBKEY> --num-accounts <NUMBER>
|
||||
```
|
||||
|
||||
### Set new authorities
|
||||
|
||||
Set new authorities on each derived stake account:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts authorize <BASE_PUBKEY> \
|
||||
--stake-authority <KEYPAIR> --withdraw-authority <KEYPAIR> \
|
||||
--new-stake-authority <PUBKEY> --new-withdraw-authority <PUBKEY> \
|
||||
--num-accounts <NUMBER> --fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
### Relocate stake accounts
|
||||
|
||||
Relocate stake accounts:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts rebase <BASE_PUBKEY> <NEW_BASE_KEYPAIR> \
|
||||
--stake-authority <KEYPAIR> --num-accounts <NUMBER> \
|
||||
--fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
To atomically rebase and authorize each stake account, use the 'move'
|
||||
command:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts move <BASE_PUBKEY> <NEW_BASE_KEYPAIR> \
|
||||
--stake-authority <KEYPAIR> --withdraw-authority <KEYPAIR> \
|
||||
--new-stake-authority <PUBKEY> --new-withdraw-authority <PUBKEY> \
|
||||
--num-accounts <NUMBER> --fee-payer <KEYPAIR>
|
||||
```
|
@ -1,94 +1,79 @@
|
||||
# Send and Receive Tokens
|
||||
This page describes how to receive and send SOL tokens using the command line
|
||||
tools with a command line wallet such as a [paper wallet](../paper-wallet/README.md),
|
||||
a [file system wallet](../file-system-wallet/README.md), or a
|
||||
[hardware wallet](../remote-wallet/README.md). Before you begin, make sure
|
||||
you have created a wallet and have access to its address (pubkey) and the
|
||||
signing keypair. Check out our
|
||||
[conventions for entering keypairs for different wallet types](../cli/conventions.md#keypair-conventions).
|
||||
|
||||
## Receive Tokens
|
||||
|
||||
To receive tokens, you will need an address for others to send tokens to. In
|
||||
Solana, an address is the public key of a keypair. There are a variety
|
||||
of techniques for generating keypairs. The method you choose will depend on how
|
||||
you choose to store keypairs. Keypairs are stored in wallets. Before receiving
|
||||
tokens, you will need to [choose a wallet](../wallet/cli-wallets.md) and
|
||||
[generate keys](generate-keys.md). Once completed, you should have a public key
|
||||
for each keypair you generated. The public key is a long string of base58
|
||||
characters. Its length varies from 32 to 44 characters.
|
||||
|
||||
### Using Solana CLI
|
||||
|
||||
Before running any Solana CLI commands, let's go over some conventions that
|
||||
you will see across all commands. First, the Solana CLI is actually a collection
|
||||
of different commands for each action you might want to take. You can view the list
|
||||
of all possible commands by running:
|
||||
|
||||
```bash
|
||||
solana --help
|
||||
```
|
||||
|
||||
To zoom in on how to use a particular command, run:
|
||||
|
||||
```bash
|
||||
solana <COMMAND> --help
|
||||
```
|
||||
|
||||
where you replace the text `<COMMAND>` with the name of the command you want
|
||||
to learn more about.
|
||||
|
||||
The command's usage message will typically contain words such as `<AMOUNT>`,
|
||||
`<ACCOUNT_ADDRESS>` or `<KEYPAIR>`. Each word is a placeholder for the *type* of
|
||||
value you should run the command with. For example, you can replace `<AMOUNT>`
|
||||
with a number such as `42` or `100.42`. You can replace `<ACCOUNT_ADDRESS>` with
|
||||
the base58 encoding of your public key. For `<KEYPAIR>`, it depends on what type
|
||||
of wallet you chose. If you chose a file system wallet, the keypair file path might be
|
||||
`~/my-solana-wallet/my-keypair.json`. If you chose a paper wallet, use the
|
||||
keyword `ASK`, and the Solana CLI will prompt you for your seed phrase. If
|
||||
you chose a hardware wallet, use your keypair URL, such as `usb://ledger?key=0`.
|
||||
|
||||
### Test-drive your Public Keys
|
||||
## Testing your Wallet
|
||||
|
||||
Before sharing your public key with others, you may want to first ensure the
|
||||
key is valid and that you indeed hold the corresponding private key.
|
||||
|
||||
Try and *airdrop* yourself some play tokens on the developer testnet, called
|
||||
Devnet:
|
||||
In this example, we will create a second wallet in addition to your first wallet,
|
||||
and then transfer some tokens to it. This will confirm that you can send and
|
||||
receive tokens on your wallet type of choice.
|
||||
|
||||
This test example uses our Developer Testnet, called devnet. Tokens issued
|
||||
on devnet have **no** value, so don't worry if you lose them.
|
||||
|
||||
#### Airdrop some tokens to get started
|
||||
|
||||
First, *airdrop* yourself some play tokens on the devnet.
|
||||
|
||||
```bash
|
||||
solana airdrop 10 <RECIPIENT_ACCOUNT_ADDRESS> --url http://devnet.solana.com
|
||||
solana airdrop 10 <RECIPIENT_ACCOUNT_ADDRESS> --url https://devnet.solana.com
|
||||
```
|
||||
|
||||
where you replace the text `<RECIPIENT_ACCOUNT_ADDRESS>` with your base58-encoded
|
||||
public key.
|
||||
public key/wallet address.
|
||||
|
||||
#### Check your balance
|
||||
|
||||
Confirm the airdrop was successful by checking the account's balance.
|
||||
It should output `10 SOL`:
|
||||
|
||||
```bash
|
||||
solana balance <ACCOUNT_ADDRESS> --url http://devnet.solana.com
|
||||
solana balance <ACCOUNT_ADDRESS> --url https://devnet.solana.com
|
||||
```
|
||||
|
||||
Next, prove that you own those tokens by transferring them. The Solana cluster
|
||||
will only accept the transfer if you sign the transaction with the private
|
||||
key corresponding to the sender's public key in the transaction.
|
||||
#### Create a second wallet address
|
||||
|
||||
First, we will need a public key to receive our tokens. Create a second
|
||||
We will need a new address to receive our tokens. Create a second
|
||||
keypair and record its pubkey:
|
||||
|
||||
```bash
|
||||
solana-keygen new --no-passphrase --no-outfile
|
||||
```
|
||||
|
||||
The output will contain the public key after the text `pubkey:`. Copy the
|
||||
public key. We will use it in the next step.
|
||||
The output will contain the address after the text `pubkey:`. Copy the
|
||||
address. We will use it in the next step.
|
||||
|
||||
```text
|
||||
============================================================================
|
||||
pubkey: GKvqsuNcnwWqPzzuhLmGi4rzzh55FhJtGizkhHaEJqiV
|
||||
============================================================================
|
||||
```
|
||||
|
||||
You can also create a second (or more) wallet of any type:
|
||||
[paper](../paper-wallet/paper-wallet-usage.md#creating-multiple-paper-wallet-addresses),
|
||||
[file system](../file-system-wallet/README.md#creating-multiple-file-system-wallet-addresses),
|
||||
or [hardware](../remote-wallet/README.md#multiple-addresses-on-a-single-hardware-wallet).
|
||||
|
||||
#### Transfer tokens from your first wallet to the second address
|
||||
|
||||
Next, prove that you own the airdropped tokens by transferring them.
|
||||
The Solana cluster will only accept the transfer if you sign the transaction
|
||||
with the private key corresponding to the sender's public key in the
|
||||
transaction.
|
||||
|
||||
```bash
|
||||
solana transfer --from <SENDER_KEYPAIR> <RECIPIENT_ACCOUNT_ADDRESS> 5 --url http://devnet.solana.com
|
||||
solana transfer --from <KEYPAIR> <RECIPIENT_ACCOUNT_ADDRESS> 5 --url https://devnet.solana.com --fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
where you replace `<SENDER_KEYPAIR>` with the path to a keypair in your wallet,
|
||||
and replace `<RECIPIENT_ACCOUNT_ADDRESS>` with the output of `solana-keygen new` above.
|
||||
where you replace `<KEYPAIR>` with the path to a keypair in your first wallet,
|
||||
and replace `<RECIPIENT_ACCOUNT_ADDRESS>` with the address of your second
|
||||
wallet.
|
||||
|
||||
Confirm the updated balances with `solana balance`:
|
||||
|
||||
@ -99,6 +84,58 @@ solana balance <ACCOUNT_ADDRESS> --url http://devnet.solana.com
|
||||
where `<ACCOUNT_ADDRESS>` is either the public key from your keypair or the
|
||||
recipient's public key.
|
||||
|
||||
#### Full example of test transfer
|
||||
```bash
|
||||
$ solana-keygen new --outfile my_solana_wallet.json # Creating my first wallet, a file system wallet
|
||||
Generating a new keypair
|
||||
For added security, enter a passphrase (empty for no passphrase):
|
||||
Wrote new keypair to my_solana_wallet.json
|
||||
==========================================================================
|
||||
pubkey: DYw8jCTfwHNRJhhmFcbXvVDTqWMEVFBX6ZKUmG5CNSKK # Here is the address of the first wallet
|
||||
==========================================================================
|
||||
Save this seed phrase to recover your new keypair:
|
||||
width enhance concert vacant ketchup eternal spy craft spy guard tag punch # If this was a real wallet, never share these words on the internet like this!
|
||||
==========================================================================
|
||||
|
||||
$ solana airdrop 10 DYw8jCTfwHNRJhhmFcbXvVDTqWMEVFBX6ZKUmG5CNSKK --url https://devnet.solana.com # Airdropping 10 SOL to my wallet's address/pubkey
|
||||
Requesting airdrop of 10 SOL from 35.233.193.70:9900
|
||||
10 SOL
|
||||
|
||||
$ solana balance DYw8jCTfwHNRJhhmFcbXvVDTqWMEVFBX6ZKUmG5CNSKK --url https://devnet.solana.com # Check the address's balance
|
||||
10 SOL
|
||||
|
||||
$ solana-keygen new --no-outfile # Creating a second wallet, a paper wallet
|
||||
Generating a new keypair
|
||||
For added security, enter a passphrase (empty for no passphrase):
|
||||
====================================================================
|
||||
pubkey: 7S3P4HxJpyyigGzodYwHtCxZyUQe9JiBMHyRWXArAaKv # Here is the address of the second, paper, wallet.
|
||||
====================================================================
|
||||
Save this seed phrase to recover your new keypair:
|
||||
clump panic cousin hurt coast charge engage fall eager urge win love # If this was a real wallet, never share these words on the internet like this!
|
||||
====================================================================
|
||||
|
||||
$ solana transfer --from my_solana_wallet.json 7S3P4HxJpyyigGzodYwHtCxZyUQe9JiBMHyRWXArAaKv 5 --url https://devnet.solana.com --fee-payer my_solana_wallet.json # Transferring tokens to the public address of the paper wallet
|
||||
3gmXvykAd1nCQQ7MjosaHLf69Xyaqyq1qw2eu1mgPyYXd5G4v1rihhg1CiRw35b9fHzcftGKKEu4mbUeXY2pEX2z # This is the transaction signature
|
||||
|
||||
$ solana balance DYw8jCTfwHNRJhhmFcbXvVDTqWMEVFBX6ZKUmG5CNSKK --url https://devnet.solana.com
|
||||
4.999995 SOL # The sending account has slightly less than 5 SOL remaining due to the 0.000005 SOL transaction fee payment
|
||||
|
||||
$ solana balance 7S3P4HxJpyyigGzodYwHtCxZyUQe9JiBMHyRWXArAaKv --url https://devnet.solana.com
|
||||
5 SOL # The second wallet has now received the 5 SOL transfer from the first wallet
|
||||
|
||||
```
|
||||
|
||||
## Receive Tokens
|
||||
|
||||
To receive tokens, you will need an address for others to send tokens to. In
|
||||
Solana, the wallet address is the public key of a keypair. There are a variety
|
||||
of techniques for generating keypairs. The method you choose will depend on how
|
||||
you choose to store keypairs. Keypairs are stored in wallets. Before receiving
|
||||
tokens, you will need to [create a wallet](../wallet/cli-wallets.md).
|
||||
Once completed, you should have a public key
|
||||
for each keypair you generated. The public key is a long string of base58
|
||||
characters. Its length varies from 32 to 44 characters.
|
||||
|
||||
## Send Tokens
|
||||
|
||||
If you already hold SOL and want to send tokens to someone, you will need
|
||||
@ -107,7 +144,7 @@ tokens to transfer. Once you have that collected, you can transfer tokens
|
||||
with the `solana transfer` command:
|
||||
|
||||
```bash
|
||||
solana transfer --from <SENDER_KEYPAIR> <RECIPIENT_ACCOUNT_ADDRESS> <AMOUNT>
|
||||
solana transfer --from <KEYPAIR> <RECIPIENT_ACCOUNT_ADDRESS> <AMOUNT> --fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
Confirm the updated balances with `solana balance`:
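For example (the address placeholder follows the same conventions as above):

```bash
solana balance <ACCOUNT_ADDRESS>
```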
|
||||
|
64
docs/src/file-system-wallet/README.md
Normal file
@ -0,0 +1,64 @@
|
||||
# File System Wallet
|
||||
|
||||
This document describes how to create and use a file system wallet with the
|
||||
Solana CLI tools. A file system wallet exists as an unencrypted keypair file
|
||||
on your computer system's filesystem.
|
||||
|
||||
{% hint style="info" %}
|
||||
File system wallets are the **least secure** method of storing SOL tokens.
|
||||
Storing large amounts of tokens in a file system wallet is **not recommended**.
|
||||
{% endhint %}
|
||||
|
||||
## Before you Begin
|
||||
Make sure you have
|
||||
[installed the Solana Command Line Tools](../cli/install-solana-cli-tools.md)
|
||||
|
||||
## Generate a File System Wallet Keypair
|
||||
|
||||
Use Solana's command-line tool `solana-keygen` to generate keypair files. For
|
||||
example, run the following from a command-line shell:
|
||||
|
||||
```bash
|
||||
mkdir ~/my-solana-wallet
|
||||
solana-keygen new --outfile ~/my-solana-wallet/my-keypair.json
|
||||
```
|
||||
|
||||
This file contains your **unencrypted** keypair. In fact, even if you specify
|
||||
a password, that password applies to the recovery seed phrase, not the file. Do
|
||||
not share this file with others. Anyone with access to this file will have access
|
||||
to all tokens sent to its public key. Instead, you should share only its public
|
||||
key. To display its public key, run:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey ~/my-solana-wallet/my-keypair.json
|
||||
```
|
||||
|
||||
It will output a string of characters, such as:
|
||||
|
||||
```text
|
||||
ErRr1caKzK8L8nn4xmEWtimYRiTCAZXjBtVphuZ5vMKy
|
||||
```
|
||||
|
||||
This is the public key corresponding to the keypair in
|
||||
`~/my-solana-wallet/my-keypair.json`. The public key of the keypair file is
|
||||
your *wallet address*.
|
||||
|
||||
## Verify your Address against your Keypair file
|
||||
|
||||
To verify you hold the private key for a given address, use
|
||||
`solana-keygen verify`:
|
||||
|
||||
```bash
|
||||
solana-keygen verify <PUBKEY> ~/my-solana-wallet/my-keypair.json
|
||||
```
|
||||
|
||||
where `<PUBKEY>` is replaced with your wallet address.
|
||||
The command will output "Success" if the given address matches the
|
||||
one in your keypair file, and "Failed" otherwise.
|
||||
|
||||
## Creating Multiple File System Wallet Addresses
|
||||
You can create as many wallet addresses as you like. Simply re-run the
|
||||
steps in [Generate a File System Wallet](#generate-a-file-system-wallet-keypair)
|
||||
and make sure to use a new filename or path with the `--outfile` argument.
|
||||
Multiple wallet addresses can be useful if you want to transfer tokens between
|
||||
your own accounts for different purposes.
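For example, a second keypair could be generated with an illustrative filename such as:

```bash
solana-keygen new --outfile ~/my-solana-wallet/my-second-keypair.json
```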
|
@ -52,9 +52,20 @@ solana-keygen new --no-outfile
|
||||
|
||||
{% hint style="warning" %}
|
||||
If the `--no-outfile` flag is **omitted**, the default behavior is to write the
|
||||
keypair to `~/.config/solana/id.json`
|
||||
keypair to `~/.config/solana/id.json`, resulting in a
|
||||
[file system wallet](../file-system-wallet/README.md)
|
||||
{% endhint %}
|
||||
|
||||
The output of this command will display a line like this:
|
||||
```bash
|
||||
pubkey: 9ZNTfG4NyQgxy2SWjSiQoUyBPEvXT2xo7fKc5hPYYJ7b
|
||||
```
|
||||
|
||||
The value shown after `pubkey:` is your *wallet address*.
|
||||
|
||||
**Note:** In working with paper wallets and file system wallets, the terms "pubkey"
|
||||
and "wallet address" are sometimes used interchangably.
|
||||
|
||||
{% hint style="info" %}
|
||||
For added security, increase the seed phrase word count using the `--word-count`
|
||||
argument
|
||||
@ -92,8 +103,12 @@ validation.
|
||||
solana-keygen pubkey ASK --skip-seed-phrase-validation
|
||||
```
|
||||
|
||||
After entering your seed phrase with `solana-keygen pubkey ASK` the console
|
||||
will display a string of base-58 characters. This is the *wallet address*
|
||||
associated with your seed phrase.
|
||||
|
||||
{% hint style="info" %}
|
||||
Copy the derived public key to a USB stick for easy usage on networked computers
|
||||
Copy the derived address to a USB stick for easy usage on networked computers
|
||||
{% endhint %}
|
||||
|
||||
{% hint style="info" %}
|
||||
@ -109,87 +124,18 @@ solana-keygen pubkey --help
|
||||
|
||||
## Verifying the Keypair
|
||||
|
||||
A keypair can be verified by following a variation on the
|
||||
[offline signing](../offline-signing/README.md) procedure with a dummy transaction.
|
||||
To verify you control the private key of a paper wallet address, use
|
||||
`solana-keygen verify`:
|
||||
|
||||
### Create and Sign a Dummy Transaction
|
||||
|
||||
Use offline signing to acquire the signature of a dummy transaction that can
|
||||
be verified in the next step. A 0 Lamport [transfer](../cli/usage.md#solana-transfer)
|
||||
is used to prevent inadvertent loss of funds. Additionally, an improbable _blockhash_
|
||||
value is specified, as well as using the address of the _system program_ for the `TO`
|
||||
argument, to ensure the transaction would be rejected by the _cluster_ should
|
||||
it be submitted in error.
|
||||
|
||||
Command
|
||||
|
||||
```text
|
||||
solana transfer 11111111111111111111111111111111 0 --sign-only \
|
||||
--keypair ASK --blockhash 11111111111111111111111111111111
|
||||
```bash
|
||||
solana-keygen verify <PUBKEY> ASK
|
||||
```
|
||||
|
||||
Prompt for seed phrase
|
||||
|
||||
```text
|
||||
[keypair] seed phrase:
|
||||
[keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
|
||||
Recovered pubkey `AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi`. Continue? (y/n): y
|
||||
```
|
||||
|
||||
Output
|
||||
|
||||
```text
|
||||
Blockhash: 11111111111111111111111111111111
|
||||
Signers (Pubkey=Signature):
|
||||
AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWA
|
||||
|
||||
{"blockhash":"11111111111111111111111111111111","signers":["AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWA"]}
|
||||
```
|
||||
|
||||
### Verify the Signature
|
||||
|
||||
Using the _Signers_ output from the [previous step](#create-and-sign-a-dummy-transaction)
|
||||
to reconstruct the transaction, this time specifying the _pubkey_ and _signature_
|
||||
as in the submission step of [offline signing](../offline-signing/README.md). That is, the `--from` and
|
||||
`--fee-payer` are explicitly set to the _pubkey_ rather than being taken from
|
||||
the keypair (which is not queried this time).
|
||||
|
||||
Command
|
||||
|
||||
```text
|
||||
solana transfer 11111111111111111111111111111111 0 --sign-only --from AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi \
|
||||
--signer AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWA \
|
||||
--blockhash 11111111111111111111111111111111 --fee-payer AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi
|
||||
```
|
||||
|
||||
Output
|
||||
|
||||
```text
|
||||
Blockhash: 11111111111111111111111111111111
|
||||
Signers (Pubkey=Signature):
|
||||
AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWA
|
||||
|
||||
{"blockhash":"11111111111111111111111111111111","signers":["AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWA"]}
|
||||
```
|
||||
|
||||
### An Example of Failure
|
||||
|
||||
To simulate an error the [verification step](#verify-the-signature) is repeated,
|
||||
but with a corrupted _signature_ (the last letter is changed from "A" to "B").
|
||||
|
||||
Command
|
||||
|
||||
```text
|
||||
solana transfer 11111111111111111111111111111111 0 --sign-only --from AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi \
|
||||
--signer AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi=3uZndChSmPoYfaCihC993E7EAHKDsuu53Ge6Dk1K6ULwhJkgcgiHNm9J1Geqq2azW6PKxQTFjC8rMm5bGxRcYWB \
|
||||
--blockhash 11111111111111111111111111111111 --fee-payer AjTz9EX6vXB6EboKpFm7SwrbDannb6icjvEE632D3rfi
|
||||
```
|
||||
|
||||
Output (Error)
|
||||
|
||||
```text
|
||||
Error: BadParameter("Transaction construction failed, incorrect signature or public key provided")
|
||||
```
|
||||
where `<PUBKEY>` is replaced with the wallet address and the keyword `ASK` tells the
|
||||
command to prompt you for the keypair's seed phrase. Note that for security
|
||||
reasons, your seed phrase will not be displayed as you type. After entering your
|
||||
seed phrase, the command will output "Success" if the given public key matches the
|
||||
keypair generated from your seed phrase, and "Failed" otherwise.
|
||||
|
||||
## Checking Account Balance
|
||||
|
||||
@ -200,10 +146,11 @@ To retrieve public keys securely from a paper wallet, follow the
|
||||
Public keys can then be typed manually or transferred via a USB stick to a
|
||||
networked machine.
|
||||
|
||||
Next, configure the `solana` CLI tool to connect to a particular cluster:
|
||||
Next, configure the `solana` CLI tool to
|
||||
[connect to a particular cluster](../cli/choose-a-cluster.md):
|
||||
|
||||
```bash
|
||||
solana config set --url <CLUSTER URL> # (i.e. http://devnet.solana.com)
|
||||
solana config set --url <CLUSTER URL> # (i.e. https://api.mainnet-beta.solana.com)
|
||||
```
|
||||
|
||||
Finally, to check the balance, run the following command:
|
||||
@ -212,80 +159,13 @@ Finally, to check the balance, run the following command:
|
||||
solana balance <PUBKEY>
|
||||
```
|
||||
|
||||
In order to check a list of public keys quickly, append public keys to a file,
|
||||
one per line, like so:
|
||||
## Creating Multiple Paper Wallet Addresses
|
||||
You can create as many wallet addresses as you like. Simply re-run the
|
||||
steps in [Seed Phrase Generation](#seed-phrase-generation) or
|
||||
[Public Key Derivation](#public-key-derivation) to create a new address.
|
||||
Multiple wallet addresses can be useful if you want to transfer tokens between
|
||||
your own accounts for different purposes.
|
||||
|
||||
`public_keys.txt`
|
||||
```bash
|
||||
7hTw3XhprjT2DkVxVixtig9eZwHTZ2rksTSYN7Jh5niZ
|
||||
9ufAiSyboCZmmEsoStgLYQfnx9KfqP1ZtDK8Wr1j8SJV
|
||||
# ...
|
||||
```
|
||||
## Support
|
||||
|
||||
And run the following command:
|
||||
```bash
|
||||
while read PUBLIC_KEY;
|
||||
do echo "$PUBLIC_KEY: $(solana balance "$PUBLIC_KEY" | tail -n1)";
|
||||
done < public_keys.txt
|
||||
```
|
||||
|
||||
## Running a Validator
|
||||
|
||||
In order to run a validator, you will need to specify an "identity keypair"
|
||||
which will be used to fund all of the vote transactions signed by your validator.
|
||||
Rather than specifying a path with `--identity <PATH>` you can pass
|
||||
`ASK` to securely input the funding keypair.
|
||||
|
||||
```bash
|
||||
solana-validator --identity ASK --ledger ...
|
||||
|
||||
[identity] seed phrase: 🔒
|
||||
[identity] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
|
||||
```
|
||||
|
||||
You can use this input method for your voting keypair as well:
|
||||
|
||||
```bash
|
||||
solana-validator --identity ASK --authorized-voter ASK --ledger ...
|
||||
|
||||
[identity] seed phrase: 🔒
|
||||
[identity] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
|
||||
[authorized-voter] seed phrase: 🔒
|
||||
[authorized-voter] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
|
||||
```
|
||||
|
||||
Refer to the following page for a comprehensive guide on running a validator:
|
||||
{% page-ref page="../running-validator/README.md" %}
|
||||
|
||||
## Delegating Stake
|
||||
|
||||
Solana CLI tooling supports secure keypair input for stake delegation. To do so,
|
||||
first create a stake account with some SOL. Use the special `ASK` keyword to
|
||||
trigger a seed phrase input prompt for the stake account and use
|
||||
`--keypair ASK` to securely input the funding keypair.
|
||||
|
||||
```bash
|
||||
solana create-stake-account ASK 1 --keypair ASK
|
||||
|
||||
[stake_account] seed phrase: 🔒
|
||||
[stake_account] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
|
||||
[keypair] seed phrase: 🔒
|
||||
[keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
|
||||
```
|
||||
|
||||
Then, to delegate that stake to a validator, use `--keypair ASK` to
|
||||
securely input the funding keypair.
|
||||
|
||||
```bash
|
||||
solana delegate-stake --keypair ASK <STAKE_ACCOUNT_PUBKEY> <VOTE_ACCOUNT_PUBKEY>
|
||||
|
||||
[keypair] seed phrase: 🔒
|
||||
[keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
|
||||
```
|
||||
|
||||
Refer to the following page for a comprehensive guide on delegating stake:
|
||||
{% page-ref page="../running-validator/validator-stake.md" %}
|
||||
|
||||
---
|
||||
|
||||
{% page-ref page="../api-reference/cli.md" %}
|
||||
Check out our [Wallet Support Page](../wallet/support.md) for ways to get help.
|
||||
|
@ -49,6 +49,26 @@ For example, a fully qualified URL for a Ledger device might be:
|
||||
usb://ledger/BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK?key=0/0
|
||||
```
|
||||
|
||||
### Multiple Addresses on a Single Hardware Wallet
|
||||
You can derive as many wallet addresses as you like. To view them, simply
|
||||
iterate the `ACCOUNT` and/or `CHANGE` number when specifying the URL path.
|
||||
Multiple wallet addresses can be useful if you want to transfer tokens between
|
||||
your own accounts for different purposes.
|
||||
|
||||
For example, a first address can be viewed with:
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger?key=0
|
||||
```
|
||||
A second address can be viewed with:
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger?key=1
|
||||
```
|
||||
A third address:
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger?key=2
|
||||
```
|
||||
...and so on.
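The same iteration applies to a fully qualified URL that includes a `WALLET_ID` and the `ACCOUNT/CHANGE` form; for example, re-using the sample wallet ID from above (shown purely for illustration):

```bash
solana-keygen pubkey usb://ledger/BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK?key=0/1
```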
|
||||
|
||||
## Manage Multiple Hardware Wallets
|
||||
|
||||
It is sometimes useful to sign a transaction with keys from multiple hardware
|
||||
@ -76,3 +96,29 @@ but where `BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK` is your `WALLET_ID`.
|
||||
|
||||
With your fully qualified URL, you can connect multiple hardware wallets to
|
||||
the same computer and uniquely identify a keypair from any of them.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Keypair URL parameters are ignored in zsh
|
||||
|
||||
The question mark character is a special character in zsh. If that's not a
|
||||
feature you use, add the following line to your `~/.zshrc` to treat it as a
|
||||
normal character:
|
||||
|
||||
```bash
|
||||
unsetopt nomatch
|
||||
```
|
||||
|
||||
Then either restart your shell window or reload `~/.zshrc`:
|
||||
|
||||
```bash
|
||||
source ~/.zshrc
|
||||
```
|
||||
|
||||
If you would prefer not to disable zsh's special handling of the question mark
|
||||
character, you can escape it explicitly with a backslash in your keypair URLs.
|
||||
For example:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger\?key=0
|
||||
```
|
||||
|
@ -6,31 +6,19 @@ secure transaction signing.
|
||||
|
||||
## Before You Begin
|
||||
|
||||
- [Set up a Ledger Nano S with the Solana App](../wallet/ledger-live.md)
|
||||
- [Install the Solana command-line tools](../cli/install-solana-cli-tools.md)
|
||||
- [Initialize your Ledger Nano S](https://support.ledger.com/hc/en-us/articles/360000613793)
|
||||
- [Install the latest device firmware](https://support.ledgerwallet.com/hc/en-us/articles/360002731113-Update-Ledger-Nano-S-firmware)
|
||||
- [Install Ledger Live](https://support.ledger.com/hc/en-us/articles/360006395553/) software on your computer
|
||||
|
||||
## Install the Solana App on Ledger Nano S
|
||||
|
||||
1. Open Ledger Live
|
||||
2. Click Experimental Features and enable Developer Mode
|
||||
3. Open the Manager in Ledger Live
|
||||
4. Connect your Ledger device via USB and enter your pin to unlock it
|
||||
5. When prompted, approve the manager on your device
|
||||
6. Find Solana in the app catalog and click Install
|
||||
7. An installation window appears and your device will display Processing…
|
||||
8. The app installation is confirmed
|
||||
9. Close Ledger Live
|
||||
|
||||
## Use Ledger Device with Solana CLI
|
||||
|
||||
1. Ensure the Ledger Live application is closed
|
||||
2. Plug your Ledger device into your computer's USB port
|
||||
3. Enter your pin and start the Solana app on the Ledger device
|
||||
4. On your computer, run:
|
||||
4. Press both buttons to advance past the "Pending Ledger review" screen
|
||||
5. Ensure the screen reads "Application is ready"
|
||||
6. On your computer, run:
|
||||
|
||||
```text
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger
|
||||
```
|
||||
|
||||
@ -41,7 +29,7 @@ computer, you can use your wallet key to specify which Ledger hardware wallet
|
||||
you want to use. Run the same command again, but this time, with its fully
|
||||
qualified URL:
|
||||
|
||||
```text
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger/<WALLET_ID>
|
||||
```
|
||||
|
||||
@ -57,4 +45,4 @@ anywhere you see an option or argument that accepts a `<KEYPAIR>`.
|
||||
|
||||
## Support
|
||||
|
||||
Email maintainers@solana.com
|
||||
Check out our [Wallet Support Page](../wallet/support.md) for ways to get help.
|
||||
|
@ -25,7 +25,7 @@ The `solana vote-account` command displays the recent voting activity from
|
||||
your validator:
|
||||
|
||||
```bash
|
||||
solana vote-account ~/validator-vote-keypair.json
|
||||
solana vote-account ~/vote-account-keypair.json
|
||||
```
|
||||
|
||||
## Get Cluster Info
|
||||
|
@ -41,7 +41,7 @@ solana create-stake-account ~/validator-stake-keypair.json 1
|
||||
and then delegating that stake to your validator:
|
||||
|
||||
```bash
|
||||
solana delegate-stake ~/validator-stake-keypair.json ~/validator-vote-keypair.json
|
||||
solana delegate-stake ~/validator-stake-keypair.json ~/vote-account-keypair.json
|
||||
```
|
||||
|
||||
> Don’t delegate your remaining SOL, as your validator will use those tokens to vote.
|
||||
@ -50,7 +50,7 @@ Stakes can be re-delegated to another node at any time with the same command,
|
||||
but only one re-delegation is permitted per epoch:
|
||||
|
||||
```bash
|
||||
solana delegate-stake ~/validator-stake-keypair.json ~/some-other-validator-vote-keypair.json
|
||||
solana delegate-stake ~/validator-stake-keypair.json ~/some-other-vote-account-keypair.json
|
||||
```
|
||||
|
||||
Assuming the node is voting, now you're up and running and generating validator
|
||||
@ -81,7 +81,7 @@ so it can take an hour or more for stake to come fully online.
|
||||
|
||||
To monitor your validator during its warmup period:
|
||||
|
||||
* View your vote account:`solana vote-account ~/validator-vote-keypair.json` This displays the current state of all the votes the validator has submitted to the network.
|
||||
* View your vote account:`solana vote-account ~/vote-account-keypair.json` This displays the current state of all the votes the validator has submitted to the network.
|
||||
* View your stake account, the delegation preference and details of your stake:`solana stake-account ~/validator-stake-keypair.json`
|
||||
* `solana validators` displays the current active stake of all validators, including yours
|
||||
* `solana stake-history ` shows the history of stake warming up and cooling down over recent epochs
|
||||
|
@ -74,15 +74,25 @@ solana-keygen pubkey ~/validator-keypair.json
|
||||
|
||||
> Note: The "validator-keypair.json" file is also your \(ed25519\) private key.
|
||||
|
||||
Your validator identity keypair uniquely identifies your validator within the
|
||||
network. **It is crucial to back up this information.**
|
||||
### Paper Wallet identity
|
||||
|
||||
If you don’t back up this information, you WILL NOT BE ABLE TO RECOVER YOUR
|
||||
VALIDATOR if you lose access to it. If this happens, YOU WILL LOSE YOUR
|
||||
ALLOCATION OF LAMPORTS TOO.
|
||||
You can create a paper wallet for your identity file instead of writing the
|
||||
keypair file to disk with:
|
||||
|
||||
To back up your validator identity keypair, **back up your
|
||||
"validator-keypair.json” file to a secure location.**
|
||||
```bash
|
||||
solana-keygen new --no-outfile
|
||||
```
|
||||
|
||||
The corresponding identity public key can now be viewed by running:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey ASK
|
||||
```
|
||||
and then entering your seed phrase.
|
||||
|
||||
See [Paper Wallet Usage](../paper-wallet/paper-wallet-usage.md) for more info.
|
||||
|
||||
-------
|
||||
|
||||
### Vanity Keypair
|
||||
|
||||
@ -94,6 +104,18 @@ solana-keygen grind --starts-with e1v1s
|
||||
|
||||
Depending on the string requested, it may take days to find a match...
|
||||
|
||||
------
|
||||
|
||||
Your validator identity keypair uniquely identifies your validator within the
|
||||
network. **It is crucial to back up this information.**
|
||||
|
||||
If you don’t back up this information, you WILL NOT BE ABLE TO RECOVER YOUR
|
||||
VALIDATOR if you lose access to it. If this happens, YOU WILL LOSE YOUR
|
||||
ALLOCATION OF LAMPORTS TOO.
|
||||
|
||||
To back up your validator identity keypair, **back up your
|
||||
"validator-keypair.json” file or your seed phrase to a secure location.**
|
||||
|
||||
## More Solana CLI Configuration
|
||||
|
||||
Now that you have a keypair, set the solana configuration to use your validator
|
||||
@ -176,6 +198,13 @@ solana-validator --identity ~/validator-keypair.json --vote-account ~/vote-accou
|
||||
To force validator logging to the console add a `--log -` argument, otherwise
|
||||
the validator will automatically log to a file.
|
||||
|
||||
> Note: You can use a
|
||||
[paper wallet seed phrase](../paper-wallet/paper-wallet-usage.md)
|
||||
for your `--identity` and/or
|
||||
`--vote-account` keypairs. To use these, pass the respective argument as
|
||||
`solana-validator --identity ASK ... --vote-account ASK ...` and you will be
|
||||
prompted to enter your seed phrases and optional passphrase.
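A minimal sketch of such an invocation, with other required flags elided and an illustrative ledger path:

```bash
solana-validator --identity ASK --vote-account ASK --ledger ~/validator-ledger
```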
|
||||
|
||||
Confirm your validator connected to the network by opening a new terminal and
|
||||
running:
|
||||
|
||||
|
@ -24,7 +24,7 @@ solana create-stake-account ~/validator-stake-keypair.json 1
|
||||
and then delegating that stake to your validator:
|
||||
|
||||
```bash
|
||||
solana delegate-stake ~/validator-stake-keypair.json ~/validator-vote-keypair.json
|
||||
solana delegate-stake ~/validator-stake-keypair.json ~/vote-account-keypair.json
|
||||
```
|
||||
|
||||
{% hint style="warning" %}
|
||||
@ -43,7 +43,7 @@ Stakes need to warm up, and warmup increments are applied at Epoch boundaries, s
|
||||
|
||||
To monitor your validator during its warmup period:
|
||||
|
||||
* View your vote account:`solana vote-account ~/validator-vote-keypair.json` This displays the current state of all the votes the validator has submitted to the network.
|
||||
* View your vote account:`solana vote-account ~/vote-account-keypair.json` This displays the current state of all the votes the validator has submitted to the network.
|
||||
* View your stake account, the delegation preference and details of your stake:`solana stake-account ~/validator-stake-keypair.json`
|
||||
* `solana validators` displays the current active stake of all validators, including yours
|
||||
* `solana stake-history ` shows the history of stake warming up and cooling down over recent epochs
|
||||
|
@ -12,10 +12,10 @@ Note the version number
|
||||
|
||||
## Install Software
|
||||
|
||||
Install the Solana release [v0.23.7](https://github.com/solana-labs/solana/releases/tag/v0.23.7) on your machine by running:
|
||||
Install the Solana release [LATEST_SOLANA_RELEASE_VERSION](https://github.com/solana-labs/solana/releases/tag/LATEST_SOLANA_RELEASE_VERSION) on your machine by running:
|
||||
|
||||
```bash
|
||||
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.22.2/install/solana-install-init.sh | sh -s - 0.23.7
|
||||
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/solana-install-init.sh | sh -s - LATEST_SOLANA_RELEASE_VERSION
|
||||
```
|
||||
|
||||
then run `solana --version` to confirm the expected version number.
|
||||
|
@ -14,10 +14,10 @@ To use a Command Line Wallet, you must first [install the Solana CLI tools](../c
|
||||
A *file system wallet*, aka an FS wallet, is a directory in your computer's
|
||||
file system. Each file in the directory holds a keypair.
|
||||
|
||||
### FS Wallet Security
|
||||
### File System Wallet Security
|
||||
|
||||
An FS wallet is the most convenient and least secure form of wallet. It is
|
||||
convenient because the keypair is stored in a simple file. You can generate as
|
||||
A file system wallet is the most convenient and least secure form of wallet. It
|
||||
is convenient because the keypair is stored in a simple file. You can generate as
|
||||
many keys as you would like and trivially back them up by copying the files. It
|
||||
is insecure because the keypair files are **unencrypted**. If you are the only
|
||||
user of your computer and you are confident it is free of malware, an FS wallet
|
||||
@ -28,6 +28,8 @@ keypairs are stored on your computer as files, a skilled hacker with physical
|
||||
access to your computer may be able to access it. Using an encrypted hard
|
||||
drive, such as FileVault on MacOS, minimizes that risk.
|
||||
|
||||
{% page-ref page="../file-system-wallet/README.md" %}
|
||||
|
||||
## Paper Wallet
|
||||
|
||||
A *paper wallet* is a collection of *seed phrases* written on paper. A seed
|
||||
|
@ -1,4 +1,4 @@
|
||||
#Ledger Live and Ledger Nano S
|
||||
# Ledger Live and Ledger Nano S
|
||||
This document describes how to set up a
|
||||
[Ledger Nano S hardware wallet](https://shop.ledger.com/products/ledger-nano-s)
|
||||
with the [Ledger Live](https://www.ledger.com/ledger-live) software.
|
||||
@ -12,12 +12,13 @@ will be available in the future.**
|
||||
Users may [use a Ledger Nano S with the Solana command
|
||||
line tools](../remote-wallet/ledger.md).
|
||||
|
||||
##Set up a Ledger Nano S
|
||||
## Set up a Ledger Nano S
|
||||
- Order a [Nano S from Ledger](https://shop.ledger.com/products/ledger-nano-s)
|
||||
- Follow the instructions for device setup included in the package,
|
||||
or [Ledger's Start page](https://www.ledger.com/start/)
|
||||
- [Install the latest device firmware](https://support.ledgerwallet.com/hc/en-us/articles/360002731113-Update-Ledger-Nano-S-firmware)
|
||||
|
||||
##Install Ledger Live
|
||||
## Install Ledger Live
|
||||
- Install [Ledger Live desktop software](https://www.ledger.com/ledger-live/),
|
||||
or
|
||||
- Install the [Ledger Live app for iOS](https://apps.apple.com/app/id1361671700)
|
||||
@ -26,17 +27,26 @@ line tools](../remote-wallet/ledger.md).
|
||||
- Requires Android 7.0 or later.
|
||||
- Connect your Nano S to your device and follow the instructions
|
||||
|
||||
##Install the Solana App on your Nano S
|
||||
## Install the Solana App on your Nano S
|
||||
- Open Ledger Live
|
||||
- Currently Ledger Live needs to be in "Developer Mode"
|
||||
(Settings > Experimental Features > Developer Mode) to see our app.
|
||||
|
||||

|
||||
|
||||
- Go to Manager in the app and find "Solana" in the App Catalog and
|
||||
click Install
|
||||
- Make sure your device is plugged in via USB and is unlocked with its PIN
|
||||
- You may be prompted on the Nano S to confirm the install of Solana App
|
||||
- "Solana" should now show as "Installed" in the Ledger Live Manager
|
||||
|
||||
##Interact with Solana network
|
||||

|
||||
|
||||
## Interact with Solana network
|
||||
- To interact with your Ledger wallet on our live network, please see our
|
||||
instructions on how to [use a Ledger Nano S with the Solana command
|
||||
line tools](../remote-wallet/ledger.md).
|
||||
instructions on how to
|
||||
[use a Ledger Nano S with the Solana command line tools](../remote-wallet/ledger.md).
|
||||
|
||||
## Support
|
||||
|
||||
Check out our [Wallet Support Page](../wallet/support.md) for ways to get help.
|
||||
|
13
docs/src/wallet/support.md
Normal file
@ -0,0 +1,13 @@
|
||||
# Support / Troubleshooting
|
||||
If you have questions or are having trouble setting up or using your wallet
|
||||
of choice, please make sure you've read through all the relevant pages in our
|
||||
[Wallet Guide](README.md). The Solana team is working hard to support new
|
||||
features on popular wallets, and we do our best to keep our documents up to date
|
||||
with the latest available features.
|
||||
|
||||
If you have questions after reading the docs, feel free to reach out to us on
|
||||
our [Telegram](https://t.me/solanaio).
|
||||
|
||||
For **technical support**, reach out to us on
|
||||
[Discord](https://discordapp.com/invite/pquxPsq), using the #wallet-support
|
||||
channel in our Community section.
|
@ -1,16 +1,24 @@
|
||||
#Trust Wallet
|
||||
# Trust Wallet
|
||||
Trust Wallet is an app for your smartphone or tablet and is the fastest and
|
||||
simplest way for most users to get started with a Solana wallet.
|
||||
|
||||
**NOTE: Trust Wallet currently only supports Solana on the iOS version of its
|
||||
app. Support for Android is coming very very soon!**
|
||||
## Install Trust Wallet
|
||||
|
||||
##Set Up Trust Wallet
|
||||
- Open the App Store or Play Store
|
||||
#### iOS
|
||||
|
||||
- Open the App Store
|
||||
- Download “Trust: Crypto & Bitcoin Wallet” from Six Days LLC
|
||||
- Requires iOS 13.0 or higher
|
||||
- Open Trust Wallet and follow the app prompts to get started
|
||||
|
||||
#### Android
|
||||
|
||||
- Open the Play Store
|
||||
- Download “Trust Crypto Wallet” from Six Days LLC
|
||||
- Requires Android 6.0 or higher
|
||||
- Open Trust Wallet and follow the app prompts to get started
|
||||
|
||||
##Add Solana (SOL) tokens to your wallet
|
||||
## Add Solana (SOL) tokens to your wallet
|
||||
- From the main page, go to the “Tokens” tab at the top of the screen
|
||||
- Tap the “+” icon at the top right corner
|
||||
- Search for “Solana” in the search page, and when the “Solana SOL” token is
|
||||
@ -19,7 +27,7 @@ shown, slide the slider to enable this token.
|
||||
|
||||
[Trust Wallet Official Docs: How to Add or Remove a Coin](https://community.trustwallet.com/t/how-to-add-or-remove-a-coin/896)
|
||||
|
||||
##Receiving SOL tokens
|
||||
## Receiving SOL tokens
|
||||
- To receive SOL tokens that you’ve purchased or earned, you need to send your
|
||||
Receive Address to whoever is sending you tokens.
|
||||
- Tap “Receive” to view a QR code and your text address, which is a long string
|
||||
@ -32,6 +40,68 @@ to that address, **those tokens will be lost forever**.
|
||||
|
||||
[Trust Wallet Official Docs: How to Find my Receiving Address](https://community.trustwallet.com/t/how-to-find-my-receiving-address/2006)
|
||||
|
||||
##Troubleshooting
|
||||
## Sending SOL tokens
|
||||
You can send SOL from your Trust Wallet to any other valid address on the Solana
|
||||
network. Once you know the other party's receiving address,
|
||||
go to the main page of the wallet from which you want to send some SOL tokens:
|
||||
- Tap the "Solana" icon.
|
||||
- Tap "Send"
|
||||
- Under "Recipient Address":
|
||||
- If you already have the receiving address you are going to send to,
|
||||
tap "Paste"
|
||||
- If you are transferring to another Trust Wallet user, you can use the app's
|
||||
QR code reader by tapping the square icon to the right of the "Paste" button.
|
||||
- Under "Amount", enter the amount of SOL you want to send, or tap "Max"
|
||||
to send **all** of the SOL in your wallet to the new address
|
||||
- Tap "Next" to view a summary/confirmation page to review before submitting
|
||||
the transaction
|
||||
- The real-time network transaction fee will be shown. This fee will be
|
||||
paid by the sending account in addition to the full amount transferred.
|
||||
- If you chose to send all your tokens to the new address by selecting
|
||||
"Max" under amount, the amount sent to the recipient will be the account
|
||||
balance *minus* the current network transaction fee.
|
||||
- **Make sure you entered the "To" address correctly and that it matches your
|
||||
desired wallet's receiving address!**
|
||||
- Tap "Send" to submit the transaction. The transaction will show as "Pending"
|
||||
for a few seconds, and then will show as "Sent".

[Trust Wallet Official Docs: Sending Cryptocurrencies](https://community.trustwallet.com/t/sending-cryptocurrencies/65)

## Using Multiple Wallet Addresses
Trust Wallet allows you to create multiple wallets, each of which is
secured by a different set of random seed words. If you want to use more than
one Solana address, follow these steps.

#### Create an additional wallet
- In the main page of the Trust Wallet App, tap "Settings" in the bottom-right
corner.
- Tap "Wallets"
- Tap "+" to create a new Wallet
- Follow the steps above to add SOL tokens to your new wallet

*Note: In the "Wallets" page under "Settings" you can re-name each of your wallets.
Consider giving each a descriptive name if you are planning to use your wallets
for different purposes, so you don't mistake one for the other.*
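
For the technically curious, "secured by a different set of random seed words"
means that each wallet is backed by its own keypair, and its public key is the
receiving address you share. The sketch below is illustrative only: it assumes
the `solana-sdk` crate as a dependency and simply generates two independent
keypairs, which is not how Trust Wallet itself derives keys from a seed phrase,
but it shows why two wallets never share an address.

```rust
use solana_sdk::signature::{Keypair, Signer};

fn main() {
    // Each wallet corresponds to an independent keypair; the public key is
    // the receiving address you share with the sender.
    let wallet_a = Keypair::new();
    let wallet_b = Keypair::new();

    println!("Wallet A receive address: {}", wallet_a.pubkey());
    println!("Wallet B receive address: {}", wallet_b.pubkey());

    // The two addresses are distinct, which is why SOL sent to one wallet
    // never shows up in the other.
    assert_ne!(wallet_a.pubkey(), wallet_b.pubkey());
}
```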

#### Transferring SOL between your wallets
Just like you can transfer SOL to another party, you can transfer SOL between
wallets that you own.

- Copy the receive address of your **newly created** wallet.
- Make sure your **new** wallet is selected by going to
"Settings" --> "Wallets", then tap on the name of your new wallet.
- Tap the "Solana" icon.
- Tap "Receive" then tap "Copy".
- Select your previous/original wallet which already has some SOL by going to
"Settings" --> "Wallets", then tap on the name of your **original** wallet.
- Now follow the same process for [sending SOL tokens](#sending-sol-tokens)
using your **new** wallet's receiving address as the address in the "To"
field when you make the transfer.

## Troubleshooting
If you are having trouble setting up your Trust Wallet app, check out their
[Community Help Center](https://community.trustwallet.com/c/helpcenter)

## Support

Check out our [Wallet Support Page](../wallet/support.md) for ways to get help.

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-dos"
version = "1.1.0"
version = "1.1.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -13,7 +13,7 @@ clap = "2.33.0"
log = "0.4.8"
rand = "0.6.5"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
solana-runtime = { path = "../runtime", version = "1.1.3" }

@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
version = "1.1.0"
version = "1.1.3"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -14,8 +14,8 @@ console = "0.10.0"
indicatif = "0.14.0"
log = "0.4.8"
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
solana-ledger = { path = "../ledger", version = "1.1.3" }
tar = "0.4.26"

[lib]

@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.1.0"
version = "1.1.3"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -19,10 +19,10 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.105"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
solana-logger = { path = "../logger", version = "1.1.3" }
solana-metrics = { path = "../metrics", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
tokio = "0.1"
tokio-codec = "0.1"

@ -21,10 +21,20 @@ if [[ ! -f target/perf-libs/.$VERSION ]]; then
(
set -x
cd target/perf-libs
curl -L --retry 5 --retry-delay 2 --retry-connrefused -o solana-perf.tgz \
https://github.com/solana-labs/solana-perf-libs/releases/download/$PERF_LIBS_VERSION/solana-perf.tgz

if [[ -r ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz ]]; then
cp ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz solana-perf.tgz
else
curl -L --retry 5 --retry-delay 2 --retry-connrefused -o solana-perf.tgz \
https://github.com/solana-labs/solana-perf-libs/releases/download/$PERF_LIBS_VERSION/solana-perf.tgz
fi
tar zxvf solana-perf.tgz
rm -f solana-perf.tgz

if [[ ! -r ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz ]]; then
# Save it for next time
mkdir -p ~/.cache
mv solana-perf.tgz ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz
fi
touch .$VERSION
)

@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "1.1.0"
version = "1.1.3"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,16 +10,16 @@ edition = "2018"

[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.0" }
solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
solana-config-program = { path = "../programs/config", version = "1.1.0" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-stake-program = { path = "../programs/stake", version = "1.1.0" }
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
solana-vest-program = { path = "../programs/vest", version = "1.1.0" }
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.3" }
solana-budget-program = { path = "../programs/budget", version = "1.1.3" }
solana-config-program = { path = "../programs/config", version = "1.1.3" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.3" }
solana-runtime = { path = "../runtime", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
solana-stake-program = { path = "../programs/stake", version = "1.1.3" }
solana-storage-program = { path = "../programs/storage", version = "1.1.3" }
solana-vest-program = { path = "../programs/vest", version = "1.1.3" }
solana-vote-program = { path = "../programs/vote", version = "1.1.3" }

[lib]
crate-type = ["lib"]

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.0"
version = "1.1.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -15,13 +15,13 @@ chrono = "0.4"
serde = "1.0.105"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-stake-program = { path = "../programs/stake", version = "1.1.0" }
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.3" }
solana-ledger = { path = "../ledger", version = "1.1.3" }
solana-sdk = { path = "../sdk", version = "1.1.3" }
solana-stake-program = { path = "../programs/stake", version = "1.1.3" }
solana-storage-program = { path = "../programs/storage", version = "1.1.3" }
solana-vote-program = { path = "../programs/vote", version = "1.1.3" }
tempfile = "3.1.0"

[[bin]]

Some files were not shown because too many files have changed in this diff.