Compare commits
40 Commits
SHA1
66f006108c
47f887bda0
bb64c73aa2
1f30d1e77a
04dab9b274
fb4e102670
67e0ba0356
22bb4e6462
79035bdbed
70089a5258
34238d5f1e
cab6917cbd
2951ee5b1d
fb16a15900
76b52f4c5d
21a2e643c2
733d9cb026
2f54f57b7a
7bd95019ef
33557c3271
c65b9cd88d
038db8167f
030498ced5
28eb8b662a
de752eaf80
9c5ef19d80
235bd0a46b
465d71a3a3
14e6029fae
75434158ee
1cae9fd893
bea34a812c
41a28d7322
235158d2bc
521238f7d7
384f52a607
49f2d912ab
8652fe30ce
899a14ba51
466c7dafb3
Cargo.lock (generated; 734 changed lines): file diff suppressed because it is too large.
Cargo.toml (workspace)

@@ -43,6 +43,7 @@ members = [
     "archiver",
     "archiver-lib",
     "archiver-utils",
+    "remote-wallet",
     "runtime",
     "sdk",
     "sdk-c",
archiver-lib/Cargo.toml (solana-archiver-lib): package version and the solana-client, solana-storage-program, solana-net-utils, solana-chacha, solana-chacha-sys, solana-ledger, solana-logger, solana-perf, solana-sdk, solana-core, solana-archiver-utils, and solana-metrics path dependencies are all bumped from 0.23.1 to 0.23.3. Other dependencies (ed25519-dalek, log, rand, rand_chacha, thiserror, serde, serde_json, serde_derive) and the hex dev-dependency are unchanged.
archiver-lib/src/archiver.rs

@@ -16,6 +16,7 @@ use solana_core::{
One import line is added to this use block; the imports shown in the hunk are
packet::{limited_deserialize, PACKET_DATA_SIZE}, repair_service,
repair_service::{RepairService, RepairSlotRange, RepairStrategy},
serve_repair::ServeRepair, shred_fetch_stage::ShredFetchStage,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage}, and
storage_stage::NUM_STORAGE_SAMPLES.

@@ -195,13 +196,7 @@ impl Archiver {
         Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"),
     );

-        let gossip_service = GossipService::new(
-            &cluster_info,
-            Some(blockstore.clone()),
-            None,
-            node.sockets.gossip,
-            &exit,
-        );
+        let gossip_service = GossipService::new(&cluster_info, None, node.sockets.gossip, &exit);

         info!("Connecting to the cluster via {:?}", cluster_entrypoint);
         let (nodes, _) =

@@ -814,7 +809,7 @@ impl Archiver {
     /// It is recommended to use a temporary blockstore for this since the download will not verify
     /// shreds received and might impact the chaining of shreds across slots
     pub fn download_from_archiver(
-        cluster_info: &Arc<RwLock<ClusterInfo>>,
+        serve_repair: &ServeRepair,
         archiver_info: &ContactInfo,
         blockstore: &Arc<Blockstore>,
         slots_per_segment: u64,

@@ -834,10 +829,10 @@ impl Archiver {
             Recycler::default(),
             "archiver_reeciver",
         );
-        let id = cluster_info.read().unwrap().id();
+        let id = serve_repair.keypair().pubkey();
         info!(
             "Sending repair requests from: {} to: {}",
-            cluster_info.read().unwrap().my_data().id,
+            serve_repair.my_info().id,
             archiver_info.gossip
         );
         let repair_slot_range = RepairSlotRange {

@@ -857,9 +852,7 @@ impl Archiver {
             let reqs: Vec<_> = repairs
                 .into_iter()
                 .filter_map(|repair_request| {
-                    cluster_info
-                        .read()
-                        .unwrap()
+                    serve_repair
                         .map_repair_request(&repair_request)
                         .map(|result| ((archiver_info.gossip, result), repair_request))
                         .ok()
archiver-utils/Cargo.toml (solana-archiver-utils): package version and the solana-chacha, solana-chacha-sys, solana-ledger, solana-logger, solana-perf, and solana-sdk path dependencies are all bumped from 0.23.1 to 0.23.3. log, rand, rand_chacha, and the hex dev-dependency are unchanged.
archiver/Cargo.toml (solana-archiver): package version and the solana-clap-utils, solana-core, solana-logger, solana-metrics, solana-archiver-lib, solana-net-utils, and solana-sdk path dependencies are all bumped from 0.23.1 to 0.23.3. clap and console are unchanged.
banking-bench/Cargo.toml (solana-banking-bench): package version and the solana-core, solana-ledger, solana-logger, solana-runtime, solana-measure, and solana-sdk path dependencies are all bumped from 0.23.1 to 0.23.3. log, rayon, rand, and crossbeam-channel are unchanged.
bench-exchange/Cargo.toml (solana-bench-exchange): package version and the solana-clap-utils, solana-core, solana-genesis, solana-client, solana-faucet, solana-exchange-program, solana-logger, solana-metrics, solana-net-utils, solana-runtime, and solana-sdk path dependencies are all bumped from 0.23.1 to 0.23.3; the solana-local-cluster dev-dependency is bumped likewise. serde, serde_derive, serde_json, serde_yaml, untrusted, and ws are unchanged.
bench-exchange tests (test_exchange_local_cluster)

@@ -16,6 +16,7 @@ use std::sync::mpsc::channel;
 use std::time::Duration;

 #[test]
+#[ignore]
 fn test_exchange_local_cluster() {
     solana_logger::setup();
bench-streamer/Cargo.toml (solana-bench-streamer): package version and the solana-clap-utils, solana-core, solana-logger, and solana-net-utils path dependencies are all bumped from 0.23.1 to 0.23.3. clap is unchanged.
bench-tps/Cargo.toml (solana-bench-tps): package version and the solana-clap-utils, solana-core, solana-genesis, solana-client, solana-faucet, solana-librapay (optional), solana-logger, solana-metrics, solana-measure, solana-net-utils, solana-runtime, solana-sdk, and solana-move-loader-program (optional) path dependencies are all bumped from 0.23.1 to 0.23.3; the solana-local-cluster dev-dependency is bumped likewise. The serde crates, serial_test dev-dependencies, and the `move = ["solana-librapay", "solana-move-loader-program"]` feature are unchanged.
docs (JSON RPC API reference)

@@ -303,6 +303,9 @@ The result field will be an object with the following fields:
Three lines are added to the transaction metadata field list, which now reads:
* `fee: <u64>` - fee this transaction was charged, as u64 integer
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
* `rewards: <array>` - an array of JSON objects containing:
  * `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
  * `lamports: <i64>`- number of reward lamports credited or debited by the account, as a i64
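For orientation, a sketch of how these metadata fields might appear in a response body; the field names follow the list above, but the numbers and the pubkey are purely illustrative placeholders and the enclosing response envelope is omitted:

```
// illustrative values only; not taken from this changeset
{
  "fee": 5000,
  "preBalances": [499995000, 26858640, 1],
  "postBalances": [499990000, 26858640, 1],
  "rewards": [
    {
      "pubkey": "3UVYmECPPMZSCqWKfENfuoTv51fTDTWicX9xmBD2euKe",
      "lamports": 500
    }
  ]
}
```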
#### Example:

@@ -827,7 +830,7 @@ The result field will be a JSON object with the following fields:
 // Request
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
 // Result
-{"jsonrpc":"2.0","result":{"solana-core": "0.23.1"},"id":1}
+{"jsonrpc":"2.0","result":{"solana-core": "0.23.3"},"id":1}
 ```

 ### getVoteAccounts
docs (solana-cli usage reference)

@@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
The version banner in the `solana-cli` help output is updated:
-solana-cli 0.23.1 [channel=unknown commit=unknown]
+solana-cli 0.23.3 [channel=unknown commit=unknown]

@@ -241,7 +241,6 @@ SUBCOMMANDS:
The `uptime` entry ("Show the uptime of a validator, based on epoch voting history")
is removed from the SUBCOMMANDS list; the surrounding entries (stakes,
storage-account, transaction-count, validator-info, validators, vote-account)
are unchanged.

@@ -1480,38 +1479,9 @@
The entire `#### solana-uptime` section is deleted: its usage line
`solana uptime [FLAGS] [OPTIONS] <VOTE ACCOUNT PUBKEY>`, its flags
(--aggregate, -h/--help, --skip-seed-phrase-validation, -V, -v), its options
(--ask-seed-phrase, -C/--config, -u/--url, -k/--keypair, --span <NUM OF EPOCHS>),
and its <VOTE ACCOUNT PUBKEY> argument.

Every other hunk in this file pairs an old line with a new line whose visible
text is identical, i.e. whitespace-only changes: trailing spaces are stripped
from the repeated subcommand heading lines (solana-account through
solana-withdraw-stake), from the <PROGRAM_ID> and <STORAGE ACCOUNT OWNER PUBKEY>
/ <STORAGE ACCOUNT> ARGS listings, from the `--cancelable` flag line under
solana-pay, and from the re-wrapped "Provide the nonce account to use when
creating a nonced transaction..." help text that repeats under
solana-deactivate-stake, solana-delegate-stake, solana-pay,
solana-stake-authorize-staker, and solana-stake-authorize-withdrawer.
docs (paper wallet: installing solana-keygen)

@@ -2,7 +2,7 @@
 Follow this guide to setup Solana's key generation tool called `solana-keygen`

 {% hint style="warn" %}
-After installation, ensure your version is `0.21.1` or higher by running `solana-keygen -V`
+After installation, ensure your version is `0.23.1` or higher by running `solana-keygen -V`
 {% endhint %}
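A hedged illustration of what that version check prints on a matching install (the exact format may vary between releases):

```text
solana-keygen 0.23.1
```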

 ## Download
docs (Installing the Validator Software)

@@ -1,14 +1,14 @@ and @@ -16,11 +16,11 @@
Every release reference is moved from v0.21.0 to v0.23.1:
* the release link now points to https://github.com/solana-labs/solana/releases/tag/v0.23.1
* the install command becomes
  `curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.23.1/install/solana-install-init.sh | sh -s - 0.23.1`
* "If you are connecting to a different testnet, you can replace `0.21.0` with the release tag..." now reads `0.23.1`
* the sample installer output now shows "downloading v0.23.1 installer", "Release version: 0.23.1", and the v0.23.1 release URL (https://github.com/solana-labs/solana/releases/download/v0.23.1/solana-release-x86_64-unknown-linux-gnu.tar.bz2); the configuration path, active release directory, and "Update successful" lines are unchanged.
docs (monitoring your validator)

@@ -83,7 +83,6 @@ To monitor your validator during its warmup period:
One bullet is removed from the monitoring checklist, the one reading
"`solana uptime ~/validator-vote-keypair.json` will display the voting history \(aka, uptime\) of your validator over recent Epochs".
The surrounding bullets (`solana vote-account`, `solana stake-account`,
`solana validators`, `solana stake-history`, and the leader-slot log message
example) are unchanged.
chacha-cuda/Cargo.toml (solana-chacha-cuda): package version and the solana-archiver-utils, solana-chacha, solana-ledger, solana-logger, solana-perf, and solana-sdk path dependencies are all bumped from 0.23.1 to 0.23.3. log and the hex-literal dev-dependency are unchanged.
chacha-sys/Cargo.toml (solana-chacha-sys): package version bumped from 0.23.1 to 0.23.3.
chacha/Cargo.toml (solana-chacha): package version and the solana-chacha-sys, solana-ledger, solana-logger, solana-perf, and solana-sdk path dependencies are all bumped from 0.23.1 to 0.23.3. log, rand, rand_chacha, and the hex-literal dev-dependency are unchanged.
ci Dockerfile (solanalabs/rust base image)

@@ -1,4 +1,4 @@
-FROM solanalabs/rust:1.40.0
+FROM solanalabs/rust:1.41.0
 ARG date

 RUN set -x \
ci Dockerfile (rust base image)

@@ -1,6 +1,6 @@
 # Note: when the rust version is changed also modify
 # ci/rust-version.sh to pick up the new image tag
-FROM rust:1.40.0
+FROM rust:1.41.0

 # Add Google Protocol Buffers for Libra's metrics library.
 ENV PROTOC_VERSION 3.8.0

@@ -17,6 +17,7 @@ RUN set -x \
         clang-7 \
         cmake \
         lcov \
+        libudev-dev \
         libclang-common-7-dev \
         mscgen \
         net-tools \
ci/rust-version.sh

@@ -16,13 +16,13 @@
 if [[ -n $RUST_STABLE_VERSION ]]; then
   stable_version="$RUST_STABLE_VERSION"
 else
-  stable_version=1.40.0
+  stable_version=1.41.0
 fi

 if [[ -n $RUST_NIGHTLY_VERSION ]]; then
   nightly_version="$RUST_NIGHTLY_VERSION"
 else
-  nightly_version=2019-12-19
+  nightly_version=2020-02-06
 fi
ci deploy script

@@ -389,7 +389,7 @@ deploy() {
   (
     echo "--- net.sh update"
     set -x
-    time net/net.sh update -t "$CHANNEL_OR_TAG" --platform linux --platform osx --platform windows
+    time net/net.sh update -t "$CHANNEL_OR_TAG" --platform linux --platform osx #--platform windows
   )
   ;;
 testnet-perf)
clap-utils/Cargo.toml (solana-clap-utils): package version bumped from 0.23.1 to 0.23.3; a new solana-remote-wallet path dependency (version 0.23.3) is added, and the solana-sdk path dependency is bumped from 0.23.1 to 0.23.3. clap, rpassword, semver, tiny-bip39, url, and chrono are unchanged.
clap-utils/src/input_parsers.rs

@@ -1,6 +1,7 @@
 use crate::keypair::{keypair_from_seed_phrase, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG};
 use chrono::DateTime;
 use clap::ArgMatches;
+use solana_remote_wallet::remote_wallet::DerivationPath;
 use solana_sdk::{
     clock::UnixTimestamp,
     native_token::sol_to_lamports,

@@ -100,6 +101,16 @@
A parser for hardware-wallet derivation paths is added:

+pub fn derivation_of(matches: &ArgMatches<'_>, name: &str) -> Option<DerivationPath> {
+    matches.value_of(name).map(|derivation_str| {
+        let derivation_str = derivation_str.replace("'", "");
+        let mut parts = derivation_str.split('/');
+        let account = parts.next().unwrap().parse::<u16>().unwrap();
+        let change = parts.next().map(|change| change.parse::<u16>().unwrap());
+        DerivationPath { account, change }
+    })
+}

@@ -277,4 +288,40 @@
A new test_derivation_of test exercises the parser with the inputs "2/3", "2",
and "2'/3'", asserting DerivationPath { account: 2, change: Some(3) },
DerivationPath { account: 2, change: None }, and
DerivationPath { account: 2, change: Some(3) } respectively, and None for an
argument that was not supplied.
clap-utils/src/input_validators.rs

@@ -1,8 +1,10 @@
 use crate::keypair::ASK_KEYWORD;
 use chrono::DateTime;
-use solana_sdk::hash::Hash;
-use solana_sdk::pubkey::Pubkey;
-use solana_sdk::signature::{read_keypair_file, Signature};
+use solana_sdk::{
+    hash::Hash,
+    pubkey::Pubkey,
+    signature::{read_keypair_file, Signature},
+};
 use std::str::FromStr;

 // Return an error if a pubkey cannot be parsed.

@@ -141,3 +143,47 @@
A matching clap validator for derivation paths is added:

+pub fn is_derivation(value: String) -> Result<(), String> {
+    let value = value.replace("'", "");
+    let mut parts = value.split('/');
+    let account = parts.next().unwrap();
+    account
+        .parse::<u16>()
+        .map_err(|e| {
+            format!(
+                "Unable to parse derivation, provided: {}, err: {:?}",
+                account, e
+            )
+        })
+        .and_then(|_| {
+            if let Some(change) = parts.next() {
+                change.parse::<u16>().map_err(|e| {
+                    format!(
+                        "Unable to parse derivation, provided: {}, err: {:?}",
+                        change, e
+                    )
+                })
+            } else {
+                Ok(0)
+            }
+        })
+        .map(|_| ())
+}

A new test_is_derivation test asserts that "2", "0", "0/2", and "0'/2'" are
accepted and that "a", "65537", "a/b", and "0/65537" are rejected.
clap-utils/src/keypair.rs

@@ -32,8 +32,8 @@ pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {

 #[derive(Debug, PartialEq)]
 pub enum Source {
-    File,
     Generated,
+    Path,
     SeedPhrase,
 }

@@ -131,7 +131,12 @@ pub fn keypair_input(
         keypair_from_seed_phrase(keypair_name, skip_validation, true)
             .map(|keypair| KeypairWithSource::new(keypair, Source::SeedPhrase))
     } else if let Some(keypair_file) = matches.value_of(keypair_match_name) {
-        read_keypair_file(keypair_file).map(|keypair| KeypairWithSource::new(keypair, Source::File))
+        if keypair_file.starts_with("usb://") {
+            Ok(KeypairWithSource::new(Keypair::new(), Source::Path))
+        } else {
+            read_keypair_file(keypair_file)
+                .map(|keypair| KeypairWithSource::new(keypair, Source::Path))
+        }
     } else {
         Ok(KeypairWithSource::new(Keypair::new(), Source::Generated))
     }
cli-config/Cargo.toml (solana-cli-config): package version bumped from 0.23.1 to 0.23.3.
cli/Cargo.toml (solana-cli): package version bumped from 0.23.1 to 0.23.3; the solana-budget-program, solana-clap-utils, solana-cli-config, solana-client, solana-config-program, solana-faucet, solana-logger, solana-net-utils, solana-runtime, solana-sdk, solana-stake-program, solana-storage-program, solana-vote-program, and solana-vote-signer path dependencies are all bumped from 0.23.1 to 0.23.3, and a new solana-remote-wallet path dependency (version 0.23.3) is added. In [dev-dependencies], solana-core and solana-budget-program are bumped from 0.23.1 to 0.23.3; reqwest, serde, titlecase, url, tempfile, and the [[bin]] section are unchanged.
cli/src/cli.rs (499 changed lines)
@@ -14,12 +14,16 @@ use log::*;
|
||||
use num_traits::FromPrimitive;
|
||||
use serde_json::{self, json, Value};
|
||||
use solana_budget_program::budget_instruction::{self, BudgetError};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant};
|
||||
use solana_client::{client_error::ClientError, rpc_client::RpcClient};
|
||||
#[cfg(not(test))]
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
#[cfg(test)]
|
||||
use solana_faucet::faucet_mock::request_airdrop_transaction;
|
||||
use solana_remote_wallet::{
|
||||
ledger::get_ledger_from_info,
|
||||
remote_wallet::{DerivationPath, RemoteWallet, RemoteWalletInfo},
|
||||
};
|
||||
use solana_sdk::{
|
||||
bpf_loader,
|
||||
clock::{Epoch, Slot},
|
||||
@@ -33,7 +37,7 @@ use solana_sdk::{
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, KeypairUtil, Signature},
|
||||
system_instruction::{create_address_with_seed, SystemError, MAX_ADDRESS_SEED_LEN},
|
||||
system_instruction::{self, create_address_with_seed, SystemError, MAX_ADDRESS_SEED_LEN},
|
||||
system_transaction,
|
||||
transaction::{Transaction, TransactionError},
|
||||
};
|
||||
@@ -51,6 +55,23 @@ use std::{
|
||||
|
||||
const USERDATA_CHUNK_SIZE: usize = 229; // Keep program chunks under PACKET_DATA_SIZE
|
||||
|
||||
pub const FEE_PAYER_ARG: ArgConstant<'static> = ArgConstant {
|
||||
name: "fee_payer",
|
||||
long: "fee-payer",
|
||||
help: "Specify the fee-payer account. This may be a keypair file, the ASK keyword \n\
|
||||
or the pubkey of an offline signer, provided an appropriate --signer argument \n\
|
||||
is also passed. Defaults to the client keypair.",
|
||||
};
|
||||
|
||||
pub fn fee_payer_arg<'a, 'b>() -> Arg<'a, 'b> {
|
||||
Arg::with_name(FEE_PAYER_ARG.name)
|
||||
.long(FEE_PAYER_ARG.long)
|
||||
.takes_value(true)
|
||||
.value_name("KEYPAIR or PUBKEY")
|
||||
.validator(is_pubkey_or_keypair_or_ask_keyword)
|
||||
.help(FEE_PAYER_ARG.help)
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct KeypairEq(Keypair);
|
||||
|
||||
@@ -264,6 +285,7 @@ pub enum CliCommand {
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: Option<SigningAuthority>,
|
||||
fee_payer: Option<SigningAuthority>,
|
||||
},
|
||||
DelegateStake {
|
||||
stake_account_pubkey: Pubkey,
|
||||
@@ -275,6 +297,20 @@ pub enum CliCommand {
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: Option<SigningAuthority>,
|
||||
fee_payer: Option<SigningAuthority>,
|
||||
},
|
||||
SplitStake {
|
||||
stake_account_pubkey: Pubkey,
|
||||
stake_authority: Option<SigningAuthority>,
sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: Option<SigningAuthority>,
split_stake_account: KeypairEq,
seed: Option<String>,
lamports: u64,
fee_payer: Option<SigningAuthority>,
},
ShowStakeHistory {
use_lamports_unit: bool,
@@ -293,6 +329,7 @@ pub enum CliCommand {
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: Option<SigningAuthority>,
fee_payer: Option<SigningAuthority>,
},
WithdrawStake {
stake_account_pubkey: Pubkey,
@@ -331,11 +368,6 @@ pub enum CliCommand {
pubkey: Pubkey,
use_lamports_unit: bool,
},
Uptime {
pubkey: Pubkey,
aggregate: bool,
span: Option<u64>,
},
VoteAuthorize {
vote_account_pubkey: Pubkey,
new_authorized_pubkey: Pubkey,
@@ -367,7 +399,18 @@ pub enum CliCommand {
use_lamports_unit: bool,
},
TimeElapsed(Pubkey, Pubkey, DateTime<Utc>), // TimeElapsed(to, process_id, timestamp)
Witness(Pubkey, Pubkey), // Witness(to, process_id)
Transfer {
lamports: u64,
to: Pubkey,
from: Option<SigningAuthority>,
sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: Option<SigningAuthority>,
fee_payer: Option<SigningAuthority>,
},
Witness(Pubkey, Pubkey), // Witness(to, process_id)
}

#[derive(Debug, PartialEq)]
@@ -409,6 +452,7 @@ pub struct CliConfig {
pub json_rpc_url: String,
pub keypair: Keypair,
pub keypair_path: Option<String>,
pub derivation_path: Option<DerivationPath>,
pub rpc_client: Option<RpcClient>,
pub verbose: bool,
}
@@ -423,6 +467,22 @@ impl CliConfig {
pub fn default_json_rpc_url() -> String {
"http://127.0.0.1:8899".to_string()
}

pub(crate) fn pubkey(&self) -> Result<Pubkey, Box<dyn std::error::Error>> {
if let Some(path) = &self.keypair_path {
if path.starts_with("usb://") {
let (remote_wallet_info, mut derivation_path) =
RemoteWalletInfo::parse_path(path.to_string())?;
if let Some(derivation) = &self.derivation_path {
let derivation = derivation.clone();
derivation_path = derivation;
}
let ledger = get_ledger_from_info(remote_wallet_info)?;
return Ok(ledger.get_pubkey(&derivation_path)?);
}
}
Ok(self.keypair.pubkey())
}
}

impl Default for CliConfig {
@@ -435,6 +495,7 @@ impl Default for CliConfig {
json_rpc_url: Self::default_json_rpc_url(),
keypair: Keypair::new(),
keypair_path: Some(Self::default_keypair_path()),
derivation_path: None,
rpc_client: None,
verbose: false,
}
@@ -493,6 +554,7 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
("delegate-stake", Some(matches)) => parse_stake_delegate_stake(matches),
("withdraw-stake", Some(matches)) => parse_stake_withdraw_stake(matches),
("deactivate-stake", Some(matches)) => parse_stake_deactivate_stake(matches),
("split-stake", Some(matches)) => parse_split_stake(matches),
("stake-authorize-staker", Some(matches)) => {
parse_stake_authorize(matches, StakeAuthorize::Staker)
}
@@ -526,7 +588,6 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
parse_vote_authorize(matches, VoteAuthorize::Withdrawer)
}
("vote-account", Some(matches)) => parse_vote_get_account_command(matches),
("uptime", Some(matches)) => parse_vote_uptime_command(matches),
// Wallet Commands
("address", Some(_matches)) => Ok(CliCommandInfo {
command: CliCommand::Address,
@@ -677,6 +738,39 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
require_keypair: true,
})
}
("transfer", Some(matches)) => {
let lamports = required_lamports_from(matches, "amount", "unit")?;
let to = pubkey_of(&matches, "to").unwrap();
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let signers = pubkeys_sigs_of(&matches, SIGNER_ARG.name);
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let nonce_authority = SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
signers.as_deref(),
)?;
let fee_payer = SigningAuthority::new_from_matches(
&matches,
FEE_PAYER_ARG.name,
signers.as_deref(),
)?;
let from = SigningAuthority::new_from_matches(&matches, "from", signers.as_deref())?;
Ok(CliCommandInfo {
command: CliCommand::Transfer {
lamports,
to,
from,
sign_only,
signers,
blockhash_query,
nonce_account,
nonce_authority,
fee_payer,
},
require_keypair: true,
})
}
//
("", None) => {
eprintln!("{}", matches.usage());
@@ -708,12 +802,11 @@ fn check_account_for_multiple_fees(
) -> Result<(), Box<dyn error::Error>> {
let balance = rpc_client.retry_get_balance(account_pubkey, 5)?;
if let Some(lamports) = balance {
if lamports
>= messages
.iter()
.map(|message| fee_calculator.calculate_fee(message))
.sum()
{
let fee = messages
.iter()
.map(|message| fee_calculator.calculate_fee(message))
.sum();
if lamports != 0 && lamports >= fee {
return Ok(());
}
}
@@ -814,7 +907,7 @@ fn process_create_address_with_seed(
seed: &str,
program_id: &Pubkey,
) -> ProcessResult {
let config_pubkey = config.keypair.pubkey();
let config_pubkey = config.pubkey()?;
let from_pubkey = from_pubkey.unwrap_or(&config_pubkey);
let address = create_address_with_seed(from_pubkey, seed, program_id)?;
Ok(address.to_string())
@@ -827,12 +920,13 @@ fn process_airdrop(
lamports: u64,
use_lamports_unit: bool,
) -> ProcessResult {
let pubkey = config.pubkey()?;
println!(
"Requesting airdrop of {} from {}",
build_balance_message(lamports, use_lamports_unit, true),
faucet_addr
);
let previous_balance = match rpc_client.retry_get_balance(&config.keypair.pubkey(), 5)? {
let previous_balance = match rpc_client.retry_get_balance(&pubkey, 5)? {
Some(lamports) => lamports,
None => {
return Err(CliError::RpcRequestError(
@@ -842,10 +936,10 @@ fn process_airdrop(
}
};

request_and_confirm_airdrop(&rpc_client, faucet_addr, &config.keypair.pubkey(), lamports)?;
request_and_confirm_airdrop(&rpc_client, faucet_addr, &pubkey, lamports)?;

let current_balance = rpc_client
.retry_get_balance(&config.keypair.pubkey(), 5)?
.retry_get_balance(&pubkey, 5)?
.unwrap_or(previous_balance);

Ok(build_balance_message(
@@ -861,7 +955,7 @@ fn process_balance(
pubkey: &Option<Pubkey>,
use_lamports_unit: bool,
) -> ProcessResult {
let pubkey = pubkey.unwrap_or(config.keypair.pubkey());
let pubkey = pubkey.unwrap_or(config.pubkey()?);
let balance = rpc_client.retry_get_balance(&pubkey, 5)?;
match balance {
Some(lamports) => Ok(build_balance_message(lamports, use_lamports_unit, true)),
@@ -1204,6 +1298,77 @@ fn process_time_elapsed(
log_instruction_custom_error::<BudgetError>(result)
}

#[allow(clippy::too_many_arguments)]
fn process_transfer(
rpc_client: &RpcClient,
config: &CliConfig,
lamports: u64,
to: &Pubkey,
from: Option<&SigningAuthority>,
sign_only: bool,
signers: Option<&Vec<(Pubkey, Signature)>>,
blockhash_query: &BlockhashQuery,
nonce_account: Option<&Pubkey>,
nonce_authority: Option<&SigningAuthority>,
fee_payer: Option<&SigningAuthority>,
) -> ProcessResult {
let (from_pubkey, from) = from
.map(|f| (f.pubkey(), f.keypair()))
.unwrap_or((config.keypair.pubkey(), &config.keypair));

check_unique_pubkeys(
(&from_pubkey, "cli keypair".to_string()),
(to, "to".to_string()),
)?;

let (recent_blockhash, fee_calculator) =
blockhash_query.get_blockhash_fee_calculator(rpc_client)?;
let ixs = vec![system_instruction::transfer(&from.pubkey(), to, lamports)];

let (nonce_authority_pubkey, nonce_authority) = nonce_authority
.map(|authority| (authority.pubkey(), authority.keypair()))
.unwrap_or((config.keypair.pubkey(), &config.keypair));
let fee_payer = fee_payer.map(|fp| fp.keypair()).unwrap_or(&config.keypair);
let mut tx = if let Some(nonce_account) = &nonce_account {
Transaction::new_signed_with_nonce(
ixs,
Some(&fee_payer.pubkey()),
&[fee_payer, from, nonce_authority],
nonce_account,
&nonce_authority.pubkey(),
recent_blockhash,
)
} else {
Transaction::new_signed_with_payer(
ixs,
Some(&fee_payer.pubkey()),
&[fee_payer, from],
recent_blockhash,
)
};

if let Some(signers) = signers {
replace_signatures(&mut tx, &signers)?;
}

if sign_only {
return_signers(&tx)
} else {
if let Some(nonce_account) = &nonce_account {
let nonce_account = rpc_client.get_account(nonce_account)?;
check_nonce_account(&nonce_account, &nonce_authority_pubkey, &recent_blockhash)?;
}
check_account_for_fee(
rpc_client,
&tx.message.account_keys[0],
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<SystemError>(result)
}
}

fn process_witness(
rpc_client: &RpcClient,
config: &CliConfig,
@@ -1226,10 +1391,13 @@ fn process_witness(

pub fn process_command(config: &CliConfig) -> ProcessResult {
if config.verbose {
println_name_value("RPC URL:", &config.json_rpc_url);
if let Some(keypair_path) = &config.keypair_path {
println_name_value("Keypair:", keypair_path);
println_name_value("Keypair Path:", keypair_path);
if keypair_path.starts_with("usb://") {
println_name_value("Pubkey:", &format!("{:?}", config.pubkey()?));
}
}
println_name_value("RPC Endpoint:", &config.json_rpc_url);
}

let mut _rpc_client;
@@ -1244,7 +1412,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
match &config.command {
// Cluster Query Commands
// Get address of this client
CliCommand::Address => Ok(format!("{}", config.keypair.pubkey())),
CliCommand::Address => Ok(format!("{}", config.pubkey()?)),

// Return software version of solana-cli and cluster entrypoint node
CliCommand::Catchup { node_pubkey } => process_catchup(&rpc_client, node_pubkey),
@@ -1382,7 +1550,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
lockup,
*lamports,
),
// Deactivate stake account
CliCommand::DeactivateStake {
stake_account_pubkey,
ref stake_authority,
@@ -1391,6 +1558,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
blockhash_query,
nonce_account,
ref nonce_authority,
ref fee_payer,
} => process_deactivate_stake_account(
&rpc_client,
config,
@@ -1401,6 +1569,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
blockhash_query,
*nonce_account,
nonce_authority.as_ref(),
fee_payer.as_ref(),
),
CliCommand::DelegateStake {
stake_account_pubkey,
@@ -1412,6 +1581,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
blockhash_query,
nonce_account,
ref nonce_authority,
ref fee_payer,
} => process_delegate_stake(
&rpc_client,
config,
@@ -1424,6 +1594,34 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
blockhash_query,
*nonce_account,
nonce_authority.as_ref(),
fee_payer.as_ref(),
),
CliCommand::SplitStake {
stake_account_pubkey,
ref stake_authority,
sign_only,
ref signers,
blockhash_query,
nonce_account,
ref nonce_authority,
split_stake_account,
seed,
lamports,
ref fee_payer,
} => process_split_stake(
&rpc_client,
config,
&stake_account_pubkey,
stake_authority.as_ref(),
*sign_only,
signers,
blockhash_query,
*nonce_account,
nonce_authority.as_ref(),
split_stake_account,
seed,
*lamports,
fee_payer.as_ref(),
),
CliCommand::ShowStakeAccount {
pubkey: stake_account_pubkey,
@@ -1447,6 +1645,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
blockhash_query,
nonce_account,
ref nonce_authority,
ref fee_payer,
} => process_stake_authorize(
&rpc_client,
config,
@@ -1459,6 +1658,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
blockhash_query,
*nonce_account,
nonce_authority.as_ref(),
fee_payer.as_ref(),
),

CliCommand::WithdrawStake {
@@ -1572,11 +1772,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&new_identity_pubkey,
authorized_voter,
),
CliCommand::Uptime {
pubkey: vote_account_pubkey,
aggregate,
span,
} => process_uptime(&rpc_client, config, &vote_account_pubkey, *aggregate, *span),

// Wallet Commands

@@ -1661,6 +1856,29 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::TimeElapsed(to, pubkey, dt) => {
process_time_elapsed(&rpc_client, config, &to, &pubkey, *dt)
}
CliCommand::Transfer {
lamports,
to,
ref from,
sign_only,
ref signers,
ref blockhash_query,
ref nonce_account,
ref nonce_authority,
ref fee_payer,
} => process_transfer(
&rpc_client,
config,
*lamports,
to,
from.as_ref(),
*sign_only,
signers.as_ref(),
blockhash_query,
nonce_account.as_ref(),
nonce_authority.as_ref(),
fee_payer.as_ref(),
),
// Apply witness signature to contract
CliCommand::Witness(to, pubkey) => process_witness(&rpc_client, config, &to, &pubkey),
}
@@ -2033,6 +2251,48 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.help("Optional arbitrary timestamp to apply"),
),
)
.subcommand(
SubCommand::with_name("transfer")
.about("Transfer funds between system accounts")
.arg(
Arg::with_name("to")
.index(1)
.value_name("TO PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("The pubkey of recipient"),
)
.arg(
Arg::with_name("amount")
.index(2)
.value_name("AMOUNT")
.takes_value(true)
.validator(is_amount)
.required(true)
.help("The amount to send (default unit SOL)"),
)
.arg(
Arg::with_name("unit")
.index(3)
.value_name("UNIT")
.takes_value(true)
.possible_values(&["SOL", "lamports"])
.help("Specify unit to use for request"),
)
.arg(
Arg::with_name("from")
.long("from")
.takes_value(true)
.value_name("KEYPAIR or PUBKEY")
.validator(is_pubkey_or_keypair_or_ask_keyword)
.help("Source account of funds (if different from client local account)"),
)
.offline_args()
.arg(nonce_arg())
.arg(nonce_authority_arg())
.arg(fee_payer_arg()),
)
.subcommand(
SubCommand::with_name("account")
.about("Show the contents of an account")
@@ -2078,7 +2338,7 @@ mod tests {
account::Account,
nonce_state::{Meta as NonceMeta, NonceState},
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair_file},
signature::{keypair_from_seed, read_keypair_file, write_keypair_file},
system_program,
transaction::TransactionError,
};
@@ -2771,6 +3031,25 @@ mod tests {
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());

let stake_pubkey = Pubkey::new_rand();
let split_stake_account = Keypair::new();
config.command = CliCommand::SplitStake {
stake_account_pubkey: stake_pubkey,
stake_authority: None,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
split_stake_account: split_stake_account.into(),
seed: None,
lamports: 1234,
fee_payer: None,
};
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
@@ -2908,7 +3187,6 @@ mod tests {
};
assert!(process_command(&config).is_ok());

config.rpc_client = Some(RpcClient::new_mock("airdrop".to_string()));
config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt);
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
@@ -3047,4 +3325,163 @@ mod tests {
config.command = CliCommand::Deploy("bad/file/location.so".to_string());
assert!(process_command(&config).is_err());
}

#[test]
fn test_parse_transfer_subcommand() {
let test_commands = app("test", "desc", "version");

//Test Transfer Subcommand, lamports
let from_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
let from_pubkey = from_keypair.pubkey();
let from_string = from_pubkey.to_string();
let to_keypair = keypair_from_seed(&[1u8; 32]).unwrap();
let to_pubkey = to_keypair.pubkey();
let to_string = to_pubkey.to_string();
let test_transfer = test_commands
.clone()
.get_matches_from(vec!["test", "transfer", &to_string, "42", "lamports"]);
assert_eq!(
parse_command(&test_transfer).unwrap(),
CliCommandInfo {
command: CliCommand::Transfer {
lamports: 42,
to: to_pubkey,
from: None,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::All,
nonce_account: None,
nonce_authority: None,
fee_payer: None,
},
require_keypair: true,
}
);

//Test Transfer Subcommand, SOL
let test_transfer = test_commands
.clone()
.get_matches_from(vec!["test", "transfer", &to_string, "42"]);
assert_eq!(
parse_command(&test_transfer).unwrap(),
CliCommandInfo {
command: CliCommand::Transfer {
lamports: 42_000_000_000,
to: to_pubkey,
from: None,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::All,
nonce_account: None,
nonce_authority: None,
fee_payer: None,
},
require_keypair: true,
}
);

//Test Transfer Subcommand, offline sign
let blockhash = Hash::new(&[1u8; 32]);
let blockhash_string = blockhash.to_string();
let test_transfer = test_commands.clone().get_matches_from(vec![
"test",
"transfer",
&to_string,
"42",
"lamports",
"--blockhash",
&blockhash_string,
"--sign-only",
]);
assert_eq!(
parse_command(&test_transfer).unwrap(),
CliCommandInfo {
command: CliCommand::Transfer {
lamports: 42,
to: to_pubkey,
from: None,
sign_only: true,
signers: None,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
},
require_keypair: true,
}
);

//Test Transfer Subcommand, submit offline `from`
let from_sig = from_keypair.sign_message(&[0u8]);
let from_signer = format!("{}={}", from_pubkey, from_sig);
let test_transfer = test_commands.clone().get_matches_from(vec![
"test",
"transfer",
&to_string,
"42",
"lamports",
"--from",
&from_string,
"--fee-payer",
&from_string,
"--signer",
&from_signer,
"--blockhash",
&blockhash_string,
]);
assert_eq!(
parse_command(&test_transfer).unwrap(),
CliCommandInfo {
command: CliCommand::Transfer {
lamports: 42,
to: to_pubkey,
from: Some(from_pubkey.into()),
sign_only: false,
signers: Some(vec![(from_pubkey, from_sig)]),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: None,
nonce_authority: None,
fee_payer: Some(from_pubkey.into()),
},
require_keypair: true,
}
);

//Test Transfer Subcommand, with nonce
let nonce_address = Pubkey::new(&[1u8; 32]);
let nonce_address_string = nonce_address.to_string();
let nonce_authority = keypair_from_seed(&[2u8; 32]).unwrap();
let nonce_authority_file = make_tmp_path("nonce_authority_file");
write_keypair_file(&nonce_authority, &nonce_authority_file).unwrap();
let test_transfer = test_commands.clone().get_matches_from(vec![
"test",
"transfer",
&to_string,
"42",
"lamports",
"--blockhash",
&blockhash_string,
"--nonce",
&nonce_address_string,
"--nonce-authority",
&nonce_authority_file,
]);
assert_eq!(
parse_command(&test_transfer).unwrap(),
CliCommandInfo {
command: CliCommand::Transfer {
lamports: 42,
to: to_pubkey,
from: None,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: Some(nonce_address.into()),
nonce_authority: Some(read_keypair_file(&nonce_authority_file).unwrap().into()),
fee_payer: None,
},
require_keypair: true,
}
);
}
}
@@ -14,7 +14,7 @@ use solana_sdk::{
account_utils::StateMut,
clock::{self, Slot},
commitment_config::CommitmentConfig,
epoch_schedule::{Epoch, EpochSchedule},
epoch_schedule::Epoch,
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
@@ -321,20 +321,6 @@ fn new_spinner_progress_bar() -> ProgressBar {
progress_bar
}

/// Aggregate epoch credit stats and return (total credits, total slots, total epochs)
pub fn aggregate_epoch_credits(
epoch_credits: &[(Epoch, u64, u64)],
epoch_schedule: &EpochSchedule,
) -> (u64, u64, u64) {
epoch_credits
.iter()
.fold((0, 0, 0), |acc, (epoch, credits, prev_credits)| {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
(acc.0 + credits_earned, acc.1 + slots_in_epoch, acc.2 + 1)
})
}

pub fn process_catchup(rpc_client: &RpcClient, node_pubkey: &Pubkey) -> ProcessResult {
let cluster_nodes = rpc_client.get_cluster_nodes()?;

@@ -465,11 +451,11 @@ pub fn process_get_epoch_info(
let start_slot = epoch_info.absolute_slot - epoch_info.slot_index;
let end_slot = start_slot + epoch_info.slots_in_epoch;
println_name_value(
"Epoch slot range:",
"Epoch Slot Range:",
&format!("[{}..{})", start_slot, end_slot),
);
println_name_value(
"Epoch completed percent:",
"Epoch Completed Percent:",
&format!(
"{:>3.3}%",
epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100_f64
@@ -477,14 +463,14 @@ pub fn process_get_epoch_info(
);
let remaining_slots_in_epoch = epoch_info.slots_in_epoch - epoch_info.slot_index;
println_name_value(
"Epoch completed slots:",
"Epoch Completed Slots:",
&format!(
"{}/{} ({} remaining)",
epoch_info.slot_index, epoch_info.slots_in_epoch, remaining_slots_in_epoch
),
);
println_name_value(
"Epoch completed time:",
"Epoch Completed Time:",
&format!(
"{}/{} ({} remaining)",
slot_to_human_time(epoch_info.slot_index),
@@ -709,8 +695,8 @@ pub fn process_ping(
) -> ProcessResult {
let to = Keypair::new().pubkey();

println_name_value("Source account:", &config.keypair.pubkey().to_string());
println_name_value("Destination account:", &to.to_string());
println_name_value("Source Account:", &config.keypair.pubkey().to_string());
println_name_value("Destination Account:", &to.to_string());
println!();

let (signal_sender, signal_receiver) = std::sync::mpsc::channel();
@@ -900,7 +886,7 @@ pub fn process_show_stakes(
}

pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool) -> ProcessResult {
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let epoch_info = rpc_client.get_epoch_info()?;
let vote_accounts = rpc_client.get_vote_accounts()?;
let total_active_stake = vote_accounts
.current
@@ -949,7 +935,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
"Commission",
"Last Vote",
"Root Block",
"Uptime",
"Credits",
"Active Stake",
))
.bold()
@@ -957,7 +943,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)

fn print_vote_account(
vote_account: RpcVoteAccountInfo,
epoch_schedule: &EpochSchedule,
current_epoch: Epoch,
total_active_stake: f64,
use_lamports_unit: bool,
delinquent: bool,
@@ -970,17 +956,6 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
}
}

fn uptime(epoch_credits: Vec<(Epoch, u64, u64)>, epoch_schedule: &EpochSchedule) -> String {
let (total_credits, total_slots, _) =
aggregate_epoch_credits(&epoch_credits, &epoch_schedule);
if total_slots > 0 {
let total_uptime = 100_f64 * total_credits as f64 / total_slots as f64;
format!("{:.2}%", total_uptime)
} else {
"-".into()
}
}

println!(
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
if delinquent {
@@ -993,7 +968,15 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
vote_account.commission,
non_zero_or_dash(vote_account.last_vote),
non_zero_or_dash(vote_account.root_slot),
uptime(vote_account.epoch_credits, epoch_schedule),
vote_account
.epoch_credits
.iter()
.find_map(|(epoch, credits, _)| if *epoch == current_epoch {
Some(*credits)
} else {
None
})
.unwrap_or(0),
if vote_account.activated_stake > 0 {
format!(
"{} ({:.2}%)",
@@ -1009,7 +992,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
for vote_account in vote_accounts.current.into_iter() {
print_vote_account(
vote_account,
&epoch_schedule,
epoch_info.epoch,
total_active_stake,
use_lamports_unit,
false,
@@ -1018,7 +1001,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
for vote_account in vote_accounts.delinquent.into_iter() {
print_vote_account(
vote_account,
&epoch_schedule,
epoch_info.epoch,
total_active_stake,
use_lamports_unit,
true,
@@ -2,7 +2,8 @@ use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches
use console::style;

use solana_clap_utils::{
input_validators::is_url,
input_parsers::derivation_of,
input_validators::{is_derivation, is_url},
keypair::{
self, keypair_input, KeypairWithSource, ASK_SEED_PHRASE_ARG,
SKIP_SEED_PHRASE_VALIDATION_ARG,
@@ -24,21 +25,25 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
if let Some(config_file) = matches.value_of("config_file") {
let config = Config::load(config_file).unwrap_or_default();
if let Some(field) = subcommand_matches.value_of("specific_setting") {
let (value, default_value) = match field {
"url" => (config.url, CliConfig::default_json_rpc_url()),
"keypair" => (config.keypair_path, CliConfig::default_keypair_path()),
let (field_name, value, default_value) = match field {
"url" => ("RPC URL", config.url, CliConfig::default_json_rpc_url()),
"keypair" => (
"Key Path",
config.keypair_path,
CliConfig::default_keypair_path(),
),
_ => unreachable!(),
};
println_name_value_or(&format!("* {}:", field), &value, &default_value);
println_name_value_or(&format!("{}:", field_name), &value, &default_value);
} else {
println_name_value("Wallet Config:", config_file);
println_name_value("Config File:", config_file);
println_name_value_or(
"* url:",
"RPC URL:",
&config.url,
&CliConfig::default_json_rpc_url(),
);
println_name_value_or(
"* keypair:",
"Keypair Path:",
&config.keypair_path,
&CliConfig::default_keypair_path(),
);
@@ -61,9 +66,9 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
config.keypair_path = keypair.to_string();
}
config.save(config_file)?;
println_name_value("Wallet Config Updated:", config_file);
println_name_value("* url:", &config.url);
println_name_value("* keypair:", &config.keypair_path);
println_name_value("Config File:", config_file);
println_name_value("RPC URL:", &config.url);
println_name_value("Keypair Path:", &config.keypair_path);
} else {
println!(
"{} Either provide the `--config` arg or ensure home directory exists to use the default config location",
@@ -102,7 +107,7 @@ pub fn parse_args(matches: &ArgMatches<'_>) -> Result<CliConfig, Box<dyn error::
let (keypair, keypair_path) = if require_keypair {
let KeypairWithSource { keypair, source } = keypair_input(&matches, "keypair")?;
match source {
keypair::Source::File => (
keypair::Source::Path => (
keypair,
Some(matches.value_of("keypair").unwrap().to_string()),
),
@@ -122,12 +127,16 @@ pub fn parse_args(matches: &ArgMatches<'_>) -> Result<CliConfig, Box<dyn error::
default_keypair_path
};

let keypair = read_keypair_file(&keypair_path).or_else(|err| {
Err(CliError::BadParameter(format!(
"{}: Unable to open keypair file: {}",
err, keypair_path
)))
})?;
let keypair = if keypair_path.starts_with("usb://") {
keypair
} else {
read_keypair_file(&keypair_path).or_else(|err| {
Err(CliError::BadParameter(format!(
"{}: Unable to open keypair file: {}",
err, keypair_path
)))
})?
};

(keypair, Some(keypair_path))
}
@@ -142,6 +151,7 @@ pub fn parse_args(matches: &ArgMatches<'_>) -> Result<CliConfig, Box<dyn error::
json_rpc_url,
keypair,
keypair_path,
derivation_path: derivation_of(matches, "derivation_path"),
rpc_client: None,
verbose: matches.is_present("verbose"),
})
@@ -185,7 +195,15 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.value_name("PATH")
.global(true)
.takes_value(true)
.help("/path/to/id.json"),
.help("/path/to/id.json or usb://remote/wallet/path"),
)
.arg(
Arg::with_name("derivation_path")
.long("derivation-path")
.value_name("ACCOUNT or ACCOUNT/CHANGE")
.takes_value(true)
.validator(is_derivation)
.help("Derivation path to use: m/44'/501'/ACCOUNT'/CHANGE'; default key is device base pubkey: m/44'/501'/0'")
)
.arg(
Arg::with_name("verbose")
@@ -540,11 +540,11 @@ pub fn process_show_nonce_account(
}
let print_account = |data: Option<(Meta, Hash)>| {
println!(
"balance: {}",
"Balance: {}",
build_balance_message(nonce_account.lamports, use_lamports_unit, true)
);
println!(
"minimum balance required: {}",
"Minimum Balance Required: {}",
build_balance_message(
rpc_client.get_minimum_balance_for_rent_exemption(NonceState::size())?,
use_lamports_unit,
@@ -553,12 +553,12 @@ pub fn process_show_nonce_account(
);
match data {
Some((meta, hash)) => {
println!("nonce: {}", hash);
println!("authority: {}", meta.nonce_authority);
println!("Nonce: {}", hash);
println!("Authority: {}", meta.nonce_authority);
}
None => {
println!("nonce: uninitialized");
println!("authority: uninitialized");
println!("Nonce: uninitialized");
println!("Authority: uninitialized");
}
}
Ok("".to_string())
@@ -1,11 +1,13 @@
use clap::{App, Arg, ArgMatches};
use serde_json::Value;
use solana_clap_utils::{
input_parsers::value_of,
input_validators::{is_hash, is_pubkey_sig},
ArgConstant,
};
use solana_client::rpc_client::RpcClient;
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash};
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, signature::Signature};
use std::str::FromStr;

pub const BLOCKHASH_ARG: ArgConstant<'static> = ArgConstant {
name: "blockhash",
@@ -112,6 +114,23 @@ impl OfflineArgs for App<'_, '_> {
}
}

pub fn parse_sign_only_reply_string(reply: &str) -> (Hash, Vec<(Pubkey, Signature)>) {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
(blockhash, signers)
}

#[cfg(test)]
mod tests {
use super::*;
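For context, a minimal sketch of how the reply parser introduced above is meant to be consumed by the online half of an offline-signing flow (this block is illustrative only and not part of the diff; the `sign_only_output` value is assumed to be the JSON printed by a `--sign-only` invocation, and error handling is elided just as in the helper itself):

```rust
use solana_cli::offline::parse_sign_only_reply_string;

fn resubmit_from_offline_reply(sign_only_output: &str) {
    // `sign_only_output` is assumed to look like:
    // {"blockhash":"...","signers":["<pubkey>=<signature>", ...]}
    let (blockhash, signers) = parse_sign_only_reply_string(sign_only_output);
    // The blockhash seeds BlockhashQuery::FeeCalculator(..) and the (Pubkey, Signature)
    // pairs become the `signers` field of the online CliCommand, mirroring the
    // pay/stake/transfer tests later in this changeset.
    println!("blockhash: {}, presigned signers: {}", blockhash, signers.len());
}
```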
694 cli/src/stake.rs
File diff suppressed because it is too large
@@ -251,7 +251,7 @@ pub fn process_show_storage_account(
CliError::RpcRequestError(format!("Unable to deserialize storage account: {:?}", err))
})?;
println!("{:#?}", storage_contract);
println!("account lamports: {}", account.lamports);
println!("Account Lamports: {}", account.lamports);
Ok("".to_string())
}
@@ -22,8 +22,8 @@ use solana_sdk::{
signature::{Keypair, KeypairUtil},
transaction::Transaction,
};

use std::error;
use titlecase::titlecase;

pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
@@ -390,9 +390,12 @@ pub fn process_get_validator_info(rpc_client: &RpcClient, pubkey: Option<Pubkey>
parse_validator_info(&validator_info_pubkey, &validator_info_account)?;
println!();
println_name_value("Validator Identity Pubkey:", &validator_pubkey.to_string());
println_name_value(" info pubkey:", &validator_info_pubkey.to_string());
println_name_value(" Info Pubkey:", &validator_info_pubkey.to_string());
for (key, value) in validator_info.iter() {
println_name_value(&format!(" {}:", key), &value.as_str().unwrap_or("?"));
println_name_value(
&format!(" {}:", titlecase(key)),
&value.as_str().unwrap_or("?"),
);
}
}
147 cli/src/vote.rs
@@ -1,10 +1,6 @@
use crate::{
cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult,
},
cluster_query::aggregate_epoch_credits,
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*};
@@ -176,31 +172,6 @@ impl VoteSubCommands for App<'_, '_> {
.help("Display balance in lamports instead of SOL"),
),
)
.subcommand(
SubCommand::with_name("uptime")
.about("Show the uptime of a validator, based on epoch voting history")
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
.value_name("VOTE ACCOUNT PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("Vote account pubkey"),
)
.arg(
Arg::with_name("span")
.long("span")
.value_name("NUM OF EPOCHS")
.takes_value(true)
.help("Number of recent epochs to examine"),
)
.arg(
Arg::with_name("aggregate")
.long("aggregate")
.help("Aggregate uptime data across span"),
),
)
}
}

@@ -271,24 +242,6 @@ pub fn parse_vote_get_account_command(
})
}

pub fn parse_vote_uptime_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
let aggregate = matches.is_present("aggregate");
let span = if matches.is_present("span") {
Some(value_t_or_exit!(matches, "span", u64))
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::Uptime {
pubkey: vote_account_pubkey,
aggregate,
span,
},
require_keypair: false,
})
}

pub fn process_create_vote_account(
rpc_client: &RpcClient,
config: &CliConfig,
@@ -476,25 +429,25 @@ pub fn process_show_vote_account(
let epoch_schedule = rpc_client.get_epoch_schedule()?;

println!(
"account balance: {}",
"Account Balance: {}",
build_balance_message(vote_account.lamports, use_lamports_unit, true)
);
println!("validator identity: {}", vote_state.node_pubkey);
println!("authorized voter: {}", vote_state.authorized_voter);
println!("Validator Identity: {}", vote_state.node_pubkey);
println!("Authorized Voter: {}", vote_state.authorized_voter);
println!(
"authorized withdrawer: {}",
"Authorized Withdrawer: {}",
vote_state.authorized_withdrawer
);
println!("credits: {}", vote_state.credits());
println!("commission: {}%", vote_state.commission);
println!("Credits: {}", vote_state.credits());
println!("Commission: {}%", vote_state.commission);
println!(
"root slot: {}",
"Root Slot: {}",
match vote_state.root_slot {
Some(slot) => slot.to_string(),
None => "~".to_string(),
}
);
println!("recent timestamp: {:?}", vote_state.last_timestamp);
println!("Recent Timestamp: {:?}", vote_state.last_timestamp);
if !vote_state.votes.is_empty() {
println!("recent votes:");
for vote in &vote_state.votes {
@@ -504,7 +457,7 @@ pub fn process_show_vote_account(
);
}

println!("epoch voting history:");
println!("Epoch Voting History:");
for (epoch, credits, prev_credits) in vote_state.epoch_credits() {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
@@ -517,60 +470,6 @@ pub fn process_show_vote_account(
Ok("".to_string())
}

pub fn process_uptime(
rpc_client: &RpcClient,
_config: &CliConfig,
vote_account_pubkey: &Pubkey,
aggregate: bool,
span: Option<u64>,
) -> ProcessResult {
let (_vote_account, vote_state) = get_vote_account(rpc_client, vote_account_pubkey)?;

let epoch_schedule = rpc_client.get_epoch_schedule()?;

println!("validator identity: {}", vote_state.node_pubkey);
println!("authorized voter: {}", vote_state.authorized_voter);
if !vote_state.votes.is_empty() {
println!("uptime:");

let epoch_credits: Vec<(u64, u64, u64)> = if let Some(x) = span {
vote_state
.epoch_credits()
.iter()
.rev()
.take(x as usize)
.cloned()
.collect()
} else {
vote_state.epoch_credits().iter().rev().cloned().collect()
};

if aggregate {
let (total_credits, total_slots, epochs) =
aggregate_epoch_credits(&epoch_credits, &epoch_schedule);
if total_slots > 0 {
let total_uptime = 100_f64 * total_credits as f64 / total_slots as f64;
println!("{:.2}% over {} epochs", total_uptime, epochs);
} else {
println!("Insufficient voting history available");
}
} else {
for (epoch, credits, prev_credits) in epoch_credits {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch);
let uptime = credits_earned as f64 / slots_in_epoch as f64;
println!("- epoch: {} {:.2}% uptime", epoch, uptime * 100_f64,);
}
}
if let Some(x) = span {
if x > vote_state.epoch_credits().len() as u64 {
println!("(span longer than available epochs)");
}
}
}
Ok("".to_string())
}

#[cfg(test)]
mod tests {
use super::*;
@@ -741,27 +640,5 @@ mod tests {
require_keypair: true
}
);

// Test Uptime Subcommand
let pubkey = Pubkey::new_rand();
let matches = test_commands.clone().get_matches_from(vec![
"test",
"uptime",
&pubkey.to_string(),
"--span",
"4",
"--aggregate",
]);
assert_eq!(
parse_command(&matches).unwrap(),
CliCommandInfo {
command: CliCommand::Uptime {
pubkey,
aggregate: true,
span: Some(4)
},
require_keypair: false
}
);
}
}
@@ -2,20 +2,18 @@ use chrono::prelude::*;
use serde_json::Value;
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig, PayCommand},
offline::BlockhashQuery,
offline::{parse_sign_only_reply_string, BlockhashQuery},
};
use solana_client::rpc_client::RpcClient;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
fee_calculator::FeeCalculator,
hash::Hash,
nonce_state::NonceState,
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair, Keypair, KeypairUtil, Signature},
signature::{read_keypair_file, write_keypair, Keypair, KeypairUtil},
};
use std::fs::remove_dir_all;
use std::str::FromStr;
use std::sync::mpsc::channel;

#[cfg(test)]
@@ -305,24 +303,12 @@ fn test_offline_pay_tx() {
check_balance(50, &rpc_client, &config_online.keypair.pubkey());
check_balance(0, &rpc_client, &bob_pubkey);

let object: Value = serde_json::from_str(&sig_response).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers: Vec<_> = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();

let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
config_online.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
signers: Some(signers),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_str.parse::<Hash>().unwrap()),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
..PayCommand::default()
});
process_command(&config_online).unwrap();
@@ -1,26 +1,25 @@
use serde_json::Value;
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
offline::BlockhashQuery,
offline::{parse_sign_only_reply_string, BlockhashQuery},
};
use solana_client::rpc_client::RpcClient;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
fee_calculator::FeeCalculator,
hash::Hash,
nonce_state::NonceState,
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair, Keypair, KeypairUtil, Signature},
signature::{keypair_from_seed, read_keypair_file, write_keypair, Keypair, KeypairUtil},
system_instruction::create_address_with_seed,
};
use solana_stake_program::stake_state::{Lockup, StakeAuthorize, StakeState};
use std::fs::remove_dir_all;
use std::str::FromStr;
use std::sync::mpsc::channel;

#[cfg(test)]
use solana_core::validator::new_validator_for_tests;
use solana_core::validator::{
new_validator_for_tests, new_validator_for_tests_ex, new_validator_for_tests_with_vote_pubkey,
};
use std::thread::sleep;
use std::time::Duration;

@@ -44,28 +43,89 @@ fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
});
}

fn parse_sign_only_reply_string(reply: &str) -> (Hash, Vec<(Pubkey, Signature)>) {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
(blockhash, signers)
#[test]
fn test_stake_delegation_force() {
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();

let rpc_client = RpcClient::new_socket(leader_data.rpc);

let mut config = CliConfig::default();
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());

request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
.unwrap();

// Create vote account
let vote_keypair = Keypair::new();
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&vote_keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateVoteAccount {
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
seed: None,
node_pubkey: config.keypair.pubkey(),
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
};
process_command(&config).unwrap();

// Create stake account
let stake_keypair = Keypair::new();
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&stake_keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateStakeAccount {
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
seed: None,
staker: None,
withdrawer: None,
lockup: Lockup::default(),
lamports: 50_000,
};
process_command(&config).unwrap();

// Delegate stake fails (vote account had never voted)
config.command = CliCommand::DelegateStake {
stake_account_pubkey: stake_keypair.pubkey(),
vote_account_pubkey: vote_keypair.pubkey(),
stake_authority: None,
force: false,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config).unwrap_err();

// But if we force it, it works anyway!
config.command = CliCommand::DelegateStake {
stake_account_pubkey: stake_keypair.pubkey(),
vote_account_pubkey: vote_keypair.pubkey(),
stake_authority: None,
force: true,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config).unwrap();

server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}

#[test]
fn test_seed_stake_delegation_and_deactivation() {
solana_logger::setup();

let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (server, leader_data, alice, ledger_path, vote_pubkey) =
new_validator_for_tests_with_vote_pubkey();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
@@ -79,12 +139,6 @@ fn test_seed_stake_delegation_and_deactivation() {
let (validator_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_validator.keypair, tmp_file.as_file_mut()).unwrap();

let mut config_vote = CliConfig::default();
config_vote.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_vote.keypair, tmp_file.as_file_mut()).unwrap();

let mut config_stake = CliConfig::default();
config_stake.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
@@ -98,17 +152,6 @@ fn test_seed_stake_delegation_and_deactivation() {
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.keypair.pubkey());

// Create vote account
config_validator.command = CliCommand::CreateVoteAccount {
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
seed: None,
node_pubkey: config_validator.keypair.pubkey(),
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
};
process_command(&config_validator).unwrap();

let stake_address = create_address_with_seed(
&config_validator.keypair.pubkey(),
"hi there",
@@ -131,14 +174,15 @@ fn test_seed_stake_delegation_and_deactivation() {
// Delegate stake
config_validator.command = CliCommand::DelegateStake {
stake_account_pubkey: stake_address,
vote_account_pubkey: config_vote.keypair.pubkey(),
vote_account_pubkey: vote_pubkey,
stake_authority: None,
force: true,
force: false,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config_validator).unwrap();

@@ -151,6 +195,7 @@ fn test_seed_stake_delegation_and_deactivation() {
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config_validator).unwrap();

@@ -162,7 +207,8 @@ fn test_seed_stake_delegation_and_deactivation() {
fn test_stake_delegation_and_deactivation() {
solana_logger::setup();

let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (server, leader_data, alice, ledger_path, vote_pubkey) =
new_validator_for_tests_with_vote_pubkey();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
@@ -173,12 +219,6 @@ fn test_stake_delegation_and_deactivation() {
config_validator.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());

let mut config_vote = CliConfig::default();
config_vote.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_vote.keypair, tmp_file.as_file_mut()).unwrap();

let mut config_stake = CliConfig::default();
config_stake.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
@@ -194,17 +234,6 @@ fn test_stake_delegation_and_deactivation() {
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.keypair.pubkey());

// Create vote account
config_validator.command = CliCommand::CreateVoteAccount {
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
seed: None,
node_pubkey: config_validator.keypair.pubkey(),
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
};
process_command(&config_validator).unwrap();

// Create stake account
config_validator.command = CliCommand::CreateStakeAccount {
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
@@ -219,14 +248,15 @@ fn test_stake_delegation_and_deactivation() {
// Delegate stake
config_validator.command = CliCommand::DelegateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
vote_account_pubkey: config_vote.keypair.pubkey(),
vote_account_pubkey: vote_pubkey,
stake_authority: None,
force: true,
force: false,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config_validator).unwrap();

@@ -239,6 +269,7 @@ fn test_stake_delegation_and_deactivation() {
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config_validator).unwrap();

@@ -250,7 +281,8 @@ fn test_stake_delegation_and_deactivation() {
fn test_offline_stake_delegation_and_deactivation() {
solana_logger::setup();

let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (server, leader_data, alice, ledger_path, vote_pubkey) =
new_validator_for_tests_with_vote_pubkey();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
@@ -265,18 +297,18 @@ fn test_offline_stake_delegation_and_deactivation() {
config_payer.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());

let mut config_vote = CliConfig::default();
config_vote.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_vote.keypair, tmp_file.as_file_mut()).unwrap();

let mut config_stake = CliConfig::default();
config_stake.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_stake.keypair, tmp_file.as_file_mut()).unwrap();

let mut config_offline = CliConfig::default();
config_offline.json_rpc_url = String::default();
config_offline.command = CliCommand::ClusterVersion;
// Verfiy that we cannot reach the cluster
process_command(&config_offline).unwrap_err();

request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
@@ -286,22 +318,20 @@ fn test_offline_stake_delegation_and_deactivation() {
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.keypair.pubkey());

// Create vote account
config_validator.command = CliCommand::CreateVoteAccount {
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
seed: None,
node_pubkey: config_validator.keypair.pubkey(),
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
};
process_command(&config_validator).unwrap();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&config_offline.keypair.pubkey(),
100_000,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_offline.keypair.pubkey());

// Create stake account
config_validator.command = CliCommand::CreateStakeAccount {
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
seed: None,
staker: None,
staker: Some(config_offline.keypair.pubkey().into()),
withdrawer: None,
lockup: Lockup::default(),
lamports: 50_000,
@@ -310,37 +340,37 @@ fn test_offline_stake_delegation_and_deactivation() {

// Delegate stake offline
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
config_validator.command = CliCommand::DelegateStake {
config_offline.command = CliCommand::DelegateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
vote_account_pubkey: config_vote.keypair.pubkey(),
vote_account_pubkey: vote_pubkey,
stake_authority: None,
force: true,
force: false,
sign_only: true,
signers: None,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
let sig_response = process_command(&config_validator).unwrap();
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);

// Delegate stake online
config_payer.command = CliCommand::DelegateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
vote_account_pubkey: config_vote.keypair.pubkey(),
stake_authority: None,
force: true,
vote_account_pubkey: vote_pubkey,
stake_authority: Some(config_offline.keypair.pubkey().into()),
force: false,
sign_only: false,
signers: Some(signers),
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
nonce_account: None,
nonce_authority: None,
fee_payer: Some(config_offline.keypair.pubkey().into()),
};
process_command(&config_payer).unwrap();

// Deactivate stake offline
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
config_validator.command = CliCommand::DeactivateStake {
config_offline.command = CliCommand::DeactivateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
stake_authority: None,
sign_only: true,
@@ -348,19 +378,19 @@ fn test_offline_stake_delegation_and_deactivation() {
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
let sig_response = process_command(&config_validator).unwrap();
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);

// Deactivate stake online
config_payer.command = CliCommand::DeactivateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
stake_authority: None,
stake_authority: Some(config_offline.keypair.pubkey().into()),
sign_only: false,
signers: Some(signers),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: None,
nonce_authority: None,
fee_payer: Some(config_offline.keypair.pubkey().into()),
};
process_command(&config_payer).unwrap();

@@ -372,7 +402,8 @@ fn test_offline_stake_delegation_and_deactivation() {
fn test_nonced_stake_delegation_and_deactivation() {
solana_logger::setup();

let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (server, leader_data, alice, ledger_path, vote_pubkey) =
new_validator_for_tests_with_vote_pubkey();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
@@ -389,20 +420,6 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
|
||||
.unwrap();
|
||||
|
||||
// Create vote account
|
||||
let vote_keypair = Keypair::new();
|
||||
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&vote_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
config.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
|
||||
seed: None,
|
||||
node_pubkey: config.keypair.pubkey(),
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer: None,
|
||||
commission: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
// Create stake account
|
||||
let stake_keypair = Keypair::new();
|
||||
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
@@ -440,14 +457,15 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
// Delegate stake
|
||||
config.command = CliCommand::DelegateStake {
|
||||
stake_account_pubkey: stake_keypair.pubkey(),
|
||||
vote_account_pubkey: vote_keypair.pubkey(),
|
||||
vote_account_pubkey: vote_pubkey,
|
||||
stake_authority: None,
|
||||
force: true,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: None,
|
||||
fee_payer: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
@@ -469,6 +487,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: Some(config_keypair.into()),
|
||||
fee_payer: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
@@ -493,6 +512,20 @@ fn test_stake_authorize() {
|
||||
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
|
||||
.unwrap();
|
||||
|
||||
let mut config_offline = CliConfig::default();
|
||||
config_offline.json_rpc_url = String::default();
|
||||
config_offline.command = CliCommand::ClusterVersion;
|
||||
// Verify that we cannot reach the cluster
|
||||
process_command(&config_offline).unwrap_err();
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&faucet_addr,
|
||||
&config_offline.keypair.pubkey(),
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Create stake account, identity is authority
|
||||
let stake_keypair = Keypair::new();
|
||||
let stake_account_pubkey = stake_keypair.pubkey();
|
||||
@@ -523,6 +556,7 @@ fn test_stake_authorize() {
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
fee_payer: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
@@ -534,10 +568,9 @@ fn test_stake_authorize() {
|
||||
assert_eq!(current_authority, online_authority_pubkey);
|
||||
|
||||
// Assign new offline stake authority
|
||||
let offline_authority = Keypair::new();
|
||||
let offline_authority_pubkey = offline_authority.pubkey();
|
||||
let offline_authority_pubkey = config_offline.keypair.pubkey();
|
||||
let (offline_authority_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&offline_authority, tmp_file.as_file_mut()).unwrap();
|
||||
write_keypair(&config_offline.keypair, tmp_file.as_file_mut()).unwrap();
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: offline_authority_pubkey,
|
||||
@@ -548,6 +581,7 @@ fn test_stake_authorize() {
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
fee_payer: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
@@ -564,7 +598,7 @@ fn test_stake_authorize() {
|
||||
let (nonced_authority_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&nonced_authority, tmp_file.as_file_mut()).unwrap();
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
config_offline.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: nonced_authority_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
@@ -574,8 +608,9 @@ fn test_stake_authorize() {
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
fee_payer: None,
|
||||
};
|
||||
let sign_reply = process_command(&config).unwrap();
|
||||
let sign_reply = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -587,6 +622,7 @@ fn test_stake_authorize() {
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
fee_payer: Some(offline_authority_pubkey.into()),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
@@ -607,7 +643,7 @@ fn test_stake_authorize() {
|
||||
config.command = CliCommand::CreateNonceAccount {
|
||||
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
|
||||
seed: None,
|
||||
nonce_authority: Some(config.keypair.pubkey()),
|
||||
nonce_authority: Some(config_offline.keypair.pubkey()),
|
||||
lamports: minimum_nonce_balance,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
@@ -625,7 +661,7 @@ fn test_stake_authorize() {
|
||||
let online_authority_pubkey = online_authority.pubkey();
|
||||
let (_online_authority_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&online_authority, tmp_file.as_file_mut()).unwrap();
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
config_offline.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: online_authority_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
@@ -635,8 +671,9 @@ fn test_stake_authorize() {
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: None,
|
||||
fee_payer: None,
|
||||
};
|
||||
let sign_reply = process_command(&config).unwrap();
|
||||
let sign_reply = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
|
||||
assert_eq!(blockhash, nonce_hash);
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
@@ -648,7 +685,8 @@ fn test_stake_authorize() {
|
||||
signers: Some(signers),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: None,
|
||||
nonce_authority: Some(offline_authority_pubkey.into()),
|
||||
fee_payer: Some(offline_authority_pubkey.into()),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
@@ -665,6 +703,272 @@ fn test_stake_authorize() {
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
assert_ne!(nonce_hash, new_nonce_hash);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stake_authorize_with_fee_payer() {
|
||||
solana_logger::setup();
|
||||
const SIG_FEE: u64 = 42;
|
||||
|
||||
let (server, leader_data, alice, ledger_path, _voter) =
|
||||
new_validator_for_tests_ex(SIG_FEE, 42_000);
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let mut config = CliConfig::default();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let mut config_payer = CliConfig::default();
|
||||
config_payer.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
let payer_pubkey = config_payer.keypair.pubkey();
|
||||
let (payer_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&config_payer.keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
||||
let mut config_offline = CliConfig::default();
|
||||
let offline_pubkey = config_offline.keypair.pubkey();
|
||||
let (_offline_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&config_offline.keypair, tmp_file.as_file_mut()).unwrap();
|
||||
// Verify we're offline
|
||||
config_offline.command = CliCommand::ClusterVersion;
|
||||
process_command(&config_offline).unwrap_err();
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
|
||||
.unwrap();
|
||||
check_balance(100_000, &rpc_client, &config.keypair.pubkey());
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &payer_pubkey, 100_000).unwrap();
|
||||
check_balance(100_000, &rpc_client, &payer_pubkey);
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
|
||||
check_balance(100_000, &rpc_client, &offline_pubkey);
|
||||
|
||||
// Create stake account, identity is authority
|
||||
let stake_keypair = Keypair::new();
|
||||
let stake_account_pubkey = stake_keypair.pubkey();
|
||||
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&stake_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
config.command = CliCommand::CreateStakeAccount {
|
||||
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
lockup: Lockup::default(),
|
||||
lamports: 50_000,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config` balance should be 50,000 - 1 stake account sig - 1 fee sig
|
||||
check_balance(
|
||||
50_000 - SIG_FEE - SIG_FEE,
|
||||
&rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
);
|
||||
|
||||
// Assign authority with separate fee payer
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: offline_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: None,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
fee_payer: Some(read_keypair_file(&payer_keypair_file).unwrap().into()),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config` balance has not changed, despite submitting the TX
|
||||
check_balance(
|
||||
50_000 - SIG_FEE - SIG_FEE,
|
||||
&rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
);
|
||||
// `config_payer` however has paid `config`'s authority sig
|
||||
// and `config_payer`'s fee sig
|
||||
check_balance(
|
||||
100_000 - SIG_FEE - SIG_FEE,
|
||||
&rpc_client,
|
||||
&config_payer.keypair.pubkey(),
|
||||
);
|
||||
|
||||
// Assign authority with offline fee payer
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
|
||||
config_offline.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: payer_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: None,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
fee_payer: None,
|
||||
};
|
||||
let sign_reply = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorized_pubkey: payer_pubkey,
|
||||
stake_authorize: StakeAuthorize::Staker,
|
||||
authority: Some(offline_pubkey.into()),
|
||||
sign_only: false,
|
||||
signers: Some(signers),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
fee_payer: Some(offline_pubkey.into()),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config`'s balance again has not changed
|
||||
check_balance(
|
||||
50_000 - SIG_FEE - SIG_FEE,
|
||||
&rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
);
|
||||
// `config_offline` however has paid 1 sig due to being both authority
|
||||
// and fee payer
|
||||
check_balance(
|
||||
100_000 - SIG_FEE,
|
||||
&rpc_client,
|
||||
&config_offline.keypair.pubkey(),
|
||||
);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stake_split() {
|
||||
solana_logger::setup();
|
||||
|
||||
let (server, leader_data, alice, ledger_path, _voter) = new_validator_for_tests_ex(1, 42_000);
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let mut config = CliConfig::default();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let mut config_offline = CliConfig::default();
|
||||
let offline_pubkey = config_offline.keypair.pubkey();
|
||||
let (_offline_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&config_offline.keypair, tmp_file.as_file_mut()).unwrap();
|
||||
// Verify we're offline
|
||||
config_offline.command = CliCommand::ClusterVersion;
|
||||
process_command(&config_offline).unwrap_err();
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 500_000)
|
||||
.unwrap();
|
||||
check_balance(500_000, &rpc_client, &config.keypair.pubkey());
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
|
||||
check_balance(100_000, &rpc_client, &offline_pubkey);
|
||||
|
||||
// Create stake account, identity is authority
|
||||
let minimum_stake_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(std::mem::size_of::<StakeState>())
|
||||
.unwrap();
|
||||
println!("stake min: {}", minimum_stake_balance);
|
||||
let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
|
||||
let stake_account_pubkey = stake_keypair.pubkey();
|
||||
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&stake_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
config.command = CliCommand::CreateStakeAccount {
|
||||
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
|
||||
seed: None,
|
||||
staker: Some(offline_pubkey),
|
||||
withdrawer: Some(offline_pubkey),
|
||||
lockup: Lockup::default(),
|
||||
lamports: 10 * minimum_stake_balance,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_balance(
|
||||
10 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&stake_account_pubkey,
|
||||
);
|
||||
|
||||
// Create nonce account
|
||||
let minimum_nonce_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(NonceState::size())
|
||||
.unwrap();
|
||||
println!("nonce min: {}", minimum_nonce_balance);
|
||||
let nonce_account = keypair_from_seed(&[1u8; 32]).unwrap();
|
||||
let nonce_account_pubkey = nonce_account.pubkey();
|
||||
let (nonce_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&nonce_account, tmp_file.as_file_mut()).unwrap();
|
||||
config.command = CliCommand::CreateNonceAccount {
|
||||
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
|
||||
seed: None,
|
||||
nonce_authority: Some(offline_pubkey),
|
||||
lamports: minimum_nonce_balance,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_balance(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account_pubkey).unwrap();
|
||||
let nonce_state: NonceState = account.state().unwrap();
|
||||
let nonce_hash = match nonce_state {
|
||||
NonceState::Initialized(_meta, hash) => hash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
|
||||
// Nonced offline split
|
||||
let split_account = keypair_from_seed(&[2u8; 32]).unwrap();
|
||||
let (split_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&split_account, tmp_file.as_file_mut()).unwrap();
|
||||
check_balance(0, &rpc_client, &split_account.pubkey());
|
||||
config_offline.command = CliCommand::SplitStake {
|
||||
stake_account_pubkey: stake_account_pubkey,
|
||||
stake_authority: None,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
nonce_account: Some(nonce_account_pubkey.into()),
|
||||
nonce_authority: None,
|
||||
split_stake_account: read_keypair_file(&split_keypair_file).unwrap().into(),
|
||||
seed: None,
|
||||
lamports: 2 * minimum_stake_balance,
|
||||
fee_payer: None,
|
||||
};
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
|
||||
config.command = CliCommand::SplitStake {
|
||||
stake_account_pubkey: stake_account_pubkey,
|
||||
stake_authority: Some(offline_pubkey.into()),
|
||||
sign_only: false,
|
||||
signers: Some(signers),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
nonce_account: Some(nonce_account_pubkey.into()),
|
||||
nonce_authority: Some(offline_pubkey.into()),
|
||||
split_stake_account: read_keypair_file(&split_keypair_file).unwrap().into(),
|
||||
seed: None,
|
||||
lamports: 2 * minimum_stake_balance,
|
||||
fee_payer: Some(offline_pubkey.into()),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_balance(
|
||||
8 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&stake_account_pubkey,
|
||||
);
|
||||
check_balance(
|
||||
2 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&split_account.pubkey(),
|
||||
);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
215
cli/tests/transfer.rs
Normal file
@@ -0,0 +1,215 @@
|
||||
use solana_cli::{
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
offline::{parse_sign_only_reply_string, BlockhashQuery},
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
fee_calculator::FeeCalculator,
|
||||
nonce_state::NonceState,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, read_keypair_file, write_keypair, KeypairUtil},
|
||||
};
|
||||
use std::fs::remove_dir_all;
|
||||
use std::sync::mpsc::channel;
|
||||
|
||||
#[cfg(test)]
|
||||
use solana_core::validator::new_validator_for_tests_ex;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use tempfile::NamedTempFile;
|
||||
|
||||
fn make_tmp_file() -> (String, NamedTempFile) {
|
||||
let tmp_file = NamedTempFile::new().unwrap();
|
||||
(String::from(tmp_file.path().to_str().unwrap()), tmp_file)
|
||||
}
|
||||
|
||||
fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
|
||||
(0..5).for_each(|tries| {
|
||||
let balance = client.retry_get_balance(pubkey, 1).unwrap().unwrap();
|
||||
if balance == expected_balance {
|
||||
return;
|
||||
}
|
||||
if tries == 4 {
|
||||
assert_eq!(balance, expected_balance);
|
||||
}
|
||||
sleep(Duration::from_millis(500));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer() {
|
||||
let (server, leader_data, mint_keypair, ledger_path, _) = new_validator_for_tests_ex(1, 42_000);
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let mut config = CliConfig::default();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let sender_pubkey = config.keypair.pubkey();
|
||||
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
println!("sender: {:?}", sender_pubkey);
|
||||
println!("recipient: {:?}", recipient_pubkey);
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &sender_pubkey, 50_000).unwrap();
|
||||
check_balance(50_000, &rpc_client, &sender_pubkey);
|
||||
check_balance(0, &rpc_client, &recipient_pubkey);
|
||||
|
||||
// Plain ole transfer
|
||||
config.command = CliCommand::Transfer {
|
||||
lamports: 10,
|
||||
to: recipient_pubkey,
|
||||
from: None,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash_query: BlockhashQuery::All,
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
fee_payer: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_balance(49_989, &rpc_client, &sender_pubkey);
|
||||
check_balance(10, &rpc_client, &recipient_pubkey);
|
||||
|
||||
let mut offline = CliConfig::default();
|
||||
offline.json_rpc_url = String::default();
|
||||
// Verify we cannot contact the cluster
|
||||
offline.command = CliCommand::ClusterVersion;
|
||||
process_command(&offline).unwrap_err();
|
||||
|
||||
let offline_pubkey = offline.keypair.pubkey();
|
||||
println!("offline: {:?}", offline_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 50).unwrap();
|
||||
check_balance(50, &rpc_client, &offline_pubkey);
|
||||
|
||||
// Offline transfer
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
|
||||
offline.command = CliCommand::Transfer {
|
||||
lamports: 10,
|
||||
to: recipient_pubkey,
|
||||
from: None,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
fee_payer: None,
|
||||
};
|
||||
let sign_only_reply = process_command(&offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sign_only_reply);
|
||||
config.command = CliCommand::Transfer {
|
||||
lamports: 10,
|
||||
to: recipient_pubkey,
|
||||
from: Some(offline_pubkey.into()),
|
||||
sign_only: false,
|
||||
signers: Some(signers),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: None,
|
||||
fee_payer: Some(offline_pubkey.into()),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_balance(39, &rpc_client, &offline_pubkey);
|
||||
check_balance(20, &rpc_client, &recipient_pubkey);
|
||||
|
||||
// Create nonce account
|
||||
let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap();
|
||||
let (nonce_account_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&nonce_account, tmp_file.as_file_mut()).unwrap();
|
||||
let minimum_nonce_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(NonceState::size())
|
||||
.unwrap();
|
||||
config.command = CliCommand::CreateNonceAccount {
|
||||
nonce_account: read_keypair_file(&nonce_account_file).unwrap().into(),
|
||||
seed: None,
|
||||
nonce_authority: None,
|
||||
lamports: minimum_nonce_balance,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_balance(49_987 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state: NonceState = account.state().unwrap();
|
||||
let nonce_hash = match nonce_state {
|
||||
NonceState::Initialized(_meta, hash) => hash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
|
||||
// Nonced transfer
|
||||
config.command = CliCommand::Transfer {
|
||||
lamports: 10,
|
||||
to: recipient_pubkey,
|
||||
from: None,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: None,
|
||||
fee_payer: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_balance(49_976 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
check_balance(30, &rpc_client, &recipient_pubkey);
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state: NonceState = account.state().unwrap();
|
||||
let new_nonce_hash = match nonce_state {
|
||||
NonceState::Initialized(_meta, hash) => hash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
assert_ne!(nonce_hash, new_nonce_hash);
|
||||
|
||||
// Assign nonce authority to offline
|
||||
config.command = CliCommand::AuthorizeNonceAccount {
|
||||
nonce_account: nonce_account.pubkey(),
|
||||
nonce_authority: None,
|
||||
new_authority: offline_pubkey,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_balance(49_975 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
|
||||
let nonce_state: NonceState = account.state().unwrap();
|
||||
let nonce_hash = match nonce_state {
|
||||
NonceState::Initialized(_meta, hash) => hash,
|
||||
_ => panic!("Nonce is not initialized"),
|
||||
};
|
||||
|
||||
// Offline, nonced transfer
|
||||
offline.command = CliCommand::Transfer {
|
||||
lamports: 10,
|
||||
to: recipient_pubkey,
|
||||
from: None,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: None,
|
||||
fee_payer: None,
|
||||
};
|
||||
let sign_only_reply = process_command(&offline).unwrap();
|
||||
let (blockhash, signers) = parse_sign_only_reply_string(&sign_only_reply);
|
||||
config.command = CliCommand::Transfer {
|
||||
lamports: 10,
|
||||
to: recipient_pubkey,
|
||||
from: Some(offline_pubkey.into()),
|
||||
sign_only: false,
|
||||
signers: Some(signers),
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: Some(offline_pubkey.into()),
|
||||
fee_payer: Some(offline_pubkey.into()),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_balance(28, &rpc_client, &offline_pubkey);
|
||||
check_balance(40, &rpc_client, &recipient_pubkey);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
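Side note (not part of the diff): the balance assertions in test_transfer above depend on the 1-lamport-per-signature fee passed as the first argument to new_validator_for_tests_ex. A minimal standalone sketch of that bookkeeping, with the signature counts inferred from the balance deltas rather than stated anywhere in the diff:

// Standalone sketch: fee arithmetic behind the check_balance calls above.
fn main() {
    let sig_fee: u64 = 1; // first argument to new_validator_for_tests_ex in the test
    // Plain transfer: 10 lamports moved, one signature paid by the sender.
    let after_plain = 50_000 - 10 - sig_fee;
    assert_eq!(after_plain, 49_989);
    // Creating the nonce account drops the sender by the rent-exempt minimum
    // plus (apparently) two signature fees: funder and the new nonce account.
    let after_nonce_create = after_plain - 2 * sig_fee; // minus minimum_nonce_balance in the test
    assert_eq!(after_nonce_create, 49_987);
    // Nonced transfer: 10 more lamports plus one more signature fee.
    assert_eq!(after_nonce_create - 10 - sig_fee, 49_976);
}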
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "0.23.1"
|
||||
version = "0.23.3"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -19,11 +19,11 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.44"
|
||||
solana-net-utils = { path = "../net-utils", version = "0.23.1" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.1" }
|
||||
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.3" }
|
||||
|
||||
[dev-dependencies]
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-core = "14.0.5"
|
||||
jsonrpc-http-server = "14.0.5"
|
||||
solana-logger = { path = "../logger", version = "0.23.1" }
|
||||
solana-logger = { path = "../logger", version = "0.23.3" }
|
||||
|
@@ -60,13 +60,10 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
Value::Null
|
||||
}
|
||||
}
|
||||
RpcRequest::GetBalance => {
|
||||
let n = if self.url == "airdrop" { 0 } else { 50 };
|
||||
serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: Value::Number(Number::from(n)),
|
||||
})?
|
||||
}
|
||||
RpcRequest::GetBalance => serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: Value::Number(Number::from(50)),
|
||||
})?,
|
||||
RpcRequest::GetRecentBlockhash => serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: (
|
||||
|
@@ -32,6 +32,14 @@ pub struct RpcBlockCommitment<T> {
|
||||
pub total_stake: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct RpcReward {
|
||||
pub pubkey: String,
|
||||
pub lamports: i64,
|
||||
}
|
||||
|
||||
pub type RpcRewards = Vec<RpcReward>;
|
||||
|
||||
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcConfirmedBlock {
|
||||
@@ -39,6 +47,7 @@ pub struct RpcConfirmedBlock {
|
||||
pub blockhash: String,
|
||||
pub parent_slot: Slot,
|
||||
pub transactions: Vec<RpcTransactionWithStatusMeta>,
|
||||
pub rewards: RpcRewards,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
||||
|
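For context, the hunks above add an RpcReward type and a rewards list to RpcConfirmedBlock. A minimal standalone sketch of consuming that field, mirroring the struct shape added in this diff; it assumes serde (with derive) and serde_json, and the JSON payload is invented purely for illustration:

// Standalone sketch, not part of the diff.
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct RpcReward {
    pubkey: String,
    lamports: i64, // signed: a negative value would represent a debit
}

fn main() {
    let json = r#"[{"pubkey":"11111111111111111111111111111111","lamports":42}]"#;
    let rewards: Vec<RpcReward> = serde_json::from_str(json).unwrap();
    assert_eq!(rewards[0].lamports, 42);
    assert_eq!(rewards[0].pubkey.len(), 32);
}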
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.23.1"
|
||||
version = "0.23.3"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@@ -40,26 +40,26 @@ rayon = "1.2.0"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.44"
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.23.1" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
|
||||
solana-client = { path = "../client", version = "0.23.1" }
|
||||
solana-faucet = { path = "../faucet", version = "0.23.1" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.23.3" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
|
||||
solana-client = { path = "../client", version = "0.23.3" }
|
||||
solana-faucet = { path = "../faucet", version = "0.23.3" }
|
||||
ed25519-dalek = "=1.0.0-pre.1"
|
||||
solana-ledger = { path = "../ledger", version = "0.23.1" }
|
||||
solana-logger = { path = "../logger", version = "0.23.1" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "0.23.1" }
|
||||
solana-metrics = { path = "../metrics", version = "0.23.1" }
|
||||
solana-measure = { path = "../measure", version = "0.23.1" }
|
||||
solana-net-utils = { path = "../net-utils", version = "0.23.1" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "0.23.1" }
|
||||
solana-perf = { path = "../perf", version = "0.23.1" }
|
||||
solana-runtime = { path = "../runtime", version = "0.23.1" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.1" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.23.1" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.23.1" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.23.1" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.23.1" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "0.23.1" }
|
||||
solana-ledger = { path = "../ledger", version = "0.23.3" }
|
||||
solana-logger = { path = "../logger", version = "0.23.3" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "0.23.3" }
|
||||
solana-metrics = { path = "../metrics", version = "0.23.3" }
|
||||
solana-measure = { path = "../measure", version = "0.23.3" }
|
||||
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "0.23.3" }
|
||||
solana-perf = { path = "../perf", version = "0.23.3" }
|
||||
solana-runtime = { path = "../runtime", version = "0.23.3" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.3" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.23.3" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.23.3" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.23.3" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "0.23.3" }
|
||||
symlink = "0.1.0"
|
||||
sys-info = "0.5.8"
|
||||
tempfile = "3.1.0"
|
||||
@@ -69,7 +69,7 @@ tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
untrusted = "0.7.0"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.23.1" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.23.3" }
|
||||
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
|
||||
|
||||
[dev-dependencies]
|
||||
|
@@ -21,7 +21,6 @@ use crate::{
|
||||
crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
|
||||
crds_value::{self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, Vote},
|
||||
packet::{Packet, PACKET_DATA_SIZE},
|
||||
repair_service::RepairType,
|
||||
result::{Error, Result},
|
||||
sendmmsg::{multicast, send_mmsg},
|
||||
weighted_shuffle::{weighted_best, weighted_shuffle},
|
||||
@@ -29,8 +28,7 @@ use crate::{
|
||||
use bincode::{serialize, serialized_size};
|
||||
use core::cmp;
|
||||
use itertools::Itertools;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore, staking_utils};
|
||||
use solana_ledger::{bank_forks::BankForks, staking_utils};
|
||||
use solana_measure::thread_mem_usage;
|
||||
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
|
||||
use solana_net_utils::{
|
||||
@@ -63,15 +61,12 @@ pub const VALIDATOR_PORT_RANGE: PortRange = (8000, 10_000);
|
||||
pub const DATA_PLANE_FANOUT: usize = 200;
|
||||
/// milliseconds we sleep for between gossip requests
|
||||
pub const GOSSIP_SLEEP_MILLIS: u64 = 100;
|
||||
|
||||
/// the number of slots to respond with when responding to `Orphan` requests
|
||||
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
|
||||
/// The maximum size of a bloom filter
|
||||
pub const MAX_BLOOM_SIZE: usize = 1028;
|
||||
pub const MAX_BLOOM_SIZE: usize = 1018;
|
||||
/// The maximum size of a protocol payload
|
||||
const MAX_PROTOCOL_PAYLOAD_SIZE: u64 = PACKET_DATA_SIZE as u64 - MAX_PROTOCOL_HEADER_SIZE;
|
||||
/// The largest protocol header size
|
||||
const MAX_PROTOCOL_HEADER_SIZE: u64 = 204;
|
||||
const MAX_PROTOCOL_HEADER_SIZE: u64 = 214;
|
||||
|
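Side note on the two constant changes above: MAX_PROTOCOL_PAYLOAD_SIZE is derived as PACKET_DATA_SIZE minus MAX_PROTOCOL_HEADER_SIZE, so growing the header budget by 10 bytes (204 to 214) shrinks the bloom filter cap by the same 10 bytes (1028 to 1018). A quick standalone sketch of that arithmetic; the PACKET_DATA_SIZE value of 1232 is an assumption taken from the packet module, not shown in this diff:

// Standalone sketch, not part of the diff: relationship between the constants above.
const PACKET_DATA_SIZE: u64 = 1232; // assumed value of packet::PACKET_DATA_SIZE
const MAX_PROTOCOL_HEADER_SIZE: u64 = 214; // new value in this diff
const MAX_PROTOCOL_PAYLOAD_SIZE: u64 = PACKET_DATA_SIZE - MAX_PROTOCOL_HEADER_SIZE;

fn main() {
    assert_eq!(MAX_PROTOCOL_PAYLOAD_SIZE, 1018); // matches the new MAX_BLOOM_SIZE
}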
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum ClusterInfoError {
|
||||
@@ -174,12 +169,6 @@ enum Protocol {
|
||||
PullResponse(Pubkey, Vec<CrdsValue>),
|
||||
PushMessage(Pubkey, Vec<CrdsValue>),
|
||||
PruneMessage(Pubkey, PruneData),
|
||||
|
||||
/// Window protocol messages
|
||||
/// TODO: move this message to a different module
|
||||
RequestWindowIndex(ContactInfo, u64, u64),
|
||||
RequestHighestWindowIndex(ContactInfo, u64, u64),
|
||||
RequestOrphan(ContactInfo, u64),
|
||||
}
|
||||
|
||||
impl ClusterInfo {
|
||||
@@ -525,7 +514,7 @@ impl ClusterInfo {
|
||||
}
|
||||
|
||||
/// all tvu peers with valid gossip addrs that likely have the slot being requested
|
||||
fn repair_peers(&self, slot: Slot) -> Vec<ContactInfo> {
|
||||
pub fn repair_peers(&self, slot: Slot) -> Vec<ContactInfo> {
|
||||
let me = self.my_data();
|
||||
ClusterInfo::tvu_peers(self)
|
||||
.into_iter()
|
||||
@@ -866,61 +855,6 @@ impl ClusterInfo {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn window_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
|
||||
let req = Protocol::RequestWindowIndex(self.my_data(), slot, shred_index);
|
||||
let out = serialize(&req)?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
fn window_highest_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
|
||||
let req = Protocol::RequestHighestWindowIndex(self.my_data(), slot, shred_index);
|
||||
let out = serialize(&req)?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
fn orphan_bytes(&self, slot: Slot) -> Result<Vec<u8>> {
|
||||
let req = Protocol::RequestOrphan(self.my_data(), slot);
|
||||
let out = serialize(&req)?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
pub fn repair_request(&self, repair_request: &RepairType) -> Result<(SocketAddr, Vec<u8>)> {
|
||||
// find a peer that appears to be accepting replication and has the desired slot, as indicated
|
||||
// by a valid tvu port location
|
||||
let valid: Vec<_> = self.repair_peers(repair_request.slot());
|
||||
if valid.is_empty() {
|
||||
return Err(ClusterInfoError::NoPeers.into());
|
||||
}
|
||||
let n = thread_rng().gen::<usize>() % valid.len();
|
||||
let addr = valid[n].gossip; // send the request to the peer's gossip port
|
||||
let out = self.map_repair_request(repair_request)?;
|
||||
|
||||
Ok((addr, out))
|
||||
}
|
||||
pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
|
||||
match repair_request {
|
||||
RepairType::Shred(slot, shred_index) => {
|
||||
datapoint_debug!(
|
||||
"cluster_info-repair",
|
||||
("repair-slot", *slot, i64),
|
||||
("repair-ix", *shred_index, i64)
|
||||
);
|
||||
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
|
||||
}
|
||||
RepairType::HighestShred(slot, shred_index) => {
|
||||
datapoint_debug!(
|
||||
"cluster_info-repair_highest",
|
||||
("repair-highest-slot", *slot, i64),
|
||||
("repair-highest-ix", *shred_index, i64)
|
||||
);
|
||||
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
|
||||
}
|
||||
RepairType::Orphan(slot) => {
|
||||
datapoint_debug!("cluster_info-repair_orphan", ("repair-orphan", *slot, i64));
|
||||
Ok(self.orphan_bytes(*slot)?)
|
||||
}
|
||||
}
|
||||
}
|
||||
// If the network entrypoint hasn't been discovered yet, add it to the crds table
|
||||
fn add_entrypoint(&mut self, pulls: &mut Vec<(Pubkey, CrdsFilter, SocketAddr, CrdsValue)>) {
|
||||
let pull_from_entrypoint = if let Some(entrypoint) = &mut self.entrypoint {
|
||||
@@ -1173,124 +1107,18 @@ impl ClusterInfo {
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn get_data_shred_as_packet(
|
||||
blockstore: &Arc<Blockstore>,
|
||||
slot: Slot,
|
||||
shred_index: u64,
|
||||
dest: &SocketAddr,
|
||||
) -> Result<Option<Packet>> {
|
||||
let data = blockstore.get_data_shred(slot, shred_index)?;
|
||||
Ok(data.map(|data| {
|
||||
let mut packet = Packet::default();
|
||||
packet.meta.size = data.len();
|
||||
packet.meta.set_addr(dest);
|
||||
packet.data.copy_from_slice(&data);
|
||||
packet
|
||||
}))
|
||||
}
|
||||
|
||||
fn run_window_request(
|
||||
recycler: &PacketsRecycler,
|
||||
from: &ContactInfo,
|
||||
from_addr: &SocketAddr,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
me: &ContactInfo,
|
||||
slot: Slot,
|
||||
shred_index: u64,
|
||||
) -> Option<Packets> {
|
||||
if let Some(blockstore) = blockstore {
|
||||
// Try to find the requested index in one of the slots
|
||||
let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
|
||||
|
||||
if let Ok(Some(packet)) = packet {
|
||||
inc_new_counter_debug!("cluster_info-window-request-ledger", 1);
|
||||
return Some(Packets::new_with_recycler_data(
|
||||
recycler,
|
||||
"run_window_request",
|
||||
vec![packet],
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
inc_new_counter_debug!("cluster_info-window-request-fail", 1);
|
||||
trace!(
|
||||
"{}: failed RequestWindowIndex {} {} {}",
|
||||
me.id,
|
||||
from.id,
|
||||
slot,
|
||||
shred_index,
|
||||
);
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn run_highest_window_request(
|
||||
recycler: &PacketsRecycler,
|
||||
from_addr: &SocketAddr,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
slot: Slot,
|
||||
highest_index: u64,
|
||||
) -> Option<Packets> {
|
||||
let blockstore = blockstore?;
|
||||
// Try to find the requested index in one of the slots
|
||||
let meta = blockstore.meta(slot).ok()??;
|
||||
if meta.received > highest_index {
|
||||
// meta.received must be at least 1 by this point
|
||||
let packet =
|
||||
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
|
||||
.ok()??;
|
||||
return Some(Packets::new_with_recycler_data(
|
||||
recycler,
|
||||
"run_highest_window_request",
|
||||
vec![packet],
|
||||
));
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn run_orphan(
|
||||
recycler: &PacketsRecycler,
|
||||
from_addr: &SocketAddr,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
mut slot: Slot,
|
||||
max_responses: usize,
|
||||
) -> Option<Packets> {
|
||||
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
|
||||
if let Some(blockstore) = blockstore {
|
||||
// Try to find the next "n" parent slots of the input slot
|
||||
while let Ok(Some(meta)) = blockstore.meta(slot) {
|
||||
if meta.received == 0 {
|
||||
break;
|
||||
}
|
||||
let packet =
|
||||
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
|
||||
if let Ok(Some(packet)) = packet {
|
||||
res.packets.push(packet);
|
||||
}
|
||||
if meta.is_parent_set() && res.packets.len() <= max_responses {
|
||||
slot = meta.parent_slot;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if res.is_empty() {
|
||||
return None;
|
||||
}
|
||||
Some(res)
|
||||
}
|
||||
|
||||
fn handle_packets(
|
||||
me: &Arc<RwLock<Self>>,
|
||||
recycler: &PacketsRecycler,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
packets: Packets,
|
||||
response_sender: &PacketSender,
|
||||
epoch_ms: u64,
|
||||
) {
|
||||
// iter over the packets, collect pulls separately and process everything else
|
||||
let allocated = thread_mem_usage::Allocatedp::default();
|
||||
let mut gossip_pull_data: Vec<PullData> = vec![];
|
||||
let timeouts = me.read().unwrap().gossip.make_timeouts(&stakes, epoch_ms);
|
||||
packets.packets.iter().for_each(|packet| {
|
||||
let from_addr = packet.meta.addr();
|
||||
limited_deserialize(&packet.data[..packet.meta.size])
|
||||
@@ -1332,7 +1160,7 @@ impl ClusterInfo {
|
||||
}
|
||||
ret
|
||||
});
|
||||
Self::handle_pull_response(me, &from, data);
|
||||
Self::handle_pull_response(me, &from, data, &timeouts);
|
||||
datapoint_debug!(
|
||||
"solana-gossip-listen-memory",
|
||||
("pull_response", (allocated.get() - start) as i64, i64),
|
||||
@@ -1390,13 +1218,6 @@ impl ClusterInfo {
|
||||
("prune_message", (allocated.get() - start) as i64, i64),
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
let rsp =
|
||||
Self::handle_repair(me, recycler, &from_addr, blockstore, request);
|
||||
if let Some(rsp) = rsp {
|
||||
let _ignore_disconnect = response_sender.send(rsp);
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
// process the collected pulls together
|
||||
@@ -1451,7 +1272,12 @@ impl ClusterInfo {
|
||||
Some(packets)
|
||||
}
|
||||
|
||||
fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
|
||||
fn handle_pull_response(
|
||||
me: &Arc<RwLock<Self>>,
|
||||
from: &Pubkey,
|
||||
data: Vec<CrdsValue>,
|
||||
timeouts: &HashMap<Pubkey, u64>,
|
||||
) {
|
||||
let len = data.len();
|
||||
let now = Instant::now();
|
||||
let self_id = me.read().unwrap().gossip.id;
|
||||
@@ -1459,7 +1285,7 @@ impl ClusterInfo {
|
||||
me.write()
|
||||
.unwrap()
|
||||
.gossip
|
||||
.process_pull_response(from, data, timestamp());
|
||||
.process_pull_response(from, timeouts, data, timestamp());
|
||||
inc_new_counter_debug!("cluster_info-pull_request_response", 1);
|
||||
inc_new_counter_debug!("cluster_info-pull_request_response-size", len);
|
||||
|
||||
@@ -1524,104 +1350,10 @@ impl ClusterInfo {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_repair_sender(request: &Protocol) -> &ContactInfo {
|
||||
match request {
|
||||
Protocol::RequestWindowIndex(ref from, _, _) => from,
|
||||
Protocol::RequestHighestWindowIndex(ref from, _, _) => from,
|
||||
Protocol::RequestOrphan(ref from, _) => from,
|
||||
_ => panic!("Not a repair request"),
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_repair(
|
||||
me: &Arc<RwLock<Self>>,
|
||||
recycler: &PacketsRecycler,
|
||||
from_addr: &SocketAddr,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
request: Protocol,
|
||||
) -> Option<Packets> {
|
||||
let now = Instant::now();
|
||||
|
||||
//TODO this doesn't depend on cluster_info module, could be moved
|
||||
//but we are using the listen thread to service these requests
|
||||
//TODO verify from is signed
|
||||
|
||||
let self_id = me.read().unwrap().gossip.id;
|
||||
let from = Self::get_repair_sender(&request);
|
||||
if from.id == me.read().unwrap().gossip.id {
|
||||
warn!(
|
||||
"{}: Ignored received repair request from ME {}",
|
||||
self_id, from.id,
|
||||
);
|
||||
inc_new_counter_debug!("cluster_info-handle-repair--eq", 1);
|
||||
return None;
|
||||
}
|
||||
|
||||
me.write()
|
||||
.unwrap()
|
||||
.gossip
|
||||
.crds
|
||||
.update_record_timestamp(&from.id, timestamp());
|
||||
let my_info = me.read().unwrap().my_data();
|
||||
|
||||
let (res, label) = {
|
||||
match &request {
|
||||
Protocol::RequestWindowIndex(from, slot, shred_index) => {
|
||||
inc_new_counter_debug!("cluster_info-request-window-index", 1);
|
||||
(
|
||||
Self::run_window_request(
|
||||
recycler,
|
||||
from,
|
||||
&from_addr,
|
||||
blockstore,
|
||||
&my_info,
|
||||
*slot,
|
||||
*shred_index,
|
||||
),
|
||||
"RequestWindowIndex",
|
||||
)
|
||||
}
|
||||
|
||||
Protocol::RequestHighestWindowIndex(_, slot, highest_index) => {
|
||||
inc_new_counter_debug!("cluster_info-request-highest-window-index", 1);
|
||||
(
|
||||
Self::run_highest_window_request(
|
||||
recycler,
|
||||
&from_addr,
|
||||
blockstore,
|
||||
*slot,
|
||||
*highest_index,
|
||||
),
|
||||
"RequestHighestWindowIndex",
|
||||
)
|
||||
}
|
||||
Protocol::RequestOrphan(_, slot) => {
|
||||
inc_new_counter_debug!("cluster_info-request-orphan", 1);
|
||||
(
|
||||
Self::run_orphan(
|
||||
recycler,
|
||||
&from_addr,
|
||||
blockstore,
|
||||
*slot,
|
||||
MAX_ORPHAN_REPAIR_RESPONSES,
|
||||
),
|
||||
"RequestOrphan",
|
||||
)
|
||||
}
|
||||
_ => panic!("Not a repair request"),
|
||||
}
|
||||
};
|
||||
|
||||
trace!("{}: received repair request: {:?}", self_id, request);
|
||||
report_time_spent(label, &now.elapsed(), "");
|
||||
res
|
||||
}
|
||||
|
||||
/// Process messages from the network
|
||||
fn run_listen(
|
||||
obj: &Arc<RwLock<Self>>,
|
||||
recycler: &PacketsRecycler,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
bank_forks: Option<&Arc<RwLock<BankForks>>>,
|
||||
requests_receiver: &PacketReceiver,
|
||||
response_sender: &PacketSender,
|
||||
@@ -1629,19 +1361,27 @@ impl ClusterInfo {
|
||||
//TODO cache connections
|
||||
let timeout = Duration::new(1, 0);
|
||||
let reqs = requests_receiver.recv_timeout(timeout)?;
|
||||
let epoch_ms;
|
||||
let stakes: HashMap<_, _> = match bank_forks {
|
||||
Some(ref bank_forks) => {
|
||||
staking_utils::staked_nodes(&bank_forks.read().unwrap().working_bank())
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
let epoch = bank.epoch();
|
||||
let epoch_schedule = bank.epoch_schedule();
|
||||
epoch_ms = epoch_schedule.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT;
|
||||
staking_utils::staked_nodes(&bank)
|
||||
}
|
||||
None => {
|
||||
inc_new_counter_info!("cluster_info-purge-no_working_bank", 1);
|
||||
epoch_ms = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
|
||||
HashMap::new()
|
||||
}
|
||||
None => HashMap::new(),
|
||||
};
|
||||
|
||||
Self::handle_packets(obj, &recycler, blockstore, &stakes, reqs, response_sender);
|
||||
Self::handle_packets(obj, &recycler, &stakes, reqs, response_sender, epoch_ms);
|
||||
Ok(())
|
||||
}
|
||||
pub fn listen(
|
||||
me: Arc<RwLock<Self>>,
|
||||
blockstore: Option<Arc<Blockstore>>,
|
||||
bank_forks: Option<Arc<RwLock<BankForks>>>,
|
||||
requests_receiver: PacketReceiver,
|
||||
response_sender: PacketSender,
|
||||
@@ -1655,7 +1395,6 @@ impl ClusterInfo {
|
||||
let e = Self::run_listen(
|
||||
&me,
|
||||
&recycler,
|
||||
blockstore.as_ref(),
|
||||
bank_forks.as_ref(),
|
||||
&requests_receiver,
|
||||
&response_sender,
|
||||
@@ -1690,6 +1429,7 @@ impl ClusterInfo {
|
||||
dummy_addr,
|
||||
dummy_addr,
|
||||
dummy_addr,
|
||||
dummy_addr,
|
||||
timestamp(),
|
||||
)
|
||||
}
|
||||
@@ -1770,6 +1510,7 @@ pub struct Sockets {
|
||||
pub repair: UdpSocket,
|
||||
pub retransmit_sockets: Vec<UdpSocket>,
|
||||
pub storage: Option<UdpSocket>,
|
||||
pub serve_repair: UdpSocket,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -1790,9 +1531,10 @@ impl Node {
|
||||
let storage = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let empty = "0.0.0.0:0".parse().unwrap();
|
||||
let repair = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
|
||||
let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()];
|
||||
let retransmit = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
|
||||
let info = ContactInfo::new(
|
||||
pubkey,
|
||||
gossip.local_addr().unwrap(),
|
||||
@@ -1804,6 +1546,7 @@ impl Node {
|
||||
storage.local_addr().unwrap(),
|
||||
empty,
|
||||
empty,
|
||||
serve_repair.local_addr().unwrap(),
|
||||
timestamp(),
|
||||
);
|
||||
|
||||
@@ -1818,6 +1561,7 @@ impl Node {
|
||||
broadcast,
|
||||
repair,
|
||||
retransmit_sockets: vec![retransmit],
|
||||
serve_repair,
|
||||
storage: Some(storage),
|
||||
ip_echo: None,
|
||||
},
|
||||
@@ -1840,6 +1584,7 @@ impl Node {
|
||||
let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()];
|
||||
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let storage = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let info = ContactInfo::new(
|
||||
pubkey,
|
||||
gossip_addr,
|
||||
@@ -1851,6 +1596,7 @@ impl Node {
|
||||
storage.local_addr().unwrap(),
|
||||
rpc_addr,
|
||||
rpc_pubsub_addr,
|
||||
serve_repair.local_addr().unwrap(),
|
||||
timestamp(),
|
||||
);
|
||||
Node {
|
||||
@@ -1866,6 +1612,7 @@ impl Node {
|
||||
repair,
|
||||
retransmit_sockets: vec![retransmit_socket],
|
||||
storage: None,
|
||||
serve_repair,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1908,6 +1655,8 @@ impl Node {
|
||||
multi_bind_in_range(port_range, 8).expect("retransmit multi_bind");
|
||||
|
||||
let (repair_port, repair) = Self::bind(port_range);
|
||||
let (serve_repair_port, serve_repair) = Self::bind(port_range);
|
||||
|
||||
let (_, broadcast) = multi_bind_in_range(port_range, 4).expect("broadcast multi_bind");
|
||||
|
||||
let info = ContactInfo::new(
|
||||
@@ -1918,6 +1667,7 @@ impl Node {
|
||||
SocketAddr::new(gossip_addr.ip(), repair_port),
|
||||
SocketAddr::new(gossip_addr.ip(), tpu_port),
|
||||
SocketAddr::new(gossip_addr.ip(), tpu_forwards_port),
|
||||
SocketAddr::new(gossip_addr.ip(), serve_repair_port),
|
||||
socketaddr_any!(),
|
||||
socketaddr_any!(),
|
||||
socketaddr_any!(),
|
||||
@@ -1937,6 +1687,7 @@ impl Node {
|
||||
repair,
|
||||
retransmit_sockets,
|
||||
storage: None,
|
||||
serve_repair,
|
||||
ip_echo: Some(ip_echo),
|
||||
},
|
||||
}
|
||||
@@ -1973,18 +1724,8 @@ fn report_time_spent(label: &str, time: &Duration, extra: &str) {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::crds_value::CrdsValueLabel;
|
||||
use crate::repair_service::RepairType;
|
||||
use crate::result::Error;
|
||||
use rayon::prelude::*;
|
||||
use solana_ledger::blockstore::make_many_slot_entries;
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_ledger::blockstore_processor::fill_blockstore_slot_with_ticks;
|
||||
use solana_ledger::get_tmp_ledger_path;
|
||||
use solana_ledger::shred::{
|
||||
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
|
||||
};
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use std::collections::HashSet;
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
@@ -2055,242 +1796,6 @@ mod tests {
|
||||
let label = CrdsValueLabel::ContactInfo(d.id);
|
||||
assert!(cluster_info.gossip.crds.lookup(&label).is_none());
|
||||
}
|
||||
#[test]
|
||||
fn window_index_request() {
|
||||
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
|
||||
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(me);
|
||||
let rv = cluster_info.repair_request(&RepairType::Shred(0, 0));
|
||||
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
|
||||
|
||||
let gossip_addr = socketaddr!([127, 0, 0, 1], 1234);
|
||||
let nxt = ContactInfo::new(
|
||||
&Pubkey::new_rand(),
|
||||
gossip_addr,
|
||||
socketaddr!([127, 0, 0, 1], 1235),
|
||||
socketaddr!([127, 0, 0, 1], 1236),
|
||||
socketaddr!([127, 0, 0, 1], 1237),
|
||||
socketaddr!([127, 0, 0, 1], 1238),
|
||||
socketaddr!([127, 0, 0, 1], 1239),
|
||||
socketaddr!([127, 0, 0, 1], 1240),
|
||||
socketaddr!([127, 0, 0, 1], 1241),
|
||||
socketaddr!([127, 0, 0, 1], 1242),
|
||||
0,
|
||||
);
|
||||
cluster_info.insert_info(nxt.clone());
|
||||
let rv = cluster_info
|
||||
.repair_request(&RepairType::Shred(0, 0))
|
||||
.unwrap();
|
||||
assert_eq!(nxt.gossip, gossip_addr);
|
||||
assert_eq!(rv.0, nxt.gossip);
|
||||
|
||||
let gossip_addr2 = socketaddr!([127, 0, 0, 2], 1234);
|
||||
let nxt = ContactInfo::new(
|
||||
&Pubkey::new_rand(),
|
||||
gossip_addr2,
|
||||
socketaddr!([127, 0, 0, 1], 1235),
|
||||
socketaddr!([127, 0, 0, 1], 1236),
|
||||
socketaddr!([127, 0, 0, 1], 1237),
|
||||
socketaddr!([127, 0, 0, 1], 1238),
|
||||
socketaddr!([127, 0, 0, 1], 1239),
|
||||
socketaddr!([127, 0, 0, 1], 1240),
|
||||
socketaddr!([127, 0, 0, 1], 1241),
|
||||
socketaddr!([127, 0, 0, 1], 1242),
|
||||
0,
|
||||
);
|
||||
cluster_info.insert_info(nxt);
|
||||
let mut one = false;
|
||||
let mut two = false;
|
||||
while !one || !two {
|
||||
//this randomly picks an option, so eventually it should pick both
|
||||
let rv = cluster_info
|
||||
.repair_request(&RepairType::Shred(0, 0))
|
||||
.unwrap();
|
||||
if rv.0 == gossip_addr {
|
||||
one = true;
|
||||
}
|
||||
if rv.0 == gossip_addr2 {
|
||||
two = true;
|
||||
}
|
||||
}
|
||||
assert!(one && two);
|
||||
}
|
||||
|
||||
/// test window requests respond with the right shred, and do not overrun
|
||||
#[test]
|
||||
fn run_window_request() {
|
||||
let recycler = PacketsRecycler::default();
|
||||
solana_logger::setup();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let me = ContactInfo::new(
|
||||
&Pubkey::new_rand(),
|
||||
socketaddr!("127.0.0.1:1234"),
|
||||
socketaddr!("127.0.0.1:1235"),
|
||||
socketaddr!("127.0.0.1:1236"),
|
||||
socketaddr!("127.0.0.1:1237"),
|
||||
socketaddr!("127.0.0.1:1238"),
|
||||
socketaddr!("127.0.0.1:1239"),
|
||||
socketaddr!("127.0.0.1:1240"),
|
||||
socketaddr!("127.0.0.1:1241"),
|
||||
socketaddr!("127.0.0.1:1242"),
|
||||
0,
|
||||
);
|
||||
let rv = ClusterInfo::run_window_request(
|
||||
&recycler,
|
||||
&me,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
&me,
|
||||
0,
|
||||
0,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
let mut common_header = ShredCommonHeader::default();
|
||||
common_header.slot = 2;
|
||||
common_header.index = 1;
|
||||
let mut data_header = DataShredHeader::default();
|
||||
data_header.parent_offset = 1;
|
||||
let shred_info = Shred::new_empty_from_header(
|
||||
common_header,
|
||||
data_header,
|
||||
CodingShredHeader::default(),
|
||||
);
|
||||
|
||||
blockstore
|
||||
.insert_shreds(vec![shred_info], None, false)
|
||||
.expect("Expect successful ledger write");
|
||||
|
||||
let rv = ClusterInfo::run_window_request(
|
||||
&recycler,
|
||||
&me,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
&me,
|
||||
2,
|
||||
1,
|
||||
);
|
||||
assert!(!rv.is_none());
|
||||
let rv: Vec<Shred> = rv
|
||||
.expect("packets")
|
||||
.packets
|
||||
.into_iter()
|
||||
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
|
||||
.collect();
|
||||
assert_eq!(rv[0].index(), 1);
|
||||
assert_eq!(rv[0].slot(), 2);
|
||||
}
|
||||
|
||||
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
/// test run_window_request: window requests respond with the right shred, and do not overrun
|
||||
#[test]
|
||||
fn run_highest_window_request() {
|
||||
let recycler = PacketsRecycler::default();
|
||||
solana_logger::setup();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rv = ClusterInfo::run_highest_window_request(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
0,
|
||||
0,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
|
||||
let _ = fill_blockstore_slot_with_ticks(
|
||||
&blockstore,
|
||||
max_ticks_per_n_shreds(1) + 1,
|
||||
2,
|
||||
1,
|
||||
Hash::default(),
|
||||
);
|
||||
|
||||
let rv = ClusterInfo::run_highest_window_request(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
2,
|
||||
1,
|
||||
);
|
||||
let rv: Vec<Shred> = rv
|
||||
.expect("packets")
|
||||
.packets
|
||||
.into_iter()
|
||||
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
|
||||
.collect();
|
||||
assert!(!rv.is_empty());
|
||||
let index = blockstore.meta(2).unwrap().unwrap().received - 1;
|
||||
assert_eq!(rv[0].index(), index as u32);
|
||||
assert_eq!(rv[0].slot(), 2);
|
||||
|
||||
let rv = ClusterInfo::run_highest_window_request(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
2,
|
||||
index + 1,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
}
|
||||
|
||||
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn run_orphan() {
|
||||
solana_logger::setup();
|
||||
let recycler = PacketsRecycler::default();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rv =
|
||||
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
|
||||
assert!(rv.is_none());
|
||||
|
||||
// Create slots 1, 2, 3 with 5 shreds apiece
|
||||
let (shreds, _) = make_many_slot_entries(1, 3, 5);
|
||||
|
||||
blockstore
|
||||
.insert_shreds(shreds, None, false)
|
||||
.expect("Expect successful ledger write");
|
||||
|
||||
// We don't have slot 4, so we don't know how to service this request
|
||||
let rv =
|
||||
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
|
||||
assert!(rv.is_none());
|
||||
|
||||
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
|
||||
// for this request
|
||||
let rv: Vec<_> =
|
||||
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
|
||||
.expect("run_orphan packets")
|
||||
.packets
|
||||
.iter()
|
||||
.map(|b| b.clone())
|
||||
.collect();
|
||||
let expected: Vec<_> = (1..=3)
|
||||
.rev()
|
||||
.map(|slot| {
|
||||
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
|
||||
ClusterInfo::get_data_shred_as_packet(
|
||||
&blockstore,
|
||||
slot,
|
||||
index,
|
||||
&socketaddr_any!(),
|
||||
)
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(rv, expected)
|
||||
}
|
||||
|
||||
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
fn assert_in_range(x: u16, range: (u16, u16)) {
|
||||
assert!(x >= range.0);
|
||||
@@ -2597,10 +2102,12 @@ mod tests {
|
||||
let entrypoint_crdsvalue =
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let timeouts = cluster_info.read().unwrap().gossip.make_timeouts_test();
|
||||
ClusterInfo::handle_pull_response(
|
||||
&cluster_info,
|
||||
&entrypoint_pubkey,
|
||||
vec![entrypoint_crdsvalue],
|
||||
&timeouts,
|
||||
);
|
||||
let pulls = cluster_info
|
||||
.write()
|
||||
@@ -2671,13 +2178,16 @@ mod tests {
|
||||
}
|
||||
|
||||
fn test_split_messages(value: CrdsValue) {
const NUM_VALUES: usize = 30;
const NUM_VALUES: u64 = 30;
let value_size = value.size();
let expected_len = NUM_VALUES / (MAX_PROTOCOL_PAYLOAD_SIZE / value_size).max(1) as usize;
let msgs = vec![value; NUM_VALUES];
let num_values_per_payload = (MAX_PROTOCOL_PAYLOAD_SIZE / value_size).max(1);

// Expected len is the ceiling of the division
let expected_len = (NUM_VALUES + num_values_per_payload - 1) / num_values_per_payload;
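// e.g. with NUM_VALUES = 30 and a hypothetical num_values_per_payload of 7,
// this is (30 + 7 - 1) / 7 = 5, i.e. ceil(30 / 7) payloads.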
let msgs = vec![value; NUM_VALUES as usize];

let split = ClusterInfo::split_gossip_messages(msgs);
assert!(split.len() <= expected_len);
assert!(split.len() as u64 <= expected_len);
}

#[test]
|
||||
@@ -2850,25 +2360,6 @@ mod tests {
|
||||
- serialized_size(&PruneData::default()).unwrap(),
|
||||
);
|
||||
|
||||
// make sure repairs are always smaller than the gossip messages
|
||||
assert!(
|
||||
max_protocol_size
|
||||
> serialized_size(&Protocol::RequestWindowIndex(ContactInfo::default(), 0, 0))
|
||||
.unwrap()
|
||||
);
|
||||
assert!(
|
||||
max_protocol_size
|
||||
> serialized_size(&Protocol::RequestHighestWindowIndex(
|
||||
ContactInfo::default(),
|
||||
0,
|
||||
0
|
||||
))
|
||||
.unwrap()
|
||||
);
|
||||
assert!(
|
||||
max_protocol_size
|
||||
> serialized_size(&Protocol::RequestOrphan(ContactInfo::default(), 0)).unwrap()
|
||||
);
|
||||
// finally assert the header size estimation is correct
|
||||
assert_eq!(MAX_PROTOCOL_HEADER_SIZE, max_protocol_size);
|
||||
}
|
||||
|
File diff suppressed because it is too large
@@ -37,7 +37,6 @@ impl StakeLockout {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Tower {
|
||||
node_pubkey: Pubkey,
|
||||
threshold_depth: usize,
|
||||
@@ -47,15 +46,24 @@ pub struct Tower {
|
||||
last_timestamp: BlockTimestamp,
|
||||
}
|
||||
|
||||
impl Tower {
|
||||
pub fn new(node_pubkey: &Pubkey, vote_account_pubkey: &Pubkey, bank_forks: &BankForks) -> Self {
|
||||
let mut tower = Self {
|
||||
node_pubkey: *node_pubkey,
|
||||
impl Default for Tower {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
node_pubkey: Pubkey::default(),
|
||||
threshold_depth: VOTE_THRESHOLD_DEPTH,
|
||||
threshold_size: VOTE_THRESHOLD_SIZE,
|
||||
lockouts: VoteState::default(),
|
||||
last_vote: Vote::default(),
|
||||
last_timestamp: BlockTimestamp::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Tower {
|
||||
pub fn new(node_pubkey: &Pubkey, vote_account_pubkey: &Pubkey, bank_forks: &BankForks) -> Self {
|
||||
let mut tower = Self {
|
||||
node_pubkey: *node_pubkey,
|
||||
..Tower::default()
|
||||
};
|
||||
|
||||
tower.initialize_lockouts_from_bank_forks(&bank_forks, vote_account_pubkey);
|
||||
@@ -327,21 +335,16 @@ impl Tower {
|
||||
fork_stake.stake,
|
||||
total_staked
|
||||
);
|
||||
for (new_lockout, original_lockout) in
|
||||
lockouts.votes.iter().zip(self.lockouts.votes.iter())
|
||||
{
|
||||
if new_lockout.slot == original_lockout.slot {
|
||||
if new_lockout.confirmation_count <= self.threshold_depth as u32 {
|
||||
break;
|
||||
if vote.confirmation_count as usize > self.threshold_depth {
|
||||
for old_vote in &self.lockouts.votes {
|
||||
if old_vote.slot == vote.slot
|
||||
&& old_vote.confirmation_count == vote.confirmation_count
|
||||
{
|
||||
return true;
|
||||
}
|
||||
if new_lockout.confirmation_count != original_lockout.confirmation_count {
|
||||
return lockout > self.threshold_size;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
true
|
||||
lockout > self.threshold_size
|
||||
} else {
|
||||
false
|
||||
}
|
||||
@@ -556,6 +559,24 @@ mod test {
|
||||
assert!(tower.check_vote_stake_threshold(0, &stakes, 2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_vote_threshold_no_skip_lockout_with_new_root() {
|
||||
solana_logger::setup();
|
||||
let mut tower = Tower::new_for_tests(4, 0.67);
|
||||
let mut stakes = HashMap::new();
|
||||
for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
|
||||
stakes.insert(
|
||||
i,
|
||||
StakeLockout {
|
||||
stake: 1,
|
||||
lockout: 8,
|
||||
},
|
||||
);
|
||||
tower.record_vote(i, Hash::default());
|
||||
}
|
||||
assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_slot_confirmed_not_enough_stake_failure() {
|
||||
let tower = Tower::new_for_tests(1, 0.67);
|
||||
|
@@ -17,7 +17,7 @@ pub struct ContactInfo {
|
||||
pub tvu: SocketAddr,
|
||||
/// address to forward shreds to
|
||||
pub tvu_forwards: SocketAddr,
|
||||
/// address to send repairs to
|
||||
/// address to send repair responses to
|
||||
pub repair: SocketAddr,
|
||||
/// transactions address
|
||||
pub tpu: SocketAddr,
|
||||
@@ -29,6 +29,8 @@ pub struct ContactInfo {
|
||||
pub rpc: SocketAddr,
|
||||
/// websocket for JSON-RPC push notifications
|
||||
pub rpc_pubsub: SocketAddr,
|
||||
/// address to send repair requests to
|
||||
pub serve_repair: SocketAddr,
|
||||
/// latest wallclock picked
|
||||
pub wallclock: u64,
|
||||
/// node shred version
|
||||
@@ -85,6 +87,7 @@ impl Default for ContactInfo {
|
||||
storage_addr: socketaddr_any!(),
|
||||
rpc: socketaddr_any!(),
|
||||
rpc_pubsub: socketaddr_any!(),
|
||||
serve_repair: socketaddr_any!(),
|
||||
wallclock: 0,
|
||||
shred_version: 0,
|
||||
}
|
||||
@@ -104,6 +107,7 @@ impl ContactInfo {
|
||||
storage_addr: SocketAddr,
|
||||
rpc: SocketAddr,
|
||||
rpc_pubsub: SocketAddr,
|
||||
serve_repair: SocketAddr,
|
||||
now: u64,
|
||||
) -> Self {
|
||||
Self {
|
||||
@@ -117,6 +121,7 @@ impl ContactInfo {
|
||||
storage_addr,
|
||||
rpc,
|
||||
rpc_pubsub,
|
||||
serve_repair,
|
||||
wallclock: now,
|
||||
shred_version: 0,
|
||||
}
|
||||
@@ -134,6 +139,7 @@ impl ContactInfo {
|
||||
socketaddr!("127.0.0.1:1240"),
|
||||
socketaddr!("127.0.0.1:1241"),
|
||||
socketaddr!("127.0.0.1:1242"),
|
||||
socketaddr!("127.0.0.1:1243"),
|
||||
now,
|
||||
)
|
||||
}
|
||||
@@ -154,6 +160,7 @@ impl ContactInfo {
|
||||
addr,
|
||||
addr,
|
||||
addr,
|
||||
addr,
|
||||
0,
|
||||
)
|
||||
}
|
||||
@@ -174,6 +181,7 @@ impl ContactInfo {
|
||||
let repair = next_port(&bind_addr, 5);
|
||||
let rpc_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
|
||||
let rpc_pubsub_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
|
||||
let serve_repair = next_port(&bind_addr, 6);
|
||||
Self::new(
|
||||
pubkey,
|
||||
gossip_addr,
|
||||
@@ -185,6 +193,7 @@ impl ContactInfo {
|
||||
"0.0.0.0:0".parse().unwrap(),
|
||||
rpc_addr,
|
||||
rpc_pubsub_addr,
|
||||
serve_repair,
|
||||
timestamp(),
|
||||
)
|
||||
}
|
||||
@@ -209,6 +218,7 @@ impl ContactInfo {
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
timestamp(),
|
||||
)
|
||||
}
|
||||
@@ -267,6 +277,7 @@ mod tests {
|
||||
assert!(ci.rpc_pubsub.ip().is_unspecified());
|
||||
assert!(ci.tpu.ip().is_unspecified());
|
||||
assert!(ci.storage_addr.ip().is_unspecified());
|
||||
assert!(ci.serve_repair.ip().is_unspecified());
|
||||
}
|
||||
#[test]
|
||||
fn test_multicast() {
|
||||
@@ -278,6 +289,7 @@ mod tests {
|
||||
assert!(ci.rpc_pubsub.ip().is_multicast());
|
||||
assert!(ci.tpu.ip().is_multicast());
|
||||
assert!(ci.storage_addr.ip().is_multicast());
|
||||
assert!(ci.serve_repair.ip().is_multicast());
|
||||
}
|
||||
#[test]
|
||||
fn test_entry_point() {
|
||||
@@ -290,6 +302,7 @@ mod tests {
|
||||
assert!(ci.rpc_pubsub.ip().is_unspecified());
|
||||
assert!(ci.tpu.ip().is_unspecified());
|
||||
assert!(ci.storage_addr.ip().is_unspecified());
|
||||
assert!(ci.serve_repair.ip().is_unspecified());
|
||||
}
|
||||
#[test]
|
||||
fn test_socketaddr() {
|
||||
@@ -302,7 +315,9 @@ mod tests {
|
||||
assert_eq!(ci.rpc.port(), rpc_port::DEFAULT_RPC_PORT);
|
||||
assert_eq!(ci.rpc_pubsub.port(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
|
||||
assert!(ci.storage_addr.ip().is_unspecified());
|
||||
assert_eq!(ci.serve_repair.port(), 16);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn replayed_data_new_with_socketaddr_with_pubkey() {
|
||||
let keypair = Keypair::new();
|
||||
@@ -323,6 +338,9 @@ mod tests {
|
||||
d1.rpc_pubsub,
|
||||
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PUBSUB_PORT))
|
||||
);
|
||||
assert_eq!(d1.tvu_forwards, socketaddr!("127.0.0.1:1238"));
|
||||
assert_eq!(d1.repair, socketaddr!("127.0.0.1:1239"));
|
||||
assert_eq!(d1.serve_repair, socketaddr!("127.0.0.1:1240"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -156,11 +156,12 @@ impl CrdsGossip {
|
||||
pub fn process_pull_response(
|
||||
&mut self,
|
||||
from: &Pubkey,
|
||||
timeouts: &HashMap<Pubkey, u64>,
|
||||
response: Vec<CrdsValue>,
|
||||
now: u64,
|
||||
) -> usize {
|
||||
self.pull
|
||||
.process_pull_response(&mut self.crds, from, response, now)
|
||||
.process_pull_response(&mut self.crds, from, timeouts, response, now)
|
||||
}
|
||||
|
||||
pub fn make_timeouts_test(&self) -> HashMap<Pubkey, u64> {
|
||||
|
@@ -25,6 +25,8 @@ use std::collections::HashMap;
|
||||
use std::collections::VecDeque;
|
||||
|
||||
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
|
||||
// The maximum age of a value received over pull responses
|
||||
pub const CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS: u64 = 60000;
|
||||
pub const FALSE_RATE: f64 = 0.1f64;
|
||||
pub const KEYS: f64 = 8f64;
|
||||
|
||||
@@ -117,6 +119,7 @@ pub struct CrdsGossipPull {
|
||||
/// hash and insert time
|
||||
purged_values: VecDeque<(Hash, u64)>,
|
||||
pub crds_timeout: u64,
|
||||
pub msg_timeout: u64,
|
||||
}
|
||||
|
||||
impl Default for CrdsGossipPull {
|
||||
@@ -125,6 +128,7 @@ impl Default for CrdsGossipPull {
|
||||
purged_values: VecDeque::new(),
|
||||
pull_request_time: HashMap::new(),
|
||||
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
|
||||
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -210,12 +214,56 @@ impl CrdsGossipPull {
&mut self,
crds: &mut Crds,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
let mut failed = 0;
for r in response {
let owner = r.label().pubkey();
// Check if the crds value is older than the msg_timeout
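// (the check also rejects values whose wallclock is more than msg_timeout in the future)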
if now
> r.wallclock()
.checked_add(self.msg_timeout)
.unwrap_or_else(|| 0)
|| now + self.msg_timeout < r.wallclock()
{
match &r.label() {
CrdsValueLabel::ContactInfo(_) => {
// Check if this ContactInfo is actually too old, it's possible that it has
// stake and so might have a longer effective timeout
let timeout = *timeouts
.get(&owner)
.unwrap_or_else(|| timeouts.get(&Pubkey::default()).unwrap());
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|| now + timeout < r.wallclock()
{
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
continue;
}
}
_ => {
// Before discarding this value, check if a ContactInfo for the owner
// exists in the table. If it doesn't, that implies that this value can be discarded
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
continue;
} else {
// Silently insert this old value without bumping record timestamps
failed += crds.insert(r, now).is_err() as usize;
continue;
}
}
}
}
let old = crds.insert(r, now);
failed += old.is_err() as usize;
old.ok().map(|opt| {
@@ -322,8 +370,9 @@ impl CrdsGossipPull {
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds_value::CrdsData;
|
||||
use crate::crds_value::{CrdsData, Vote};
|
||||
use itertools::Itertools;
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_sdk::hash::hash;
|
||||
use solana_sdk::packet::PACKET_DATA_SIZE;
|
||||
|
||||
@@ -534,8 +583,13 @@ mod test {
|
||||
continue;
|
||||
}
|
||||
assert_eq!(rsp.len(), 1);
|
||||
let failed =
|
||||
node.process_pull_response(&mut node_crds, &node_pubkey, rsp.pop().unwrap(), 1);
|
||||
let failed = node.process_pull_response(
|
||||
&mut node_crds,
|
||||
&node_pubkey,
|
||||
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
|
||||
rsp.pop().unwrap(),
|
||||
1,
|
||||
);
|
||||
assert_eq!(failed, 0);
|
||||
assert_eq!(
|
||||
node_crds
|
||||
@@ -675,4 +729,87 @@ mod test {
|
||||
.collect();
|
||||
assert_eq!(masks.len(), 2u64.pow(mask_bits) as usize)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_pull_response() {
|
||||
let mut node_crds = Crds::default();
|
||||
let mut node = CrdsGossipPull::default();
|
||||
|
||||
let peer_pubkey = Pubkey::new_rand();
|
||||
let peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
|
||||
ContactInfo::new_localhost(&peer_pubkey, 0),
|
||||
));
|
||||
let mut timeouts = HashMap::new();
|
||||
timeouts.insert(Pubkey::default(), node.crds_timeout);
|
||||
timeouts.insert(peer_pubkey, node.msg_timeout + 1);
|
||||
// inserting a fresh value should be fine.
|
||||
assert_eq!(
|
||||
node.process_pull_response(
|
||||
&mut node_crds,
|
||||
&peer_pubkey,
|
||||
&timeouts,
|
||||
vec![peer_entry.clone()],
|
||||
1,
|
||||
),
|
||||
0
|
||||
);
|
||||
|
||||
let mut node_crds = Crds::default();
|
||||
let unstaked_peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
|
||||
ContactInfo::new_localhost(&peer_pubkey, 0),
|
||||
));
|
||||
// check that old contact infos fail if they are too old, regardless of "timeouts"
|
||||
assert_eq!(
|
||||
node.process_pull_response(
|
||||
&mut node_crds,
|
||||
&peer_pubkey,
|
||||
&timeouts,
|
||||
vec![peer_entry.clone(), unstaked_peer_entry],
|
||||
node.msg_timeout + 100,
|
||||
),
|
||||
2
|
||||
);
|
||||
|
||||
let mut node_crds = Crds::default();
|
||||
// check that old contact infos can still land as long as they have a "timeouts" entry
|
||||
assert_eq!(
|
||||
node.process_pull_response(
|
||||
&mut node_crds,
|
||||
&peer_pubkey,
|
||||
&timeouts,
|
||||
vec![peer_entry.clone()],
|
||||
node.msg_timeout + 1,
|
||||
),
|
||||
0
|
||||
);
|
||||
|
||||
// construct something that's not a contact info
|
||||
let peer_vote =
|
||||
CrdsValue::new_unsigned(CrdsData::Vote(0, Vote::new(&peer_pubkey, test_tx(), 0)));
|
||||
// check that older CrdsValues (non-ContactInfos) pass even if they are too old,
|
||||
// but a recent contact info (inserted above) exists
|
||||
assert_eq!(
|
||||
node.process_pull_response(
|
||||
&mut node_crds,
|
||||
&peer_pubkey,
|
||||
&timeouts,
|
||||
vec![peer_vote.clone()],
|
||||
node.msg_timeout + 1,
|
||||
),
|
||||
0
|
||||
);
|
||||
|
||||
let mut node_crds = Crds::default();
|
||||
// without a contact info, inserting an old value should fail
|
||||
assert_eq!(
|
||||
node.process_pull_response(
|
||||
&mut node_crds,
|
||||
&peer_pubkey,
|
||||
&timeouts,
|
||||
vec![peer_vote.clone()],
|
||||
node.msg_timeout + 1,
|
||||
),
|
||||
1
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -30,7 +30,10 @@ use std::collections::{HashMap, HashSet};

pub const CRDS_GOSSIP_NUM_ACTIVE: usize = 30;
pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 5000;
// With a fanout of 6, a 1000 node cluster should only take ~4 hops to converge.
// However since pushes are stake weighed, some trailing nodes
// might need more time to receive values. 30 seconds should be plenty.
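// (roughly: hops to reach ~1000 nodes with fanout 6 is about ceil(log6(1000)) = ceil(3.86) = 4)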
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;

@@ -135,7 +138,12 @@ impl CrdsGossipPush {
|
||||
value: CrdsValue,
|
||||
now: u64,
|
||||
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
|
||||
if now > value.wallclock() + self.msg_timeout {
|
||||
if now
|
||||
> value
|
||||
.wallclock()
|
||||
.checked_add(self.msg_timeout)
|
||||
.unwrap_or_else(|| 0)
|
||||
{
|
||||
return Err(CrdsGossipError::PushMessageTimeout);
|
||||
}
|
||||
if now + self.msg_timeout < value.wallclock() {
|
||||
|
@@ -6,7 +6,6 @@ use crate::streamer;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_client::thin_client::{create_client, ThinClient};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_perf::recycler::Recycler;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
@@ -24,7 +23,6 @@ pub struct GossipService {
|
||||
impl GossipService {
|
||||
pub fn new(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
blockstore: Option<Arc<Blockstore>>,
|
||||
bank_forks: Option<Arc<RwLock<BankForks>>>,
|
||||
gossip_socket: UdpSocket,
|
||||
exit: &Arc<AtomicBool>,
|
||||
@@ -47,7 +45,6 @@ impl GossipService {
|
||||
let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
|
||||
let t_listen = ClusterInfo::listen(
|
||||
cluster_info.clone(),
|
||||
blockstore,
|
||||
bank_forks.clone(),
|
||||
request_receiver,
|
||||
response_sender.clone(),
|
||||
@@ -283,8 +280,7 @@ fn make_gossip_node(
|
||||
cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
|
||||
}
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let gossip_service =
|
||||
GossipService::new(&cluster_info.clone(), None, None, gossip_socket, &exit);
|
||||
let gossip_service = GossipService::new(&cluster_info.clone(), None, gossip_socket, &exit);
|
||||
(gossip_service, ip_echo, cluster_info)
|
||||
}
|
||||
|
||||
@@ -303,7 +299,7 @@ mod tests {
|
||||
let tn = Node::new_localhost();
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(tn.info.clone());
|
||||
let c = Arc::new(RwLock::new(cluster_info));
|
||||
let d = GossipService::new(&c, None, None, tn.sockets.gossip, &exit);
|
||||
let d = GossipService::new(&c, None, tn.sockets.gossip, &exit);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
d.join().unwrap();
|
||||
}
|
||||
|
@@ -15,7 +15,6 @@ pub mod contact_info;
|
||||
pub mod blockstream;
|
||||
pub mod blockstream_service;
|
||||
pub mod cluster_info;
|
||||
pub mod cluster_info_repair_listener;
|
||||
pub mod consensus;
|
||||
pub mod crds;
|
||||
pub mod crds_gossip;
|
||||
@@ -37,12 +36,15 @@ pub mod repair_service;
|
||||
pub mod replay_stage;
|
||||
mod result;
|
||||
pub mod retransmit_stage;
|
||||
pub mod rewards_recorder_service;
|
||||
pub mod rpc;
|
||||
pub mod rpc_pubsub;
|
||||
pub mod rpc_pubsub_service;
|
||||
pub mod rpc_service;
|
||||
pub mod rpc_subscriptions;
|
||||
pub mod sendmmsg;
|
||||
pub mod serve_repair;
|
||||
pub mod serve_repair_service;
|
||||
pub mod sigverify;
|
||||
pub mod sigverify_shreds;
|
||||
pub mod sigverify_stage;
|
||||
|
@@ -15,10 +15,9 @@ pub struct LocalVoteSignerService {
|
||||
impl LocalVoteSignerService {
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new(port_range: PortRange) -> (Self, SocketAddr) {
|
||||
let addr = match solana_net_utils::find_available_port_in_range(port_range) {
|
||||
Ok(port) => SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port),
|
||||
Err(_e) => panic!("Failed to find an available port for local vote signer service"),
|
||||
};
|
||||
let addr = solana_net_utils::find_available_port_in_range(port_range)
|
||||
.map(|port| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port))
|
||||
.expect("Failed to find an available port for local vote signer service");
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let thread_exit = exit.clone();
|
||||
let thread = Builder::new()
|
||||
|
@@ -9,7 +9,7 @@ use solana_metrics::inc_new_counter_debug;
|
||||
pub use solana_sdk::packet::{Meta, Packet, PACKET_DATA_SIZE};
|
||||
use std::{io::Result, net::UdpSocket, time::Instant};
|
||||
|
||||
pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
|
||||
pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: usize) -> Result<usize> {
|
||||
let mut i = 0;
|
||||
//DOCUMENTED SIDE-EFFECT
|
||||
//Performance out of the IO without poll
|
||||
@@ -20,9 +20,11 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
|
||||
socket.set_nonblocking(false)?;
|
||||
trace!("receiving on {}", socket.local_addr().unwrap());
|
||||
let start = Instant::now();
|
||||
let mut total_size = 0;
|
||||
loop {
|
||||
obj.packets.resize(i + NUM_RCVMMSGS, Packet::default());
|
||||
obj.packets.resize(
|
||||
std::cmp::min(i + NUM_RCVMMSGS, PACKETS_PER_BATCH),
|
||||
Packet::default(),
|
||||
);
|
||||
match recv_mmsg(socket, &mut obj.packets[i..]) {
|
||||
Err(_) if i > 0 => {
|
||||
if start.elapsed().as_millis() > 1 {
|
||||
@@ -33,16 +35,15 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
|
||||
trace!("recv_from err {:?}", e);
|
||||
return Err(e);
|
||||
}
|
||||
Ok((size, npkts)) => {
|
||||
Ok((_, npkts)) => {
|
||||
if i == 0 {
|
||||
socket.set_nonblocking(true)?;
|
||||
}
|
||||
trace!("got {} packets", npkts);
|
||||
i += npkts;
|
||||
total_size += size;
|
||||
// Try to batch into big enough buffers
|
||||
// will cause less re-shuffling later on.
|
||||
if start.elapsed().as_millis() > 1 || total_size >= PACKETS_BATCH_SIZE {
|
||||
if start.elapsed().as_millis() > max_wait_ms as u128 || i >= PACKETS_PER_BATCH {
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -95,7 +96,7 @@ mod tests {
|
||||
}
|
||||
send_to(&p, &send_socket).unwrap();
|
||||
|
||||
let recvd = recv_from(&mut p, &recv_socket).unwrap();
|
||||
let recvd = recv_from(&mut p, &recv_socket, 1).unwrap();
|
||||
|
||||
assert_eq!(recvd, p.packets.len());
|
||||
|
||||
@@ -127,4 +128,32 @@ mod tests {
|
||||
p2.data[0] = 4;
|
||||
assert!(p1 != p2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_packet_resize() {
|
||||
solana_logger::setup();
|
||||
let recv_socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let addr = recv_socket.local_addr().unwrap();
|
||||
let send_socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let mut p = Packets::default();
|
||||
p.packets.resize(PACKETS_PER_BATCH, Packet::default());
|
||||
|
||||
// Should only get PACKETS_PER_BATCH packets per iteration even
|
||||
// if a lot more were sent, and regardless of packet size
|
||||
for _ in 0..2 * PACKETS_PER_BATCH {
|
||||
let mut p = Packets::default();
|
||||
p.packets.resize(1, Packet::default());
|
||||
for m in p.packets.iter_mut() {
|
||||
m.meta.set_addr(&addr);
|
||||
m.meta.size = 1;
|
||||
}
|
||||
send_to(&p, &send_socket).unwrap();
|
||||
}
|
||||
|
||||
let recvd = recv_from(&mut p, &recv_socket, 100).unwrap();
|
||||
|
||||
// Check we only got PACKETS_PER_BATCH packets
|
||||
assert_eq!(recvd, PACKETS_PER_BATCH);
|
||||
assert_eq!(p.packets.capacity(), PACKETS_PER_BATCH);
|
||||
}
|
||||
}
|
||||
|
@@ -1,8 +1,9 @@
|
||||
//! The `repair_service` module implements the tools necessary to generate a thread which
|
||||
//! regularly finds missing shreds in the ledger and sends repair requests for those shreds
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo, cluster_info_repair_listener::ClusterInfoRepairListener,
|
||||
cluster_info::ClusterInfo,
|
||||
result::Result,
|
||||
serve_repair::{RepairType, ServeRepair},
|
||||
};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
@@ -33,23 +34,6 @@ pub enum RepairStrategy {
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum RepairType {
|
||||
Orphan(Slot),
|
||||
HighestShred(Slot, u64),
|
||||
Shred(Slot, u64),
|
||||
}
|
||||
|
||||
impl RepairType {
|
||||
pub fn slot(&self) -> Slot {
|
||||
match self {
|
||||
RepairType::Orphan(slot) => *slot,
|
||||
RepairType::HighestShred(slot, _) => *slot,
|
||||
RepairType::Shred(slot, _) => *slot,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RepairSlotRange {
|
||||
pub start: Slot,
|
||||
pub end: Slot,
|
||||
@@ -66,7 +50,6 @@ impl Default for RepairSlotRange {
|
||||
|
||||
pub struct RepairService {
|
||||
t_repair: JoinHandle<()>,
|
||||
cluster_info_repair_listener: Option<ClusterInfoRepairListener>,
|
||||
}
|
||||
|
||||
impl RepairService {
|
||||
@@ -77,19 +60,6 @@ impl RepairService {
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
repair_strategy: RepairStrategy,
|
||||
) -> Self {
|
||||
let cluster_info_repair_listener = match repair_strategy {
|
||||
RepairStrategy::RepairAll {
|
||||
ref epoch_schedule, ..
|
||||
} => Some(ClusterInfoRepairListener::new(
|
||||
&blockstore,
|
||||
&exit,
|
||||
cluster_info.clone(),
|
||||
*epoch_schedule,
|
||||
)),
|
||||
|
||||
_ => None,
|
||||
};
|
||||
|
||||
let t_repair = Builder::new()
|
||||
.name("solana-repair-service".to_string())
|
||||
.spawn(move || {
|
||||
@@ -103,10 +73,7 @@ impl RepairService {
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
RepairService {
|
||||
t_repair,
|
||||
cluster_info_repair_listener,
|
||||
}
|
||||
RepairService { t_repair }
|
||||
}
|
||||
|
||||
fn run(
|
||||
@@ -116,6 +83,7 @@ impl RepairService {
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
repair_strategy: RepairStrategy,
|
||||
) {
|
||||
let serve_repair = ServeRepair::new(cluster_info.clone());
|
||||
let mut epoch_slots: BTreeSet<Slot> = BTreeSet::new();
|
||||
let id = cluster_info.read().unwrap().id();
|
||||
let mut current_root = 0;
|
||||
@@ -173,9 +141,7 @@ impl RepairService {
|
||||
let reqs: Vec<_> = repairs
|
||||
.into_iter()
|
||||
.filter_map(|repair_request| {
|
||||
cluster_info
|
||||
.read()
|
||||
.unwrap()
|
||||
serve_repair
|
||||
.repair_request(&repair_request)
|
||||
.map(|result| (result, repair_request))
|
||||
.ok()
|
||||
@@ -391,14 +357,7 @@ impl RepairService {
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
let mut results = vec![self.t_repair.join()];
|
||||
if let Some(cluster_info_repair_listener) = self.cluster_info_repair_listener {
|
||||
results.push(cluster_info_repair_listener.join());
|
||||
}
|
||||
for r in results {
|
||||
r?;
|
||||
}
|
||||
Ok(())
|
||||
self.t_repair.join()
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -6,6 +6,7 @@ use crate::{
|
||||
consensus::{StakeLockout, Tower},
|
||||
poh_recorder::PohRecorder,
|
||||
result::Result,
|
||||
rewards_recorder_service::RewardsRecorderSender,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
};
|
||||
use solana_ledger::{
|
||||
@@ -79,6 +80,7 @@ pub struct ReplayStageConfig {
|
||||
pub snapshot_package_sender: Option<SnapshotPackageSender>,
|
||||
pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
pub transaction_status_sender: Option<TransactionStatusSender>,
|
||||
pub rewards_recorder_sender: Option<RewardsRecorderSender>,
|
||||
}
|
||||
|
||||
pub struct ReplayStage {
|
||||
@@ -181,6 +183,7 @@ impl ReplayStage {
|
||||
snapshot_package_sender,
|
||||
block_commitment_cache,
|
||||
transaction_status_sender,
|
||||
rewards_recorder_sender,
|
||||
} = config;
|
||||
|
||||
let (root_bank_sender, root_bank_receiver) = channel();
|
||||
@@ -221,6 +224,7 @@ impl ReplayStage {
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
rewards_recorder_sender.clone(),
|
||||
);
|
||||
datapoint_debug!(
|
||||
"replay_stage-memory",
|
||||
@@ -361,6 +365,7 @@ impl ReplayStage {
|
||||
&poh_recorder,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
rewards_recorder_sender.clone(),
|
||||
);
|
||||
|
||||
if let Some(bank) = poh_recorder.lock().unwrap().bank() {
|
||||
@@ -434,6 +439,7 @@ impl ReplayStage {
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
subscriptions: &Arc<RpcSubscriptions>,
|
||||
rewards_recorder_sender: Option<RewardsRecorderSender>,
|
||||
) {
|
||||
// all the individual calls to poh_recorder.lock() are designed to
|
||||
// increase granularity, decrease contention
|
||||
@@ -499,6 +505,7 @@ impl ReplayStage {
|
||||
.unwrap()
|
||||
.insert(Bank::new_from_parent(&parent, my_pubkey, poh_slot));
|
||||
|
||||
Self::record_rewards(&tpu_bank, &rewards_recorder_sender);
|
||||
poh_recorder.lock().unwrap().set_bank(&tpu_bank);
|
||||
} else {
|
||||
error!("{} No next leader found", my_pubkey);
|
||||
@@ -934,6 +941,7 @@ impl ReplayStage {
|
||||
forks_lock: &RwLock<BankForks>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
subscriptions: &Arc<RpcSubscriptions>,
|
||||
rewards_recorder_sender: Option<RewardsRecorderSender>,
|
||||
) {
|
||||
// Find the next slot that chains to the old slot
|
||||
let forks = forks_lock.read().unwrap();
|
||||
@@ -969,10 +977,10 @@ impl ReplayStage {
|
||||
forks.root()
|
||||
);
|
||||
subscriptions.notify_slot(child_slot, parent_slot, forks.root());
|
||||
new_banks.insert(
|
||||
child_slot,
|
||||
Bank::new_from_parent(&parent_bank, &leader, child_slot),
|
||||
);
|
||||
|
||||
let child_bank = Bank::new_from_parent(&parent_bank, &leader, child_slot);
|
||||
Self::record_rewards(&child_bank, &rewards_recorder_sender);
|
||||
new_banks.insert(child_slot, child_bank);
|
||||
}
|
||||
}
|
||||
drop(forks);
|
||||
@@ -983,6 +991,16 @@ impl ReplayStage {
|
||||
}
|
||||
}
|
||||
|
||||
fn record_rewards(bank: &Bank, rewards_recorder_sender: &Option<RewardsRecorderSender>) {
|
||||
if let Some(rewards_recorder_sender) = rewards_recorder_sender {
|
||||
if let Some(ref rewards) = bank.rewards {
|
||||
rewards_recorder_sender
|
||||
.send((bank.slot(), rewards.iter().copied().collect()))
|
||||
.unwrap_or_else(|err| warn!("rewards_recorder_sender failed: {:?}", err));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.commitment_service.join()?;
|
||||
self.t_replay.join().map(|_| ())
|
||||
@@ -1329,6 +1347,7 @@ pub(crate) mod tests {
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
None,
|
||||
);
|
||||
assert!(bank_forks.read().unwrap().get(1).is_some());
|
||||
|
||||
@@ -1341,6 +1360,7 @@ pub(crate) mod tests {
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&subscriptions,
|
||||
None,
|
||||
);
|
||||
assert!(bank_forks.read().unwrap().get(1).is_some());
|
||||
assert!(bank_forks.read().unwrap().get(2).is_some());
|
||||
|
@@ -331,7 +331,7 @@ mod tests {
|
||||
// it should send this over the sockets.
|
||||
retransmit_sender.send(packets).unwrap();
|
||||
let mut packets = Packets::new(vec![]);
|
||||
packet::recv_from(&mut packets, &me_retransmit).unwrap();
|
||||
packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
|
||||
assert_eq!(packets.packets.len(), 1);
|
||||
assert_eq!(packets.packets[0].meta.repair, false);
|
||||
|
||||
@@ -347,7 +347,7 @@ mod tests {
|
||||
let packets = Packets::new(vec![repair, Packet::default()]);
|
||||
retransmit_sender.send(packets).unwrap();
|
||||
let mut packets = Packets::new(vec![]);
|
||||
packet::recv_from(&mut packets, &me_retransmit).unwrap();
|
||||
packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
|
||||
assert_eq!(packets.packets.len(), 1);
|
||||
assert_eq!(packets.packets[0].meta.repair, false);
|
||||
}
|
||||
|
67
core/src/rewards_recorder_service.rs
Normal file
@@ -0,0 +1,67 @@
|
||||
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
|
||||
use solana_client::rpc_response::RpcReward;
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey};
|
||||
use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
pub type RewardsRecorderReceiver = Receiver<(Slot, Vec<(Pubkey, i64)>)>;
|
||||
pub type RewardsRecorderSender = Sender<(Slot, Vec<(Pubkey, i64)>)>;
|
||||
|
||||
pub struct RewardsRecorderService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl RewardsRecorderService {
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new(
|
||||
rewards_receiver: RewardsRecorderReceiver,
|
||||
blockstore: Arc<Blockstore>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let exit = exit.clone();
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-rewards-writer".to_string())
|
||||
.spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
if let Err(RecvTimeoutError::Disconnected) =
|
||||
Self::write_rewards(&rewards_receiver, &blockstore)
|
||||
{
|
||||
break;
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
Self { thread_hdl }
|
||||
}
|
||||
|
||||
fn write_rewards(
|
||||
rewards_receiver: &RewardsRecorderReceiver,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
) -> Result<(), RecvTimeoutError> {
|
||||
let (slot, rewards) = rewards_receiver.recv_timeout(Duration::from_secs(1))?;
|
||||
let rpc_rewards = rewards
|
||||
.into_iter()
|
||||
.map(|(pubkey, lamports)| RpcReward {
|
||||
pubkey: pubkey.to_string(),
|
||||
lamports,
|
||||
})
|
||||
.collect();
|
||||
|
||||
blockstore
|
||||
.write_rewards(slot, rpc_rewards)
|
||||
.expect("Expect database write to succeed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.thread_hdl.join()
|
||||
}
|
||||
}
|
@@ -47,7 +47,8 @@ fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct JsonRpcConfig {
|
||||
pub enable_validator_exit: bool, // Enable the 'validatorExit' command
|
||||
pub enable_validator_exit: bool,
|
||||
pub enable_get_confirmed_block: bool,
|
||||
pub faucet_addr: Option<SocketAddr>,
|
||||
}
|
||||
|
||||
@@ -336,7 +337,11 @@ impl JsonRpcRequestProcessor {
|
||||
slot: Slot,
|
||||
encoding: Option<RpcTransactionEncoding>,
|
||||
) -> Result<Option<RpcConfirmedBlock>> {
|
||||
Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
|
||||
if self.config.enable_get_confirmed_block {
|
||||
Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_confirmed_blocks(
|
||||
@@ -1246,7 +1251,10 @@ pub mod tests {
|
||||
let _ = bank.process_transaction(&tx);
|
||||
|
||||
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
|
||||
JsonRpcConfig::default(),
|
||||
JsonRpcConfig {
|
||||
enable_get_confirmed_block: true,
|
||||
..JsonRpcConfig::default()
|
||||
},
|
||||
bank_forks.clone(),
|
||||
block_commitment_cache.clone(),
|
||||
blockstore,
|
||||
|
@@ -95,10 +95,11 @@ where
|
||||
let mut found = false;
|
||||
subscriptions.retain(|_, v| {
|
||||
v.retain(|k, _| {
|
||||
if *k == *sub_id {
|
||||
let retain = *k != *sub_id;
|
||||
if !retain {
|
||||
found = true;
|
||||
}
|
||||
!found
|
||||
retain
|
||||
});
|
||||
!v.is_empty()
|
||||
});
|
||||
@@ -622,6 +623,7 @@ pub(crate) mod tests {
|
||||
.unwrap()
|
||||
.contains_key(&solana_budget_program::id()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_signature_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
@@ -675,6 +677,7 @@ pub(crate) mod tests {
|
||||
.unwrap()
|
||||
.contains_key(&signature));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_slot_subscribe() {
|
||||
let (subscriber, _id_receiver, transport_receiver) =
|
||||
@@ -713,4 +716,49 @@ pub(crate) mod tests {
|
||||
.unwrap()
|
||||
.contains_key(&sub_id));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_and_remove_subscription() {
|
||||
let (subscriber, _id_receiver, _transport_receiver) = Subscriber::new_test("notification");
|
||||
let sink = subscriber
|
||||
.assign_id(SubscriptionId::String("test".to_string()))
|
||||
.unwrap();
|
||||
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, (Sink<()>, Confirmations)>> =
|
||||
HashMap::new();
|
||||
|
||||
let num_keys = 5;
|
||||
let mut next_id: u64 = 0;
|
||||
for _ in 0..num_keys {
|
||||
let key = next_id;
|
||||
let sub_id = SubscriptionId::Number(next_id);
|
||||
add_subscription(&mut subscriptions, &key, None, &sub_id, &sink.clone());
|
||||
next_id += 1;
|
||||
}
|
||||
|
||||
// Add another subscription to the "0" key
|
||||
let sub_id = SubscriptionId::Number(next_id);
|
||||
add_subscription(&mut subscriptions, &0, None, &sub_id, &sink.clone());
|
||||
|
||||
assert_eq!(subscriptions.len(), num_keys);
|
||||
assert_eq!(subscriptions.get(&0).unwrap().len(), 2);
|
||||
assert_eq!(subscriptions.get(&1).unwrap().len(), 1);
|
||||
|
||||
assert_eq!(
|
||||
remove_subscription(&mut subscriptions, &SubscriptionId::Number(0)),
|
||||
true
|
||||
);
|
||||
assert_eq!(subscriptions.len(), num_keys);
|
||||
assert_eq!(subscriptions.get(&0).unwrap().len(), 1);
|
||||
assert_eq!(
|
||||
remove_subscription(&mut subscriptions, &SubscriptionId::Number(0)),
|
||||
false
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
remove_subscription(&mut subscriptions, &SubscriptionId::Number(next_id)),
|
||||
true
|
||||
);
|
||||
assert_eq!(subscriptions.len(), num_keys - 1);
|
||||
assert!(subscriptions.get(&0).is_none());
|
||||
}
|
||||
}
|
||||
|
676
core/src/serve_repair.rs
Normal file
@@ -0,0 +1,676 @@
|
||||
use crate::packet::limited_deserialize;
|
||||
use crate::streamer::{PacketReceiver, PacketSender};
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, ClusterInfoError},
|
||||
contact_info::ContactInfo,
|
||||
packet::Packet,
|
||||
result::Result,
|
||||
};
|
||||
use bincode::serialize;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_measure::thread_mem_usage;
|
||||
use solana_metrics::{datapoint_debug, inc_new_counter_debug};
|
||||
use solana_perf::packet::{Packets, PacketsRecycler};
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
signature::{Keypair, KeypairUtil},
|
||||
timing::duration_as_ms,
|
||||
};
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::{Arc, RwLock},
|
||||
thread::{Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
/// the number of slots to respond with when responding to `Orphan` requests
|
||||
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum RepairType {
|
||||
Orphan(Slot),
|
||||
HighestShred(Slot, u64),
|
||||
Shred(Slot, u64),
|
||||
}
|
||||
|
||||
impl RepairType {
|
||||
pub fn slot(&self) -> Slot {
|
||||
match self {
|
||||
RepairType::Orphan(slot) => *slot,
|
||||
RepairType::HighestShred(slot, _) => *slot,
|
||||
RepairType::Shred(slot, _) => *slot,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Window protocol messages
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
enum RepairProtocol {
|
||||
WindowIndex(ContactInfo, u64, u64),
|
||||
HighestWindowIndex(ContactInfo, u64, u64),
|
||||
Orphan(ContactInfo, u64),
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct ServeRepair {
|
||||
/// set the keypair that will be used to sign repair responses
|
||||
keypair: Arc<Keypair>,
|
||||
my_info: ContactInfo,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
}
|
||||
|
||||
impl ServeRepair {
|
||||
/// Without a valid keypair gossip will not function. Only useful for tests.
|
||||
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
|
||||
Self::new(Arc::new(RwLock::new(
|
||||
ClusterInfo::new_with_invalid_keypair(contact_info),
|
||||
)))
|
||||
}
|
||||
|
||||
pub fn new(cluster_info: Arc<RwLock<ClusterInfo>>) -> Self {
|
||||
let (keypair, my_info) = {
|
||||
let r_cluster_info = cluster_info.read().unwrap();
|
||||
(r_cluster_info.keypair.clone(), r_cluster_info.my_data())
|
||||
};
|
||||
Self {
|
||||
keypair,
|
||||
my_info,
|
||||
cluster_info,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn my_info(&self) -> &ContactInfo {
|
||||
&self.my_info
|
||||
}
|
||||
|
||||
pub fn keypair(&self) -> &Arc<Keypair> {
|
||||
&self.keypair
|
||||
}
|
||||
|
||||
fn get_repair_sender(request: &RepairProtocol) -> &ContactInfo {
|
||||
match request {
|
||||
RepairProtocol::WindowIndex(ref from, _, _) => from,
|
||||
RepairProtocol::HighestWindowIndex(ref from, _, _) => from,
|
||||
RepairProtocol::Orphan(ref from, _) => from,
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_repair(
|
||||
me: &Arc<RwLock<Self>>,
|
||||
recycler: &PacketsRecycler,
|
||||
from_addr: &SocketAddr,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
request: RepairProtocol,
|
||||
) -> Option<Packets> {
|
||||
let now = Instant::now();
|
||||
|
||||
//TODO verify from is signed
|
||||
let my_id = me.read().unwrap().keypair.pubkey();
|
||||
let from = Self::get_repair_sender(&request);
|
||||
if from.id == my_id {
|
||||
warn!(
|
||||
"{}: Ignored received repair request from ME {}",
|
||||
my_id, from.id,
|
||||
);
|
||||
inc_new_counter_debug!("serve_repair-handle-repair--eq", 1);
|
||||
return None;
|
||||
}
|
||||
|
||||
let (res, label) = {
|
||||
match &request {
|
||||
RepairProtocol::WindowIndex(from, slot, shred_index) => {
|
||||
inc_new_counter_debug!("serve_repair-request-window-index", 1);
|
||||
(
|
||||
Self::run_window_request(
|
||||
recycler,
|
||||
from,
|
||||
&from_addr,
|
||||
blockstore,
|
||||
&me.read().unwrap().my_info,
|
||||
*slot,
|
||||
*shred_index,
|
||||
),
|
||||
"WindowIndex",
|
||||
)
|
||||
}
|
||||
|
||||
RepairProtocol::HighestWindowIndex(_, slot, highest_index) => {
|
||||
inc_new_counter_debug!("serve_repair-request-highest-window-index", 1);
|
||||
(
|
||||
Self::run_highest_window_request(
|
||||
recycler,
|
||||
&from_addr,
|
||||
blockstore,
|
||||
*slot,
|
||||
*highest_index,
|
||||
),
|
||||
"HighestWindowIndex",
|
||||
)
|
||||
}
|
||||
RepairProtocol::Orphan(_, slot) => {
|
||||
inc_new_counter_debug!("serve_repair-request-orphan", 1);
|
||||
(
|
||||
Self::run_orphan(
|
||||
recycler,
|
||||
&from_addr,
|
||||
blockstore,
|
||||
*slot,
|
||||
MAX_ORPHAN_REPAIR_RESPONSES,
|
||||
),
|
||||
"Orphan",
|
||||
)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
trace!("{}: received repair request: {:?}", my_id, request);
|
||||
Self::report_time_spent(label, &now.elapsed(), "");
|
||||
res
|
||||
}
|
||||
|
||||
fn report_time_spent(label: &str, time: &Duration, extra: &str) {
|
||||
let count = duration_as_ms(time);
|
||||
if count > 5 {
|
||||
info!("{} took: {} ms {}", label, count, extra);
|
||||
}
|
||||
}
|
||||
|
||||
/// Process messages from the network
|
||||
fn run_listen(
|
||||
obj: &Arc<RwLock<Self>>,
|
||||
recycler: &PacketsRecycler,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
requests_receiver: &PacketReceiver,
|
||||
response_sender: &PacketSender,
|
||||
) -> Result<()> {
|
||||
//TODO cache connections
|
||||
let timeout = Duration::new(1, 0);
|
||||
let reqs = requests_receiver.recv_timeout(timeout)?;
|
||||
|
||||
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn listen(
|
||||
me: Arc<RwLock<Self>>,
|
||||
blockstore: Option<Arc<Blockstore>>,
|
||||
requests_receiver: PacketReceiver,
|
||||
response_sender: PacketSender,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> JoinHandle<()> {
|
||||
let exit = exit.clone();
|
||||
let recycler = PacketsRecycler::default();
|
||||
Builder::new()
|
||||
.name("solana-repair-listen".to_string())
|
||||
.spawn(move || loop {
|
||||
let e = Self::run_listen(
|
||||
&me,
|
||||
&recycler,
|
||||
blockstore.as_ref(),
|
||||
&requests_receiver,
|
||||
&response_sender,
|
||||
);
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
if e.is_err() {
|
||||
info!("repair listener error: {:?}", e);
|
||||
}
|
||||
thread_mem_usage::datapoint("solana-repair-listen");
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn handle_packets(
|
||||
me: &Arc<RwLock<Self>>,
|
||||
recycler: &PacketsRecycler,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
packets: Packets,
|
||||
response_sender: &PacketSender,
|
||||
) {
|
||||
// iter over the packets, collect pulls separately and process everything else
|
||||
let allocated = thread_mem_usage::Allocatedp::default();
|
||||
packets.packets.iter().for_each(|packet| {
|
||||
let start = allocated.get();
|
||||
let from_addr = packet.meta.addr();
|
||||
limited_deserialize(&packet.data[..packet.meta.size])
|
||||
.into_iter()
|
||||
.for_each(|request| {
|
||||
let rsp = Self::handle_repair(me, recycler, &from_addr, blockstore, request);
|
||||
if let Some(rsp) = rsp {
|
||||
let _ignore_disconnect = response_sender.send(rsp);
|
||||
}
|
||||
});
|
||||
datapoint_debug!(
|
||||
"solana-serve-repair-memory",
|
||||
("serve_repair", (allocated.get() - start) as i64, i64),
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
fn window_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
|
||||
let req = RepairProtocol::WindowIndex(self.my_info.clone(), slot, shred_index);
|
||||
let out = serialize(&req)?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
fn window_highest_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
|
||||
let req = RepairProtocol::HighestWindowIndex(self.my_info.clone(), slot, shred_index);
|
||||
let out = serialize(&req)?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
fn orphan_bytes(&self, slot: Slot) -> Result<Vec<u8>> {
|
||||
let req = RepairProtocol::Orphan(self.my_info.clone(), slot);
|
||||
let out = serialize(&req)?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
pub fn repair_request(&self, repair_request: &RepairType) -> Result<(SocketAddr, Vec<u8>)> {
|
||||
// find a peer that appears to be accepting replication and has the desired slot, as indicated
|
||||
// by a valid tvu port location
|
||||
let valid: Vec<_> = self
|
||||
.cluster_info
|
||||
.read()
|
||||
.unwrap()
|
||||
.repair_peers(repair_request.slot());
|
||||
if valid.is_empty() {
|
||||
return Err(ClusterInfoError::NoPeers.into());
|
||||
}
|
||||
let n = thread_rng().gen::<usize>() % valid.len();
|
||||
let addr = valid[n].serve_repair; // send the request to the peer's serve_repair port
|
||||
let out = self.map_repair_request(repair_request)?;
|
||||
|
||||
Ok((addr, out))
|
||||
}
|
||||
|
||||
pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
|
||||
match repair_request {
|
||||
RepairType::Shred(slot, shred_index) => {
|
||||
datapoint_debug!(
|
||||
"serve_repair-repair",
|
||||
("repair-slot", *slot, i64),
|
||||
("repair-ix", *shred_index, i64)
|
||||
);
|
||||
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
|
||||
}
|
||||
RepairType::HighestShred(slot, shred_index) => {
|
||||
datapoint_debug!(
|
||||
"serve_repair-repair_highest",
|
||||
("repair-highest-slot", *slot, i64),
|
||||
("repair-highest-ix", *shred_index, i64)
|
||||
);
|
||||
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
|
||||
}
|
||||
RepairType::Orphan(slot) => {
|
||||
datapoint_debug!("serve_repair-repair_orphan", ("repair-orphan", *slot, i64));
|
||||
Ok(self.orphan_bytes(*slot)?)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn run_window_request(
|
||||
recycler: &PacketsRecycler,
|
||||
from: &ContactInfo,
|
||||
from_addr: &SocketAddr,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
me: &ContactInfo,
|
||||
slot: Slot,
|
||||
shred_index: u64,
|
||||
) -> Option<Packets> {
|
||||
if let Some(blockstore) = blockstore {
|
||||
// Try to find the requested index in one of the slots
|
||||
let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
|
||||
|
||||
if let Ok(Some(packet)) = packet {
|
||||
inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
|
||||
return Some(Packets::new_with_recycler_data(
|
||||
recycler,
|
||||
"run_window_request",
|
||||
vec![packet],
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
inc_new_counter_debug!("serve_repair-window-request-fail", 1);
|
||||
trace!(
|
||||
"{}: failed WindowIndex {} {} {}",
|
||||
me.id,
|
||||
from.id,
|
||||
slot,
|
||||
shred_index,
|
||||
);
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn run_highest_window_request(
|
||||
recycler: &PacketsRecycler,
|
||||
from_addr: &SocketAddr,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
slot: Slot,
|
||||
highest_index: u64,
|
||||
) -> Option<Packets> {
let blockstore = blockstore?;
// Try to find the requested index in one of the slots
let meta = blockstore.meta(slot).ok()??;
if meta.received > highest_index {
// meta.received must be at least 1 by this point
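// (highest_index is a u64, so meta.received > highest_index implies meta.received >= 1)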
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
.ok()??;
return Some(Packets::new_with_recycler_data(
recycler,
"run_highest_window_request",
vec![packet],
));
}
None
}
|
||||
fn run_orphan(
|
||||
recycler: &PacketsRecycler,
|
||||
from_addr: &SocketAddr,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
mut slot: Slot,
|
||||
max_responses: usize,
|
||||
) -> Option<Packets> {
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blockstore) = blockstore {
// Try to find the next "n" parent slots of the input slot
while let Ok(Some(meta)) = blockstore.meta(slot) {
if meta.received == 0 {
break;
}
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
if let Ok(Some(packet)) = packet {
res.packets.push(packet);
}
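// keep walking up parent slots until the response cap is hit or the parent is unknown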
|
||||
if meta.is_parent_set() && res.packets.len() <= max_responses {
|
||||
slot = meta.parent_slot;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if res.is_empty() {
|
||||
return None;
|
||||
}
|
||||
Some(res)
|
||||
}
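A hedged illustration of the parent walk above, mirroring the run_orphan test near the end of this file: for a slot whose ancestry is 3 → 2 → 1 and a max_responses of 5, the response carries the highest data shred of each ancestor.
// Sketch only: `recycler`, `addr`, and `blockstore` are assumed to be set up as
// in the tests below, with slots 1..=3 each holding at least one data shred.
let rv = ServeRepair::run_orphan(&recycler, &addr, Some(&blockstore), 3, 5);
assert_eq!(rv.map(|packets| packets.packets.len()), Some(3));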
|
||||
|
||||
fn get_data_shred_as_packet(
|
||||
blockstore: &Arc<Blockstore>,
|
||||
slot: Slot,
|
||||
shred_index: u64,
|
||||
dest: &SocketAddr,
|
||||
) -> Result<Option<Packet>> {
|
||||
let data = blockstore.get_data_shred(slot, shred_index)?;
|
||||
Ok(data.map(|data| {
|
||||
let mut packet = Packet::default();
|
||||
packet.meta.size = data.len();
|
||||
packet.meta.set_addr(dest);
|
||||
packet.data.copy_from_slice(&data);
|
||||
packet
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::result::Error;
|
||||
use solana_ledger::get_tmp_ledger_path;
|
||||
use solana_ledger::{
|
||||
blockstore::make_many_slot_entries,
|
||||
blockstore_processor::fill_blockstore_slot_with_ticks,
|
||||
shred::{
|
||||
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
|
||||
},
|
||||
};
|
||||
use solana_sdk::{hash::Hash, pubkey::Pubkey, timing::timestamp};
|
||||
|
||||
/// test run_highest_window_request responds with the right shred, and does not overrun
|
||||
#[test]
|
||||
fn run_highest_window_request() {
|
||||
let recycler = PacketsRecycler::default();
|
||||
solana_logger::setup();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rv = ServeRepair::run_highest_window_request(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
0,
|
||||
0,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
|
||||
let _ = fill_blockstore_slot_with_ticks(
|
||||
&blockstore,
|
||||
max_ticks_per_n_shreds(1) + 1,
|
||||
2,
|
||||
1,
|
||||
Hash::default(),
|
||||
);
|
||||
|
||||
let rv = ServeRepair::run_highest_window_request(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
2,
|
||||
1,
|
||||
);
|
||||
let rv: Vec<Shred> = rv
|
||||
.expect("packets")
|
||||
.packets
|
||||
.into_iter()
|
||||
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
|
||||
.collect();
|
||||
assert!(!rv.is_empty());
|
||||
let index = blockstore.meta(2).unwrap().unwrap().received - 1;
|
||||
assert_eq!(rv[0].index(), index as u32);
|
||||
assert_eq!(rv[0].slot(), 2);
|
||||
|
||||
let rv = ServeRepair::run_highest_window_request(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
2,
|
||||
index + 1,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
}
|
||||
|
||||
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
/// test window requests respond with the right shred, and do not overrun
|
||||
#[test]
|
||||
fn run_window_request() {
|
||||
let recycler = PacketsRecycler::default();
|
||||
solana_logger::setup();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let me = ContactInfo::new(
|
||||
&Pubkey::new_rand(),
|
||||
socketaddr!("127.0.0.1:1234"),
|
||||
socketaddr!("127.0.0.1:1235"),
|
||||
socketaddr!("127.0.0.1:1236"),
|
||||
socketaddr!("127.0.0.1:1237"),
|
||||
socketaddr!("127.0.0.1:1238"),
|
||||
socketaddr!("127.0.0.1:1239"),
|
||||
socketaddr!("127.0.0.1:1240"),
|
||||
socketaddr!("127.0.0.1:1241"),
|
||||
socketaddr!("127.0.0.1:1242"),
|
||||
socketaddr!("127.0.0.1:1243"),
|
||||
0,
|
||||
);
|
||||
let rv = ServeRepair::run_window_request(
|
||||
&recycler,
|
||||
&me,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
&me,
|
||||
0,
|
||||
0,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
let mut common_header = ShredCommonHeader::default();
|
||||
common_header.slot = 2;
|
||||
common_header.index = 1;
|
||||
let mut data_header = DataShredHeader::default();
|
||||
data_header.parent_offset = 1;
|
||||
let shred_info = Shred::new_empty_from_header(
|
||||
common_header,
|
||||
data_header,
|
||||
CodingShredHeader::default(),
|
||||
);
|
||||
|
||||
blockstore
|
||||
.insert_shreds(vec![shred_info], None, false)
|
||||
.expect("Expect successful ledger write");
|
||||
|
||||
let rv = ServeRepair::run_window_request(
|
||||
&recycler,
|
||||
&me,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
&me,
|
||||
2,
|
||||
1,
|
||||
);
|
||||
assert!(!rv.is_none());
|
||||
let rv: Vec<Shred> = rv
|
||||
.expect("packets")
|
||||
.packets
|
||||
.into_iter()
|
||||
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
|
||||
.collect();
|
||||
assert_eq!(rv[0].index(), 1);
|
||||
assert_eq!(rv[0].slot(), 2);
|
||||
}
|
||||
|
||||
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn window_index_request() {
|
||||
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(me)));
|
||||
let serve_repair = ServeRepair::new(cluster_info.clone());
|
||||
let rv = serve_repair.repair_request(&RepairType::Shred(0, 0));
|
||||
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
|
||||
|
||||
let serve_repair_addr = socketaddr!([127, 0, 0, 1], 1243);
|
||||
let nxt = ContactInfo::new(
|
||||
&Pubkey::new_rand(),
|
||||
socketaddr!([127, 0, 0, 1], 1234),
|
||||
socketaddr!([127, 0, 0, 1], 1235),
|
||||
socketaddr!([127, 0, 0, 1], 1236),
|
||||
socketaddr!([127, 0, 0, 1], 1237),
|
||||
socketaddr!([127, 0, 0, 1], 1238),
|
||||
socketaddr!([127, 0, 0, 1], 1239),
|
||||
socketaddr!([127, 0, 0, 1], 1240),
|
||||
socketaddr!([127, 0, 0, 1], 1241),
|
||||
socketaddr!([127, 0, 0, 1], 1242),
|
||||
serve_repair_addr,
|
||||
0,
|
||||
);
|
||||
cluster_info.write().unwrap().insert_info(nxt.clone());
|
||||
let rv = serve_repair
|
||||
.repair_request(&RepairType::Shred(0, 0))
|
||||
.unwrap();
|
||||
assert_eq!(nxt.serve_repair, serve_repair_addr);
|
||||
assert_eq!(rv.0, nxt.serve_repair);
|
||||
|
||||
let serve_repair_addr2 = socketaddr!([127, 0, 0, 2], 1243);
|
||||
let nxt = ContactInfo::new(
|
||||
&Pubkey::new_rand(),
|
||||
socketaddr!([127, 0, 0, 1], 1234),
|
||||
socketaddr!([127, 0, 0, 1], 1235),
|
||||
socketaddr!([127, 0, 0, 1], 1236),
|
||||
socketaddr!([127, 0, 0, 1], 1237),
|
||||
socketaddr!([127, 0, 0, 1], 1238),
|
||||
socketaddr!([127, 0, 0, 1], 1239),
|
||||
socketaddr!([127, 0, 0, 1], 1240),
|
||||
socketaddr!([127, 0, 0, 1], 1241),
|
||||
socketaddr!([127, 0, 0, 1], 1242),
|
||||
serve_repair_addr2,
|
||||
0,
|
||||
);
|
||||
cluster_info.write().unwrap().insert_info(nxt);
|
||||
let mut one = false;
|
||||
let mut two = false;
|
||||
while !one || !two {
|
||||
//this randomly picks an option, so eventually it should pick both
|
||||
let rv = serve_repair
|
||||
.repair_request(&RepairType::Shred(0, 0))
|
||||
.unwrap();
|
||||
if rv.0 == serve_repair_addr {
|
||||
one = true;
|
||||
}
|
||||
if rv.0 == serve_repair_addr2 {
|
||||
two = true;
|
||||
}
|
||||
}
|
||||
assert!(one && two);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn run_orphan() {
|
||||
solana_logger::setup();
|
||||
let recycler = PacketsRecycler::default();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rv =
|
||||
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
|
||||
assert!(rv.is_none());
|
||||
|
||||
// Create slots 1, 2, 3 with 5 shreds apiece
|
||||
let (shreds, _) = make_many_slot_entries(1, 3, 5);
|
||||
|
||||
blockstore
|
||||
.insert_shreds(shreds, None, false)
|
||||
.expect("Expect successful ledger write");
|
||||
|
||||
// We don't have slot 4, so we don't know how to service this request
|
||||
let rv =
|
||||
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
|
||||
assert!(rv.is_none());
|
||||
|
||||
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
|
||||
// for this request
|
||||
let rv: Vec<_> =
|
||||
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
|
||||
.expect("run_orphan packets")
|
||||
.packets
|
||||
.iter()
|
||||
.map(|b| b.clone())
|
||||
.collect();
|
||||
let expected: Vec<_> = (1..=3)
|
||||
.rev()
|
||||
.map(|slot| {
|
||||
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
|
||||
ServeRepair::get_data_shred_as_packet(
|
||||
&blockstore,
|
||||
slot,
|
||||
index,
|
||||
&socketaddr_any!(),
|
||||
)
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(rv, expected)
|
||||
}
|
||||
|
||||
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
}
|
||||
}
|
core/src/serve_repair_service.rs (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
use crate::serve_repair::ServeRepair;
|
||||
use crate::streamer;
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_perf::recycler::Recycler;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::{self, JoinHandle};
|
||||
|
||||
pub struct ServeRepairService {
|
||||
thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl ServeRepairService {
|
||||
pub fn new(
|
||||
serve_repair: &Arc<RwLock<ServeRepair>>,
|
||||
blockstore: Option<Arc<Blockstore>>,
|
||||
serve_repair_socket: UdpSocket,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let (request_sender, request_receiver) = channel();
|
||||
let serve_repair_socket = Arc::new(serve_repair_socket);
|
||||
trace!(
|
||||
"ServeRepairService: id: {}, listening on: {:?}",
|
||||
&serve_repair.read().unwrap().my_info().id,
|
||||
serve_repair_socket.local_addr().unwrap()
|
||||
);
|
||||
let t_receiver = streamer::receiver(
|
||||
serve_repair_socket.clone(),
|
||||
&exit,
|
||||
request_sender,
|
||||
Recycler::default(),
|
||||
"serve_repair_receiver",
|
||||
);
|
||||
let (response_sender, response_receiver) = channel();
|
||||
let t_responder =
|
||||
streamer::responder("serve-repairs", serve_repair_socket, response_receiver);
|
||||
let t_listen = ServeRepair::listen(
|
||||
serve_repair.clone(),
|
||||
blockstore,
|
||||
request_receiver,
|
||||
response_sender,
|
||||
exit,
|
||||
);
|
||||
|
||||
let thread_hdls = vec![t_receiver, t_responder, t_listen];
|
||||
Self { thread_hdls }
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
for thread_hdl in self.thread_hdls {
|
||||
thread_hdl.join()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
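A hedged wiring sketch, condensed from the validator.rs hunk later in this diff: the service owns the serve_repair UDP socket, answers repair requests out of the blockstore, and is joined at shutdown. `cluster_info`, `blockstore`, and `serve_repair_socket` are assumed to exist already.
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};

let exit = Arc::new(AtomicBool::new(false));
let serve_repair = Arc::new(RwLock::new(ServeRepair::new(cluster_info.clone())));
let serve_repair_service = ServeRepairService::new(
    &serve_repair,
    Some(blockstore.clone()),
    serve_repair_socket,
    &exit,
);
// ... later, on shutdown:
exit.store(true, Ordering::Relaxed);
serve_repair_service.join().unwrap();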
|
@@ -49,7 +49,7 @@ fn recv_loop(
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return Ok(());
|
||||
}
|
||||
if let Ok(len) = packet::recv_from(&mut msgs, sock) {
|
||||
if let Ok(len) = packet::recv_from(&mut msgs, sock, 1) {
|
||||
if len == NUM_RCVMMSGS {
|
||||
num_max_received += 1;
|
||||
}
|
||||
|
@@ -9,6 +9,7 @@ use crate::{
|
||||
poh_recorder::PohRecorder,
|
||||
replay_stage::{ReplayStage, ReplayStageConfig},
|
||||
retransmit_stage::RetransmitStage,
|
||||
rewards_recorder_service::RewardsRecorderSender,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
shred_fetch_stage::ShredFetchStage,
|
||||
sigverify_shreds::ShredSigVerifier,
|
||||
@@ -86,6 +87,7 @@ impl Tvu {
|
||||
cfg: Option<Arc<AtomicBool>>,
|
||||
shred_version: u16,
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
rewards_recorder_sender: Option<RewardsRecorderSender>,
|
||||
) -> Self {
|
||||
let keypair: Arc<Keypair> = cluster_info
|
||||
.read()
|
||||
@@ -170,6 +172,7 @@ impl Tvu {
|
||||
snapshot_package_sender,
|
||||
block_commitment_cache,
|
||||
transaction_status_sender,
|
||||
rewards_recorder_sender,
|
||||
};
|
||||
|
||||
let (replay_stage, root_bank_receiver) = ReplayStage::new(
|
||||
@@ -312,6 +315,7 @@ pub mod tests {
|
||||
None,
|
||||
0,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
tvu.join().unwrap();
|
||||
|
@@ -8,10 +8,13 @@ use crate::{
|
||||
gossip_service::{discover_cluster, GossipService},
|
||||
poh_recorder::PohRecorder,
|
||||
poh_service::PohService,
|
||||
rewards_recorder_service::RewardsRecorderService,
|
||||
rpc::JsonRpcConfig,
|
||||
rpc_pubsub_service::PubSubService,
|
||||
rpc_service::JsonRpcService,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
serve_repair::ServeRepair,
|
||||
serve_repair_service::ServeRepairService,
|
||||
sigverify,
|
||||
storage_stage::StorageState,
|
||||
tpu::Tpu,
|
||||
@@ -19,7 +22,6 @@ use crate::{
|
||||
tvu::{Sockets, Tvu},
|
||||
};
|
||||
use crossbeam_channel::unbounded;
|
||||
use solana_ledger::shred::Shred;
|
||||
use solana_ledger::{
|
||||
bank_forks::{BankForks, SnapshotConfig},
|
||||
bank_forks_utils,
|
||||
@@ -28,13 +30,14 @@ use solana_ledger::{
|
||||
create_new_tmp_ledger,
|
||||
leader_schedule::FixedSchedule,
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
shred_version::compute_shred_version,
|
||||
};
|
||||
use solana_metrics::datapoint_info;
|
||||
use solana_runtime::{bank::Bank, hard_forks::HardForks};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
clock::{Slot, DEFAULT_SLOTS_PER_TURN},
|
||||
genesis_config::GenesisConfig,
|
||||
hash::{extend_and_hash, Hash},
|
||||
hash::Hash,
|
||||
poh_config::PohConfig,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, KeypairUtil},
|
||||
@@ -58,7 +61,6 @@ pub struct ValidatorConfig {
|
||||
pub expected_genesis_hash: Option<Hash>,
|
||||
pub expected_shred_version: Option<u16>,
|
||||
pub voting_disabled: bool,
|
||||
pub transaction_status_service_disabled: bool,
|
||||
pub blockstream_unix_socket: Option<PathBuf>,
|
||||
pub storage_slots_per_turn: u64,
|
||||
pub account_paths: Vec<PathBuf>,
|
||||
@@ -81,7 +83,6 @@ impl Default for ValidatorConfig {
|
||||
expected_genesis_hash: None,
|
||||
expected_shred_version: None,
|
||||
voting_disabled: false,
|
||||
transaction_status_service_disabled: false,
|
||||
blockstream_unix_socket: None,
|
||||
storage_slots_per_turn: DEFAULT_SLOTS_PER_TURN,
|
||||
max_ledger_slots: None,
|
||||
@@ -120,7 +121,9 @@ pub struct Validator {
|
||||
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
|
||||
rpc_service: Option<(JsonRpcService, PubSubService)>,
|
||||
transaction_status_service: Option<TransactionStatusService>,
|
||||
rewards_recorder_service: Option<RewardsRecorderService>,
|
||||
gossip_service: GossipService,
|
||||
serve_repair_service: ServeRepairService,
|
||||
poh_recorder: Arc<Mutex<PohRecorder>>,
|
||||
poh_service: PohService,
|
||||
tpu: Tpu,
|
||||
@@ -193,7 +196,7 @@ impl Validator {
|
||||
|
||||
node.info.wallclock = timestamp();
|
||||
node.info.shred_version =
|
||||
compute_shred_version(&genesis_hash, &bank.hard_forks().read().unwrap());
|
||||
compute_shred_version(&genesis_hash, Some(&bank.hard_forks().read().unwrap()));
|
||||
Self::print_node_info(&node);
|
||||
|
||||
if let Some(expected_shred_version) = config.expected_shred_version {
|
||||
@@ -251,7 +254,7 @@ impl Validator {
|
||||
});
|
||||
|
||||
let (transaction_status_sender, transaction_status_service) =
|
||||
if rpc_service.is_some() && !config.transaction_status_service_disabled {
|
||||
if rpc_service.is_some() && config.rpc_config.enable_get_confirmed_block {
|
||||
let (transaction_status_sender, transaction_status_receiver) = unbounded();
|
||||
(
|
||||
Some(transaction_status_sender),
|
||||
@@ -265,6 +268,21 @@ impl Validator {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
let (rewards_recorder_sender, rewards_recorder_service) =
|
||||
if rpc_service.is_some() && config.rpc_config.enable_get_confirmed_block {
|
||||
let (rewards_recorder_sender, rewards_receiver) = unbounded();
|
||||
(
|
||||
Some(rewards_recorder_sender),
|
||||
Some(RewardsRecorderService::new(
|
||||
rewards_receiver,
|
||||
blockstore.clone(),
|
||||
&exit,
|
||||
)),
|
||||
)
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
info!(
|
||||
"Starting PoH: epoch={} slot={} tick_height={} blockhash={} leader={:?}",
|
||||
bank.epoch(),
|
||||
@@ -302,12 +320,19 @@ impl Validator {
|
||||
|
||||
let gossip_service = GossipService::new(
|
||||
&cluster_info,
|
||||
Some(blockstore.clone()),
|
||||
Some(bank_forks.clone()),
|
||||
node.sockets.gossip,
|
||||
&exit,
|
||||
);
|
||||
|
||||
let serve_repair = Arc::new(RwLock::new(ServeRepair::new(cluster_info.clone())));
|
||||
let serve_repair_service = ServeRepairService::new(
|
||||
&serve_repair,
|
||||
Some(blockstore.clone()),
|
||||
node.sockets.serve_repair,
|
||||
&exit,
|
||||
);
|
||||
|
||||
// Insert the entrypoint info, should only be None if this node
|
||||
// is the bootstrap validator
|
||||
if let Some(entrypoint_info) = entrypoint_info_option {
|
||||
@@ -378,6 +403,7 @@ impl Validator {
|
||||
config.enable_partition.clone(),
|
||||
node.info.shred_version,
|
||||
transaction_status_sender.clone(),
|
||||
rewards_recorder_sender,
|
||||
);
|
||||
|
||||
if config.dev_sigverify_disabled {
|
||||
@@ -403,8 +429,10 @@ impl Validator {
|
||||
Self {
|
||||
id,
|
||||
gossip_service,
|
||||
serve_repair_service,
|
||||
rpc_service,
|
||||
transaction_status_service,
|
||||
rewards_recorder_service,
|
||||
tpu,
|
||||
tvu,
|
||||
poh_service,
|
||||
@@ -462,7 +490,12 @@ impl Validator {
|
||||
transaction_status_service.join()?;
|
||||
}
|
||||
|
||||
if let Some(rewards_recorder_service) = self.rewards_recorder_service {
|
||||
rewards_recorder_service.join()?;
|
||||
}
|
||||
|
||||
self.gossip_service.join()?;
|
||||
self.serve_repair_service.join()?;
|
||||
self.tpu.join()?;
|
||||
self.tvu.join()?;
|
||||
self.ip_echo_server.shutdown_now();
|
||||
@@ -471,20 +504,6 @@ impl Validator {
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_shred_version(genesis_hash: &Hash, hard_forks: &HardForks) -> u16 {
|
||||
use byteorder::{ByteOrder, LittleEndian};
|
||||
|
||||
let mut hash = *genesis_hash;
|
||||
for (slot, count) in hard_forks.iter() {
|
||||
let mut buf = [0u8; 16];
|
||||
LittleEndian::write_u64(&mut buf[..8], *slot);
|
||||
LittleEndian::write_u64(&mut buf[8..], *count as u64);
|
||||
hash = extend_and_hash(&hash, &buf);
|
||||
}
|
||||
|
||||
Shred::version_from_hash(&hash)
|
||||
}
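The helper removed here moves to solana_ledger::shred_version (see the call-site change above); the hard-forks argument becomes an Option so genesis tooling can pass None. A hedged usage sketch, with `genesis_hash`, `bank`, and `node` assumed from the surrounding validator code:
use solana_ledger::shred_version::compute_shred_version;

// Derived from the genesis hash plus any registered hard forks; the deleted
// unit test later in this diff showed the value changing as forks register.
node.info.shred_version =
    compute_shred_version(&genesis_hash, Some(&bank.hard_forks().read().unwrap()));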
|
||||
|
||||
fn new_banks_from_blockstore(
|
||||
expected_genesis_hash: Option<Hash>,
|
||||
blockstore_path: &Path,
|
||||
@@ -581,7 +600,23 @@ fn wait_for_supermajority(
|
||||
}
|
||||
|
||||
pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, PathBuf) {
|
||||
use crate::genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo};
|
||||
let (node, contact_info, mint_keypair, ledger_path, _vote_pubkey) =
|
||||
new_validator_for_tests_with_vote_pubkey();
|
||||
(node, contact_info, mint_keypair, ledger_path)
|
||||
}
|
||||
|
||||
pub fn new_validator_for_tests_with_vote_pubkey(
|
||||
) -> (Validator, ContactInfo, Keypair, PathBuf, Pubkey) {
|
||||
use crate::genesis_utils::BOOTSTRAP_VALIDATOR_LAMPORTS;
|
||||
new_validator_for_tests_ex(0, BOOTSTRAP_VALIDATOR_LAMPORTS)
|
||||
}
|
||||
|
||||
pub fn new_validator_for_tests_ex(
|
||||
fees: u64,
|
||||
bootstrap_validator_lamports: u64,
|
||||
) -> (Validator, ContactInfo, Keypair, PathBuf, Pubkey) {
|
||||
use crate::genesis_utils::{create_genesis_config_with_leader_ex, GenesisConfigInfo};
|
||||
use solana_sdk::fee_calculator::FeeCalculator;
|
||||
|
||||
let node_keypair = Arc::new(Keypair::new());
|
||||
let node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
|
||||
@@ -591,20 +626,25 @@ pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, PathBuf) {
|
||||
mut genesis_config,
|
||||
mint_keypair,
|
||||
voting_keypair,
|
||||
} = create_genesis_config_with_leader(1_000_000, &contact_info.id, 42);
|
||||
} = create_genesis_config_with_leader_ex(
|
||||
1_000_000,
|
||||
&contact_info.id,
|
||||
42,
|
||||
bootstrap_validator_lamports,
|
||||
);
|
||||
genesis_config
|
||||
.native_instruction_processors
|
||||
.push(solana_budget_program!());
|
||||
|
||||
genesis_config.rent.lamports_per_byte_year = 1;
|
||||
genesis_config.rent.exemption_threshold = 1.0;
|
||||
genesis_config.fee_calculator = FeeCalculator::new(fees, 0);
|
||||
|
||||
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
|
||||
|
||||
let leader_voting_keypair = Arc::new(voting_keypair);
|
||||
let storage_keypair = Arc::new(Keypair::new());
|
||||
let config = ValidatorConfig {
|
||||
transaction_status_service_disabled: true,
|
||||
rpc_ports: Some((node.info.rpc.port(), node.info.rpc_pubsub.port())),
|
||||
..ValidatorConfig::default()
|
||||
};
|
||||
@@ -620,7 +660,13 @@ pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, PathBuf) {
|
||||
&config,
|
||||
);
|
||||
discover_cluster(&contact_info.gossip, 1).expect("Node startup failed");
|
||||
(node, contact_info, mint_keypair, ledger_path)
|
||||
(
|
||||
node,
|
||||
contact_info,
|
||||
mint_keypair,
|
||||
ledger_path,
|
||||
leader_voting_keypair.pubkey(),
|
||||
)
|
||||
}
|
||||
|
||||
fn report_target_features() {
|
||||
@@ -683,16 +729,6 @@ mod tests {
|
||||
use crate::genesis_utils::create_genesis_config_with_leader;
|
||||
use std::fs::remove_dir_all;
|
||||
|
||||
#[test]
|
||||
fn test_compute_shred_version() {
|
||||
let mut hard_forks = HardForks::default();
|
||||
assert_eq!(compute_shred_version(&Hash::default(), &hard_forks), 1);
|
||||
hard_forks.register(1);
|
||||
assert_eq!(compute_shred_version(&Hash::default(), &hard_forks), 55551);
|
||||
hard_forks.register(1);
|
||||
assert_eq!(compute_shred_version(&Hash::default(), &hard_forks), 46353);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validator_exit() {
|
||||
solana_logger::setup();
|
||||
@@ -709,7 +745,6 @@ mod tests {
|
||||
let voting_keypair = Arc::new(Keypair::new());
|
||||
let storage_keypair = Arc::new(Keypair::new());
|
||||
let config = ValidatorConfig {
|
||||
transaction_status_service_disabled: true,
|
||||
rpc_ports: Some((
|
||||
validator_node.info.rpc.port(),
|
||||
validator_node.info.rpc_pubsub.port(),
|
||||
@@ -749,7 +784,6 @@ mod tests {
|
||||
let voting_keypair = Arc::new(Keypair::new());
|
||||
let storage_keypair = Arc::new(Keypair::new());
|
||||
let config = ValidatorConfig {
|
||||
transaction_status_service_disabled: true,
|
||||
rpc_ports: Some((
|
||||
validator_node.info.rpc.port(),
|
||||
validator_node.info.rpc_pubsub.port(),
|
||||
|
@@ -5,6 +5,7 @@ use solana_core::cluster_info;
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_core::crds_gossip::*;
|
||||
use solana_core::crds_gossip_error::CrdsGossipError;
|
||||
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
|
||||
use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS;
|
||||
use solana_core::crds_value::CrdsValueLabel;
|
||||
use solana_core::crds_value::{CrdsData, CrdsValue};
|
||||
@@ -396,6 +397,9 @@ fn network_run_pull(
|
||||
let mut convergance = 0f64;
|
||||
let num = network.len();
|
||||
let network_values: Vec<Node> = network.values().cloned().collect();
|
||||
let mut timeouts = HashMap::new();
|
||||
timeouts.insert(Pubkey::default(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS);
|
||||
|
||||
for t in start..end {
|
||||
let now = t as u64 * 100;
|
||||
let requests: Vec<_> = {
|
||||
@@ -448,7 +452,10 @@ fn network_run_pull(
|
||||
node.lock()
|
||||
.unwrap()
|
||||
.mark_pull_request_creation_time(&from, now);
|
||||
overhead += node.lock().unwrap().process_pull_response(&from, rsp, now);
|
||||
overhead += node
|
||||
.lock()
|
||||
.unwrap()
|
||||
.process_pull_response(&from, &timeouts, rsp, now);
|
||||
});
|
||||
(bytes, msgs, overhead)
|
||||
})
|
||||
|
@@ -21,8 +21,7 @@ fn test_node(exit: &Arc<AtomicBool>) -> (Arc<RwLock<ClusterInfo>>, GossipService
|
||||
test_node.info.clone(),
|
||||
keypair,
|
||||
)));
|
||||
let gossip_service =
|
||||
GossipService::new(&cluster_info, None, None, test_node.sockets.gossip, exit);
|
||||
let gossip_service = GossipService::new(&cluster_info, None, test_node.sockets.gossip, exit);
|
||||
let _ = cluster_info.read().unwrap().my_data();
|
||||
(
|
||||
cluster_info,
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-crate-features"
|
||||
version = "0.23.1"
|
||||
version = "0.23.3"
|
||||
description = "Solana Crate Features"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-faucet"
|
||||
version = "0.23.1"
|
||||
version = "0.23.3"
|
||||
description = "Solana Faucet"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -19,10 +19,10 @@ clap = "2.33"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
|
||||
solana-logger = { path = "../logger", version = "0.23.1" }
|
||||
solana-metrics = { path = "../metrics", version = "0.23.1" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.1" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
|
||||
solana-logger = { path = "../logger", version = "0.23.3" }
|
||||
solana-metrics = { path = "../metrics", version = "0.23.3" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.3" }
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-genesis-programs"
|
||||
version = "0.23.1"
|
||||
version = "0.23.3"
|
||||
description = "Solana genesis programs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -10,16 +10,16 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = { version = "0.4.8" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "0.23.1" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.23.1" }
|
||||
solana-config-program = { path = "../programs/config", version = "0.23.1" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "0.23.1" }
|
||||
solana-runtime = { path = "../runtime", version = "0.23.1" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.1" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.23.1" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.23.1" }
|
||||
solana-vest-program = { path = "../programs/vest", version = "0.23.1" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.23.1" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "0.23.3" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.23.3" }
|
||||
solana-config-program = { path = "../programs/config", version = "0.23.3" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "0.23.3" }
|
||||
solana-runtime = { path = "../runtime", version = "0.23.3" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.3" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.23.3" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.23.3" }
|
||||
solana-vest-program = { path = "../programs/vest", version = "0.23.3" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-genesis"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.23.1"
|
||||
version = "0.23.3"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -17,13 +17,13 @@ serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.44"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "0.23.1" }
|
||||
solana-ledger = { path = "../ledger", version = "0.23.1" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.1" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.23.1" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.23.1" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.23.1" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "0.23.3" }
|
||||
solana-ledger = { path = "../ledger", version = "0.23.3" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.3" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.23.3" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.23.3" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -7,7 +7,10 @@ use solana_clap_utils::{
|
||||
input_validators::{is_rfc3339_datetime, is_valid_percentage},
|
||||
};
|
||||
use solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account};
|
||||
use solana_ledger::{blockstore::create_new_ledger, poh::compute_hashes_per_tick};
|
||||
use solana_ledger::{
|
||||
blockstore::create_new_ledger, poh::compute_hashes_per_tick,
|
||||
shred_version::compute_shred_version,
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock,
|
||||
@@ -521,10 +524,19 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
create_new_ledger(&ledger_path, &genesis_config)?;
|
||||
|
||||
println!(
|
||||
"Genesis hash: {}\nCreation time: {}\nOperating mode: {:?}\nHashes per tick: {:?}\nSlots per epoch: {}\nCapitalization: {} SOL in {} accounts",
|
||||
genesis_config.hash(),
|
||||
"\
|
||||
Creation time: {}\n\
|
||||
Operating mode: {:?}\n\
|
||||
Genesis hash: {}\n\
|
||||
Shred version: {}\n\
|
||||
Hashes per tick: {:?}\n\
|
||||
Slots per epoch: {}\n\
|
||||
Capitalization: {} SOL in {} accounts\
|
||||
",
|
||||
Utc.timestamp(genesis_config.creation_time, 0).to_rfc3339(),
|
||||
operating_mode,
|
||||
genesis_config.hash(),
|
||||
compute_shred_version(&genesis_config.hash(), None),
|
||||
genesis_config.poh_config.hashes_per_tick,
|
||||
slots_per_epoch,
|
||||
lamports_to_sol(
|
||||
@@ -537,7 +549,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
}
|
||||
account.lamports
|
||||
})
|
||||
.sum::<u64>()),
|
||||
.sum::<u64>()
|
||||
),
|
||||
genesis_config.accounts.len()
|
||||
);
|
||||
|
||||
|
@@ -3,19 +3,19 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-gossip"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.23.1"
|
||||
version = "0.23.3"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
|
||||
solana-core = { path = "../core", version = "0.23.1" }
|
||||
solana-client = { path = "../client", version = "0.23.1" }
|
||||
solana-logger = { path = "../logger", version = "0.23.1" }
|
||||
solana-net-utils = { path = "../net-utils", version = "0.23.1" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.1" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
|
||||
solana-core = { path = "../core", version = "0.23.3" }
|
||||
solana-client = { path = "../client", version = "0.23.3" }
|
||||
solana-logger = { path = "../logger", version = "0.23.3" }
|
||||
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.3" }
|
||||
|
||||
|
||||
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-install"
|
||||
description = "The solana cluster software installer"
|
||||
version = "0.23.1"
|
||||
version = "0.23.3"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -26,11 +26,11 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
|
||||
solana-client = { path = "../client", version = "0.23.1" }
|
||||
solana-config-program = { path = "../programs/config", version = "0.23.1" }
|
||||
solana-logger = { path = "../logger", version = "0.23.1" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.1" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
|
||||
solana-client = { path = "../client", version = "0.23.3" }
|
||||
solana-config-program = { path = "../programs/config", version = "0.23.3" }
|
||||
solana-logger = { path = "../logger", version = "0.23.3" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.3" }
|
||||
tar = "0.4.26"
|
||||
tempdir = "0.3.7"
|
||||
url = "2.1.1"
|
||||
|
@@ -6,6 +6,7 @@ use crate::{
|
||||
use chrono::{Local, TimeZone};
|
||||
use console::{style, Emoji};
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use serde_derive::Deserialize;
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_config_program::{config_instruction, get_config_data, ConfigState};
|
||||
use solana_sdk::{
|
||||
@@ -25,6 +26,13 @@ use std::{
|
||||
use tempdir::TempDir;
|
||||
use url::Url;
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct ReleaseVersion {
|
||||
pub target: String,
|
||||
pub commit: String,
|
||||
channel: String,
|
||||
}
|
||||
|
||||
static TRUCK: Emoji = Emoji("🚚 ", "");
|
||||
static LOOKING_GLASS: Emoji = Emoji("🔍 ", "");
|
||||
static BULLET: Emoji = Emoji("• ", "* ");
|
||||
@@ -46,15 +54,15 @@ fn println_name_value(name: &str, value: &str) {
|
||||
println!("{} {}", style(name).bold(), value);
|
||||
}
|
||||
|
||||
/// Downloads the release archive at `url` to a temporary location. If `expected_sha256` is
|
||||
/// Some(_), produce an error if the release SHA256 doesn't match.
|
||||
/// Downloads a file at `url` to a temporary location. If `expected_sha256` is
|
||||
/// Some(_), produce an error if the SHA256 of the file contents doesn't match.
|
||||
///
|
||||
/// Returns a tuple consisting of:
|
||||
/// * TempDir - drop this value to clean up the temporary location
|
||||
/// * PathBuf - path to the downloaded release (within `TempDir`)
|
||||
/// * PathBuf - path to the downloaded file (within `TempDir`)
|
||||
/// * String - SHA256 of the release
|
||||
///
|
||||
fn download_to_temp_archive(
|
||||
fn download_to_temp(
|
||||
url: &str,
|
||||
expected_sha256: Option<&Hash>,
|
||||
) -> Result<(TempDir, PathBuf, Hash), Box<dyn std::error::Error>> {
|
||||
@@ -77,7 +85,7 @@ fn download_to_temp_archive(
|
||||
let url = Url::parse(url).map_err(|err| format!("Unable to parse {}: {}", url, err))?;
|
||||
|
||||
let temp_dir = TempDir::new(clap::crate_name!())?;
|
||||
let temp_file = temp_dir.path().join("release.tar.bz2");
|
||||
let temp_file = temp_dir.path().join("download");
|
||||
|
||||
let client = reqwest::blocking::Client::new();
|
||||
|
||||
@@ -162,22 +170,21 @@ fn extract_release_archive(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reads the supported TARGET triple for the given release
|
||||
fn load_release_target(release_dir: &Path) -> Result<String, Box<dyn std::error::Error>> {
|
||||
use serde_derive::Deserialize;
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct ReleaseVersion {
|
||||
pub target: String,
|
||||
pub commit: String,
|
||||
channel: String,
|
||||
}
|
||||
fn load_release_version(version_yml: &Path) -> Result<ReleaseVersion, String> {
|
||||
let file = File::open(&version_yml)
|
||||
.map_err(|err| format!("Unable to open {:?}: {:?}", version_yml, err))?;
|
||||
let version: ReleaseVersion = serde_yaml::from_reader(file)
|
||||
.map_err(|err| format!("Unable to parse {:?}: {:?}", version_yml, err))?;
|
||||
Ok(version)
|
||||
}
|
||||
|
||||
/// Reads the supported TARGET triple for the given release
|
||||
fn load_release_target(release_dir: &Path) -> Result<String, String> {
|
||||
let mut version_yml = PathBuf::from(release_dir);
|
||||
version_yml.push("solana-release");
|
||||
version_yml.push("version.yml");
|
||||
|
||||
let file = File::open(&version_yml)?;
|
||||
let version: ReleaseVersion = serde_yaml::from_reader(file)?;
|
||||
let version = load_release_version(&version_yml)?;
|
||||
Ok(version.target)
|
||||
}
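A hedged usage sketch of the new helper: read the commit recorded for an installed release. `release_dir` is assumed; the solana-release/version.yml path matches the one used in update() below.
let version_yml = release_dir.join("solana-release").join("version.yml");
let installed: ReleaseVersion = load_release_version(&version_yml)?;
println!("installed commit: {} (target: {})", installed.commit, installed.target);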
|
||||
|
||||
@@ -554,6 +561,14 @@ fn release_channel_download_url(release_channel: &str) -> String {
|
||||
)
|
||||
}
|
||||
|
||||
fn release_channel_version_url(release_channel: &str) -> String {
|
||||
format!(
|
||||
"http://release.solana.com/{}/solana-release-{}.yml",
|
||||
release_channel,
|
||||
crate::build_env::TARGET
|
||||
)
|
||||
}
|
||||
|
||||
pub fn info(config_file: &str, local_info_only: bool) -> Result<Option<UpdateManifest>, String> {
|
||||
let config = Config::load(config_file)?;
|
||||
|
||||
@@ -663,9 +678,8 @@ pub fn deploy(
|
||||
}
|
||||
|
||||
// Download the release
|
||||
let (temp_dir, temp_archive, temp_archive_sha256) =
|
||||
download_to_temp_archive(download_url, None)
|
||||
.map_err(|err| format!("Unable to download {}: {}", download_url, err))?;
|
||||
let (temp_dir, temp_archive, temp_archive_sha256) = download_to_temp(download_url, None)
|
||||
.map_err(|err| format!("Unable to download {}: {}", download_url, err))?;
|
||||
|
||||
if let Ok(update_manifest) = get_update_manifest(&rpc_client, &update_manifest_keypair.pubkey())
|
||||
{
|
||||
@@ -743,31 +757,57 @@ pub fn update(config_file: &str) -> Result<bool, String> {
|
||||
let update_manifest = info(config_file, false)?;
|
||||
|
||||
let release_dir = if let Some(explicit_release) = &config.explicit_release {
|
||||
let (download, release_dir) = match explicit_release {
|
||||
let (download_url, release_dir) = match explicit_release {
|
||||
ExplicitRelease::Semver(release_semver) => {
|
||||
let download_url = github_release_download_url(release_semver);
|
||||
let release_dir = config.release_dir(&release_semver);
|
||||
let download = if release_dir.join(".ok").exists() {
|
||||
let download_url = if release_dir.join(".ok").exists() {
|
||||
// If this release_semver has already been successfully downloaded, no update
|
||||
// needed
|
||||
println!("{} is present, no download required.", release_semver);
|
||||
None
|
||||
} else {
|
||||
Some(download_url)
|
||||
};
|
||||
(download, release_dir)
|
||||
(download_url, release_dir)
|
||||
}
|
||||
ExplicitRelease::Channel(release_channel) => {
|
||||
let download_url = release_channel_download_url(release_channel);
|
||||
let release_dir = config.release_dir(&release_channel);
|
||||
// Note: There's currently no mechanism to check for an updated binary for a release
|
||||
// channel so a download always occurs.
|
||||
(Some(download_url), release_dir)
|
||||
let current_release_version_yml =
|
||||
release_dir.join("solana-release").join("version.yml");
|
||||
let download_url = Some(release_channel_download_url(release_channel));
|
||||
|
||||
if !current_release_version_yml.exists() {
|
||||
(download_url, release_dir)
|
||||
} else {
|
||||
let version_url = release_channel_version_url(release_channel);
|
||||
|
||||
let (_temp_dir, temp_file, _temp_archive_sha256) =
|
||||
download_to_temp(&version_url, None).map_err(|err| {
|
||||
format!("Unable to download {}: {}", version_url, err)
|
||||
})?;
|
||||
|
||||
let update_release_version = load_release_version(&temp_file)?;
|
||||
let current_release_version =
|
||||
load_release_version(&current_release_version_yml)?;
|
||||
|
||||
if update_release_version.commit == current_release_version.commit {
|
||||
// Same commit, no update required
|
||||
println!(
|
||||
"Latest {} build is already present, no download required.",
|
||||
release_channel
|
||||
);
|
||||
(None, release_dir)
|
||||
} else {
|
||||
(download_url, release_dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(download_url) = download {
|
||||
if let Some(download_url) = download_url {
|
||||
let (_temp_dir, temp_archive, _temp_archive_sha256) =
|
||||
download_to_temp_archive(&download_url, None)
|
||||
download_to_temp(&download_url, None)
|
||||
.map_err(|err| format!("Unable to download {}: {}", download_url, err))?;
|
||||
extract_release_archive(&temp_archive, &release_dir).map_err(|err| {
|
||||
format!(
|
||||
@@ -797,7 +837,7 @@ pub fn update(config_file: &str) -> Result<bool, String> {
|
||||
}
|
||||
}
|
||||
let release_dir = config.release_dir(&update_manifest.download_sha256.to_string());
|
||||
let (_temp_dir, temp_archive, _temp_archive_sha256) = download_to_temp_archive(
|
||||
let (_temp_dir, temp_archive, _temp_archive_sha256) = download_to_temp(
|
||||
&update_manifest.download_url,
|
||||
Some(&update_manifest.download_sha256),
|
||||
)
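The release-channel branch above reduces to a commit comparison between the remote version.yml and the installed one; a hedged restatement of that check, with both `ReleaseVersion` values obtained via load_release_version:
// Re-download a channel build only when the remote version.yml advertises a
// different commit than the installed release.
fn channel_needs_download(installed: &ReleaseVersion, remote: &ReleaseVersion) -> bool {
    installed.commit != remote.commit
}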
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-keygen"
|
||||
version = "0.23.1"
|
||||
version = "0.23.3"
|
||||
description = "Solana key generation utility"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -13,10 +13,10 @@ bs58 = "0.3.0"
|
||||
clap = "2.33"
|
||||
dirs = "2.0.2"
|
||||
num_cpus = "1.12.0"
|
||||
rpassword = "4.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
|
||||
solana-cli-config = { path = "../cli-config", version = "0.23.1" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.1" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
|
||||
solana-cli-config = { path = "../cli-config", version = "0.23.3" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "0.23.3" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.3" }
|
||||
tiny-bip39 = "0.7.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -5,12 +5,20 @@ use clap::{
|
||||
SubCommand,
|
||||
};
|
||||
use num_cpus;
|
||||
use solana_clap_utils::keypair::{
|
||||
keypair_from_seed_phrase, prompt_passphrase, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
|
||||
use solana_clap_utils::{
|
||||
input_parsers::derivation_of,
|
||||
input_validators::is_derivation,
|
||||
keypair::{
|
||||
keypair_from_seed_phrase, prompt_passphrase, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
|
||||
},
|
||||
};
|
||||
use solana_cli_config::config::{Config, CONFIG_FILE};
|
||||
use solana_remote_wallet::{
|
||||
ledger::get_ledger_from_info,
|
||||
remote_wallet::{RemoteWallet, RemoteWalletInfo},
|
||||
};
|
||||
use solana_sdk::{
|
||||
pubkey::write_pubkey_file,
|
||||
pubkey::{write_pubkey_file, Pubkey},
|
||||
signature::{
|
||||
keypair_from_seed, read_keypair, read_keypair_file, write_keypair, write_keypair_file,
|
||||
Keypair, KeypairUtil, Signature,
|
||||
@@ -65,11 +73,47 @@ fn get_keypair_from_matches(
|
||||
} else if keypair == ASK_KEYWORD {
|
||||
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
||||
keypair_from_seed_phrase("pubkey recovery", skip_validation, false)
|
||||
} else if keypair.starts_with("usb://") {
|
||||
Err(String::from("Remote wallet signing not yet implemented").into())
|
||||
} else {
|
||||
read_keypair_file(keypair)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_pubkey_from_matches(
|
||||
matches: &ArgMatches,
|
||||
config: Config,
|
||||
) -> Result<Pubkey, Box<dyn error::Error>> {
|
||||
let mut path = dirs::home_dir().expect("home directory");
|
||||
let keypair = if matches.is_present("keypair") {
|
||||
matches.value_of("keypair").unwrap()
|
||||
} else if config.keypair_path != "" {
|
||||
&config.keypair_path
|
||||
} else {
|
||||
path.extend(&[".config", "solana", "id.json"]);
|
||||
path.to_str().unwrap()
|
||||
};
|
||||
|
||||
if keypair == "-" {
|
||||
let mut stdin = std::io::stdin();
|
||||
read_keypair(&mut stdin).map(|keypair| keypair.pubkey())
|
||||
} else if keypair == ASK_KEYWORD {
|
||||
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
||||
keypair_from_seed_phrase("pubkey recovery", skip_validation, false)
|
||||
.map(|keypair| keypair.pubkey())
|
||||
} else if keypair.starts_with("usb://") {
|
||||
let (remote_wallet_info, mut derivation_path) =
|
||||
RemoteWalletInfo::parse_path(keypair.to_string())?;
|
||||
if let Some(derivation) = derivation_of(matches, "derivation_path") {
|
||||
derivation_path = derivation;
|
||||
}
|
||||
let ledger = get_ledger_from_info(remote_wallet_info)?;
|
||||
Ok(ledger.get_pubkey(&derivation_path)?)
|
||||
} else {
|
||||
read_keypair_file(keypair).map(|keypair| keypair.pubkey())
|
||||
}
|
||||
}
|
||||
|
||||
fn output_keypair(
|
||||
keypair: &Keypair,
|
||||
outfile: &str,
|
||||
@@ -326,7 +370,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
.index(1)
|
||||
.value_name("PATH")
|
||||
.takes_value(true)
|
||||
.help("Path to keypair file"),
|
||||
.help("Path to keypair file or remote wallet"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
|
||||
@@ -346,6 +390,14 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
.short("f")
|
||||
.long("force")
|
||||
.help("Overwrite the output file if it exists"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("derivation_path")
|
||||
.long("derivation-path")
|
||||
.value_name("ACCOUNT or ACCOUNT/CHANGE")
|
||||
.takes_value(true)
|
||||
.validator(is_derivation)
|
||||
.help("Derivation path to use: m/44'/501'/ACCOUNT'/CHANGE'; default key is device base pubkey: m/44'/501'/0'")
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
@@ -382,14 +434,14 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
|
||||
match matches.subcommand() {
|
||||
("pubkey", Some(matches)) => {
|
||||
let keypair = get_keypair_from_matches(matches, config)?;
|
||||
let pubkey = get_pubkey_from_matches(matches, config)?;
|
||||
|
||||
if matches.is_present("outfile") {
|
||||
let outfile = matches.value_of("outfile").unwrap();
|
||||
check_for_overwrite(&outfile, &matches);
|
||||
write_pubkey_file(outfile, keypair.pubkey())?;
|
||||
write_pubkey_file(outfile, pubkey)?;
|
||||
} else {
|
||||
println!("{}", keypair.pubkey());
|
||||
println!("{}", pubkey);
|
||||
}
|
||||
}
|
||||
("new", Some(matches)) => {
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-ledger-tool"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.23.1"
|
||||
version = "0.23.3"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -16,12 +16,12 @@ serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.44"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
|
||||
solana-ledger = { path = "../ledger", version = "0.23.1" }
|
||||
solana-logger = { path = "../logger", version = "0.23.1" }
|
||||
solana-runtime = { path = "../runtime", version = "0.23.1" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.1" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.23.1" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
|
||||
solana-ledger = { path = "../ledger", version = "0.23.3" }
|
||||
solana-logger = { path = "../logger", version = "0.23.3" }
|
||||
solana-runtime = { path = "../runtime", version = "0.23.3" }
|
||||
solana-sdk = { path = "../sdk", version = "0.23.3" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@@ -11,6 +11,7 @@ use solana_ledger::{
|
||||
blockstore_db::{self, Column, Database},
|
||||
blockstore_processor::{BankForksInfo, BlockstoreProcessorResult, ProcessOptions},
|
||||
rooted_slot_iterator::RootedSlotIterator,
|
||||
shred_version::compute_shred_version,
|
||||
snapshot_utils,
|
||||
};
|
||||
use solana_sdk::{
|
||||
@@ -526,6 +527,7 @@ fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
|
||||
fn load_bank_forks(
|
||||
arg_matches: &ArgMatches,
|
||||
ledger_path: &PathBuf,
|
||||
genesis_config: &GenesisConfig,
|
||||
process_options: ProcessOptions,
|
||||
) -> BlockstoreProcessorResult {
|
||||
let snapshot_config = if arg_matches.is_present("no_snapshot") {
|
||||
@@ -544,7 +546,7 @@ fn load_bank_forks(
|
||||
};
|
||||
|
||||
bank_forks_utils::load(
|
||||
&open_genesis_config(&ledger_path),
|
||||
&genesis_config,
|
||||
&open_blockstore(&ledger_path),
|
||||
account_paths,
|
||||
snapshot_config.as_ref(),
|
||||
@@ -615,9 +617,14 @@ fn main() {
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("print-genesis-hash")
|
||||
SubCommand::with_name("genesis-hash")
|
||||
.about("Prints the ledger's genesis hash")
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("shred-version")
|
||||
.about("Prints the ledger's shred hash")
|
||||
.arg(&hard_forks_arg)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("bounds")
|
||||
.about("Print lowest and highest non-empty slots. Note that there may be empty slots within the bounds")
|
||||
@@ -741,19 +748,46 @@ fn main() {
|
||||
});
|
||||
|
||||
match matches.subcommand() {
|
||||
("print", Some(args_matches)) => {
|
||||
let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot);
|
||||
("print", Some(arg_matches)) => {
|
||||
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
|
||||
output_ledger(
|
||||
open_blockstore(&ledger_path),
|
||||
starting_slot,
|
||||
LedgerOutputMethod::Print,
|
||||
);
|
||||
}
|
||||
("print-genesis-hash", Some(_args_matches)) => {
|
||||
("genesis-hash", Some(_arg_matches)) => {
|
||||
println!("{}", open_genesis_config(&ledger_path).hash());
|
||||
}
|
||||
("print-slot", Some(args_matches)) => {
|
||||
let slots = values_t_or_exit!(args_matches, "slots", Slot);
|
||||
("shred-version", Some(arg_matches)) => {
|
||||
let process_options = ProcessOptions {
|
||||
dev_halt_at_slot: Some(0),
|
||||
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
|
||||
poh_verify: false,
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
let genesis_config = open_genesis_config(&ledger_path);
|
||||
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
|
||||
Ok((bank_forks, bank_forks_info, _leader_schedule_cache)) => {
|
||||
let bank_info = &bank_forks_info[0];
|
||||
let bank = bank_forks[bank_info.bank_slot].clone();
|
||||
|
||||
println!(
|
||||
"{}",
|
||||
compute_shred_version(
|
||||
&genesis_config.hash(),
|
||||
Some(&bank.hard_forks().read().unwrap())
|
||||
)
|
||||
);
|
||||
}
|
||||
Err(err) => {
|
||||
eprintln!("Failed to load ledger: {:?}", err);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
("print-slot", Some(arg_matches)) => {
|
||||
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
|
||||
for slot in slots {
|
||||
println!("Slot {}", slot);
|
||||
output_slot(
|
||||
@@ -763,8 +797,8 @@ fn main() {
|
||||
);
|
||||
}
|
||||
}
|
||||
("json", Some(args_matches)) => {
|
||||
let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot);
|
||||
("json", Some(arg_matches)) => {
|
||||
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
|
||||
output_ledger(
|
||||
open_blockstore(&ledger_path),
|
||||
starting_slot,
|
||||
@@ -778,8 +812,15 @@ fn main() {
|
||||
poh_verify: !arg_matches.is_present("skip_poh_verify"),
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
println!("{}", open_genesis_config(&ledger_path).hash());
|
||||
|
||||
load_bank_forks(arg_matches, &ledger_path, process_options).unwrap_or_else(|err| {
|
||||
load_bank_forks(
|
||||
arg_matches,
|
||||
&ledger_path,
|
||||
&open_genesis_config(&ledger_path),
|
||||
process_options,
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
eprintln!("Ledger verification failed: {:?}", err);
|
||||
exit(1);
|
||||
});
|
||||
@@ -795,7 +836,12 @@ fn main() {
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
|
||||
match load_bank_forks(arg_matches, &ledger_path, process_options) {
|
||||
match load_bank_forks(
|
||||
arg_matches,
|
||||
&ledger_path,
|
||||
&open_genesis_config(&ledger_path),
|
||||
process_options,
|
||||
) {
|
||||
Ok((bank_forks, bank_forks_info, _leader_schedule_cache)) => {
|
||||
let dot = graph_forks(
|
||||
&bank_forks,
|
||||
@@ -834,7 +880,8 @@ fn main() {
|
||||
poh_verify: false,
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
match load_bank_forks(arg_matches, &ledger_path, process_options) {
|
||||
let genesis_config = open_genesis_config(&ledger_path);
|
||||
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
|
||||
Ok((bank_forks, _bank_forks_info, _leader_schedule_cache)) => {
|
||||
let bank = bank_forks.get(snapshot_slot).unwrap_or_else(|| {
|
||||
eprintln!("Error: Slot {} is not available", snapshot_slot);
|
||||
@@ -865,6 +912,13 @@ fn main() {
|
||||
"Successfully created snapshot for slot {}: {:?}",
|
||||
snapshot_slot, package.tar_output_file
|
||||
);
|
||||
println!(
|
||||
"Shred version: {}",
|
||||
compute_shred_version(
|
||||
&genesis_config.hash(),
|
||||
Some(&bank.hard_forks().read().unwrap())
|
||||
)
|
||||
);
|
||||
ok
|
||||
})
|
||||
})
|
||||
@@ -879,8 +933,8 @@ fn main() {
|
||||
}
|
||||
}
|
||||
}
|
||||
("prune", Some(args_matches)) => {
|
||||
if let Some(prune_file_path) = args_matches.value_of("slot_list") {
|
||||
("prune", Some(arg_matches)) => {
|
||||
if let Some(prune_file_path) = arg_matches.value_of("slot_list") {
|
||||
let blockstore = open_blockstore(&ledger_path);
|
||||
let prune_file = File::open(prune_file_path.to_string()).unwrap();
|
||||
let slot_hashes: BTreeMap<u64, String> =
|
||||
@@ -916,14 +970,14 @@ fn main() {
|
||||
blockstore.prune(*target_slot);
|
||||
}
|
||||
}
|
||||
("list-roots", Some(args_matches)) => {
|
||||
("list-roots", Some(arg_matches)) => {
|
||||
let blockstore = open_blockstore(&ledger_path);
|
||||
let max_height = if let Some(height) = args_matches.value_of("max_height") {
|
||||
let max_height = if let Some(height) = arg_matches.value_of("max_height") {
|
||||
usize::from_str(height).expect("Maximum height must be a number")
|
||||
} else {
|
||||
panic!("Maximum height must be provided");
|
||||
};
|
||||
let num_roots = if let Some(roots) = args_matches.value_of("num_roots") {
|
||||
let num_roots = if let Some(roots) = arg_matches.value_of("num_roots") {
|
||||
usize::from_str(roots).expect("Number of roots must be a number")
|
||||
} else {
|
||||
usize::from_str(DEFAULT_ROOT_COUNT).unwrap()
|
||||
@@ -948,7 +1002,7 @@ fn main() {
|
||||
.collect();
|
||||
|
||||
let mut output_file: Box<dyn Write> =
|
||||
if let Some(path) = args_matches.value_of("slot_list") {
|
||||
if let Some(path) = arg_matches.value_of("slot_list") {
|
||||
match File::create(path) {
|
||||
Ok(file) => Box::new(file),
|
||||
_ => Box::new(stdout()),
|
||||
@@ -969,10 +1023,10 @@ fn main() {
|
||||
}
|
||||
});
|
||||
}
|
||||
("bounds", Some(args_matches)) => {
|
||||
("bounds", Some(arg_matches)) => {
|
||||
match open_blockstore(&ledger_path).slot_meta_iterator(0) {
|
||||
Ok(metas) => {
|
||||
let all = args_matches.is_present("all");
|
||||
let all = arg_matches.is_present("all");
|
||||
|
||||
println!("Collecting Ledger information...");
|
||||
let slots: Vec<_> = metas.map(|(slot, _)| slot).collect();
|
||||
|
@@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "0.23.1"
version = "0.23.3"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -29,19 +29,19 @@ reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0
serde = "1.0.104"
serde_bytes = "0.11.3"
serde_derive = "1.0.103"
solana-client = { path = "../client", version = "0.23.1" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.23.1" }
solana-measure = { path = "../measure", version = "0.23.1" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.23.1" }
solana-metrics = { path = "../metrics", version = "0.23.1" }
solana-perf = { path = "../perf", version = "0.23.1" }
solana-client = { path = "../client", version = "0.23.3" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-measure = { path = "../measure", version = "0.23.3" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.23.3" }
solana-metrics = { path = "../metrics", version = "0.23.3" }
solana-perf = { path = "../perf", version = "0.23.3" }
ed25519-dalek = "1.0.0-pre.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.23.1" }
solana-runtime = { path = "../runtime", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.23.1" }
solana-stake-program = { path = "../programs/stake", version = "0.23.1" }
solana-vote-program = { path = "../programs/vote", version = "0.23.1" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.23.3" }
solana-runtime = { path = "../runtime", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-stake-program = { path = "../programs/stake", version = "0.23.3" }
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
sys-info = "0.5.8"
symlink = "0.1.0"
tar = "0.4.26"
@@ -59,7 +59,7 @@ features = ["lz4"]
[dev-dependencies]
assert_matches = "1.3.0"
matches = "0.1.6"
solana-budget-program = { path = "../programs/budget", version = "0.23.1" }
solana-budget-program = { path = "../programs/budget", version = "0.23.3" }

[lib]
crate-type = ["lib"]
@@ -22,8 +22,8 @@ use rayon::{
};
use rocksdb::DBRawIterator;
use solana_client::rpc_response::{
RpcConfirmedBlock, RpcEncodedTransaction, RpcTransactionEncoding, RpcTransactionStatus,
RpcTransactionWithStatusMeta,
RpcConfirmedBlock, RpcEncodedTransaction, RpcRewards, RpcTransactionEncoding,
RpcTransactionStatus, RpcTransactionWithStatusMeta,
};
use solana_measure::measure::Measure;
use solana_metrics::{datapoint_debug, datapoint_error};
@@ -86,6 +86,7 @@ pub struct Blockstore {
data_shred_cf: LedgerColumn<cf::ShredData>,
code_shred_cf: LedgerColumn<cf::ShredCode>,
transaction_status_cf: LedgerColumn<cf::TransactionStatus>,
rewards_cf: LedgerColumn<cf::Rewards>,
last_root: Arc<RwLock<Slot>>,
insert_shreds_lock: Arc<Mutex<()>>,
pub new_shreds_signals: Vec<SyncSender<bool>>,
@@ -195,6 +196,7 @@ impl Blockstore {
let data_shred_cf = db.column();
let code_shred_cf = db.column();
let transaction_status_cf = db.column();
let rewards_cf = db.column();

let db = Arc::new(db);

@@ -219,6 +221,7 @@ impl Blockstore {
data_shred_cf,
code_shred_cf,
transaction_status_cf,
rewards_cf,
new_shreds_signals: vec![],
completed_slots_senders: vec![],
insert_shreds_lock: Arc::new(Mutex::new(())),
@@ -346,6 +349,10 @@ impl Blockstore {
& self
.db
.delete_range_cf::<cf::TransactionStatus>(&mut write_batch, from_slot, to_slot)
.unwrap_or(false)
& self
.db
.delete_range_cf::<cf::Rewards>(&mut write_batch, from_slot, to_slot)
.unwrap_or(false);
if let Err(e) = self.db.write(write_batch) {
error!(
@@ -398,6 +405,10 @@ impl Blockstore {
&& self
.transaction_status_cf
.compact_range(from_slot, to_slot)
.unwrap_or(false)
&& self
.rewards_cf
.compact_range(from_slot, to_slot)
.unwrap_or(false);
Ok(result)
}
@@ -1396,6 +1407,12 @@ impl Blockstore {
let blockhash = get_last_hash(slot_entries.iter())
.unwrap_or_else(|| panic!("Rooted slot {:?} must have blockhash", slot));

let rewards = self
.rewards_cf
.get(slot)
.expect("Expect rewards get to succeed")
.unwrap_or_else(|| vec![]);

let block = RpcConfirmedBlock {
previous_blockhash: previous_blockhash.to_string(),
blockhash: blockhash.to_string(),
@@ -1405,6 +1422,7 @@ impl Blockstore {
encoding,
slot_transaction_iterator,
),
rewards,
};
return Ok(block);
}
@@ -1442,6 +1460,10 @@ impl Blockstore {
self.transaction_status_cf.put(index, status)
}

pub fn write_rewards(&self, index: Slot, rewards: RpcRewards) -> Result<()> {
self.rewards_cf.put(index, &rewards)
}

fn get_block_timestamps(&self, slot: Slot) -> Result<Vec<(Pubkey, (Slot, UnixTimestamp))>> {
let slot_entries = self.get_slot_entries(slot, 0, None)?;
Ok(slot_entries
@@ -2574,6 +2596,13 @@ pub mod tests {
.unwrap()
.next()
.map(|((slot, _), _)| slot >= min_slot)
.unwrap_or(true)
& blockstore
.db
.iter::<cf::Rewards>(IteratorMode::Start)
.unwrap()
.next()
.map(|(slot, _)| slot >= min_slot)
.unwrap_or(true);
assert!(condition_met);
}
@@ -4826,6 +4855,7 @@ pub mod tests {
parent_slot: slot - 1,
blockhash: blockhash.to_string(),
previous_blockhash: Hash::default().to_string(),
rewards: vec![],
};
// The previous_blockhash of `expected_block` is default because its parent slot is a
// root, but empty of entries. This is special handling for snapshot root slots.
@@ -4846,6 +4876,7 @@ pub mod tests {
parent_slot: slot,
blockhash: blockhash.to_string(),
previous_blockhash: blockhash.to_string(),
rewards: vec![],
};
assert_eq!(confirmed_block, expected_block);
@@ -10,7 +10,7 @@ use rocksdb::{
};
use serde::de::DeserializeOwned;
use serde::Serialize;
use solana_client::rpc_response::RpcTransactionStatus;
use solana_client::rpc_response::{RpcRewards, RpcTransactionStatus};
use solana_sdk::{clock::Slot, signature::Signature};
use std::{collections::HashMap, fs, marker::PhantomData, path::Path, sync::Arc};
use thiserror::Error;
@@ -38,6 +38,8 @@ const DATA_SHRED_CF: &str = "data_shred";
const CODE_SHRED_CF: &str = "code_shred";
/// Column family for Transaction Status
const TRANSACTION_STATUS_CF: &str = "transaction_status";
/// Column family for Rewards
const REWARDS_CF: &str = "rewards";

#[derive(Error, Debug)]
pub enum BlockstoreError {
@@ -105,6 +107,10 @@ pub mod columns {
#[derive(Debug)]
/// The transaction status column
pub struct TransactionStatus;

#[derive(Debug)]
/// The rewards column
pub struct Rewards;
}

#[derive(Debug)]
@@ -113,8 +119,8 @@ struct Rocks(rocksdb::DB);
impl Rocks {
fn open(path: &Path) -> Result<Rocks> {
use columns::{
DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, Root, ShredCode, ShredData,
SlotMeta, TransactionStatus,
DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, Rewards, Root, ShredCode,
ShredData, SlotMeta, TransactionStatus,
};

fs::create_dir_all(&path)?;
@@ -139,6 +145,7 @@ impl Rocks {
ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options());
let transaction_status_cf_descriptor =
ColumnFamilyDescriptor::new(TransactionStatus::NAME, get_cf_options());
let rewards_cf_descriptor = ColumnFamilyDescriptor::new(Rewards::NAME, get_cf_options());

let cfs = vec![
meta_cf_descriptor,
@@ -151,6 +158,7 @@ impl Rocks {
shred_data_cf_descriptor,
shred_code_cf_descriptor,
transaction_status_cf_descriptor,
rewards_cf_descriptor,
];

// Open the database
@@ -161,8 +169,8 @@ impl Rocks {

fn columns(&self) -> Vec<&'static str> {
use columns::{
DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, Root, ShredCode, ShredData,
SlotMeta, TransactionStatus,
DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, Rewards, Root, ShredCode,
ShredData, SlotMeta, TransactionStatus,
};

vec![
@@ -176,6 +184,7 @@ impl Rocks {
ShredData::NAME,
ShredCode::NAME,
TransactionStatus::NAME,
Rewards::NAME,
]
}

@@ -316,6 +325,14 @@ impl ColumnName for columns::TransactionStatus {
const NAME: &'static str = TRANSACTION_STATUS_CF;
}

impl SlotColumn for columns::Rewards {}
impl ColumnName for columns::Rewards {
const NAME: &'static str = REWARDS_CF;
}
impl TypedColumn for columns::Rewards {
type Type = RpcRewards;
}

impl Column for columns::ShredCode {
type Index = (u64, u64);
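Rewards is registered as a SlotColumn with a typed value (RpcRewards), so it is keyed by slot and stored like the other typed columns; callers reach it through the Blockstore wrapper added in blockstore.rs. A hedged sketch of the write path (store_rewards is illustrative and not part of this diff; RpcRewards comes from solana_client::rpc_response):

use solana_client::rpc_response::RpcRewards;
use solana_ledger::blockstore::Blockstore;
use solana_sdk::clock::Slot;

// Persist a slot's rewards under the new "rewards" column family; they are later
// surfaced through get_confirmed_block, which reads rewards_cf for the same slot.
fn store_rewards(blockstore: &Blockstore, slot: Slot, rewards: RpcRewards) {
    blockstore
        .write_rewards(slot, rewards)
        .expect("rewards column write");
}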
@@ -1,5 +1,6 @@
pub use solana_runtime::genesis_utils::{
create_genesis_config_with_leader, GenesisConfigInfo, BOOTSTRAP_VALIDATOR_LAMPORTS,
create_genesis_config_with_leader, create_genesis_config_with_leader_ex, GenesisConfigInfo,
BOOTSTRAP_VALIDATOR_LAMPORTS,
};
use solana_sdk::pubkey::Pubkey;
@@ -15,6 +15,7 @@ pub mod leader_schedule_utils;
pub mod poh;
pub mod rooted_slot_iterator;
pub mod shred;
pub mod shred_version;
pub mod sigverify_shreds;
pub mod snapshot_package;
pub mod snapshot_utils;
ledger/src/shred_version.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
use crate::shred::Shred;
use solana_runtime::hard_forks::HardForks;
use solana_sdk::hash::{extend_and_hash, Hash};

pub fn compute_shred_version(genesis_hash: &Hash, hard_forks: Option<&HardForks>) -> u16 {
use byteorder::{ByteOrder, LittleEndian};

let mut hash = *genesis_hash;
if let Some(hard_forks) = hard_forks {
for (slot, count) in hard_forks.iter() {
let mut buf = [0u8; 16];
LittleEndian::write_u64(&mut buf[..8], *slot);
LittleEndian::write_u64(&mut buf[8..], *count as u64);
hash = extend_and_hash(&hash, &buf);
}
}

Shred::version_from_hash(&hash)
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_compute_shred_version() {
assert_eq!(compute_shred_version(&Hash::default(), None), 1);
let mut hard_forks = HardForks::default();
assert_eq!(
compute_shred_version(&Hash::default(), Some(&hard_forks)),
1
);
hard_forks.register(1);
assert_eq!(
compute_shred_version(&Hash::default(), Some(&hard_forks)),
55551
);
hard_forks.register(1);
assert_eq!(
compute_shred_version(&Hash::default(), Some(&hard_forks)),
46353
);
}
}
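The new compute_shred_version helper gives every cluster a 16-bit fingerprint derived from the genesis hash plus any registered hard forks, which is what solana-ledger-tool now prints after creating a snapshot. A minimal illustrative sketch (not part of the diff) that restates the unit test above:

use solana_ledger::shred_version::compute_shred_version;
use solana_runtime::hard_forks::HardForks;
use solana_sdk::hash::Hash;

fn main() {
    // With no hard forks the version is derived from the genesis hash alone.
    let genesis_hash = Hash::default();
    assert_eq!(compute_shred_version(&genesis_hash, None), 1);

    // Registering a hard fork folds its (slot, count) pair into the hash, so the
    // derived version changes and the fork's shreds stop mixing with the old cluster's.
    let mut hard_forks = HardForks::default();
    hard_forks.register(1);
    assert_eq!(compute_shred_version(&genesis_hash, Some(&hard_forks)), 55551);
}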
@@ -137,7 +137,7 @@ fn slot_key_data_for_gpu<
let keyvec_size = keys_to_slots.len() * size_of::<T>();
keyvec.resize(keyvec_size, 0);

for (i, (k, slots)) in keys_to_slots.iter_mut().enumerate() {
for (i, (k, slots)) in keys_to_slots.iter().enumerate() {
let start = i * size_of::<T>();
let end = start + size_of::<T>();
keyvec[start..end].copy_from_slice(k.as_ref());
@@ -26,6 +26,8 @@ pub const TAR_SNAPSHOTS_DIR: &str = "snapshots";
pub const TAR_ACCOUNTS_DIR: &str = "accounts";
pub const TAR_VERSION_FILE: &str = "version";

pub const SNAPSHOT_VERSION: &str = "0.23.2"; // format!("{}\n", env!("CARGO_PKG_VERSION"));

#[derive(PartialEq, Ord, Eq, Debug)]
pub struct SlotSnapshotPaths {
pub slot: Slot,
@@ -168,10 +170,8 @@ pub fn archive_snapshot_package(snapshot_package: &SnapshotPackage) -> Result<()

// Write version file
{
let snapshot_version = format!("{}\n", env!("CARGO_PKG_VERSION"));
let mut f = std::fs::File::create(staging_version_file)?;
//f.write_all(&snapshot_version.to_string().into_bytes())?;
f.write_all(&snapshot_version.into_bytes())?;
f.write_all(&SNAPSHOT_VERSION.to_string().into_bytes())?;
}

// Tar the staging directory into the archive at `archive_path`
@@ -459,7 +459,7 @@ pub fn bank_from_archive<P: AsRef<Path>>(
)?;

if !bank.verify_snapshot_bank() {
panic!("Snapshot bank failed to verify");
panic!("Snapshot bank for slot {} failed to verify", bank.slot());
}
measure.stop();
info!("{}", measure);
@@ -524,7 +524,7 @@ where
MAX_SNAPSHOT_DATA_FILE_SIZE,
|stream| {
let mut bank: Bank = match snapshot_version {
env!("CARGO_PKG_VERSION") => deserialize_from_snapshot(stream.by_ref())?,
SNAPSHOT_VERSION => deserialize_from_snapshot(stream.by_ref())?,
_ => {
return Err(get_io_error(&format!(
"unsupported snapshot version: {}",
@@ -532,7 +532,7 @@ where
)));
}
};
// Rebuild accounts
info!("Rebuilding accounts...");
bank.set_bank_rc(
bank::BankRc::new(account_paths.to_vec(), 0, bank.slot()),
bank::StatusCacheRc::default(),
@@ -548,16 +548,16 @@ where
&status_cache_path,
MAX_SNAPSHOT_DATA_FILE_SIZE,
|stream| {
// Rebuild status cache
info!("Rebuilding status cache...");
let slot_deltas: Vec<SlotDelta<transaction::Result<()>>> =
deserialize_from_snapshot(stream)?;

Ok(slot_deltas)
},
)?;

bank.src.append(&slot_deltas);

info!("Loaded bank for slot: {}", bank.slot());
Ok(bank)
}
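The net effect of the snapshot_utils change is that the version string written into, and accepted from, a snapshot archive is pinned to the SNAPSHOT_VERSION constant instead of whatever CARGO_PKG_VERSION the binary was built with, so 0.23.x point releases keep producing and reading the same snapshot format. A simplified, hedged sketch of that gate (check_snapshot_version is illustrative and stands in for the real stream-handling closure above):

// Illustrative only: the real code matches the version string inside the
// deserialization closure and returns an io error on mismatch.
const SNAPSHOT_VERSION: &str = "0.23.2";

fn check_snapshot_version(found: &str) -> Result<(), String> {
    if found.trim() == SNAPSHOT_VERSION {
        Ok(())
    } else {
        Err(format!("unsupported snapshot version: {}", found))
    }
}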
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale"
version = "0.23.1"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,24 +12,24 @@ homepage = "https://solana.com/"
itertools = "0.8.1"
log = "0.4.8"
rand = "0.6.5"
solana-archiver-lib = { path = "../archiver-lib", version = "0.23.1" }
solana-config-program = { path = "../programs/config", version = "0.23.1" }
solana-core = { path = "../core", version = "0.23.1" }
solana-client = { path = "../client", version = "0.23.1" }
solana-faucet = { path = "../faucet", version = "0.23.1" }
solana-exchange-program = { path = "../programs/exchange", version = "0.23.1" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.23.1" }
solana-ledger = { path = "../ledger", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.23.1" }
solana-runtime = { path = "../runtime", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.23.1" }
solana-stake-program = { path = "../programs/stake", version = "0.23.1" }
solana-storage-program = { path = "../programs/storage", version = "0.23.1" }
solana-vest-program = { path = "../programs/vest", version = "0.23.1" }
solana-vote-program = { path = "../programs/vote", version = "0.23.1" }
solana-archiver-lib = { path = "../archiver-lib", version = "0.23.3" }
solana-config-program = { path = "../programs/config", version = "0.23.3" }
solana-core = { path = "../core", version = "0.23.3" }
solana-client = { path = "../client", version = "0.23.3" }
solana-faucet = { path = "../faucet", version = "0.23.3" }
solana-exchange-program = { path = "../programs/exchange", version = "0.23.3" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.23.3" }
solana-ledger = { path = "../ledger", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-runtime = { path = "../runtime", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-stake-program = { path = "../programs/stake", version = "0.23.3" }
solana-storage-program = { path = "../programs/storage", version = "0.23.3" }
solana-vest-program = { path = "../programs/vest", version = "0.23.3" }
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
symlink = "0.1.0"
tempfile = "3.1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.23.1" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.23.3" }

[dev-dependencies]
assert_matches = "1.3.0"
@@ -211,7 +211,6 @@ impl LocalCluster {
leader_node.info.rpc.port(),
leader_node.info.rpc_pubsub.port(),
));
leader_config.transaction_status_service_disabled = true;
let leader_server = Validator::new(
leader_node,
&leader_keypair,
@@ -359,7 +358,6 @@ impl LocalCluster {
validator_node.info.rpc.port(),
validator_node.info.rpc_pubsub.port(),
));
config.transaction_status_service_disabled = true;
let voting_keypair = Arc::new(voting_keypair);
let validator_server = Validator::new(
validator_node,
@@ -668,9 +666,6 @@ impl Cluster for LocalCluster {
cluster_validator_info.info.contact_info = node.info.clone();
cluster_validator_info.config.rpc_ports =
Some((node.info.rpc.port(), node.info.rpc_pubsub.port()));
cluster_validator_info
.config
.transaction_status_service_disabled = true;

let entry_point_info = {
if *pubkey == self.entry_point_info.id {
@@ -6,6 +6,7 @@ use solana_core::{
cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo,
gossip_service::discover_cluster,
serve_repair::ServeRepair,
storage_stage::SLOTS_PER_TURN_TEST,
validator::ValidatorConfig,
};
@@ -61,10 +62,11 @@ fn run_archiver_startup_basic(num_nodes: usize, num_archivers: usize) {
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
cluster_nodes[0].clone(),
)));
let serve_repair = ServeRepair::new(cluster_info);
let path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&path).unwrap());
Archiver::download_from_archiver(
&cluster_info,
&serve_repair,
&archiver_info,
&blockstore,
slots_per_segment,
@@ -943,100 +943,6 @@ fn test_no_voting() {
}
}

#[test]
fn test_repairman_catchup() {
solana_logger::setup();
error!("test_repairman_catchup");
run_repairman_catchup(3);
}

fn run_repairman_catchup(num_repairmen: u64) {
let mut validator_config = ValidatorConfig::default();
let num_ticks_per_second = 100;
let num_ticks_per_slot = 40;
let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
let num_root_buffer_slots = 10;
// Calculate the leader schedule num_root_buffer_slots ahead. Otherwise, if stakers_slot_offset ==
// num_slots_per_epoch, and num_slots_per_epoch == MINIMUM_SLOTS_PER_EPOCH, then repairmen
// will stop sending repairs after the last slot in epoch 1 (0-indexed), because the root
// is at most in the first epoch.
//
// For example:
// Assume:
// 1) num_slots_per_epoch = 32
// 2) stakers_slot_offset = 32
// 3) MINIMUM_SLOTS_PER_EPOCH = 32
//
// Then the last slot in epoch 1 is slot 63. After completing slots 0 to 63, the root on the
// repairee is at most 31. Because, the stakers_slot_offset == 32, then the max confirmed epoch
// on the repairee is epoch 1.
// Thus the repairmen won't send any slots past epoch 1, slot 63 to this repairee until the repairee
// updates their root, and the repairee can't update their root until they get slot 64, so no progress
// is made. This is also not accounting for the fact that the repairee may not vote on every slot, so
// their root could actually be much less than 31. This is why we give a num_root_buffer_slots buffer.
let stakers_slot_offset = num_slots_per_epoch + num_root_buffer_slots;

validator_config.rpc_config.enable_validator_exit = true;

let lamports_per_repairman = 1000;

// Make the repairee_stake small relative to the repairmen stake so that the repairee doesn't
// get included in the leader schedule, causing slots to get skipped while it's still trying
// to catch up
let repairee_stake = 3;
let cluster_lamports = 2 * lamports_per_repairman * num_repairmen + repairee_stake;
let node_stakes: Vec<_> = (0..num_repairmen).map(|_| lamports_per_repairman).collect();
let mut cluster = LocalCluster::new(&ClusterConfig {
node_stakes,
cluster_lamports,
validator_configs: vec![validator_config.clone(); num_repairmen as usize],
ticks_per_slot: num_ticks_per_slot,
slots_per_epoch: num_slots_per_epoch,
stakers_slot_offset,
poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
..ClusterConfig::default()
});

let repairman_pubkeys: HashSet<_> = cluster.get_node_pubkeys().into_iter().collect();
let epoch_schedule = EpochSchedule::custom(num_slots_per_epoch, stakers_slot_offset, true);
let num_warmup_epochs = epoch_schedule.get_leader_schedule_epoch(0) + 1;

// Sleep for longer than the first N warmup epochs, with a one epoch buffer for timing issues
cluster_tests::sleep_n_epochs(
num_warmup_epochs as f64 + 1.0,
&cluster.genesis_config.poh_config,
num_ticks_per_slot,
num_slots_per_epoch,
);

// Start up a new node, wait for catchup. Backwards repair won't be sufficient because the
// leader is sending shreds past this validator's first two confirmed epochs. Thus, the repairman
// protocol will have to kick in for this validator to repair.
cluster.add_validator(&validator_config, repairee_stake, Arc::new(Keypair::new()));

let all_pubkeys = cluster.get_node_pubkeys();
let repairee_id = all_pubkeys
.into_iter()
.find(|x| !repairman_pubkeys.contains(x))
.unwrap();

// Wait for repairman protocol to catch this validator up
let repairee_client = cluster.get_validator_client(&repairee_id).unwrap();
let mut current_slot = 0;

// Make sure this validator can get repaired past the first few warmup epochs
let target_slot = (num_warmup_epochs) * num_slots_per_epoch + 1;
while current_slot <= target_slot {
trace!("current_slot: {}", current_slot);
if let Ok(slot) = repairee_client.get_slot_with_commitment(CommitmentConfig::recent()) {
current_slot = slot;
} else {
continue;
}
sleep(Duration::from_secs(1));
}
}

fn wait_for_next_snapshot<P: AsRef<Path>>(cluster: &LocalCluster, tar: P) {
// Get slot after which this was generated
let client = cluster
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-log-analyzer"
description = "The solana cluster network analysis tool"
version = "0.23.1"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,8 +17,8 @@ semver = "0.9.0"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.23.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }

[[bin]]
name = "solana-log-analyzer"
@@ -1,6 +1,6 @@
[package]
name = "solana-logger"
version = "0.23.1"
version = "0.23.3"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -1,7 +1,7 @@
[package]
name = "solana-measure"
description = "Blockchain, Rebuilt for Scale"
version = "0.23.1"
version = "0.23.3"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -12,8 +12,8 @@ edition = "2018"

[dependencies]
log = "0.4.8"
solana-sdk = { path = "../sdk", version = "0.23.1" }
solana-metrics = { path = "../metrics", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-metrics = { path = "../metrics", version = "0.23.3" }

[target."cfg(unix)".dependencies]
jemallocator = "0.3.2"
@@ -1,6 +1,6 @@
[package]
name = "solana-merkle-tree"
version = "0.23.1"
version = "0.23.3"
description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,7 +9,7 @@ homepage = "https://solana.com/"
edition = "2018"

[dependencies]
solana-sdk = { path = "../sdk", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.23.3" }

[dev-dependencies]
hex = "0.4.0"
@@ -1,6 +1,6 @@
[package]
name = "solana-metrics"
version = "0.23.1"
version = "0.23.3"
description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,7 +13,7 @@ env_logger = "0.7.1"
lazy_static = "1.4.0"
log = "0.4.8"
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls"] }
solana-sdk = { path = "../sdk", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
sys-info = "0.5.8"

[dev-dependencies]
@@ -6903,7 +6903,7 @@
"renderer": "flot",
"seriesOverrides": [
{
"alias": "cluster_info-repair_highest.ix",
"alias": "serve_repair-repair_highest.ix",
"yaxis": 2
}
],
@@ -6928,7 +6928,7 @@
],
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"repair-highest-slot\") AS \"slot\" FROM \"$testnet\".\"autogen\".\"cluster_info-repair_highest\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"query": "SELECT last(\"repair-highest-slot\") AS \"slot\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair_highest\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "C",
"resultFormat": "time_series",
@@ -6965,7 +6965,7 @@
],
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"repair-highest-ix\") AS \"ix\" FROM \"$testnet\".\"autogen\".\"cluster_info-repair_highest\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"query": "SELECT last(\"repair-highest-ix\") AS \"ix\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair_highest\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -7064,7 +7064,7 @@
"renderer": "flot",
"seriesOverrides": [
{
"alias": "cluster_info-repair.repair-ix",
"alias": "serve_repair-repair.repair-ix",
"yaxis": 2
}
],
@@ -7089,7 +7089,7 @@
],
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"repair-ix\") AS \"repair-ix\" FROM \"$testnet\".\"autogen\".\"cluster_info-repair\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"query": "SELECT last(\"repair-ix\") AS \"repair-ix\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "C",
"resultFormat": "time_series",
@@ -7126,7 +7126,7 @@
],
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"repair-slot\") AS \"repair-slot\" FROM \"$testnet\".\"autogen\".\"cluster_info-repair\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"query": "SELECT last(\"repair-slot\") AS \"repair-slot\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@@ -7245,7 +7245,7 @@
],
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"repair-orphan\") AS \"slot\" FROM \"$testnet\".\"autogen\".\"cluster_info-repair_orphan\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"query": "SELECT last(\"repair-orphan\") AS \"slot\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair_orphan\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "C",
"resultFormat": "time_series",
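These dashboard panels follow the repair datapoints after their move out of cluster_info: the measurement names change from cluster_info-repair* to serve_repair-repair*, which appears to track the new serve_repair::ServeRepair stage that now answers repair requests (the same ServeRepair type wired into the archiver test above). A hedged sketch of how a datapoint under the renamed measurement might be emitted, using the fields these panels query; the report_repair helper is illustrative, not code from this diff:

use solana_metrics::datapoint_debug;

// Submit a point under the renamed "serve_repair-repair" measurement with the
// "repair-ix" and "repair-slot" fields the Grafana queries above select.
fn report_repair(ix: i64, slot: i64) {
    datapoint_debug!("serve_repair-repair", ("repair-ix", ix, i64), ("repair-slot", slot, i64));
}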
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-net-shaper"
description = "The solana cluster network shaping tool"
version = "0.23.1"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,8 +16,8 @@ semver = "0.9.0"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.23.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
rand = "0.6.5"

[[bin]]
Some files were not shown because too many files have changed in this diff.