Compare commits

...

94 Commits

Author SHA1 Message Date
carllin
2cdd3f835f log leader (#10280)
Co-authored-by: Carl <carl@solana.com>
2020-05-27 18:07:31 -07:00
Michael Vines
c4e04f70d0 Adjust mainnet-beta shred version 2020-05-27 17:10:53 -07:00
Michael Vines
5d971472b2 Purge next slots to avoid a blockstore_processor panic on restart (#10277) 2020-05-27 17:10:27 -07:00
Michael Vines
f1201502d4 Bump version to 1.1.15 2020-05-26 21:22:34 -07:00
Tyera Eulberg
fd5222ad21 V1.1 single gossip commitment (#10263)
automerge
2020-05-26 21:16:46 -07:00
mergify[bot]
768a5f2b40 Cluster info metrics (#10215) (#10235)
automerge
2020-05-26 17:28:14 -07:00
mergify[bot]
87b57b53f9 Wait for one slot to be produced (#10257) (#10258)
automerge
2020-05-26 17:27:36 -07:00
Michael Vines
55a64c8945 Activate eager rent collection and BPF loader on mainnet-beta epoch 34 (#10231) 2020-05-26 10:28:29 -07:00
Ryo Onodera
e8c6233c6e Adjust owner hashing activation slot (#10243)
automerge
2020-05-26 01:21:22 -07:00
Michael Vines
f51b214449 Adjust include_owner_in_hash to match mainet-beta v1.0 activation (#10230)
automerge
2020-05-25 12:31:15 -07:00
mergify[bot]
8fe8a5717e Clean up RPCClient retry handling: only retry on 429, after a little sleep (#10182) (#10184)
automerge
2020-05-25 11:41:46 -07:00
Michael Vines
9adf8b4fc8 Reduce UNLOCK_NONCE_SLOT to ensure it is active on all three clusters (#10223)
automerge
2020-05-25 01:08:30 -07:00
mergify[bot]
82772f95a1 LedgerCleanupService no longer causes an OOM and actually purges (bp #10199) (#10221)
automerge
2020-05-24 23:24:45 -07:00
mergify[bot]
0b5d3df251 Optimize banking processing of AccountInUse (#10154) (#10193)
automerge
2020-05-24 11:46:10 -07:00
Ryo Onodera
e63fdba252 Test ledger-tool commands in run-sanity.sh (#10211)
automerge
2020-05-24 06:07:24 -07:00
mergify[bot]
5e65b7cbd9 Retry a couple times before declaring a UDP port unreachable (#10181) (#10191)
automerge
2020-05-22 15:57:18 -07:00
Michael Vines
68d0fe2dbc Update another non-circulating account 2020-05-22 15:11:19 -07:00
mergify[bot]
3aad5f563e Add another non-circulating account (#10186) (#10190)
automerge
2020-05-22 14:59:21 -07:00
mergify[bot]
ccfe09e460 Fixup deserialize_bs58_transaction, and make a few error types more targeted (#10171) (#10177)
automerge
2020-05-21 19:09:24 -07:00
mergify[bot]
6fd57fafc8 REST API now returns supply in SOL rather than lamports (#10170) (#10174)
automerge

(cherry picked from commit 18be7a7966)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-21 16:54:12 -07:00
mergify[bot]
c7d857583f Revert "Add AVX2 runtime checks (#10033)" (#10167) (#10169)
This reverts commit cf8eb7700b.

(cherry picked from commit 486168b796)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-21 13:19:47 -07:00
mergify[bot]
e29b7876ad Add v0 REST APIs for circulating and total supply (bp #10102) (#10160)
automerge
2020-05-20 21:51:25 -07:00
mergify[bot]
de479ebda9 transaction-history now searches over the entire history by default (#10145) (#10153)
automerge
2020-05-20 15:32:21 -07:00
mergify[bot]
d3447f2f41 Fixup subscription docs (#10146) (#10148)
automerge
2020-05-20 12:23:07 -07:00
mergify[bot]
d9e14b4a82 Fix another unstable test after eager rent (#10120) (#10143)
automerge
2020-05-20 10:22:11 -07:00
mergify[bot]
94b97e4b56 Ignore test_tvu_exit (#10134) (#10138)
automerge
2020-05-20 00:57:34 -07:00
mergify[bot]
abd977b819 Fix erasure (bp #10095) (#10127)
automerge
2020-05-19 22:21:35 -07:00
mergify[bot]
36eafa56a3 Rename getCirculatingSuppy to getSupply in JSON API doc (#10121) (#10123)
automerge
2020-05-19 15:47:39 -07:00
mergify[bot]
06a63549c1 Add SimulateTransaction RPC endpoint (#10106) (#10116)
automerge
2020-05-19 14:25:06 -07:00
carllin
a4047bb9c8 Fix deserialize reference tick (#10111)
Co-authored-by: Carl <carl@solana.com>
2020-05-19 13:55:37 -07:00
Michael Vines
a235423000 Cargo.lock 2020-05-19 08:14:29 -07:00
Michael Vines
726eadc64b Bump version to 1.1.14 2020-05-18 15:15:26 -07:00
mergify[bot]
4d18144232 Update accounts whitelist (#10100) (#10104)
automerge
2020-05-18 14:42:02 -07:00
mergify[bot]
342cf90ce1 Trigger RPC notifications after block commitment cache update (#10077) (#10101)
automerge
2020-05-18 13:34:18 -07:00
mergify[bot]
3ec109a0e4 Bump solana-rbpf to v0.1.28 (#9976) (#9983)
automerge
2020-05-18 00:10:05 -07:00
Michael Vines
2634402fef Bump version to 1.1.13 2020-05-17 16:35:36 -07:00
carllin
997f317c23 v1.1: Add nonce to shreds repairs, add shred data size to header (#10076)
* Add nonce to shreds/repairs

* Add data shred size to header

* Align nonce unlock with epoch 47

Co-authored-by: Carl <carl@solana.com>
2020-05-17 13:36:15 -07:00
mergify[bot]
7bc915c0d1 Abort if the open fd limit cannot be increased (bp #10064) (#10074)
automerge
2020-05-15 14:35:29 -07:00
mergify[bot]
8651f058eb Add docs section to upgrade Solana App on Ledger Live (#10070) (#10072)
automerge
2020-05-15 11:30:32 -07:00
mergify[bot]
b6d6ff786a Forge a confirmed root before halting for RPC inspection (#10061) (#10067)
automerge
2020-05-15 10:30:02 -07:00
mergify[bot]
b9a80152df Fix unstable test after eager rent collection (#10031) (#10060)
automerge
2020-05-15 01:25:48 -07:00
Ryo Onodera
e9dda5ebd7 v1.1: Eager rent collection (#10028)
* Introduce eager rent collection (#9527)

* Switch AccountsIndex.account_maps from HashMap to BTreeMap

* Introduce eager rent collection

* Start to add tests

* Avoid too short eager rent collection cycles

* Add more tests

* Add more tests...

* Refacotr!!!!!!

* Refactoring follow up

* More tiny cleanups

* Don't rewrite 0-lamport accounts to be deterministic

* Refactor a bit

* Do hard fork, restore tests, and perf. mitigation

* Fix build...

* Refactor and add switch over for testnet (TdS)

* Use to_be_bytes

* cleanup

* More tiny cleanup

* Rebase cleanup

* Set Bank::genesis_hash when resuming from snapshot

* Reorder fns and clean ups

* Better naming and commenting

* Yet more naming clarifications

* Make prefix width strictly uniform for 2-base partition_count

* Fix typo...

* Revert cluster-dependent gate

* kick ci?

* kick ci?

* kick ci?

(cherry picked from commit 1eb40c3fe0)

# Conflicts:
#	core/tests/bank_forks.rs
#	ledger/src/bank_forks_utils.rs
#	ledger/src/snapshot_utils.rs
#	runtime/src/bank.rs

* Fix merge conflicts

* Add gating

* Add Danger comment...

* Delay activation epoch

* Add gating for stable as well

* fmt...

* fmt!!!!
2020-05-15 15:38:31 +09:00
mergify[bot]
5f0be1793c Add Ledger error codes (#10056) (#10059)
automerge
2020-05-14 23:13:18 -07:00
mergify[bot]
2d8533075d Base58 (#10052) (#10055)
automerge
2020-05-14 18:06:27 -07:00
mergify[bot]
bf382c6069 Remove inline from all BPF C functions (bp #10038) (#10039)
automerge
2020-05-14 14:47:04 -07:00
mergify[bot]
366e426f2b Clean up Ledger instructions (#10047) (#10049)
automerge
2020-05-14 13:08:34 -07:00
mergify[bot]
fa34e6e419 solana-gossip spy can now specify a shred version (#10040) (#10042)
automerge
2020-05-13 21:17:12 -07:00
mergify[bot]
ab9fe5e9ad Add AVX2 runtime checks (#10033) (#10035)
automerge
2020-05-13 13:43:06 -07:00
Ryo Onodera
3474419111 Revert "[NO-MERGE; needs gating logic] Introduce eager rent collection (bp #9527) (#10022)" (#10026)
This reverts commit ff21251416.
2020-05-13 22:51:59 +09:00
mergify[bot]
ff21251416 [NO-MERGE; needs gating logic] Introduce eager rent collection (bp #9527) (#10022)
automerge
2020-05-13 06:12:45 -07:00
mergify[bot]
7e6bbc7b77 Introduce type alias Ancestors (#9699) (#10018)
automerge
2020-05-13 01:46:38 -07:00
mergify[bot]
82783b18ea Rpc: optionally filter getLargestAccounts by circulating/nonCirculating (#10007) (#10014)
automerge
2020-05-12 21:54:44 -07:00
mergify[bot]
b7c6f38665 Enable disk metrics (#10009) (#10010)
automerge
2020-05-12 16:45:26 -07:00
mergify[bot]
11da07eca7 Update testnet shred version (#10000) (#10002)
automerge
2020-05-11 23:35:14 -07:00
Michael Vines
b6b779d2c4 Use CommitmentConfig::root() when checking accounts, CommitmentConfig::max() may not be available yet 2020-05-11 22:55:04 -07:00
mergify[bot]
1c85d62fe4 Fix crash when CI_COMMIT=HEAD (#9994) (#9998)
automerge

(cherry picked from commit 28d1f7c5e7)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-11 22:53:48 -07:00
Michael Vines
867a213cd3 Bump version to v1.1.12 2020-05-11 22:10:03 -07:00
Michael Vines
c51a18a887 getClusterNodes RPC API now includes the node software version (#9993) 2020-05-11 21:38:19 -07:00
mergify[bot]
206ff02be9 Fix up a couple cli commands that fail when a node is in the --wait-for-supermajority state (#9985) (#9991)
automerge

(cherry picked from commit 3b9dc50541)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-11 19:48:59 -07:00
Michael Vines
8d7e90e9b8 Advertise node version in gossip (#9986)
automerge
2020-05-11 17:45:19 -07:00
mergify[bot]
eb11db3e3e Check slot cleaned up for RPC blockstore/slot queries (#9982) (#9989)
automerge
2020-05-11 16:49:22 -07:00
mergify[bot]
8d8ad84527 Add retransmit packets_by_slot metrics (#9975) (#9984)
automerge
2020-05-11 15:25:40 -07:00
Dan Albert
fa059bb3c3 Add windows instructions to CLI install docs (#9987)
automerge
2020-05-11 14:50:26 -07:00
mergify[bot]
9652e832c2 Write non-error output to stdout (#9960) (#9972)
automerge
2020-05-11 10:18:15 -07:00
mergify[bot]
52e27712e1 Retransmit and shred fetch metrics (#9965) (#9969)
automerge
2020-05-10 23:15:15 -07:00
mergify[bot]
c00ec26a3b Cli: Add solana supply command; hide total-supply (bp #9956) (#9963)
automerge
2020-05-10 18:04:46 -07:00
mergify[bot]
50eba96b58 More logging around failure (#9967) (#9968)
automerge
2020-05-10 17:23:30 -07:00
mergify[bot]
e7c0629951 Remove RpcClient code duplication (#9952) (#9961)
automerge
2020-05-10 10:36:56 -07:00
mergify[bot]
a08235da9a send_and_confirm_transaction() no longer needs a keypair (#9950) (#9962)
automerge
2020-05-10 10:14:31 -07:00
mergify[bot]
b213004157 Rpc: Add getCirculatingSupply endpoint, redux (#9953) (#9955)
automerge
2020-05-09 12:32:08 -07:00
Jack May
92562b4349 Pull in hardened BPF virtual machine (#9931) 2020-05-08 16:06:22 -07:00
Jack May
01c490d354 Rename BPF helper to syscall (#9819)
automerge
2020-05-08 16:06:22 -07:00
Ryo Onodera
cfdc0eb99e Maintain sysvar balances for consistent market cap. (#9942)
automerge
2020-05-08 12:15:37 -07:00
mergify[bot]
0b7b3c9f20 Support ad-hoc genesis args in run.sh (#9697) (#9940)
automerge
2020-05-08 08:29:29 -07:00
Michael Vines
5cd685ed3a Bump version to v1.1.11 2020-05-07 16:57:43 -07:00
Ryo Onodera
9498f11d46 v1.1: Include account.owner into account hash (#9918)
automerge
2020-05-07 13:00:52 -07:00
mergify[bot]
558324b861 Refactor RPC subscriptions account handling (#9888) (#9912)
automerge
2020-05-07 01:14:58 -07:00
Tyera Eulberg
9a5fc3513a Add using OutputFormat enum to --sign-only transactions (#9650) (#9911)
automerge
2020-05-06 23:19:36 -07:00
carllin
b7c6e139e6 Revert (#9908)
automerge
2020-05-06 22:28:19 -07:00
mergify[bot]
a9d2fa6aad Cli: Update OutputFormat method to return a String to restore consistency (#9904) (#9905)
automerge
2020-05-06 20:51:45 -07:00
Michael Vines
056a9952c3 Cargo.lock 2020-05-06 16:35:26 -07:00
Michael Vines
fc21c857a3 Bump version to v1.1.10 2020-05-06 16:04:41 -07:00
mergify[bot]
614ad64ec1 Fix (#9896) (#9901)
automerge
2020-05-06 13:31:02 -07:00
Michael Vines
f72c186004 Correct method name 2020-05-06 11:28:16 -07:00
mergify[bot]
59a2b05b44 Gossip no longer pushes/pulls from nodes with a different shred version (#9868) (#9894)
automerge
2020-05-05 23:06:36 -07:00
mergify[bot]
bed6e566ef Display transaction fee in SOL (#9892) (#9898)
automerge

(cherry picked from commit e078ba1dde)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-05 22:44:36 -07:00
mergify[bot]
e85f9fcb73 Repair alternate versions of dead slots (bp #9805) (#9886)
automerge
2020-05-05 19:22:16 -07:00
mergify[bot]
8cb3953c9a Cli: add cluster-date subcommand, and make block-time slot optional (#9878) (#9883)
automerge
2020-05-05 10:19:10 -07:00
Michael Vines
d8e885f425 Enable inflation on testnet effective epoch 44 (#9879) 2020-05-05 08:29:24 -07:00
mergify[bot]
28fa5149b7 Rpc: Filter blockstore data by cluster-confirmed root (#9873) (#9881)
automerge
2020-05-04 22:34:37 -07:00
mergify[bot]
190acd7d15 Rpc: add getLargestAccounts endpoint (#9869) (#9877)
automerge
2020-05-04 18:54:21 -07:00
mergify[bot]
909316bd53 Avoid panic caused by converting non-positive / non-normal floating points values to duration (#9867) (#9872)
(cherry picked from commit 3aedb81d48)

Co-authored-by: Kristofer Peterson <svenski123@users.noreply.github.com>
2020-05-04 14:27:40 -07:00
mergify[bot]
000b763e95 Add clap.rs default for --commitment (#9859) (#9861)
automerge
2020-05-02 14:47:30 -07:00
Michael Vines
9d00ff6624 Bump to 1.1.9 2020-05-02 11:41:51 -07:00
206 changed files with 9082 additions and 3961 deletions

Cargo.lock (generated): 693 changed lines

File diff suppressed because it is too large

View File

@@ -58,6 +58,7 @@ members = [
"transaction-status",
"upload-perf",
"net-utils",
"version",
"vote-signer",
"cli",
"rayon-threadlimit",

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.1.8"
version = "1.1.15"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-measure = { path = "../measure", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-measure = { path = "../measure", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
rand = "0.7.0"
clap = "2.33.0"
crossbeam-channel = "0.4"

View File

@@ -1,9 +1,11 @@
use clap::{value_t, App, Arg};
use rayon::prelude::*;
use solana_measure::measure::Measure;
use solana_runtime::accounts::{create_test_accounts, update_accounts, Accounts};
use solana_runtime::{
accounts::{create_test_accounts, update_accounts, Accounts},
accounts_index::Ancestors,
};
use solana_sdk::pubkey::Pubkey;
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
@@ -76,7 +78,7 @@ fn main() {
num_slots,
create_time
);
let mut ancestors: HashMap<u64, usize> = vec![(0, 0)].into_iter().collect();
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..num_slots {
ancestors.insert(i as u64, i - 1);
accounts.add_root(i as u64);
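
For orientation: the Ancestors alias imported above (introduced in #9699, per the commit list) replaces the HashMap<u64, usize> shown in this hunk, so it presumably reduces to a slot-keyed map along these lines. The definition below is a hedged sketch, not the verbatim one from solana_runtime::accounts_index.

// Hedged sketch only: assumes Ancestors aliases the slot-keyed map it replaces
// in this hunk (HashMap<u64, usize>); the real definition may spell the key as a
// Slot type alias rather than a bare u64.
use std::collections::HashMap;

pub type Ancestors = HashMap<u64, usize>;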

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "1.1.8"
version = "1.1.15"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,23 +15,23 @@ ed25519-dalek = "=1.0.0-pre.3"
log = "0.4.8"
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-client = { path = "../client", version = "1.1.8" }
solana-storage-program = { path = "../programs/storage", version = "1.1.8" }
solana-client = { path = "../client", version = "1.1.15" }
solana-storage-program = { path = "../programs/storage", version = "1.1.15" }
thiserror = "1.0"
serde = "1.0.105"
serde_json = "1.0.48"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-chacha = { path = "../chacha", version = "1.1.8" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-core = { path = "../core", version = "1.1.8" }
solana-streamer = { path = "../streamer", version = "1.1.8" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-chacha = { path = "../chacha", version = "1.1.15" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.15" }
solana-streamer = { path = "../streamer", version = "1.1.15" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
[dev-dependencies]
hex = "0.4.2"

View File

@@ -13,8 +13,7 @@ use solana_core::{
cluster_slots::ClusterSlots,
contact_info::ContactInfo,
gossip_service::GossipService,
repair_service,
repair_service::{RepairService, RepairSlotRange, RepairStats, RepairStrategy},
repair_service::{self, RepairService, RepairSlotRange, RepairStats, RepairStrategy},
serve_repair::ServeRepair,
shred_fetch_stage::ShredFetchStage,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
@@ -697,16 +696,10 @@ impl Archiver {
RpcClient::new_socket(rpc_peers[node_index].rpc)
};
Ok(rpc_client
.send(
&RpcRequest::GetSlotsPerSegment,
.send::<u64>(
RpcRequest::GetSlotsPerSegment,
serde_json::json!([client_commitment]),
0,
)
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
ArchiverError::ClientError(err)
})?
.as_u64()
.unwrap())
} else {
Err(ArchiverError::NoRpcPeers)
@@ -749,21 +742,10 @@ impl Archiver {
let node_index = thread_rng().gen_range(0, rpc_peers.len());
RpcClient::new_socket(rpc_peers[node_index].rpc)
};
let response = rpc_client
.send(
&RpcRequest::GetStorageTurn,
serde_json::value::Value::Null,
0,
)
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
ArchiverError::ClientError(err)
})?;
let RpcStorageTurn {
blockhash: storage_blockhash,
slot: turn_slot,
} = serde_json::from_value::<RpcStorageTurn>(response)
.map_err(ArchiverError::JsonError)?;
} = rpc_client.send(RpcRequest::GetStorageTurn, serde_json::value::Value::Null)?;
let turn_blockhash = storage_blockhash.parse().map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -842,7 +824,7 @@ impl Archiver {
.into_iter()
.filter_map(|repair_request| {
serve_repair
.map_repair_request(&repair_request, &mut repair_stats)
.map_repair_request(&repair_request, &mut repair_stats, Some(0))
.map(|result| ((archiver_info.gossip, result), repair_request))
.ok()
})
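
For orientation: the reworked calls above suggest that RpcClient::send is now generic over the deserialized result type and no longer takes a retry-count argument. The snippet below is a hedged usage sketch under that assumption; the endpoint URL and parameter value are illustrative only, not taken from this diff.

// Hedged usage sketch mirroring the send::<u64>(RpcRequest::GetSlotsPerSegment, ...)
// call above; assumes the solana-client module layout at this version and a local
// validator RPC endpoint at the default port.
use serde_json::json;
use solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest};
use solana_sdk::commitment_config::CommitmentConfig;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());
    // The annotated type selects what the JSON-RPC "result" field is deserialized into.
    let slots_per_segment: u64 = rpc_client.send(
        RpcRequest::GetSlotsPerSegment,
        json!([CommitmentConfig::default()]),
    )?;
    println!("slots per segment: {}", slots_per_segment);
    Ok(())
}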

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "1.1.8"
version = "1.1.15"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,12 +11,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.7.0"
solana-chacha = { path = "../chacha", version = "1.1.8" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-chacha = { path = "../chacha", version = "1.1.15" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
[dev-dependencies]
hex = "0.4.2"

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.1.8"
version = "1.1.15"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,13 +10,13 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.10.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-core = { path = "../core", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
[package.metadata.docs.rs]

View File

@@ -2,24 +2,26 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.1.8"
version = "1.1.15"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.8" }
solana-streamer = { path = "../streamer", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-measure = { path = "../measure", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.15" }
solana-streamer = { path = "../streamer", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-measure = { path = "../measure", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-version = { path = "../version", version = "1.1.15" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,3 +1,4 @@
use clap::{crate_description, crate_name, value_t, App, Arg};
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
@@ -64,15 +65,22 @@ fn check_txs(
no_bank
}
fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
fn make_accounts_txs(
total_num_transactions: usize,
hash: Hash,
same_payer: bool,
) -> Vec<Transaction> {
let to_pubkey = Pubkey::new_rand();
let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
(0..txes)
let payer_key = Keypair::new();
let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash);
(0..total_num_transactions)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new_rand();
if !same_payer {
new.message.account_keys[0] = Pubkey::new_rand();
}
new.message.account_keys[1] = Pubkey::new_rand();
new.signatures = vec![Signature::new(&sig[0..64])];
new
@@ -96,13 +104,61 @@ fn bytes_as_usize(bytes: &[u8]) -> usize {
bytes[0] as usize | (bytes[1] as usize) << 8
}
#[allow(clippy::cognitive_complexity)]
fn main() {
solana_logger::setup();
let num_threads = BankingStage::num_threads() as usize;
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())
.arg(
Arg::with_name("num_chunks")
.long("num-chunks")
.takes_value(true)
.value_name("SIZE")
.help("Number of transaction chunks."),
)
.arg(
Arg::with_name("packets_per_chunk")
.long("packets-per-chunk")
.takes_value(true)
.value_name("SIZE")
.help("Packets per chunk"),
)
.arg(
Arg::with_name("skip_sanity")
.long("skip-sanity")
.takes_value(false)
.help("Skip transaction sanity execution"),
)
.arg(
Arg::with_name("same_payer")
.long("same-payer")
.takes_value(false)
.help("Use the same payer for transfers"),
)
.arg(
Arg::with_name("iterations")
.long("iterations")
.takes_value(true)
.help("Number of iterations"),
)
.arg(
Arg::with_name("num_threads")
.long("num-threads")
.takes_value(true)
.help("Number of iterations"),
)
.get_matches();
let num_threads =
value_t!(matches, "num_threads", usize).unwrap_or(BankingStage::num_threads() as usize);
// a multiple of packet chunk duplicates to avoid races
const CHUNKS: usize = 8 * 2;
const PACKETS_PER_BATCH: usize = 192;
let txes = PACKETS_PER_BATCH * num_threads * CHUNKS;
let num_chunks = value_t!(matches, "num_chunks", usize).unwrap_or(16);
let packets_per_chunk = value_t!(matches, "packets_per_chunk", usize).unwrap_or(192);
let iterations = value_t!(matches, "iterations", usize).unwrap_or(1000);
let total_num_transactions = num_chunks * num_threads * packets_per_chunk;
let mint_total = 1_000_000_000_000;
let GenesisConfigInfo {
genesis_config,
@@ -116,34 +172,44 @@ fn main() {
let mut bank_forks = BankForks::new(0, bank0);
let mut bank = bank_forks.working_bank();
info!("threads: {} txs: {}", num_threads, txes);
info!("threads: {} txs: {}", num_threads, total_num_transactions);
let mut transactions = make_accounts_txs(txes, &mint_keypair, genesis_config.hash());
let same_payer = matches.is_present("same_payer");
let mut transactions =
make_accounts_txs(total_num_transactions, genesis_config.hash(), same_payer);
// fund all the accounts
transactions.iter().for_each(|tx| {
let fund = system_transaction::transfer(
let mut fund = system_transaction::transfer(
&mint_keypair,
&tx.message.account_keys[0],
mint_total / txes as u64,
mint_total / total_num_transactions as u64,
genesis_config.hash(),
);
// Ignore any pesky duplicate signature errors in the case we are using single-payer
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
fund.signatures = vec![Signature::new(&sig[0..64])];
let x = bank.process_transaction(&fund);
x.unwrap();
});
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions");
});
bank.clear_signatures();
//sanity check, make sure all the transactions can execute in parallel
let res = bank.process_transactions(&transactions);
for r in res {
assert!(r.is_ok(), "sanity parallel execution");
let skip_sanity = matches.is_present("skip_sanity");
if !skip_sanity {
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
});
bank.clear_signatures();
//sanity check, make sure all the transactions can execute in parallel
let res = bank.process_transactions(&transactions);
for r in res {
assert!(r.is_ok(), "sanity parallel execution error: {:?}", r);
}
bank.clear_signatures();
}
bank.clear_signatures();
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), packets_per_chunk);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
@@ -162,7 +228,7 @@ fn main() {
);
poh_recorder.lock().unwrap().set_bank(&bank);
let chunk_len = verified.len() / CHUNKS;
let chunk_len = verified.len() / num_chunks;
let mut start = 0;
// This is so that the signal_receiver does not go out of scope after the closure.
@@ -171,17 +237,17 @@ fn main() {
let signal_receiver = Arc::new(signal_receiver);
let mut total_us = 0;
let mut tx_total_us = 0;
let base_tx_count = bank.transaction_count();
let mut txs_processed = 0;
let mut root = 1;
let collector = Pubkey::new_rand();
const ITERS: usize = 1_000;
let config = Config {
packets_per_batch: PACKETS_PER_BATCH,
packets_per_batch: packets_per_chunk,
chunk_len,
num_threads,
};
let mut total_sent = 0;
for _ in 0..ITERS {
for _ in 0..iterations {
let now = Instant::now();
let mut sent = 0;
@@ -222,7 +288,11 @@ fn main() {
sleep(Duration::from_millis(5));
}
}
if check_txs(&signal_receiver, txes / CHUNKS, &poh_recorder) {
if check_txs(
&signal_receiver,
total_num_transactions / num_chunks,
&poh_recorder,
) {
debug!(
"resetting bank {} tx count: {} txs_proc: {}",
bank.slot(),
@@ -274,7 +344,7 @@ fn main() {
debug!(
"time: {} us checked: {} sent: {}",
duration_as_us(&now.elapsed()),
txes / CHUNKS,
total_num_transactions / num_chunks,
sent,
);
total_sent += sent;
@@ -285,20 +355,26 @@ fn main() {
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
}
start += chunk_len;
start %= verified.len();
}
let txs_processed = bank_forks.working_bank().transaction_count();
debug!("processed: {} base: {}", txs_processed, base_tx_count);
eprintln!(
"{{'name': 'banking_bench_total', 'median': '{}'}}",
"{{'name': 'banking_bench_total', 'median': '{:.2}'}}",
(1000.0 * 1000.0 * total_sent as f64) / (total_us as f64),
);
eprintln!(
"{{'name': 'banking_bench_tx_total', 'median': '{}'}}",
"{{'name': 'banking_bench_tx_total', 'median': '{:.2}'}}",
(1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64),
);
eprintln!(
"{{'name': 'banking_bench_success_tx_total', 'median': '{:.2}'}}",
(1000.0 * 1000.0 * (txs_processed - base_tx_count) as f64) / (total_us as f64),
);
drop(verified_sender);
drop(vote_sender);

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.1.8"
version = "1.1.15"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,20 +18,20 @@ rand = "0.7.0"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-core = { path = "../core", version = "1.1.8" }
solana-genesis = { path = "../genesis", version = "1.1.8" }
solana-client = { path = "../client", version = "1.1.8" }
solana-faucet = { path = "../faucet", version = "1.1.8" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.15" }
solana-genesis = { path = "../genesis", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.15" }
solana-faucet = { path = "../faucet", version = "1.1.15" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.1.8" }
solana-local-cluster = { path = "../local-cluster", version = "1.1.15" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -2,17 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.1.8"
version = "1.1.15"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-streamer = { path = "../streamer", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-streamer = { path = "../streamer", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.1.8"
version = "1.1.15"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,24 +14,24 @@ log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-core = { path = "../core", version = "1.1.8" }
solana-genesis = { path = "../genesis", version = "1.1.8" }
solana-client = { path = "../client", version = "1.1.8" }
solana-faucet = { path = "../faucet", version = "1.1.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-core = { path = "../core", version = "1.1.15" }
solana-genesis = { path = "../genesis", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.15" }
solana-faucet = { path = "../faucet", version = "1.1.15" }
#solana-librapay = { path = "../programs/librapay", version = "1.1.8", optional = true }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
solana-measure = { path = "../measure", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-measure = { path = "../measure", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
#solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.8", optional = true }
[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.1.8" }
solana-local-cluster = { path = "../local-cluster", version = "1.1.15" }
#[features]
#move = ["solana-librapay", "solana-move-loader-program"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "1.1.8"
version = "1.1.15"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,12 +10,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.8" }
solana-chacha = { path = "../chacha", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.15" }
solana-chacha = { path = "../chacha", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
[dev-dependencies]
hex-literal = "0.2.1"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "1.1.8"
version = "1.1.15"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "1.1.8"
version = "1.1.15"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2018"
log = "0.4.8"
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
[dev-dependencies]
hex-literal = "0.2.1"

View File

@@ -2,8 +2,10 @@
set -e
cd "$(dirname "$0")/.."
# shellcheck source=multinode-demo/common.sh
source multinode-demo/common.sh
rm -f config/run/init-completed
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
timeout 15 ./run.sh &
pid=$!
@@ -17,6 +19,13 @@ while [[ ! -f config/run/init-completed ]]; do
fi
done
while [[ $($solana_cli slot --commitment recent) -eq 0 ]]; do
sleep 1
done
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
wait $pid
$solana_ledger_tool create-snapshot --ledger config/ledger 1 config/snapshot-ledger
cp config/ledger/genesis.tar.bz2 config/snapshot-ledger
$solana_ledger_tool verify --ledger config/snapshot-ledger

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.1.8"
version = "1.1.15"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"

View File

@@ -8,10 +8,15 @@ pub const COMMITMENT_ARG: ArgConstant<'static> = ArgConstant {
};
pub fn commitment_arg<'a, 'b>() -> Arg<'a, 'b> {
commitment_arg_with_default("recent")
}
pub fn commitment_arg_with_default<'a, 'b>(default_value: &'static str) -> Arg<'a, 'b> {
Arg::with_name(COMMITMENT_ARG.name)
.long(COMMITMENT_ARG.long)
.takes_value(true)
.possible_values(&["default", "max", "recent", "root"])
.possible_values(&["recent", "root", "max"])
.default_value(default_value)
.value_name("COMMITMENT_LEVEL")
.help(COMMITMENT_ARG.help)
}
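
For orientation: the helper above is wired into subcommands the same way the balance subcommand is later in this diff, via .arg(commitment_arg_with_default("max")). The snippet below is a hedged sketch of that usage, assuming clap 2.x and the solana_clap_utils::commitment path shown in the cli.rs imports.

// Sketch only: a subcommand that exposes --commitment with "max" as its default,
// restricted to the values listed above ("recent", "root", "max").
use clap::{App, SubCommand};
use solana_clap_utils::commitment::commitment_arg_with_default;

fn main() {
    let matches = App::new("example")
        .subcommand(
            SubCommand::with_name("balance")
                .arg(commitment_arg_with_default("max")),
        )
        .get_matches();
    println!("{:?}", matches.subcommand_name());
}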

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.8"
version = "1.1.15"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.8"
version = "1.1.15"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,28 +27,28 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-budget-program = { path = "../programs/budget", version = "1.1.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-cli-config = { path = "../cli-config", version = "1.1.8" }
solana-client = { path = "../client", version = "1.1.8" }
solana-config-program = { path = "../programs/config", version = "1.1.8" }
solana-faucet = { path = "../faucet", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-stake-program = { path = "../programs/stake", version = "1.1.8" }
solana-storage-program = { path = "../programs/storage", version = "1.1.8" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.8" }
solana-vote-program = { path = "../programs/vote", version = "1.1.8" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.8" }
solana-budget-program = { path = "../programs/budget", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-cli-config = { path = "../cli-config", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.15" }
solana-config-program = { path = "../programs/config", version = "1.1.15" }
solana-faucet = { path = "../faucet", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-stake-program = { path = "../programs/stake", version = "1.1.15" }
solana-storage-program = { path = "../programs/storage", version = "1.1.15" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.15" }
solana-vote-program = { path = "../programs/vote", version = "1.1.15" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.15" }
thiserror = "1.0.13"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.1.8" }
solana-budget-program = { path = "../programs/budget", version = "1.1.8" }
solana-core = { path = "../core", version = "1.1.15" }
solana-budget-program = { path = "../programs/budget", version = "1.1.15" }
tempfile = "3.1.0"
[[bin]]

View File

@@ -1,7 +1,7 @@
use crate::{
cli_output::{CliAccount, OutputFormat},
cli_output::{CliAccount, CliSignOnlyData, CliSignature, OutputFormat},
cluster_query::*,
display::{println_name_value, println_signers},
display::println_name_value,
nonce::{self, *},
offline::{blockhash_query::BlockhashQuery, *},
stake::*,
@@ -16,12 +16,17 @@ use num_traits::FromPrimitive;
use serde_json::{self, json, Value};
use solana_budget_program::budget_instruction::{self, BudgetError};
use solana_clap_utils::{
input_parsers::*, input_validators::*, keypair::signer_from_path, offline::SIGN_ONLY_ARG,
commitment::{commitment_arg_with_default, COMMITMENT_ARG},
input_parsers::*,
input_validators::*,
keypair::signer_from_path,
offline::SIGN_ONLY_ARG,
ArgConstant,
};
use solana_client::{
client_error::{ClientErrorKind, Result as ClientResult},
rpc_client::RpcClient,
rpc_config::RpcLargestAccountsFilter,
rpc_response::{RpcAccount, RpcKeyedAccount},
};
#[cfg(not(test))]
@@ -55,6 +60,7 @@ use solana_transaction_status::{EncodedTransaction, TransactionEncoding};
use solana_vote_program::vote_state::VoteAuthorize;
use std::{
error,
fmt::Write as FmtWrite,
fs::File,
io::{Read, Write},
net::{IpAddr, SocketAddr},
@@ -179,6 +185,7 @@ pub enum CliCommand {
commitment_config: CommitmentConfig,
follow: bool,
},
ClusterDate,
ClusterVersion,
CreateAddressWithSeed {
from_pubkey: Option<Pubkey>,
@@ -187,7 +194,7 @@ pub enum CliCommand {
},
Fees,
GetBlockTime {
slot: Slot,
slot: Option<Slot>,
},
GetEpochInfo {
commitment_config: CommitmentConfig,
@@ -199,6 +206,14 @@ pub enum CliCommand {
GetSlot {
commitment_config: CommitmentConfig,
},
LargestAccounts {
commitment_config: CommitmentConfig,
filter: Option<RpcLargestAccountsFilter>,
},
Supply {
commitment_config: CommitmentConfig,
print_accounts: bool,
},
TotalSupply {
commitment_config: CommitmentConfig,
},
@@ -229,8 +244,8 @@ pub enum CliCommand {
},
TransactionHistory {
address: Pubkey,
end_slot: Option<Slot>, // None == latest slot
slot_limit: u64,
end_slot: Option<Slot>, // None == latest slot
slot_limit: Option<u64>, // None == search full history
},
// Nonce commands
AuthorizeNonceAccount {
@@ -403,6 +418,7 @@ pub enum CliCommand {
Balance {
pubkey: Option<Pubkey>,
use_lamports_unit: bool,
commitment_config: CommitmentConfig,
},
Cancel(Pubkey),
Confirm(Signature),
@@ -567,6 +583,7 @@ impl Default for CliConfig<'_> {
command: CliCommand::Balance {
pubkey: Some(Pubkey::default()),
use_lamports_unit: false,
commitment_config: CommitmentConfig::default(),
},
json_rpc_url: Self::default_json_rpc_url(),
websocket_url: Self::default_websocket_url(),
@@ -587,6 +604,10 @@ pub fn parse_command(
let response = match matches.subcommand() {
// Cluster Query Commands
("catchup", Some(matches)) => parse_catchup(matches, wallet_manager),
("cluster-date", Some(_matches)) => Ok(CliCommandInfo {
command: CliCommand::ClusterDate,
signers: vec![],
}),
("cluster-version", Some(_matches)) => Ok(CliCommandInfo {
command: CliCommand::ClusterVersion,
signers: vec![],
@@ -606,6 +627,8 @@ pub fn parse_command(
}),
("epoch", Some(matches)) => parse_get_epoch(matches),
("slot", Some(matches)) => parse_get_slot(matches),
("largest-accounts", Some(matches)) => parse_largest_accounts(matches),
("supply", Some(matches)) => parse_supply(matches),
("total-supply", Some(matches)) => parse_total_supply(matches),
("transaction-count", Some(matches)) => parse_get_transaction_count(matches),
("leader-schedule", Some(_matches)) => Ok(CliCommandInfo {
@@ -776,6 +799,7 @@ pub fn parse_command(
}
("balance", Some(matches)) => {
let pubkey = pubkey_of_signer(matches, "pubkey", wallet_manager)?;
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
let signers = if pubkey.is_some() {
vec![]
} else {
@@ -790,6 +814,7 @@ pub fn parse_command(
command: CliCommand::Balance {
pubkey,
use_lamports_unit: matches.is_present("lamports"),
commitment_config,
},
signers,
})
@@ -813,7 +838,7 @@ pub fn parse_command(
},
("decode-transaction", Some(matches)) => {
let encoded_transaction = EncodedTransaction::Binary(
matches.value_of("base85_transaction").unwrap().to_string(),
matches.value_of("base58_transaction").unwrap().to_string(),
);
if let Some(transaction) = encoded_transaction.decode() {
Ok(CliCommandInfo {
@@ -1044,7 +1069,7 @@ pub fn get_blockhash_and_fee_calculator(
})
}
pub fn return_signers(tx: &Transaction) -> ProcessResult {
pub fn return_signers(tx: &Transaction, config: &CliConfig) -> ProcessResult {
let verify_results = tx.verify_with_results();
let mut signers = Vec::new();
let mut absent = Vec::new();
@@ -1063,15 +1088,14 @@ pub fn return_signers(tx: &Transaction) -> ProcessResult {
}
});
println_signers(&tx.message.recent_blockhash, &signers, &absent, &bad_sig);
let cli_command = CliSignOnlyData {
blockhash: tx.message.recent_blockhash.to_string(),
signers,
absent,
bad_sig,
};
Ok(json!({
"blockhash": tx.message.recent_blockhash.to_string(),
"signers": &signers,
"absent": &absent,
"badSig": &bad_sig,
})
.to_string())
Ok(config.output_format.formatted_string(&cli_command))
}
pub fn parse_create_address_with_seed(
@@ -1159,7 +1183,7 @@ fn process_airdrop(
}
};
request_and_confirm_airdrop(&rpc_client, faucet_addr, &pubkey, lamports)?;
request_and_confirm_airdrop(&rpc_client, faucet_addr, &pubkey, lamports, &config)?;
let current_balance = rpc_client
.retry_get_balance(&pubkey, 5)?
@@ -1173,19 +1197,17 @@ fn process_balance(
config: &CliConfig,
pubkey: &Option<Pubkey>,
use_lamports_unit: bool,
commitment_config: CommitmentConfig,
) -> ProcessResult {
let pubkey = if let Some(pubkey) = pubkey {
*pubkey
} else {
config.pubkey()?
};
let balance = rpc_client.retry_get_balance(&pubkey, 5)?;
match balance {
Some(lamports) => Ok(build_balance_message(lamports, use_lamports_unit, true)),
None => Err(
CliError::RpcRequestError("Received result of an unexpected type".to_string()).into(),
),
}
let balance = rpc_client
.get_balance_with_commitment(&pubkey, commitment_config)?
.value;
Ok(build_balance_message(balance, use_lamports_unit, true))
}
fn process_confirm(
@@ -1260,21 +1282,21 @@ fn process_show_account(
use_lamports_unit,
};
config.output_format.formatted_print(&cli_account);
let mut account_string = config.output_format.formatted_string(&cli_account);
if config.output_format == OutputFormat::Display {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
println!();
println!("Wrote account data to {}", output_file);
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
println!("{:?}", data.hex_dump());
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
}
}
Ok("".to_string())
Ok(account_string)
}
fn process_deploy(
@@ -1338,17 +1360,17 @@ fn process_deploy(
)?;
trace!("Creating program account");
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut create_account_tx, &signers);
log_instruction_custom_error::<SystemError>(result)
.map_err(|_| CliError::DynamicProgramError("Program allocate space failed".to_string()))?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&create_account_tx);
log_instruction_custom_error::<SystemError>(result, &config).map_err(|_| {
CliError::DynamicProgramError("Program account allocation failed".to_string())
})?;
trace!("Writing program data");
rpc_client.send_and_confirm_transactions(write_transactions, &signers)?;
trace!("Finalizing program account");
rpc_client
.send_and_confirm_transaction_with_spinner(&mut finalize_tx, &signers)
.send_and_confirm_transaction_with_spinner(&finalize_tx)
.map_err(|e| {
CliError::DynamicProgramError(format!("Program finalize transaction failed: {}", e))
})?;
@@ -1400,7 +1422,7 @@ fn process_pay(
if sign_only {
tx.try_partial_sign(&config.signers, blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1413,9 +1435,8 @@ fn process_pay(
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
} else if *witnesses == None {
let dt = timestamp.unwrap();
@@ -1440,7 +1461,7 @@ fn process_pay(
let mut tx = Transaction::new_unsigned(message);
if sign_only {
tx.try_partial_sign(&[config.signers[0], &contract_state], blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&[config.signers[0], &contract_state], blockhash)?;
check_account_for_fee(
@@ -1449,14 +1470,10 @@ fn process_pay(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(
&mut tx,
&[config.signers[0], &contract_state],
);
let signature_str = log_instruction_custom_error::<BudgetError>(result)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
let signature = log_instruction_custom_error::<BudgetError>(result, &config)?;
Ok(json!({
"signature": signature_str,
"signature": signature,
"processId": format!("{}", contract_state.pubkey()),
})
.to_string())
@@ -1486,23 +1503,19 @@ fn process_pay(
let mut tx = Transaction::new_unsigned(message);
if sign_only {
tx.try_partial_sign(&[config.signers[0], &contract_state], blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&[config.signers[0], &contract_state], blockhash)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(
&mut tx,
&[config.signers[0], &contract_state],
);
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
check_account_for_fee(
rpc_client,
&config.signers[0].pubkey(),
&fee_calculator,
&tx.message,
)?;
let signature_str = log_instruction_custom_error::<BudgetError>(result)?;
let signature = log_instruction_custom_error::<BudgetError>(result, &config)?;
Ok(json!({
"signature": signature_str,
"signature": signature,
"processId": format!("{}", contract_state.pubkey()),
})
.to_string())
@@ -1528,9 +1541,8 @@ fn process_cancel(rpc_client: &RpcClient, config: &CliConfig, pubkey: &Pubkey) -
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
log_instruction_custom_error::<BudgetError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<BudgetError>(result, &config)
}
fn process_time_elapsed(
@@ -1552,9 +1564,8 @@ fn process_time_elapsed(
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
log_instruction_custom_error::<BudgetError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<BudgetError>(result, &config)
}
#[allow(clippy::too_many_arguments)]
@@ -1599,7 +1610,7 @@ fn process_transfer(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1615,9 +1626,9 @@ fn process_transfer(
let result = if no_wait {
rpc_client.send_transaction(&tx)
} else {
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers)
rpc_client.send_and_confirm_transaction_with_spinner(&tx)
};
log_instruction_custom_error::<SystemError>(result)
log_instruction_custom_error::<SystemError>(result, &config)
}
}
@@ -1639,9 +1650,8 @@ fn process_witness(
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
log_instruction_custom_error::<BudgetError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<BudgetError>(result, &config)
}
pub fn process_command(config: &CliConfig) -> ProcessResult {
@@ -1680,6 +1690,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*commitment_config,
*follow,
),
CliCommand::ClusterDate => process_cluster_date(&rpc_client, config),
CliCommand::ClusterVersion => process_cluster_version(&rpc_client),
CliCommand::CreateAddressWithSeed {
from_pubkey,
@@ -1687,7 +1698,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
program_id,
} => process_create_address_with_seed(config, from_pubkey.as_ref(), &seed, &program_id),
CliCommand::Fees => process_fees(&rpc_client),
CliCommand::GetBlockTime { slot } => process_get_block_time(&rpc_client, *slot),
CliCommand::GetBlockTime { slot } => process_get_block_time(&rpc_client, config, *slot),
CliCommand::GetGenesisHash => process_get_genesis_hash(&rpc_client),
CliCommand::GetEpochInfo { commitment_config } => {
process_get_epoch_info(&rpc_client, config, *commitment_config)
@@ -1698,6 +1709,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::GetSlot { commitment_config } => {
process_get_slot(&rpc_client, *commitment_config)
}
CliCommand::LargestAccounts {
commitment_config,
filter,
} => process_largest_accounts(&rpc_client, config, *commitment_config, filter.clone()),
CliCommand::Supply {
commitment_config,
print_accounts,
} => process_supply(&rpc_client, config, *commitment_config, *print_accounts),
CliCommand::TotalSupply { commitment_config } => {
process_total_supply(&rpc_client, *commitment_config)
}
@@ -2128,7 +2147,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::Balance {
pubkey,
use_lamports_unit,
} => process_balance(&rpc_client, config, &pubkey, *use_lamports_unit),
commitment_config,
} => process_balance(
&rpc_client,
config,
&pubkey,
*use_lamports_unit,
*commitment_config,
),
// Cancel a contract by contract Pubkey
CliCommand::Cancel(pubkey) => process_cancel(&rpc_client, config, &pubkey),
// Confirm the last client transaction by signature
@@ -2256,6 +2282,7 @@ pub fn request_and_confirm_airdrop(
faucet_addr: &SocketAddr,
to_pubkey: &Pubkey,
lamports: u64,
config: &CliConfig,
) -> ProcessResult {
let (blockhash, _fee_calculator) = rpc_client.get_recent_blockhash()?;
let keypair = {
@@ -2269,12 +2296,15 @@ pub fn request_and_confirm_airdrop(
sleep(Duration::from_secs(1));
}
}?;
let mut tx = keypair.airdrop_transaction();
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[&keypair]);
log_instruction_custom_error::<SystemError>(result)
let tx = keypair.airdrop_transaction();
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn log_instruction_custom_error<E>(result: ClientResult<Signature>) -> ProcessResult
pub fn log_instruction_custom_error<E>(
result: ClientResult<Signature>,
config: &CliConfig,
) -> ProcessResult
where
E: 'static + std::error::Error + DecodeError<E> + FromPrimitive,
{
@@ -2291,7 +2321,12 @@ where
}
Err(err.into())
}
Ok(sig) => Ok(sig.to_string()),
Ok(sig) => {
let signature = CliSignature {
signature: sig.clone().to_string(),
};
Ok(config.output_format.formatted_string(&signature))
}
}
}
@@ -2388,7 +2423,8 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.long("lamports")
.takes_value(false)
.help("Display balance in lamports instead of SOL"),
),
)
.arg(commitment_arg_with_default("max")),
)
.subcommand(
SubCommand::with_name("cancel")
@@ -2417,9 +2453,9 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
)
.subcommand(
SubCommand::with_name("decode-transaction")
.about("Decode a base-85 binary transaction")
.about("Decode a base-58 binary transaction")
.arg(
Arg::with_name("base85_transaction")
Arg::with_name("base58_transaction")
.index(1)
.value_name("BASE58_TRANSACTION")
.takes_value(true)
@@ -2799,7 +2835,8 @@ mod tests {
CliCommandInfo {
command: CliCommand::Balance {
pubkey: Some(keypair.pubkey()),
use_lamports_unit: false
use_lamports_unit: false,
commitment_config: CommitmentConfig::default(),
},
signers: vec![],
}
@@ -2815,7 +2852,8 @@ mod tests {
CliCommandInfo {
command: CliCommand::Balance {
pubkey: Some(keypair.pubkey()),
use_lamports_unit: true
use_lamports_unit: true,
commitment_config: CommitmentConfig::default(),
},
signers: vec![],
}
@@ -2829,7 +2867,8 @@ mod tests {
CliCommandInfo {
command: CliCommand::Balance {
pubkey: None,
use_lamports_unit: true
use_lamports_unit: true,
commitment_config: CommitmentConfig::default(),
},
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -3299,12 +3338,14 @@ mod tests {
config.command = CliCommand::Balance {
pubkey: None,
use_lamports_unit: true,
commitment_config: CommitmentConfig::default(),
};
assert_eq!(process_command(&config).unwrap(), "50 lamports");
config.command = CliCommand::Balance {
pubkey: None,
use_lamports_unit: false,
commitment_config: CommitmentConfig::default(),
};
assert_eq!(process_command(&config).unwrap(), "0.00000005 SOL");
@@ -3538,6 +3579,7 @@ mod tests {
config.command = CliCommand::Balance {
pubkey: None,
use_lamports_unit: false,
commitment_config: CommitmentConfig::default(),
};
assert!(process_command(&config).is_err());
@@ -3833,6 +3875,8 @@ mod tests {
}
}
let mut config = CliConfig::default();
config.output_format = OutputFormat::JsonCompact;
let present: Box<dyn Signer> = Box::new(keypair_from_seed(&[2u8; 32]).unwrap());
let absent: Box<dyn Signer> = Box::new(NullSigner::new(&Pubkey::new(&[3u8; 32])));
let bad: Box<dyn Signer> = Box::new(BadSigner::new(Pubkey::new(&[4u8; 32])));
@@ -3851,7 +3895,7 @@ mod tests {
let signers = vec![present.as_ref(), absent.as_ref(), bad.as_ref()];
let blockhash = Hash::new(&[7u8; 32]);
tx.try_partial_sign(&signers, blockhash).unwrap();
let res = return_signers(&tx).unwrap();
let res = return_signers(&tx, &config).unwrap();
let sign_only = parse_sign_only_reply_string(&res);
assert_eq!(sign_only.blockhash, blockhash);
assert_eq!(sign_only.present_signers[0].0, present.pubkey());
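The recurring pattern in the hunks above: the transaction is signed in full before submission, send_and_confirm_transaction_with_spinner now takes a plain &Transaction (no &mut, no signer slice), and log_instruction_custom_error threads the CliConfig through so the result string honors config.output_format. A minimal caller-side sketch with a hypothetical helper name (the real call sites are the process_* functions above):

use solana_cli::cli::{log_instruction_custom_error, CliConfig, ProcessResult};
use solana_client::rpc_client::RpcClient;
use solana_sdk::{system_instruction::SystemError, transaction::Transaction};

// Hypothetical helper: submit an already-signed transaction and format the
// outcome according to the configured output format.
fn send_system_tx(rpc_client: &RpcClient, config: &CliConfig, tx: &Transaction) -> ProcessResult {
    let result = rpc_client.send_and_confirm_transaction_with_spinner(tx);
    log_instruction_custom_error::<SystemError>(result, config)
}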


@@ -4,9 +4,12 @@ use console::{style, Emoji};
use inflector::cases::titlecase::to_title_case;
use serde::Serialize;
use serde_json::{Map, Value};
use solana_client::rpc_response::{RpcEpochInfo, RpcKeyedAccount, RpcVoteAccountInfo};
use solana_client::rpc_response::{
RpcAccountBalance, RpcEpochInfo, RpcKeyedAccount, RpcSupply, RpcVoteAccountInfo,
};
use solana_sdk::{
clock::{self, Epoch, Slot, UnixTimestamp},
native_token::lamports_to_sol,
stake_history::StakeHistoryEntry,
};
use solana_stake_program::stake_state::{Authorized, Lockup};
@@ -26,20 +29,14 @@ pub enum OutputFormat {
}
impl OutputFormat {
pub fn formatted_print<T>(&self, item: &T)
pub fn formatted_string<T>(&self, item: &T) -> String
where
T: Serialize + fmt::Display,
{
match self {
OutputFormat::Display => {
println!("{}", item);
}
OutputFormat::Json => {
println!("{}", serde_json::to_string_pretty(item).unwrap());
}
OutputFormat::JsonCompact => {
println!("{}", serde_json::to_value(item).unwrap());
}
OutputFormat::Display => format!("{}", item),
OutputFormat::Json => serde_json::to_string_pretty(item).unwrap(),
OutputFormat::JsonCompact => serde_json::to_value(item).unwrap().to_string(),
}
}
}
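A quick, hedged usage sketch of the formatted_string refactor above: callers now receive the rendered String and decide where it goes. The CliFoo type here is made up for illustration; the real CLI passes structs like CliSignature or CliEpochInfo, all of which are Serialize + Display:

use serde::Serialize;
use solana_cli::cli_output::OutputFormat;
use std::fmt;

#[derive(Serialize)]
struct CliFoo {
    slot: u64,
}

impl fmt::Display for CliFoo {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Slot: {}", self.slot)
    }
}

fn main() {
    let item = CliFoo { slot: 42 };
    // Human-readable form: "Slot: 42"
    println!("{}", OutputFormat::Display.formatted_string(&item));
    // Compact JSON form: {"slot":42}
    println!("{}", OutputFormat::JsonCompact.formatted_string(&item));
}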
@@ -839,3 +836,147 @@ impl From<&Lockout> for CliLockout {
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliBlockTime {
pub slot: Slot,
pub timestamp: UnixTimestamp,
}
impl fmt::Display for CliBlockTime {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Block:", &self.slot.to_string())?;
writeln_name_value(
f,
"Date:",
&format!(
"{} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(self.timestamp, 0), Utc)
.to_rfc3339_opts(SecondsFormat::Secs, true),
self.timestamp
),
)
}
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct CliSignOnlyData {
pub blockhash: String,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub signers: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub absent: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub bad_sig: Vec<String>,
}
impl fmt::Display for CliSignOnlyData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Blockhash:", &self.blockhash)?;
if !self.signers.is_empty() {
writeln!(f, "{}", style("Signers (Pubkey=Signature):").bold())?;
for signer in self.signers.iter() {
writeln!(f, " {}", signer)?;
}
}
if !self.absent.is_empty() {
writeln!(f, "{}", style("Absent Signers (Pubkey):").bold())?;
for pubkey in self.absent.iter() {
writeln!(f, " {}", pubkey)?;
}
}
if !self.bad_sig.is_empty() {
writeln!(f, "{}", style("Bad Signatures (Pubkey):").bold())?;
for pubkey in self.bad_sig.iter() {
writeln!(f, " {}", pubkey)?;
}
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct CliSignature {
pub signature: String,
}
impl fmt::Display for CliSignature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Signature:", &self.signature)?;
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct CliAccountBalances {
pub accounts: Vec<RpcAccountBalance>,
}
impl fmt::Display for CliAccountBalances {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"{}",
style(format!("{:<44} {}", "Address", "Balance",)).bold()
)?;
for account in &self.accounts {
writeln!(
f,
"{:<44} {}",
account.address,
&format!("{} SOL", lamports_to_sol(account.lamports))
)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct CliSupply {
pub total: u64,
pub circulating: u64,
pub non_circulating: u64,
pub non_circulating_accounts: Vec<String>,
#[serde(skip_serializing)]
pub print_accounts: bool,
}
impl From<RpcSupply> for CliSupply {
fn from(rpc_supply: RpcSupply) -> Self {
Self {
total: rpc_supply.total,
circulating: rpc_supply.circulating,
non_circulating: rpc_supply.non_circulating,
non_circulating_accounts: rpc_supply.non_circulating_accounts,
print_accounts: false,
}
}
}
impl fmt::Display for CliSupply {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Total:", &format!("{} SOL", lamports_to_sol(self.total)))?;
writeln_name_value(
f,
"Circulating:",
&format!("{} SOL", lamports_to_sol(self.circulating)),
)?;
writeln_name_value(
f,
"Non-Circulating:",
&format!("{} SOL", lamports_to_sol(self.non_circulating)),
)?;
if self.print_accounts {
writeln!(f)?;
writeln_name_value(f, "Non-Circulating Accounts:", " ")?;
for account in &self.non_circulating_accounts {
writeln!(f, " {}", account)?;
}
}
Ok(())
}
}
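For completeness, a hedged sketch of how these new output types are meant to be driven (hypothetical helper; the real wiring is process_supply, which appears further down in this diff):

use solana_cli::cli_output::{CliSupply, OutputFormat};
use solana_client::rpc_response::RpcSupply;

// RpcSupply -> CliSupply -> rendered String. print_accounts is marked
// #[serde(skip_serializing)], so it only affects the Display output.
fn render_supply(rpc_supply: RpcSupply, print_accounts: bool) -> String {
    let mut supply: CliSupply = rpc_supply.into();
    supply.print_accounts = print_accounts;
    OutputFormat::Display.formatted_string(&supply)
}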


@@ -1,13 +1,9 @@
use crate::{
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
cli_output::{
CliBlockProduction, CliBlockProductionEntry, CliEpochInfo, CliKeyedStakeState,
CliSlotStatus, CliStakeVec, CliValidator, CliValidators,
},
cli_output::*,
display::println_name_value,
};
use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};
use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use solana_clap_utils::{
@@ -19,12 +15,13 @@ use solana_clap_utils::{
use solana_client::{
pubsub_client::{PubsubClient, SlotInfoMessage},
rpc_client::RpcClient,
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
account_utils::StateMut,
clock::{self, Slot},
clock::{self, Clock, Slot},
commitment_config::CommitmentConfig,
epoch_schedule::Epoch,
hash::Hash,
@@ -33,6 +30,7 @@ use solana_sdk::{
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction,
sysvar::{self, Sysvar},
transaction::Transaction,
};
use std::{
@@ -83,6 +81,10 @@ impl ClusterQuerySubCommands for App<'_, '_> {
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("cluster-date")
.about("Get current cluster date, computed from genesis creation time and network time")
)
.subcommand(
SubCommand::with_name("cluster-version")
.about("Get the version of the cluster entrypoint"),
@@ -96,7 +98,6 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.index(1)
.takes_value(true)
.value_name("SLOT")
.required(true)
.help("Slot number of the block to query")
)
)
@@ -121,8 +122,36 @@ impl ClusterQuerySubCommands for App<'_, '_> {
SubCommand::with_name("epoch").about("Get current epoch")
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("largest-accounts").about("Get addresses of largest cluster accounts")
.arg(
Arg::with_name("circulating")
.long("circulating")
.takes_value(false)
.help("Filter address list to only circulating accounts")
)
.arg(
Arg::with_name("non_circulating")
.long("non-circulating")
.takes_value(false)
.conflicts_with("circulating")
.help("Filter address list to only non-circulating accounts")
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("supply").about("Get information about the cluster supply of SOL")
.arg(
Arg::with_name("print_accounts")
.long("print-accounts")
.takes_value(false)
.help("Print list of non-circualting account addresses")
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("total-supply").about("Get total number of SOL")
.setting(AppSettings::Hidden)
.arg(commitment_arg()),
)
.subcommand(
@@ -168,14 +197,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.default_value("15")
.help("Wait up to timeout seconds for transaction confirmation"),
)
.arg(
Arg::with_name(COMMITMENT_ARG.name)
.long(COMMITMENT_ARG.long)
.takes_value(true)
.possible_values(&["default", "max", "recent", "root"])
.value_name("COMMITMENT_LEVEL")
.help("Wait until the transaction is confirmed at selected commitment level"),
),
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("live-slots")
@@ -277,8 +299,7 @@ pub fn parse_catchup(
) -> Result<CliCommandInfo, CliError> {
let node_pubkey = pubkey_of_signer(matches, "node_pubkey", wallet_manager)?.unwrap();
let node_json_rpc_url = value_t!(matches, "node_json_rpc_url", String).ok();
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
let follow = matches.is_present("follow");
Ok(CliCommandInfo {
command: CliCommand::Catchup {
@@ -304,8 +325,7 @@ pub fn parse_cluster_ping(
None
};
let timeout = Duration::from_secs(value_t_or_exit!(matches, "timeout", u64));
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
command: CliCommand::Ping {
lamports,
@@ -324,7 +344,7 @@ pub fn parse_cluster_ping(
}
pub fn parse_get_block_time(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let slot = value_t_or_exit!(matches, "slot", u64);
let slot = value_of(matches, "slot");
Ok(CliCommandInfo {
command: CliCommand::GetBlockTime { slot },
signers: vec![],
@@ -332,8 +352,7 @@ pub fn parse_get_block_time(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
}
pub fn parse_get_epoch_info(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
command: CliCommand::GetEpochInfo { commitment_config },
signers: vec![],
@@ -341,8 +360,7 @@ pub fn parse_get_epoch_info(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
}
pub fn parse_get_slot(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
command: CliCommand::GetSlot { commitment_config },
signers: vec![],
@@ -350,17 +368,45 @@ pub fn parse_get_slot(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliErr
}
pub fn parse_get_epoch(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
command: CliCommand::GetEpoch { commitment_config },
signers: vec![],
})
}
pub fn parse_largest_accounts(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
let filter = if matches.is_present("circulating") {
Some(RpcLargestAccountsFilter::Circulating)
} else if matches.is_present("non_circulating") {
Some(RpcLargestAccountsFilter::NonCirculating)
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::LargestAccounts {
commitment_config,
filter,
},
signers: vec![],
})
}
pub fn parse_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
let print_accounts = matches.is_present("print_accounts");
Ok(CliCommandInfo {
command: CliCommand::Supply {
commitment_config,
print_accounts,
},
signers: vec![],
})
}
pub fn parse_total_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
command: CliCommand::TotalSupply { commitment_config },
signers: vec![],
@@ -368,8 +414,7 @@ pub fn parse_total_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Cl
}
pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
command: CliCommand::GetTransactionCount { commitment_config },
signers: vec![],
@@ -395,8 +440,7 @@ pub fn parse_show_stakes(
pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let use_lamports_unit = matches.is_present("lamports");
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
command: CliCommand::ShowValidators {
@@ -413,8 +457,7 @@ pub fn parse_transaction_history(
) -> Result<CliCommandInfo, CliError> {
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
let end_slot = value_t!(matches, "end_slot", Slot).ok();
let slot_limit = value_t!(matches, "limit", u64)
.unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE);
let slot_limit = value_t!(matches, "limit", u64).ok();
Ok(CliCommandInfo {
command: CliCommand::TransactionHistory {
@@ -501,14 +544,13 @@ pub fn process_catchup(
let slot_distance = rpc_slot as i64 - node_slot as i64;
let slots_per_second =
(previous_slot_distance - slot_distance) as f64 / f64::from(sleep_interval);
let time_remaining = if slots_per_second <= 0.0 {
let time_remaining = (slot_distance as f64 / slots_per_second).round();
let time_remaining = if !time_remaining.is_normal() || time_remaining <= 0.0 {
"".to_string()
} else {
format!(
". Time remaining: {}",
humantime::format_duration(Duration::from_secs_f64(
(slot_distance as f64 / slots_per_second).round()
))
humantime::format_duration(Duration::from_secs_f64(time_remaining))
)
};
@@ -539,6 +581,23 @@ pub fn process_catchup(
}
}
pub fn process_cluster_date(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult {
let result = rpc_client
.get_account_with_commitment(&sysvar::clock::id(), CommitmentConfig::default())?;
if let Some(clock_account) = result.value {
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?;
let block_time = CliBlockTime {
slot: result.context.slot,
timestamp: clock.unix_timestamp,
};
Ok(config.output_format.formatted_string(&block_time))
} else {
Err(format!("AccountNotFound: pubkey={}", sysvar::clock::id()).into())
}
}
pub fn process_cluster_version(rpc_client: &RpcClient) -> ProcessResult {
let remote_version = rpc_client.get_version()?;
Ok(remote_version.solana_core)
@@ -588,15 +647,19 @@ pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
Ok("".to_string())
}
pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResult {
pub fn process_get_block_time(
rpc_client: &RpcClient,
config: &CliConfig,
slot: Option<Slot>,
) -> ProcessResult {
let slot = if let Some(slot) = slot {
slot
} else {
rpc_client.get_slot()?
};
let timestamp = rpc_client.get_block_time(slot)?;
let result = format!(
"{} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(timestamp, 0), Utc)
.to_rfc3339_opts(SecondsFormat::Secs, true),
timestamp
);
Ok(result)
let block_time = CliBlockTime { slot, timestamp };
Ok(config.output_format.formatted_string(&block_time))
}
pub fn process_get_epoch_info(
@@ -607,8 +670,7 @@ pub fn process_get_epoch_info(
let epoch_info: CliEpochInfo = rpc_client
.get_epoch_info_with_commitment(commitment_config.clone())?
.into();
config.output_format.formatted_print(&epoch_info);
Ok("".to_string())
Ok(config.output_format.formatted_string(&epoch_info))
}
pub fn process_get_genesis_hash(rpc_client: &RpcClient) -> ProcessResult {
@@ -649,7 +711,7 @@ pub fn process_show_block_production(
slot_limit: Option<u64>,
) -> ProcessResult {
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::max())?;
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::root())?;
let epoch = epoch.unwrap_or(epoch_info.epoch);
if epoch > epoch_info.epoch {
@@ -708,7 +770,7 @@ pub fn process_show_block_production(
progress_bar.set_message(&format!("Fetching leader schedule for epoch {}...", epoch));
let leader_schedule = rpc_client
.get_leader_schedule_with_commitment(Some(start_slot), CommitmentConfig::max())?;
.get_leader_schedule_with_commitment(Some(start_slot), CommitmentConfig::root())?;
if leader_schedule.is_none() {
return Err(format!("Unable to fetch leader schedule for slot {}", start_slot).into());
}
@@ -790,8 +852,35 @@ pub fn process_show_block_production(
individual_slot_status,
verbose: config.verbose,
};
config.output_format.formatted_print(&block_production);
Ok("".to_string())
Ok(config.output_format.formatted_string(&block_production))
}
pub fn process_largest_accounts(
rpc_client: &RpcClient,
config: &CliConfig,
commitment_config: CommitmentConfig,
filter: Option<RpcLargestAccountsFilter>,
) -> ProcessResult {
let accounts = rpc_client
.get_largest_accounts_with_config(RpcLargestAccountsConfig {
commitment: Some(commitment_config),
filter,
})?
.value;
let largest_accounts = CliAccountBalances { accounts };
Ok(config.output_format.formatted_string(&largest_accounts))
}
pub fn process_supply(
rpc_client: &RpcClient,
config: &CliConfig,
commitment_config: CommitmentConfig,
print_accounts: bool,
) -> ProcessResult {
let supply_response = rpc_client.supply_with_commitment(commitment_config.clone())?;
let mut supply: CliSupply = supply_response.value.into();
supply.print_accounts = print_accounts;
Ok(config.output_format.formatted_string(&supply))
}
pub fn process_total_supply(
@@ -1120,10 +1209,9 @@ pub fn process_show_stakes(
}
}
}
config
Ok(config
.output_format
.formatted_print(&CliStakeVec::new(stake_accounts));
Ok("".to_string())
.formatted_string(&CliStakeVec::new(stake_accounts)))
}
pub fn process_show_validators(
@@ -1167,15 +1255,14 @@ pub fn process_show_validators(
delinquent_validators,
use_lamports_unit,
};
config.output_format.formatted_print(&cli_validators);
Ok("".to_string())
Ok(config.output_format.formatted_string(&cli_validators))
}
pub fn process_transaction_history(
rpc_client: &RpcClient,
address: &Pubkey,
end_slot: Option<Slot>, // None == use latest slot
slot_limit: u64,
slot_limit: Option<u64>,
) -> ProcessResult {
let end_slot = {
if let Some(end_slot) = end_slot {
@@ -1184,18 +1271,30 @@ pub fn process_transaction_history(
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
}
};
let start_slot = end_slot.saturating_sub(slot_limit);
let mut start_slot = match slot_limit {
Some(slot_limit) => end_slot.saturating_sub(slot_limit),
None => rpc_client.minimum_ledger_slot()?,
};
println!(
"Transactions affecting {} within slots [{},{}]",
address, start_slot, end_slot
);
let signatures =
rpc_client.get_confirmed_signatures_for_address(address, start_slot, end_slot)?;
for signature in &signatures {
println!("{}", signature);
let mut transaction_count = 0;
while start_slot < end_slot {
let signatures = rpc_client.get_confirmed_signatures_for_address(
address,
start_slot,
(start_slot + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE).min(end_slot),
)?;
for signature in &signatures {
println!("{}", signature);
}
transaction_count += signatures.len();
start_slot += MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;
}
Ok(format!("{} transactions found", signatures.len(),))
Ok(format!("{} transactions found", transaction_count))
}
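As a rough check on the chunked loop above: the slot window is walked in MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE-sized steps, so the number of getConfirmedSignaturesForAddress requests is the ceiling of window / range. A small sketch of that arithmetic (the constant's value comes from solana_client::rpc_request):

use solana_client::rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;

// Number of RPC round trips the pagination loop will make for a slot window.
fn request_count(start_slot: u64, end_slot: u64) -> u64 {
    let span = end_slot.saturating_sub(start_slot);
    let range = MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;
    (span + range - 1) / range // ceiling division; zero when the window is empty
}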
#[cfg(test)]
@@ -1217,6 +1316,17 @@ mod tests {
let (default_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap();
let test_cluster_version = test_commands
.clone()
.get_matches_from(vec!["test", "cluster-date"]);
assert_eq!(
parse_command(&test_cluster_version, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::ClusterDate,
signers: vec![],
}
);
let test_cluster_version = test_commands
.clone()
.get_matches_from(vec!["test", "cluster-version"]);
@@ -1245,7 +1355,7 @@ mod tests {
assert_eq!(
parse_command(&test_get_block_time, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::GetBlockTime { slot },
command: CliCommand::GetBlockTime { slot: Some(slot) },
signers: vec![],
}
);


@@ -146,7 +146,12 @@ pub fn write_transaction<W: io::Write>(
Err(err) => err.to_string(),
}
)?;
writeln!(w, "{} Fee: {}", prefix, transaction_status.fee)?;
writeln!(
w,
"{} Fee: {} SOL",
prefix,
lamports_to_sol(transaction_status.fee)
)?;
assert_eq!(
transaction_status.pre_balances.len(),
transaction_status.post_balances.len()


@@ -2,8 +2,7 @@ use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches
use console::style;
use solana_clap_utils::{
input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, offline::SIGN_ONLY_ARG,
DisplayError,
input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, DisplayError,
};
use solana_cli::{
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliSigners},
@@ -262,13 +261,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?;
config.signers = signers.iter().map(|s| s.as_ref()).collect();
let result = process_command(&config)?;
let (_, submatches) = matches.subcommand();
let sign_only = submatches
.map(|m| m.is_present(SIGN_ONLY_ARG.name))
.unwrap_or(false);
if !sign_only {
println!("{}", result);
}
println!("{}", result);
};
Ok(())
}


@@ -462,8 +462,8 @@ pub fn process_authorize_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<NonceError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<NonceError>(result, &config)
}
pub fn process_create_nonce_account(
@@ -539,8 +539,8 @@ pub fn process_create_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn process_get_nonce(rpc_client: &RpcClient, nonce_account_pubkey: &Pubkey) -> ProcessResult {
@@ -580,9 +580,8 @@ pub fn process_new_nonce(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client
.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0], nonce_authority]);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn process_show_nonce_account(
@@ -606,8 +605,7 @@ pub fn process_show_nonce_account(
nonce_account.authority = Some(data.authority.to_string());
}
config.output_format.formatted_print(&nonce_account);
Ok("".to_string())
Ok(config.output_format.formatted_string(&nonce_account))
};
match state_from_account(&nonce_account)? {
State::Uninitialized => print_account(None),
@@ -641,8 +639,8 @@ pub fn process_withdraw_from_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<NonceError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<NonceError>(result, &config)
}
#[cfg(test)]


@@ -79,32 +79,47 @@ pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let present_signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
let signer_strings = object.get("absent").unwrap().as_array().unwrap();
let absent_signers = signer_strings
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
let signer_strings = object.get("badSig").unwrap().as_array().unwrap();
let bad_signers = signer_strings
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
let mut present_signers: Vec<(Pubkey, Signature)> = Vec::new();
let signer_strings = object.get("signers");
if let Some(sig_strings) = signer_strings {
present_signers = sig_strings
.as_array()
.unwrap()
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
}
let mut absent_signers: Vec<Pubkey> = Vec::new();
let signer_strings = object.get("absent");
if let Some(sig_strings) = signer_strings {
absent_signers = sig_strings
.as_array()
.unwrap()
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
}
let mut bad_signers: Vec<Pubkey> = Vec::new();
let signer_strings = object.get("badSig");
if let Some(sig_strings) = signer_strings {
bad_signers = sig_strings
.as_array()
.unwrap()
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
}
SignOnly {
blockhash,
present_signers,
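A small test sketch for the tolerant parsing above, written in the style of this crate's unit tests (module placement and use super::* are assumptions): with CliSignOnlyData skipping empty vectors, a reply may contain only a blockhash, and the parser should still succeed.

#[cfg(test)]
mod sign_only_reply_tests {
    use super::*;

    #[test]
    fn parse_reply_with_no_signer_lists() {
        // "signers", "absent", and "badSig" are omitted entirely, mirroring
        // the skip_serializing_if behavior of CliSignOnlyData.
        let blockhash = Hash::new(&[9u8; 32]);
        let reply = format!(r#"{{"blockhash":"{}"}}"#, blockhash);
        let sign_only = parse_sign_only_reply_string(&reply);
        assert_eq!(sign_only.blockhash, blockhash);
        assert!(sign_only.present_signers.is_empty());
        assert!(sign_only.absent_signers.is_empty());
        assert!(sign_only.bad_signers.is_empty());
    }
}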


@@ -881,7 +881,7 @@ pub fn process_create_stake_account(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -894,8 +894,8 @@ pub fn process_create_stake_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
}
@@ -946,7 +946,7 @@ pub fn process_stake_authorize(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -959,8 +959,8 @@ pub fn process_stake_authorize(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
}
}
@@ -1000,7 +1000,7 @@ pub fn process_deactivate_stake_account(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1013,8 +1013,8 @@ pub fn process_deactivate_stake_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
}
}
@@ -1063,7 +1063,7 @@ pub fn process_withdraw_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1076,8 +1076,8 @@ pub fn process_withdraw_stake(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
}
@@ -1197,7 +1197,7 @@ pub fn process_split_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1210,8 +1210,8 @@ pub fn process_split_stake(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
}
}
@@ -1254,7 +1254,7 @@ pub fn process_stake_set_lockup(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1267,8 +1267,8 @@ pub fn process_stake_set_lockup(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
}
}
@@ -1341,8 +1341,7 @@ pub fn process_show_stake_account(
match stake_account.state() {
Ok(stake_state) => {
let state = build_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
config.output_format.formatted_print(&state);
Ok("".to_string())
Ok(config.output_format.formatted_string(&state))
}
Err(err) => Err(CliError::RpcRequestError(format!(
"Account data could not be deserialized to stake state: {}",
@@ -1370,8 +1369,7 @@ pub fn process_show_stake_history(
entries,
use_lamports_unit,
};
config.output_format.formatted_print(&stake_history_output);
Ok("".to_string())
Ok(config.output_format.formatted_string(&stake_history_output))
}
#[allow(clippy::too_many_arguments)]
@@ -1464,7 +1462,7 @@ pub fn process_delegate_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1477,8 +1475,8 @@ pub fn process_delegate_stake(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
}
}


@@ -242,8 +242,8 @@ pub fn process_create_storage_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn process_claim_storage_reward(
@@ -266,7 +266,7 @@ pub fn process_claim_storage_reward(
&fee_calculator,
&tx.message,
)?;
let signature = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
let signature = rpc_client.send_and_confirm_transaction_with_spinner(&tx)?;
Ok(signature.to_string())
}


@@ -367,7 +367,7 @@ pub fn process_set_validator_info(
&fee_calculator,
&tx.message,
)?;
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&tx)?;
println!("Success! Validator info published at: {:?}", info_pubkey);
println!("{}", signature_str);
@@ -410,10 +410,9 @@ pub fn process_get_validator_info(
info: validator_info,
});
}
config
Ok(config
.output_format
.formatted_print(&CliValidatorInfoVec::new(validator_info_list));
Ok("".to_string())
.formatted_string(&CliValidatorInfoVec::new(validator_info_list)))
}
#[cfg(test)]


@@ -335,8 +335,7 @@ pub fn parse_vote_get_account_command(
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let use_lamports_unit = matches.is_present("lamports");
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
command: CliCommand::ShowVoteAccount {
pubkey: vote_account_pubkey,
@@ -458,8 +457,8 @@ pub fn process_create_vote_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn process_vote_authorize(
@@ -498,9 +497,8 @@ pub fn process_vote_authorize(
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
log_instruction_custom_error::<VoteError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<VoteError>(result, &config)
}
pub fn process_vote_update_validator(
@@ -532,8 +530,8 @@ pub fn process_vote_update_validator(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<VoteError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<VoteError>(result, &config)
}
fn get_vote_account(
@@ -607,8 +605,7 @@ pub fn process_show_vote_account(
use_lamports_unit,
};
config.output_format.formatted_print(&vote_account_data);
Ok("".to_string())
Ok(config.output_format.formatted_string(&vote_account_data))
}
pub fn process_withdraw_from_vote_account(
@@ -638,9 +635,8 @@ pub fn process_withdraw_from_vote_account(
&fee_calculator,
&transaction.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut transaction, &config.signers);
log_instruction_custom_error::<VoteError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction);
log_instruction_custom_error::<VoteError>(result, &config)
}
#[cfg(test)]


@@ -1,5 +1,6 @@
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -119,6 +120,7 @@ fn full_battery_tests(
&faucet_addr,
&config_payer.signers[0].pubkey(),
2000,
&config_payer,
)
.unwrap();
check_balance(2000, &rpc_client, &config_payer.signers[0].pubkey());
@@ -275,6 +277,7 @@ fn test_create_account_with_seed() {
let offline_nonce_authority_signer = keypair_from_seed(&[1u8; 32]).unwrap();
let online_nonce_creator_signer = keypair_from_seed(&[2u8; 32]).unwrap();
let to_address = Pubkey::new(&[3u8; 32]);
let config = CliConfig::default();
// Setup accounts
let rpc_client = RpcClient::new_socket(leader_data.rpc);
@@ -283,6 +286,7 @@ fn test_create_account_with_seed() {
&faucet_addr,
&offline_nonce_authority_signer.pubkey(),
42,
&config,
)
.unwrap();
request_and_confirm_airdrop(
@@ -290,6 +294,7 @@ fn test_create_account_with_seed() {
&faucet_addr,
&online_nonce_creator_signer.pubkey(),
4242,
&config,
)
.unwrap();
check_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
@@ -344,6 +349,7 @@ fn test_create_account_with_seed() {
nonce_authority: 0,
fee_payer: 0,
};
authority_config.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&authority_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
let authority_presigner = sign_only.presigner_of(&authority_pubkey).unwrap();


@@ -2,6 +2,7 @@ use chrono::prelude::*;
use serde_json::Value;
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig, PayCommand},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -69,6 +70,7 @@ fn test_cli_timestamp_tx() {
&faucet_addr,
&config_payer.signers[0].pubkey(),
50,
&config_witness,
)
.unwrap();
check_balance(50, &rpc_client, &config_payer.signers[0].pubkey());
@@ -78,6 +80,7 @@ fn test_cli_timestamp_tx() {
&faucet_addr,
&config_witness.signers[0].pubkey(),
1,
&config_witness,
)
.unwrap();
@@ -154,6 +157,7 @@ fn test_cli_witness_tx() {
&faucet_addr,
&config_payer.signers[0].pubkey(),
50,
&config_witness,
)
.unwrap();
request_and_confirm_airdrop(
@@ -161,6 +165,7 @@ fn test_cli_witness_tx() {
&faucet_addr,
&config_witness.signers[0].pubkey(),
1,
&config_witness,
)
.unwrap();
@@ -234,6 +239,7 @@ fn test_cli_cancel_tx() {
&faucet_addr,
&config_payer.signers[0].pubkey(),
50,
&config_witness,
)
.unwrap();
@@ -307,6 +313,7 @@ fn test_offline_pay_tx() {
&faucet_addr,
&config_offline.signers[0].pubkey(),
50,
&config_offline,
)
.unwrap();
@@ -315,6 +322,7 @@ fn test_offline_pay_tx() {
&faucet_addr,
&config_online.signers[0].pubkey(),
50,
&config_offline,
)
.unwrap();
check_balance(50, &rpc_client, &config_offline.signers[0].pubkey());
@@ -328,6 +336,7 @@ fn test_offline_pay_tx() {
sign_only: true,
..PayCommand::default()
});
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
check_balance(50, &rpc_client, &config_offline.signers[0].pubkey());
@@ -388,6 +397,7 @@ fn test_nonced_pay_tx() {
&faucet_addr,
&config.signers[0].pubkey(),
50 + minimum_nonce_balance,
&config,
)
.unwrap();
check_balance(


@@ -1,5 +1,6 @@
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -59,6 +60,7 @@ fn test_stake_delegation_force() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -155,6 +157,7 @@ fn test_seed_stake_delegation_and_deactivation() {
&faucet_addr,
&config_validator.signers[0].pubkey(),
100_000,
&config_validator,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
@@ -245,6 +248,7 @@ fn test_stake_delegation_and_deactivation() {
&faucet_addr,
&config_validator.signers[0].pubkey(),
100_000,
&config_validator,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
@@ -341,6 +345,7 @@ fn test_offline_stake_delegation_and_deactivation() {
&faucet_addr,
&config_validator.signers[0].pubkey(),
100_000,
&config_offline,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
@@ -350,6 +355,7 @@ fn test_offline_stake_delegation_and_deactivation() {
&faucet_addr,
&config_offline.signers[0].pubkey(),
100_000,
&config_validator,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey());
@@ -385,6 +391,7 @@ fn test_offline_stake_delegation_and_deactivation() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
@@ -470,6 +477,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -579,6 +587,7 @@ fn test_stake_authorize() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -596,6 +605,7 @@ fn test_stake_authorize() {
&faucet_addr,
&config_offline.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -703,6 +713,7 @@ fn test_stake_authorize() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sign_reply = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_reply);
assert!(sign_only.has_all_signers());
@@ -841,13 +852,16 @@ fn test_stake_authorize_with_fee_payer() {
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &default_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &default_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &payer_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &payer_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &payer_pubkey);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
@@ -901,6 +915,7 @@ fn test_stake_authorize_with_fee_payer() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sign_reply = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_reply);
assert!(sign_only.has_all_signers());
@@ -966,11 +981,13 @@ fn test_stake_split() {
&faucet_addr,
&config.signers[0].pubkey(),
500_000,
&config,
)
.unwrap();
check_balance(500_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
@@ -1038,6 +1055,7 @@ fn test_stake_split() {
lamports: 2 * minimum_stake_balance,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
@@ -1114,11 +1132,13 @@ fn test_stake_set_lockup() {
&faucet_addr,
&config.signers[0].pubkey(),
500_000,
&config,
)
.unwrap();
check_balance(500_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
@@ -1292,6 +1312,7 @@ fn test_stake_set_lockup() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
@@ -1364,11 +1385,13 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
&faucet_addr,
&config.signers[0].pubkey(),
200_000,
&config,
)
.unwrap();
check_balance(200_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create nonce account
@@ -1410,6 +1433,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
fee_payer: 0,
from: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());


@@ -1,5 +1,6 @@
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -59,7 +60,8 @@ fn test_transfer() {
let sender_pubkey = config.signers[0].pubkey();
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &sender_pubkey, 50_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &sender_pubkey, 50_000, &config)
.unwrap();
check_balance(50_000, &rpc_client, &sender_pubkey);
check_balance(0, &rpc_client, &recipient_pubkey);
@@ -87,7 +89,7 @@ fn test_transfer() {
process_command(&offline).unwrap_err();
let offline_pubkey = offline.signers[0].pubkey();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 50).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 50, &config).unwrap();
check_balance(50, &rpc_client, &offline_pubkey);
// Offline transfer
@@ -103,6 +105,7 @@ fn test_transfer() {
nonce_authority: 0,
fee_payer: 0,
};
offline.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(sign_only.has_all_signers());
@@ -247,16 +250,24 @@ fn test_transfer_multisession_signing() {
let offline_from_signer = keypair_from_seed(&[2u8; 32]).unwrap();
let offline_fee_payer_signer = keypair_from_seed(&[3u8; 32]).unwrap();
let from_null_signer = NullSigner::new(&offline_from_signer.pubkey());
let config = CliConfig::default();
// Setup accounts
let rpc_client = RpcClient::new_socket(leader_data.rpc);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_from_signer.pubkey(), 43)
.unwrap();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&offline_from_signer.pubkey(),
43,
&config,
)
.unwrap();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&offline_fee_payer_signer.pubkey(),
3,
&config,
)
.unwrap();
check_balance(43, &rpc_client, &offline_from_signer.pubkey());
@@ -283,6 +294,7 @@ fn test_transfer_multisession_signing() {
nonce_authority: 0,
fee_payer: 0,
};
fee_payer_config.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&fee_payer_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(!sign_only.has_all_signers());
@@ -308,6 +320,7 @@ fn test_transfer_multisession_signing() {
nonce_authority: 0,
fee_payer: 0,
};
from_config.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&from_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(sign_only.has_all_signers());


@@ -48,6 +48,7 @@ fn test_vote_authorize_and_withdraw() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();


@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.1.8"
version = "1.1.15"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-transaction-status = { path = "../transaction-status", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-vote-program = { path = "../programs/vote", version = "1.1.8" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-vote-program = { path = "../programs/vote", version = "1.1.15" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -31,7 +31,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.0.5"
jsonrpc-http-server = "14.0.6"
solana-logger = { path = "../logger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.15" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -50,28 +50,29 @@ impl Into<TransportError> for ClientErrorKind {
#[derive(Error, Debug)]
#[error("{kind}")]
pub struct ClientError {
command: Option<&'static str>,
request: Option<rpc_request::RpcRequest>,
#[source]
kind: ClientErrorKind,
}
impl ClientError {
pub fn new_with_command(kind: ClientErrorKind, command: &'static str) -> Self {
pub fn new_with_request(kind: ClientErrorKind, request: rpc_request::RpcRequest) -> Self {
Self {
command: Some(command),
request: Some(request),
kind,
}
}
pub fn into_with_command(self, command: &'static str) -> Self {
pub fn into_with_request(self, request: rpc_request::RpcRequest) -> Self {
Self {
command: Some(command),
request: Some(request),
..self
}
}
pub fn command(&self) -> Option<&'static str> {
self.command
pub fn request(&self) -> Option<&rpc_request::RpcRequest> {
self.request.as_ref()
}
pub fn kind(&self) -> &ClientErrorKind {
@@ -82,7 +83,7 @@ impl ClientError {
impl From<ClientErrorKind> for ClientError {
fn from(kind: ClientErrorKind) -> Self {
Self {
command: None,
request: None,
kind,
}
}
@@ -91,7 +92,7 @@ impl From<ClientErrorKind> for ClientError {
impl From<TransportError> for ClientError {
fn from(err: TransportError) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -106,7 +107,7 @@ impl Into<TransportError> for ClientError {
impl From<std::io::Error> for ClientError {
fn from(err: std::io::Error) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -115,7 +116,7 @@ impl From<std::io::Error> for ClientError {
impl From<reqwest::Error> for ClientError {
fn from(err: reqwest::Error) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -124,7 +125,7 @@ impl From<reqwest::Error> for ClientError {
impl From<rpc_request::RpcError> for ClientError {
fn from(err: rpc_request::RpcError) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -133,7 +134,7 @@ impl From<rpc_request::RpcError> for ClientError {
impl From<serde_json::error::Error> for ClientError {
fn from(err: serde_json::error::Error) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -142,7 +143,7 @@ impl From<serde_json::error::Error> for ClientError {
impl From<SignerError> for ClientError {
fn from(err: SignerError) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -151,7 +152,7 @@ impl From<SignerError> for ClientError {
impl From<TransactionError> for ClientError {
fn from(err: TransactionError) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
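A hedged consumer-side sketch of the request-aware ClientError above: errors are now tagged with, and matched against, the originating RpcRequest instead of a free-form command string.

use solana_client::{
    client_error::ClientError,
    rpc_request::{RpcError, RpcRequest},
};

// Tag a low-level RPC error with the request that produced it.
fn tag_signature_status_error(err: RpcError) -> ClientError {
    ClientError::new_with_request(err.into(), RpcRequest::GetSignatureStatuses)
}

// Callers can match on the structured request rather than parsing a string.
fn failed_signature_statuses(err: &ClientError) -> bool {
    err.request() == Some(&RpcRequest::GetSignatureStatuses)
}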


@@ -1,10 +1,5 @@
use crate::{client_error::Result, rpc_request::RpcRequest};
pub(crate) trait GenericRpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: serde_json::Value,
retries: usize,
) -> Result<serde_json::Value>;
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value>;
}


@@ -8,6 +8,7 @@ pub mod perf_utils;
pub mod pubsub_client;
pub mod rpc_client;
pub mod rpc_client_request;
pub mod rpc_config;
pub mod rpc_request;
pub mod rpc_response;
pub mod thin_client;


@@ -38,13 +38,8 @@ impl MockRpcClientRequest {
}
impl GenericRpcClientRequest for MockRpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: serde_json::Value,
_retries: usize,
) -> Result<serde_json::Value> {
if let Some(value) = self.mocks.write().unwrap().remove(request) {
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
if let Some(value) = self.mocks.write().unwrap().remove(&request) {
return Ok(value);
}
if self.url == "fails" {


@@ -3,6 +3,7 @@ use crate::{
generic_rpc_client_request::GenericRpcClientRequest,
mock_rpc_client_request::{MockRpcClientRequest, Mocks},
rpc_client_request::RpcClientRequest,
rpc_config::RpcLargestAccountsConfig,
rpc_request::{RpcError, RpcRequest},
rpc_response::*,
};
@@ -24,7 +25,7 @@ use solana_sdk::{
pubkey::Pubkey,
signature::Signature,
signers::Signers,
transaction::{self, Transaction, TransactionError},
transaction::{self, Transaction},
};
use solana_transaction_status::{
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
@@ -96,35 +97,40 @@ impl RpcClient {
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
let response =
self.client
.send(&RpcRequest::SendTransaction, json!([serialized_encoded]), 5)?;
match response.as_str() {
None => {
Err(RpcError::ForUser("Received result of an unexpected type".to_string()).into())
}
Some(signature_base58_str) => {
let signature = signature_base58_str.parse::<Signature>().map_err(|err| {
Into::<ClientError>::into(RpcError::ParseError(err.to_string()))
})?;
// A mismatching RPC response signature indicates an issue with the RPC node, and
// should not be passed along to confirmation methods. The transaction may or may
// not have been submitted to the cluster, so callers should verify the success of
// the correct transaction signature independently.
if signature != transaction.signatures[0] {
Err(RpcError::RpcRequestError(format!(
"RPC node returned mismatched signature {:?}, expected {:?}",
signature, transaction.signatures[0]
))
.into())
} else {
Ok(transaction.signatures[0])
}
}
let signature_base58_str: String =
self.send(RpcRequest::SendTransaction, json!([serialized_encoded]))?;
let signature = signature_base58_str
.parse::<Signature>()
.map_err(|err| Into::<ClientError>::into(RpcError::ParseError(err.to_string())))?;
// A mismatching RPC response signature indicates an issue with the RPC node, and
// should not be passed along to confirmation methods. The transaction may or may
// not have been submitted to the cluster, so callers should verify the success of
// the correct transaction signature independently.
if signature != transaction.signatures[0] {
Err(RpcError::RpcRequestError(format!(
"RPC node returned mismatched signature {:?}, expected {:?}",
signature, transaction.signatures[0]
))
.into())
} else {
Ok(transaction.signatures[0])
}
}
pub fn simulate_transaction(
&self,
transaction: &Transaction,
sig_verify: bool,
) -> RpcResult<TransactionStatus> {
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
self.send(
RpcRequest::SimulateTransaction,
json!([serialized_encoded, { "sigVerify": sig_verify }]),
)
}
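For context, a hedged usage sketch of the new simulate_transaction: per its signature in this diff the result deserializes into Response<TransactionStatus>, and sig_verify = false asks the node to skip signature verification. Names other than those in the diff are illustrative.

// Sketch only: simulate before submitting; `tx` is any signed Transaction.
let sim = rpc_client.simulate_transaction(&tx, false)?; // Response<TransactionStatus>
let simulated_at_slot = sim.context.slot;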
pub fn get_signature_status(
&self,
signature: &Signature,
@@ -137,11 +143,7 @@ impl RpcClient {
signatures: &[Signature],
) -> RpcResult<Vec<Option<TransactionStatus>>> {
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
let signature_status =
self.client
.send(&RpcRequest::GetSignatureStatuses, json!([signatures]), 5)?;
Ok(serde_json::from_value(signature_status)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?)
self.send(RpcRequest::GetSignatureStatuses, json!([signatures]))
}
pub fn get_signature_status_with_commitment(
@@ -149,14 +151,10 @@ impl RpcClient {
signature: &Signature,
commitment_config: CommitmentConfig,
) -> ClientResult<Option<transaction::Result<()>>> {
let signature_status = self.client.send(
&RpcRequest::GetSignatureStatuses,
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
5,
)?;
let result: Response<Vec<Option<TransactionStatus>>> =
serde_json::from_value(signature_status)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
Ok(result.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment_config))
@@ -169,16 +167,12 @@ impl RpcClient {
commitment_config: CommitmentConfig,
search_transaction_history: bool,
) -> ClientResult<Option<transaction::Result<()>>> {
let signature_status = self.client.send(
&RpcRequest::GetSignatureStatuses,
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()], {
"searchTransactionHistory": search_transaction_history
}]),
5,
)?;
let result: Response<Vec<Option<TransactionStatus>>> =
serde_json::from_value(signature_status)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
Ok(result.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment_config))
@@ -193,13 +187,14 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<Slot> {
let response = self
.client
.send(&RpcRequest::GetSlot, json!([commitment_config]), 0)
.map_err(|err| err.into_with_command("GetSlot"))?;
self.send(RpcRequest::GetSlot, json!([commitment_config]))
}
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSlot"))
pub fn supply_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> RpcResult<RpcSupply> {
self.send(RpcRequest::GetSupply, json!([commitment_config]))
}
pub fn total_supply(&self) -> ClientResult<u64> {
@@ -210,13 +205,14 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
let response = self
.client
.send(&RpcRequest::GetTotalSupply, json!([commitment_config]), 0)
.map_err(|err| err.into_with_command("GetTotalSupply"))?;
self.send(RpcRequest::GetTotalSupply, json!([commitment_config]))
}
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetTotalSupply"))
pub fn get_largest_accounts_with_config(
&self,
config: RpcLargestAccountsConfig,
) -> RpcResult<Vec<RpcAccountBalance>> {
self.send(RpcRequest::GetLargestAccounts, json!([config]))
}
pub fn get_vote_accounts(&self) -> ClientResult<RpcVoteAccountStatus> {
@@ -227,23 +223,11 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<RpcVoteAccountStatus> {
let response = self
.client
.send(&RpcRequest::GetVoteAccounts, json!([commitment_config]), 0)
.map_err(|err| err.into_with_command("GetVoteAccounts"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetVoteAccounts"))
self.send(RpcRequest::GetVoteAccounts, json!([commitment_config]))
}
pub fn get_cluster_nodes(&self) -> ClientResult<Vec<RpcContactInfo>> {
let response = self
.client
.send(&RpcRequest::GetClusterNodes, Value::Null, 0)
.map_err(|err| err.into_with_command("GetClusterNodes"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetClusterNodes"))
self.send(RpcRequest::GetClusterNodes, Value::Null)
}
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<ConfirmedBlock> {
@@ -255,13 +239,7 @@ impl RpcClient {
slot: Slot,
encoding: TransactionEncoding,
) -> ClientResult<ConfirmedBlock> {
let response = self
.client
.send(&RpcRequest::GetConfirmedBlock, json!([slot, encoding]), 0)
.map_err(|err| err.into_with_command("GetConfirmedBlock"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedBlock"))
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]))
}
pub fn get_confirmed_blocks(
@@ -269,17 +247,10 @@ impl RpcClient {
start_slot: Slot,
end_slot: Option<Slot>,
) -> ClientResult<Vec<Slot>> {
let response = self
.client
.send(
&RpcRequest::GetConfirmedBlocks,
json!([start_slot, end_slot]),
0,
)
.map_err(|err| err.into_with_command("GetConfirmedBlocks"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedBlocks"))
self.send(
RpcRequest::GetConfirmedBlocks,
json!([start_slot, end_slot]),
)
}
pub fn get_confirmed_signatures_for_address(
@@ -288,19 +259,10 @@ impl RpcClient {
start_slot: Slot,
end_slot: Slot,
) -> ClientResult<Vec<Signature>> {
let response = self
.client
.send(
&RpcRequest::GetConfirmedSignaturesForAddress,
json!([address.to_string(), start_slot, end_slot]),
0,
)
.map_err(|err| err.into_with_command("GetConfirmedSignaturesForAddress"))?;
let signatures_base58_str: Vec<String> =
serde_json::from_value(response).map_err(|err| {
ClientError::new_with_command(err.into(), "GetConfirmedSignaturesForAddress")
})?;
let signatures_base58_str: Vec<String> = self.send(
RpcRequest::GetConfirmedSignaturesForAddress,
json!([address.to_string(), start_slot, end_slot]),
)?;
let mut signatures = vec![];
for signature_base58_str in signatures_base58_str {
@@ -318,23 +280,15 @@ impl RpcClient {
signature: &Signature,
encoding: TransactionEncoding,
) -> ClientResult<ConfirmedTransaction> {
let response = self
.client
.send(
&RpcRequest::GetConfirmedTransaction,
json!([signature.to_string(), encoding]),
0,
)
.map_err(|err| err.into_with_command("GetConfirmedTransaction"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedTransaction"))
self.send(
RpcRequest::GetConfirmedTransaction,
json!([signature.to_string(), encoding]),
)
}
pub fn get_block_time(&self, slot: Slot) -> ClientResult<UnixTimestamp> {
let response = self
.client
.send(&RpcRequest::GetBlockTime, json!([slot]), 0);
let request = RpcRequest::GetBlockTime;
let response = self.client.send(request, json!([slot]));
response
.map(|result_json| {
@@ -342,11 +296,11 @@ impl RpcClient {
return Err(RpcError::ForUser(format!("Block Not Found: slot={}", slot)).into());
}
let result = serde_json::from_value(result_json)
.map_err(|err| ClientError::new_with_command(err.into(), "GetBlockTime"))?;
.map_err(|err| ClientError::new_with_request(err.into(), request))?;
trace!("Response block timestamp {:?} {:?}", slot, result);
Ok(result)
})
.map_err(|err| err.into_with_command("GetBlockTime"))?
.map_err(|err| err.into_with_request(request))?
}
pub fn get_epoch_info(&self) -> ClientResult<RpcEpochInfo> {
@@ -357,13 +311,7 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<RpcEpochInfo> {
let response = self
.client
.send(&RpcRequest::GetEpochInfo, json!([commitment_config]), 0)
.map_err(|err| err.into_with_command("GetEpochInfo"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetEpochInfo"))
self.send(RpcRequest::GetEpochInfo, json!([commitment_config]))
}
pub fn get_leader_schedule(
@@ -378,81 +326,42 @@ impl RpcClient {
slot: Option<Slot>,
commitment_config: CommitmentConfig,
) -> ClientResult<Option<RpcLeaderSchedule>> {
let response = self
.client
.send(
&RpcRequest::GetLeaderSchedule,
json!([slot, commitment_config]),
0,
)
.map_err(|err| err.into_with_command("GetLeaderSchedule"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetLeaderSchedule"))
self.send(
RpcRequest::GetLeaderSchedule,
json!([slot, commitment_config]),
)
}
pub fn get_epoch_schedule(&self) -> ClientResult<EpochSchedule> {
let response = self
.client
.send(&RpcRequest::GetEpochSchedule, Value::Null, 0)
.map_err(|err| err.into_with_command("GetEpochSchedule"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetEpochSchedule"))
self.send(RpcRequest::GetEpochSchedule, Value::Null)
}
pub fn get_identity(&self) -> ClientResult<Pubkey> {
let response = self
.client
.send(&RpcRequest::GetIdentity, Value::Null, 0)
.map_err(|err| err.into_with_command("GetIdentity"))?;
let rpc_identity: RpcIdentity = self.send(RpcRequest::GetIdentity, Value::Null)?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetIdentity"))
.and_then(|rpc_identity: RpcIdentity| {
rpc_identity.identity.parse::<Pubkey>().map_err(|_| {
ClientError::new_with_command(
RpcError::ParseError("Pubkey".to_string()).into(),
"GetIdentity",
)
})
})
rpc_identity.identity.parse::<Pubkey>().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Pubkey".to_string()).into(),
RpcRequest::GetIdentity,
)
})
}
pub fn get_inflation(&self) -> ClientResult<Inflation> {
let response = self
.client
.send(&RpcRequest::GetInflation, Value::Null, 0)
.map_err(|err| err.into_with_command("GetInflation"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetInflation"))
self.send(RpcRequest::GetInflation, Value::Null)
}
pub fn get_version(&self) -> ClientResult<RpcVersionInfo> {
let response = self
.client
.send(&RpcRequest::GetVersion, Value::Null, 0)
.map_err(|err| err.into_with_command("GetVersion"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetVersion"))
self.send(RpcRequest::GetVersion, Value::Null)
}
pub fn minimum_ledger_slot(&self) -> ClientResult<Slot> {
let response = self
.client
.send(&RpcRequest::MinimumLedgerSlot, Value::Null, 0)
.map_err(|err| err.into_with_command("MinimumLedgerSlot"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "MinimumLedgerSlot"))
self.send(RpcRequest::MinimumLedgerSlot, Value::Null)
}
pub fn send_and_confirm_transaction<T: Signers>(
pub fn send_and_confirm_transaction(
&self,
transaction: &mut Transaction,
signer_keys: &T,
transaction: &Transaction,
) -> ClientResult<Signature> {
let mut send_retries = 20;
loop {
@@ -476,11 +385,6 @@ impl RpcClient {
send_retries = if let Some(result) = status.clone() {
match result {
Ok(_) => return Ok(signature),
Err(TransactionError::AccountInUse) => {
// Fetch a new blockhash and re-sign the transaction before sending it again
self.resign_transaction(transaction, signer_keys)?;
send_retries - 1
}
Err(_) => 0,
}
} else {
@@ -491,7 +395,9 @@ impl RpcClient {
return Err(err.unwrap_err().into());
} else {
return Err(
RpcError::ForUser("unable to confirm transaction. This can happen in situations such as transaction expiration and insufficient fee-payer funds".to_string()).into(),
RpcError::ForUser("unable to confirm transaction. \
This can happen in situations such as transaction expiration \
and insufficient fee-payer funds".to_string()).into(),
);
}
}
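Callers adapt as in the updated tests further down: the transaction is passed by shared reference and no signer slice is supplied, since the AccountInUse re-sign path is gone. A brief hedged sketch with illustrative values:

// Sketch: sign once up front; the client no longer re-signs on AccountInUse.
let tx = system_transaction::transfer(&payer, &to, lamports, recent_blockhash);
let signature = rpc_client.send_and_confirm_transaction(&tx)?;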
@@ -580,20 +486,17 @@ impl RpcClient {
pub fn retry_get_balance(
&self,
pubkey: &Pubkey,
retries: usize,
_retries: usize,
) -> Result<Option<u64>, Box<dyn error::Error>> {
let request = RpcRequest::GetBalance;
let balance_json = self
.client
.send(
&RpcRequest::GetBalance,
json!([pubkey.to_string()]),
retries,
)
.map_err(|err| err.into_with_command("RetryGetBalance"))?;
.send(request, json!([pubkey.to_string()]))
.map_err(|err| err.into_with_request(request))?;
Ok(Some(
serde_json::from_value::<Response<u64>>(balance_json)
.map_err(|err| ClientError::new_with_command(err.into(), "RetryGetBalance"))?
.map_err(|err| ClientError::new_with_request(err.into(), request))?
.value,
))
}
@@ -610,9 +513,8 @@ impl RpcClient {
commitment_config: CommitmentConfig,
) -> RpcResult<Option<Account>> {
let response = self.client.send(
&RpcRequest::GetAccountInfo,
RpcRequest::GetAccountInfo,
json!([pubkey.to_string(), commitment_config]),
0,
);
response
@@ -646,18 +548,14 @@ impl RpcClient {
}
pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> ClientResult<u64> {
let request = RpcRequest::GetMinimumBalanceForRentExemption;
let minimum_balance_json = self
.client
.send(
&RpcRequest::GetMinimumBalanceForRentExemption,
json!([data_len]),
0,
)
.map_err(|err| err.into_with_command("GetMinimumBalanceForRentExemption"))?;
.send(request, json!([data_len]))
.map_err(|err| err.into_with_request(request))?;
let minimum_balance: u64 = serde_json::from_value(minimum_balance_json).map_err(|err| {
ClientError::new_with_command(err.into(), "GetMinimumBalanceForRentExemption")
})?;
let minimum_balance: u64 = serde_json::from_value(minimum_balance_json)
.map_err(|err| ClientError::new_with_request(err.into(), request))?;
trace!(
"Response minimum balance {:?} {:?}",
data_len,
@@ -678,39 +576,21 @@ impl RpcClient {
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<u64> {
let balance_json = self
.client
.send(
&RpcRequest::GetBalance,
json!([pubkey.to_string(), commitment_config]),
0,
)
.map_err(|err| err.into_with_command("GetBalance"))?;
serde_json::from_value::<Response<u64>>(balance_json)
.map_err(|err| ClientError::new_with_command(err.into(), "GetBalance"))
self.send(
RpcRequest::GetBalance,
json!([pubkey.to_string(), commitment_config]),
)
}
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> ClientResult<Vec<(Pubkey, Account)>> {
let response = self
.client
.send(
&RpcRequest::GetProgramAccounts,
json!([pubkey.to_string()]),
0,
)
.map_err(|err| err.into_with_command("GetProgramAccounts"))?;
let accounts: Vec<RpcKeyedAccount> =
serde_json::from_value::<Vec<RpcKeyedAccount>>(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetProgramAccounts"))?;
self.send(RpcRequest::GetProgramAccounts, json!([pubkey.to_string()]))?;
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
let pubkey = pubkey.parse().map_err(|_| {
ClientError::new_with_command(
ClientError::new_with_request(
RpcError::ParseError("Pubkey".to_string()).into(),
"GetProgramAccounts",
RpcRequest::GetProgramAccounts,
)
})?;
pubkey_accounts.push((pubkey, account.decode().unwrap()));
@@ -727,17 +607,7 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
let response = self
.client
.send(
&RpcRequest::GetTransactionCount,
json!([commitment_config]),
0,
)
.map_err(|err| err.into_with_command("GetTransactionCount"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetTransactionCount"))
self.send(RpcRequest::GetTransactionCount, json!([commitment_config]))
}
pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
@@ -750,15 +620,6 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> RpcResult<(Hash, FeeCalculator)> {
let response = self
.client
.send(
&RpcRequest::GetRecentBlockhash,
json!([commitment_config]),
0,
)
.map_err(|err| err.into_with_command("GetRecentBlockhash"))?;
let Response {
context,
value:
@@ -766,12 +627,15 @@ impl RpcClient {
blockhash,
fee_calculator,
},
} = serde_json::from_value::<Response<RpcBlockhashFeeCalculator>>(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetRecentBlockhash"))?;
} = self.send::<Response<RpcBlockhashFeeCalculator>>(
RpcRequest::GetRecentBlockhash,
json!([commitment_config]),
)?;
let blockhash = blockhash.parse().map_err(|_| {
ClientError::new_with_command(
ClientError::new_with_request(
RpcError::ParseError("Hash".to_string()).into(),
"GetRecentBlockhash",
RpcRequest::GetRecentBlockhash,
)
})?;
Ok(Response {
@@ -784,31 +648,21 @@ impl RpcClient {
&self,
blockhash: &Hash,
) -> ClientResult<Option<FeeCalculator>> {
let response = self
.client
.send(
&RpcRequest::GetFeeCalculatorForBlockhash,
json!([blockhash.to_string()]),
0,
)
.map_err(|err| err.into_with_command("GetFeeCalculatorForBlockhash"))?;
let Response { value, .. } = serde_json::from_value::<Response<Option<RpcFeeCalculator>>>(
response,
)
.map_err(|e| ClientError::new_with_command(e.into(), "GetFeeCalculatorForBlockhash"))?;
let Response { value, .. } = self.send::<Response<Option<RpcFeeCalculator>>>(
RpcRequest::GetFeeCalculatorForBlockhash,
json!([blockhash.to_string()]),
)?;
Ok(value.map(|rf| rf.fee_calculator))
}
pub fn get_fee_rate_governor(&self) -> RpcResult<FeeRateGovernor> {
let response = self
.client
.send(&RpcRequest::GetFeeRateGovernor, Value::Null, 0)
.map_err(|err| err.into_with_command("GetFeeRateGovernor"))?;
let Response {
context,
value: RpcFeeRateGovernor { fee_rate_governor },
} = serde_json::from_value::<Response<RpcFeeRateGovernor>>(response)
.map_err(|e| ClientError::new_with_command(e.into(), "GetFeeRateGovernor"))?;
} =
self.send::<Response<RpcFeeRateGovernor>>(RpcRequest::GetFeeRateGovernor, Value::Null)?;
Ok(Response {
context,
value: fee_rate_governor,
@@ -842,18 +696,11 @@ impl RpcClient {
}
pub fn get_genesis_hash(&self) -> ClientResult<Hash> {
let response = self
.client
.send(&RpcRequest::GetGenesisHash, Value::Null, 0)
.map_err(|err| err.into_with_command("GetGenesisHash"))?;
let hash = serde_json::from_value::<String>(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetGenesisHash"))?;
let hash = hash.parse().map_err(|_| {
ClientError::new_with_command(
let hash_str: String = self.send(RpcRequest::GetGenesisHash, Value::Null)?;
let hash = hash_str.parse().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Hash".to_string()).into(),
"GetGenesisHash",
RpcRequest::GetGenesisHash,
)
})?;
Ok(hash)
@@ -909,7 +756,7 @@ impl RpcClient {
return balance_result.ok();
}
trace!(
"retry_get_balance[{}] {:?} {:?}",
"wait_for_balance_with_commitment [{}] {:?} {:?}",
run,
balance_result,
expected_balance
@@ -1042,23 +889,17 @@ impl RpcClient {
&self,
signature: &Signature,
) -> ClientResult<usize> {
let response = self
.client
.send(
&RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
5,
)
.map_err(|err| err.into_with_command("GetSignatureStatuses"))?;
let result: Response<Vec<Option<TransactionStatus>>> = serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
)?;
let confirmations = result.value[0]
.clone()
.ok_or_else(|| {
ClientError::new_with_command(
ClientError::new_with_request(
ClientErrorKind::Custom("signature not found".to_string()),
"GetSignatureStatuses",
RpcRequest::GetSignatureStatuses,
)
})?
.confirmations
@@ -1066,10 +907,9 @@ impl RpcClient {
Ok(confirmations)
}
pub fn send_and_confirm_transaction_with_spinner<T: Signers>(
pub fn send_and_confirm_transaction_with_spinner(
&self,
transaction: &mut Transaction,
signer_keys: &T,
transaction: &Transaction,
) -> ClientResult<Signature> {
let mut confirmations = 0;
@@ -1106,11 +946,6 @@ impl RpcClient {
send_retries = if let Some(result) = status.clone() {
match result {
Ok(_) => 0,
Err(TransactionError::AccountInUse) => {
// Fetch a new blockhash and re-sign the transaction before sending it again
self.resign_transaction(transaction, signer_keys)?;
send_retries - 1
}
// If transaction errors, return right away; no point in counting confirmations
Err(_) => 0,
}
@@ -1128,9 +963,13 @@ impl RpcClient {
}
}
} else {
return Err(
RpcError::ForUser("unable to confirm transaction. This can happen in situations such as transaction expiration and insufficient fee-payer funds".to_string()).into(),
);
return Err(RpcError::ForUser(
"unable to confirm transaction. \
This can happen in situations such as transaction \
expiration and insufficient fee-payer funds"
.to_string(),
)
.into());
}
}
};
@@ -1155,24 +994,29 @@ impl RpcClient {
.unwrap_or(confirmations);
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
return Err(
RpcError::ForUser("transaction not finalized. This can happen when a transaction lands in an abandoned fork. Please retry.".to_string()).into(),
RpcError::ForUser("transaction not finalized. \
This can happen when a transaction lands in an abandoned fork. \
Please retry.".to_string()).into(),
);
}
}
}
pub fn validator_exit(&self) -> ClientResult<bool> {
let response = self
.client
.send(&RpcRequest::ValidatorExit, Value::Null, 0)
.map_err(|err| err.into_with_command("ValidatorExit"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "ValidatorExit"))
self.send(RpcRequest::ValidatorExit, Value::Null)
}
pub fn send(&self, request: &RpcRequest, params: Value, retries: usize) -> ClientResult<Value> {
pub fn send<T>(&self, request: RpcRequest, params: Value) -> ClientResult<T>
where
T: serde::de::DeserializeOwned,
{
assert!(params.is_array() || params.is_null());
self.client.send(request, params, retries)
let response = self
.client
.send(request, params)
.map_err(|err| err.into_with_request(request))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_request(err.into(), request))
}
}
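To summarize the refactor, a hedged sketch of the new typed send: deserialization and request-tagged error handling happen once inside send::<T>(), so the per-method wrappers above collapse to a single call (imports of json!, CommitmentConfig, and the response types are assumed).

// Sketch only, using names introduced in this diff.
fn example(client: &RpcClient) -> ClientResult<()> {
    let slot: Slot = client.send(RpcRequest::GetSlot, json!([CommitmentConfig::default()]))?;
    let supply: Response<RpcSupply> =
        client.send(RpcRequest::GetSupply, json!([CommitmentConfig::default()]))?;
    println!("slot {} circulating {}", slot, supply.value.circulating);
    Ok(())
}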
@@ -1200,7 +1044,6 @@ mod tests {
use jsonrpc_core::{Error, IoHandler, Params};
use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder};
use serde_json::Number;
use solana_logger;
use solana_sdk::{
instruction::InstructionError, signature::Keypair, system_transaction,
transaction::TransactionError,
@@ -1242,62 +1085,25 @@ mod tests {
let rpc_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(rpc_addr);
let balance = rpc_client.send(
&RpcRequest::GetBalance,
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]),
0,
);
assert_eq!(balance.unwrap().as_u64().unwrap(), 50);
let balance: u64 = rpc_client
.send(
RpcRequest::GetBalance,
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]),
)
.unwrap();
assert_eq!(balance, 50);
let blockhash = rpc_client.send(&RpcRequest::GetRecentBlockhash, Value::Null, 0);
assert_eq!(
blockhash.unwrap().as_str().unwrap(),
"deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"
);
let blockhash: String = rpc_client
.send(RpcRequest::GetRecentBlockhash, Value::Null)
.unwrap();
assert_eq!(blockhash, "deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx");
// Send erroneous parameter
let blockhash = rpc_client.send(&RpcRequest::GetRecentBlockhash, json!(["parameter"]), 0);
let blockhash: ClientResult<String> =
rpc_client.send(RpcRequest::GetRecentBlockhash, json!(["parameter"]));
assert_eq!(blockhash.is_err(), true);
}
#[test]
fn test_retry_send() {
solana_logger::setup();
let (sender, receiver) = channel();
thread::spawn(move || {
// 1. Pick a random port
// 2. Tell the client to start using it
// 3. Delay for 1.5 seconds before starting the server to ensure the client will fail
// and need to retry
let rpc_addr: SocketAddr = "0.0.0.0:4242".parse().unwrap();
sender.send(rpc_addr.clone()).unwrap();
sleep(Duration::from_millis(1500));
let mut io = IoHandler::default();
io.add_method("getBalance", move |_params: Params| {
Ok(Value::Number(Number::from(5)))
});
let server = ServerBuilder::new(io)
.threads(1)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
.start_http(&rpc_addr)
.expect("Unable to start RPC server");
server.wait();
});
let rpc_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(rpc_addr);
let balance = rpc_client.send(
&RpcRequest::GetBalance,
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhw"]),
10,
);
assert_eq!(balance.unwrap().as_u64().unwrap(), 5);
}
#[test]
fn test_send_transaction() {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
@@ -1358,17 +1164,16 @@ mod tests {
let key = Keypair::new();
let to = Pubkey::new_rand();
let blockhash = Hash::default();
let mut tx = system_transaction::transfer(&key, &to, 50, blockhash);
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&key]);
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let result = rpc_client.send_and_confirm_transaction(&tx);
result.unwrap();
let rpc_client = RpcClient::new_mock("account_in_use".to_string());
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&key]);
let result = rpc_client.send_and_confirm_transaction(&tx);
assert!(result.is_err());
let rpc_client = RpcClient::new_mock("instruction_error".to_string());
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&key]);
let result = rpc_client.send_and_confirm_transaction(&tx);
assert_matches!(
result.unwrap_err().kind(),
ClientErrorKind::TransactionError(TransactionError::InstructionError(
@@ -1378,7 +1183,7 @@ mod tests {
);
let rpc_client = RpcClient::new_mock("sig_not_found".to_string());
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&key]);
let result = rpc_client.send_and_confirm_transaction(&tx);
if let ClientErrorKind::Io(err) = result.unwrap_err().kind() {
assert_eq!(err.kind(), io::ErrorKind::Other);
}

View File

@@ -4,8 +4,7 @@ use crate::{
rpc_request::{RpcError, RpcRequest},
};
use log::*;
use reqwest::{self, header::CONTENT_TYPE};
use solana_sdk::clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT};
use reqwest::{self, header::CONTENT_TYPE, StatusCode};
use std::{thread::sleep, time::Duration};
pub struct RpcClientRequest {
@@ -29,17 +28,13 @@ impl RpcClientRequest {
}
impl GenericRpcClientRequest for RpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: serde_json::Value,
mut retries: usize,
) -> Result<serde_json::Value> {
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
// Concurrent requests are not supported so reuse the same request id for all requests
let request_id = 1;
let request_json = request.build_request_json(request_id, params);
let mut too_many_requests_retries = 5;
loop {
match self
.client
@@ -50,6 +45,19 @@ impl GenericRpcClientRequest for RpcClientRequest {
{
Ok(response) => {
if !response.status().is_success() {
if response.status() == StatusCode::TOO_MANY_REQUESTS
&& too_many_requests_retries > 0
{
too_many_requests_retries -= 1;
debug!(
"Server responded with {:?}, {} retries left",
response, too_many_requests_retries
);
// Sleep for 500ms to give the server a break
sleep(Duration::from_millis(500));
continue;
}
return Err(response.error_for_status().unwrap_err().into());
}
@@ -63,17 +71,8 @@ impl GenericRpcClientRequest for RpcClientRequest {
}
return Ok(json["result"].clone());
}
Err(e) => {
info!("{:?} failed, {} retries left: {:?}", request, retries, e);
if retries == 0 {
return Err(e.into());
}
retries -= 1;
// Sleep for approximately half a slot
sleep(Duration::from_millis(
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND,
));
Err(err) => {
return Err(err.into());
}
}
}
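The retry behavior above is intentionally narrow: only HTTP 429 is retried, at most five times, with a fixed 500ms pause; transport errors and other HTTP failures surface immediately. As a standalone illustration (a hypothetical helper, not part of this diff), the same pattern in isolation:

// Hypothetical sketch of the bounded retry-on-429 loop, assuming reqwest's blocking client.
fn post_with_429_retry(
    client: &reqwest::blocking::Client,
    url: &str,
    body: String,
    mut retries_left: usize,
) -> reqwest::Result<reqwest::blocking::Response> {
    loop {
        let response = client.post(url).body(body.clone()).send()?;
        if response.status() == reqwest::StatusCode::TOO_MANY_REQUESTS && retries_left > 0 {
            retries_left -= 1;
            std::thread::sleep(std::time::Duration::from_millis(500)); // give the server a break
            continue;
        }
        return Ok(response);
    }
}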

client/src/rpc_config.rs Normal file
View File

@@ -0,0 +1,31 @@
use solana_sdk::commitment_config::CommitmentConfig;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureStatusConfig {
pub search_transaction_history: Option<bool>,
// DEPRECATED
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionConfig {
pub sig_verify: bool,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcLargestAccountsFilter {
Circulating,
NonCirculating,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcLargestAccountsConfig {
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub filter: Option<RpcLargestAccountsFilter>,
}
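A hedged usage sketch of the new config type (values illustrative): because commitment is #[serde(flatten)], the serialized form is one flat JSON object, e.g. {"commitment":"recent","filter":"circulating"}.

// Sketch: request the largest circulating accounts at "recent" commitment.
let config = RpcLargestAccountsConfig {
    commitment: Some(CommitmentConfig::recent()),
    filter: Some(RpcLargestAccountsFilter::Circulating),
};
let largest = rpc_client.get_largest_accounts_with_config(config)?; // Response<Vec<RpcAccountBalance>>
for RpcAccountBalance { address, lamports } in largest.value {
    println!("{}: {} lamports", address, lamports);
}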

View File

@@ -1,7 +1,8 @@
use serde_json::{json, Value};
use std::fmt;
use thiserror::Error;
#[derive(Debug, PartialEq, Eq, Hash)]
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub enum RpcRequest {
DeregisterNode,
ValidatorExit,
@@ -18,6 +19,7 @@ pub enum RpcRequest {
GetGenesisHash,
GetIdentity,
GetInflation,
GetLargestAccounts,
GetLeaderSchedule,
GetProgramAccounts,
GetRecentBlockhash,
@@ -30,6 +32,7 @@ pub enum RpcRequest {
GetStorageTurnRate,
GetSlotsPerSegment,
GetStoragePubkeysForSlot,
GetSupply,
GetTotalSupply,
GetTransactionCount,
GetVersion,
@@ -37,17 +40,14 @@ pub enum RpcRequest {
RegisterNode,
RequestAirdrop,
SendTransaction,
SimulateTransaction,
SignVote,
GetMinimumBalanceForRentExemption,
MinimumLedgerSlot,
}
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
impl RpcRequest {
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
let jsonrpc = "2.0";
impl fmt::Display for RpcRequest {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let method = match self {
RpcRequest::DeregisterNode => "deregisterNode",
RpcRequest::ValidatorExit => "validatorExit",
@@ -64,6 +64,7 @@ impl RpcRequest {
RpcRequest::GetGenesisHash => "getGenesisHash",
RpcRequest::GetIdentity => "getIdentity",
RpcRequest::GetInflation => "getInflation",
RpcRequest::GetLargestAccounts => "getLargestAccounts",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
@@ -76,6 +77,7 @@ impl RpcRequest {
RpcRequest::GetStorageTurnRate => "getStorageTurnRate",
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
RpcRequest::GetSupply => "getSupply",
RpcRequest::GetTotalSupply => "getTotalSupply",
RpcRequest::GetTransactionCount => "getTransactionCount",
RpcRequest::GetVersion => "getVersion",
@@ -83,14 +85,27 @@ impl RpcRequest {
RpcRequest::RegisterNode => "registerNode",
RpcRequest::RequestAirdrop => "requestAirdrop",
RpcRequest::SendTransaction => "sendTransaction",
RpcRequest::SimulateTransaction => "simulateTransaction",
RpcRequest::SignVote => "signVote",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
};
write!(f, "{}", method)
}
}
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
impl RpcRequest {
pub(crate) fn build_request_json(self, id: u64, params: Value) -> Value {
let jsonrpc = "2.0";
json!({
"jsonrpc": jsonrpc,
"id": id,
"method": method,
"method": format!("{}", self),
"params": params,
})
}
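The Display impl replaces the old inline match, and build_request_json now formats the variant for the "method" field. A small crate-internal sketch of the resulting behavior (the pubkey string is a placeholder):

// Sketch: the wire method name comes straight from Display.
assert_eq!(RpcRequest::GetSupply.to_string(), "getSupply");
let body = RpcRequest::GetBalance.build_request_json(1, serde_json::json!(["<base58 pubkey>"]));
assert_eq!(body["method"], "getBalance");
assert_eq!(body["jsonrpc"], "2.0");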

View File

@@ -108,6 +108,8 @@ pub struct RpcContactInfo {
pub tpu: Option<SocketAddr>,
/// JSON RPC port
pub rpc: Option<SocketAddr>,
/// Software version
pub version: Option<String>,
}
/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
@@ -192,3 +194,19 @@ pub struct RpcStorageTurn {
pub blockhash: String,
pub slot: Slot,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct RpcAccountBalance {
pub address: String,
pub lamports: u64,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSupply {
pub total: u64,
pub circulating: u64,
pub non_circulating: u64,
pub non_circulating_accounts: Vec<String>,
}
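Since both new types use #[serde(rename_all = "camelCase")], a getSupply value object maps directly onto RpcSupply. A minimal sketch (the sample numbers are made up):

// Sketch: deserialize the "value" portion of a getSupply response.
let raw = r#"{"total":1000,"circulating":800,"nonCirculating":200,"nonCirculatingAccounts":[]}"#;
let supply: RpcSupply = serde_json::from_str(raw).unwrap();
assert_eq!(supply.non_circulating, 200);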

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.8"
version = "1.1.15"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -41,36 +41,37 @@ regex = "1.3.6"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.8" }
solana-budget-program = { path = "../programs/budget", version = "1.1.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-client = { path = "../client", version = "1.1.8" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.8" }
solana-faucet = { path = "../faucet", version = "1.1.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.15" }
solana-budget-program = { path = "../programs/budget", version = "1.1.15" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-client = { path = "../client", version = "1.1.15" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.15" }
solana-faucet = { path = "../faucet", version = "1.1.15" }
ed25519-dalek = "=1.0.0-pre.3"
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
solana-measure = { path = "../measure", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-stake-program = { path = "../programs/stake", version = "1.1.8" }
solana-storage-program = { path = "../programs/storage", version = "1.1.8" }
solana-streamer = { path = "../streamer", version = "1.1.8" }
solana-vote-program = { path = "../programs/vote", version = "1.1.8" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.8" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-measure = { path = "../measure", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.15" }
solana-perf = { path = "../perf", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-stake-program = { path = "../programs/stake", version = "1.1.15" }
solana-storage-program = { path = "../programs/storage", version = "1.1.15" }
solana-streamer = { path = "../streamer", version = "1.1.15" }
solana-version = { path = "../version", version = "1.1.15" }
solana-vote-program = { path = "../programs/vote", version = "1.1.15" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.15" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.15" }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.8" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.15" }
trees = "0.2.1"
[dev-dependencies]

View File

@@ -6,7 +6,7 @@ use rand::{thread_rng, Rng};
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
use solana_ledger::shred::Shred;
use solana_ledger::shred::{Shred, NONCE_SHRED_PAYLOAD_SIZE};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::sync::RwLock;
@@ -26,7 +26,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
const NUM_SHREDS: usize = 32;
let shreds = vec![Shred::new_empty_data_shred(); NUM_SHREDS];
let shreds = vec![Shred::new_empty_data_shred(NONCE_SHRED_PAYLOAD_SIZE); NUM_SHREDS];
let mut stakes = HashMap::new();
const NUM_PEERS: usize = 200;
for _ in 0..NUM_PEERS {

View File

@@ -5,7 +5,7 @@ extern crate test;
use solana_ledger::entry::{create_ticks, Entry};
use solana_ledger::shred::{
max_entries_per_n_shred, max_ticks_per_n_shreds, Shred, Shredder, RECOMMENDED_FEC_RATE,
SIZE_OF_DATA_SHRED_PAYLOAD,
SIZE_OF_NONCE_DATA_SHRED_PAYLOAD,
};
use solana_perf::test_tx;
use solana_sdk::hash::Hash;
@@ -29,10 +29,11 @@ fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Ent
#[bench]
fn bench_shredder_ticks(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
// ~1Mb
let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
let num_ticks =
max_ticks_per_n_shreds(1, Some(SIZE_OF_NONCE_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
bencher.iter(|| {
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
@@ -43,10 +44,14 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
#[bench]
fn bench_shredder_large_entries(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
let txs_per_entry = 128;
let num_entries = max_entries_per_n_shred(&make_test_entry(txs_per_entry), num_shreds as u64);
let num_entries = max_entries_per_n_shred(
&make_test_entry(txs_per_entry),
num_shreds as u64,
Some(shred_size),
);
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
// 1Mb
bencher.iter(|| {
@@ -58,10 +63,10 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
#[bench]
fn bench_deshredder(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
// ~10Mb
let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp, 0, 0).unwrap();
let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
@@ -73,7 +78,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
#[bench]
fn bench_deserialize_hdr(bencher: &mut Bencher) {
let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
let data = vec![0; SIZE_OF_NONCE_DATA_SHRED_PAYLOAD];
let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);

View File

@@ -292,7 +292,7 @@ impl BankingStage {
enable_forwarding: bool,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
) {
) -> BufferedPacketsDecision {
let (leader_at_slot_offset, poh_has_bank, would_be_leader) = {
let poh = poh_recorder.lock().unwrap();
(
@@ -349,6 +349,7 @@ impl BankingStage {
}
_ => (),
}
decision
}
pub fn process_loop(
@@ -365,8 +366,8 @@ impl BankingStage {
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut buffered_packets = vec![];
loop {
if !buffered_packets.is_empty() {
Self::process_buffered_packets(
while !buffered_packets.is_empty() {
let decision = Self::process_buffered_packets(
&my_pubkey,
&socket,
poh_recorder,
@@ -376,6 +377,11 @@ impl BankingStage {
batch_limit,
transaction_status_sender.clone(),
);
if decision == BufferedPacketsDecision::Hold {
// If we are waiting on a new bank,
// check the receiver for more transactions/for exiting
break;
}
}
let recv_timeout = if !buffered_packets.is_empty() {

View File

@@ -463,7 +463,7 @@ pub mod test {
Vec<TransmitShreds>,
Vec<TransmitShreds>,
) {
let num_entries = max_ticks_per_n_shreds(num);
let num_entries = max_ticks_per_n_shreds(num, None);
let (data_shreds, _) = make_slot_entries(slot, 0, num_entries);
let keypair = Arc::new(Keypair::new());
let shredder = Shredder::new(slot, 0, RECOMMENDED_FEC_RATE, keypair, 0, 0)

View File

@@ -430,7 +430,7 @@ mod test {
));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut genesis_config = create_genesis_config(10_000).genesis_config;
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot, None) + 1;
let bank0 = Arc::new(Bank::new(&genesis_config));
(
blockstore,
@@ -539,7 +539,11 @@ mod test {
// Interrupting the slot should cause the unfinished_slot and stats to reset
let num_shreds = 1;
assert!(num_shreds < num_shreds_per_slot);
let ticks1 = create_ticks(max_ticks_per_n_shreds(num_shreds), 0, genesis_config.hash());
let ticks1 = create_ticks(
max_ticks_per_n_shreds(num_shreds, None),
0,
genesis_config.hash(),
);
let receive_results = ReceiveResults {
entries: ticks1.clone(),
time_elapsed: Duration::new(2, 0),

View File

@@ -18,8 +18,8 @@ use crate::{
crds_gossip_error::CrdsGossipError,
crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
crds_value::{
self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot, SnapshotHash, Vote,
MAX_WALLCLOCK,
self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot, SnapshotHash,
Version, Vote, MAX_WALLCLOCK,
},
epoch_slots::EpochSlots,
result::{Error, Result},
@@ -55,7 +55,7 @@ use solana_sdk::{
clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_SLOTS_PER_EPOCH},
pubkey::Pubkey,
signature::{Keypair, Signable, Signature, Signer},
timing::{duration_as_ms, timestamp},
timing::timestamp,
transaction::Transaction,
};
use solana_streamer::sendmmsg::multicast;
@@ -66,8 +66,9 @@ use std::{
collections::{HashMap, HashSet},
fmt,
net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket},
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
ops::{Deref, DerefMut},
sync::atomic::{AtomicBool, AtomicU64, Ordering},
sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard},
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
};
@@ -108,6 +109,127 @@ pub struct DataBudget {
// used to detect when to up the bytes budget again
}
struct GossipWriteLock<'a> {
gossip: RwLockWriteGuard<'a, CrdsGossip>,
timer: Measure,
counter: &'a Counter,
}
impl<'a> GossipWriteLock<'a> {
fn new(
gossip: RwLockWriteGuard<'a, CrdsGossip>,
label: &'static str,
counter: &'a Counter,
) -> Self {
Self {
gossip,
timer: Measure::start(label),
counter,
}
}
}
impl<'a> Deref for GossipWriteLock<'a> {
type Target = RwLockWriteGuard<'a, CrdsGossip>;
fn deref(&self) -> &Self::Target {
&self.gossip
}
}
impl<'a> DerefMut for GossipWriteLock<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.gossip
}
}
impl<'a> Drop for GossipWriteLock<'a> {
fn drop(&mut self) {
self.timer.stop();
self.counter.add_measure(&mut self.timer);
}
}
struct GossipReadLock<'a> {
gossip: RwLockReadGuard<'a, CrdsGossip>,
timer: Measure,
counter: &'a Counter,
}
impl<'a> GossipReadLock<'a> {
fn new(
gossip: RwLockReadGuard<'a, CrdsGossip>,
label: &'static str,
counter: &'a Counter,
) -> Self {
Self {
gossip,
timer: Measure::start(label),
counter,
}
}
}
impl<'a> Deref for GossipReadLock<'a> {
type Target = RwLockReadGuard<'a, CrdsGossip>;
fn deref(&self) -> &Self::Target {
&self.gossip
}
}
impl<'a> Drop for GossipReadLock<'a> {
fn drop(&mut self) {
self.timer.stop();
self.counter.add_measure(&mut self.timer);
}
}
#[derive(Default)]
struct Counter(AtomicU64);
impl Counter {
fn add_measure(&self, x: &mut Measure) {
x.stop();
self.0.fetch_add(x.as_us(), Ordering::Relaxed);
}
fn add_relaxed(&self, x: u64) {
self.0.fetch_add(x, Ordering::Relaxed);
}
fn clear(&self) -> u64 {
self.0.swap(0, Ordering::Relaxed)
}
}
#[derive(Default)]
struct GossipStats {
entrypoint: Counter,
entrypoint2: Counter,
push_vote_read: Counter,
vote_process_push: Counter,
get_votes: Counter,
get_accounts_hash: Counter,
get_snapshot_hash: Counter,
all_tvu_peers: Counter,
tvu_peers: Counter,
retransmit_peers: Counter,
repair_peers: Counter,
new_push_requests: Counter,
new_push_requests2: Counter,
process_pull_response: Counter,
process_pull_response_count: Counter,
process_pull_response_len: Counter,
process_pull_response_timeout: Counter,
process_pull_requests: Counter,
process_prune: Counter,
process_push_message: Counter,
prune_received_cache: Counter,
purge: Counter,
epoch_slots_lookup: Counter,
epoch_slots_push: Counter,
push_message: Counter,
new_pull_requests: Counter,
mark_pull_request: Counter,
}
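The guards above are an RAII timing pattern: each wraps a gossip lock plus a Measure, and the elapsed time is added to the named Counter when the guard drops. A self-contained sketch of the same idea with simplified, hypothetical names:

use std::sync::{Mutex, atomic::{AtomicU64, Ordering}};
use std::time::Instant;

#[derive(Default)]
struct Counter(AtomicU64);

// Guard owning a lock guard plus a start time; elapsed microseconds are accumulated on drop,
// mirroring GossipReadLock/GossipWriteLock above (names simplified for the sketch).
struct TimedGuard<'a, G> {
    inner: G,
    start: Instant,
    counter: &'a Counter,
}

impl<'a, G> TimedGuard<'a, G> {
    fn new(inner: G, counter: &'a Counter) -> Self {
        Self { inner, start: Instant::now(), counter }
    }
}

impl<'a, G> Drop for TimedGuard<'a, G> {
    fn drop(&mut self) {
        self.counter
            .0
            .fetch_add(self.start.elapsed().as_micros() as u64, Ordering::Relaxed);
    }
}

fn main() {
    let counter = Counter::default();
    let data = Mutex::new(0u64);
    {
        let mut guard = TimedGuard::new(data.lock().unwrap(), &counter);
        *guard.inner += 1; // hold the lock while the timer runs
    } // drop: lock released, elapsed time added to the counter
    println!("held for {} us", counter.0.load(Ordering::Relaxed));
}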
pub struct ClusterInfo {
/// The network
pub gossip: RwLock<CrdsGossip>,
@@ -118,6 +240,7 @@ pub struct ClusterInfo {
outbound_budget: RwLock<DataBudget>,
my_contact_info: RwLock<ContactInfo>,
id: Pubkey,
stats: GossipStats,
}
#[derive(Default, Clone)]
@@ -266,8 +389,13 @@ impl ClusterInfo {
}),
my_contact_info: RwLock::new(contact_info),
id,
stats: GossipStats::default(),
};
me.gossip.write().unwrap().set_self(&id);
{
let mut gossip = me.gossip.write().unwrap();
gossip.set_self(&id);
gossip.set_shred_version(me.my_shred_version());
}
me.insert_self();
me.push_self(&HashMap::new());
me
@@ -286,6 +414,7 @@ impl ClusterInfo {
outbound_budget: RwLock::new(self.outbound_budget.read().unwrap().clone()),
my_contact_info: RwLock::new(my_contact_info),
id: *new_id,
stats: GossipStats::default(),
}
}
@@ -361,61 +490,73 @@ impl ClusterInfo {
let now = timestamp();
let mut spy_nodes = 0;
let mut archivers = 0;
let mut different_shred_nodes = 0;
let my_pubkey = self.id();
let my_shred_version = self.my_shred_version();
let nodes: Vec<_> = self
.all_peers()
.into_iter()
.map(|(node, last_updated)| {
.filter_map(|(node, last_updated)| {
if Self::is_spy_node(&node) {
spy_nodes += 1;
} else if Self::is_archiver(&node) {
archivers += 1;
}
fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String {
if ContactInfo::is_valid_address(addr) {
if &addr.ip() == default_ip {
addr.port().to_string()
} else {
addr.to_string()
}
} else {
"none".to_string()
}
}
let ip_addr = node.gossip.ip();
format!(
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
ip_addr.to_string()
} else {
"none".to_string()
},
if node.id == my_pubkey { "me" } else { "" }.to_string(),
now.saturating_sub(last_updated),
node.id.to_string(),
addr_to_string(&ip_addr, &node.gossip),
addr_to_string(&ip_addr, &node.tpu),
addr_to_string(&ip_addr, &node.tpu_forwards),
addr_to_string(&ip_addr, &node.tvu),
addr_to_string(&ip_addr, &node.tvu_forwards),
addr_to_string(&ip_addr, &node.repair),
addr_to_string(&ip_addr, &node.serve_repair),
addr_to_string(&ip_addr, &node.storage_addr),
addr_to_string(&ip_addr, &node.rpc),
addr_to_string(&ip_addr, &node.rpc_pubsub),
node.shred_version,
)
let node_version = self.get_node_version(&node.id);
if my_shred_version != 0 && (node.shred_version != 0 && node.shred_version != my_shred_version) {
different_shred_nodes += 1;
None
} else {
fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String {
if ContactInfo::is_valid_address(addr) {
if &addr.ip() == default_ip {
addr.port().to_string()
} else {
addr.to_string()
}
} else {
"none".to_string()
}
}
let ip_addr = node.gossip.ip();
Some(format!(
"{:15} {:2}| {:5} | {:44} |{:^15}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
ip_addr.to_string()
} else {
"none".to_string()
},
if node.id == my_pubkey { "me" } else { "" }.to_string(),
now.saturating_sub(last_updated),
node.id.to_string(),
if let Some(node_version) = node_version {
node_version.to_string()
} else {
"-".to_string()
},
addr_to_string(&ip_addr, &node.gossip),
addr_to_string(&ip_addr, &node.tpu),
addr_to_string(&ip_addr, &node.tpu_forwards),
addr_to_string(&ip_addr, &node.tvu),
addr_to_string(&ip_addr, &node.tvu_forwards),
addr_to_string(&ip_addr, &node.repair),
addr_to_string(&ip_addr, &node.serve_repair),
addr_to_string(&ip_addr, &node.rpc),
addr_to_string(&ip_addr, &node.rpc_pubsub),
node.shred_version,
))
}
})
.collect();
format!(
"IP Address |Age(ms)| Node identifier \
|Gossip| TPU |TPUfwd| TVU |TVUfwd|Repair|ServeR|Storag| RPC |PubSub|ShredVer\n\
------------------+-------+----------------------------------------------+\
------+------+------+------+------+------+------+------+------+------+--------\n\
| Version |Gossip| TPU |TPUfwd| TVU |TVUfwd|Repair|ServeR| RPC |PubSub|ShredVer\n\
------------------+-------+----------------------------------------------+---------------+\
------+------+------+------+------+------+------+------+------+--------\n\
{}\
Nodes: {}{}{}",
Nodes: {}{}{}{}",
nodes.join(""),
nodes.len() - spy_nodes - archivers,
if archivers > 0 {
@@ -427,6 +568,14 @@ impl ClusterInfo {
format!("\nSpies: {}", spy_nodes)
} else {
"".to_string()
},
if different_shred_nodes > 0 {
format!(
"\nNodes with different shred version: {}",
different_shred_nodes
)
} else {
"".to_string()
}
)
}
@@ -459,13 +608,14 @@ impl ClusterInfo {
let mut current_slots: Vec<_> = (0..crds_value::MAX_EPOCH_SLOTS)
.filter_map(|ix| {
Some((
self.gossip
.read()
.unwrap()
.crds
.lookup(&CrdsValueLabel::EpochSlots(ix, self.id()))
.and_then(CrdsValue::epoch_slots)
.and_then(|x| Some((x.wallclock, x.first_slot()?)))?,
self.time_gossip_read_lock(
"lookup_epoch_slots",
&self.stats.epoch_slots_lookup,
)
.crds
.lookup(&CrdsValueLabel::EpochSlots(ix, self.id()))
.and_then(CrdsValue::epoch_slots)
.and_then(|x| Some((x.wallclock, x.first_slot()?)))?,
ix,
))
})
@@ -502,9 +652,7 @@ impl ClusterInfo {
let n = slots.fill(&update[num..], now);
if n > 0 {
let entry = CrdsValue::new_signed(CrdsData::EpochSlots(ix, slots), &self.keypair);
self.gossip
.write()
.unwrap()
self.time_gossip_write_lock("epcoh_slots_push", &self.stats.epoch_slots_push)
.process_push_message(&self.id(), vec![entry], now);
}
num += n;
@@ -515,12 +663,26 @@ impl ClusterInfo {
}
}
fn time_gossip_read_lock<'a>(
&'a self,
label: &'static str,
counter: &'a Counter,
) -> GossipReadLock<'a> {
GossipReadLock::new(self.gossip.read().unwrap(), label, counter)
}
fn time_gossip_write_lock<'a>(
&'a self,
label: &'static str,
counter: &'a Counter,
) -> GossipWriteLock<'a> {
GossipWriteLock::new(self.gossip.write().unwrap(), label, counter)
}
pub fn push_message(&self, message: CrdsValue) {
let now = message.wallclock();
let id = message.pubkey();
self.gossip
.write()
.unwrap()
self.time_gossip_write_lock("process_push_message", &self.stats.push_message)
.process_push_message(&id, vec![message], now);
}
@@ -554,16 +716,15 @@ impl ClusterInfo {
let now = timestamp();
let vote = Vote::new(&self.id(), vote, now);
let vote_ix = {
let r_gossip = self.gossip.read().unwrap();
let r_gossip =
self.time_gossip_read_lock("gossip_read_push_vote", &self.stats.push_vote_read);
let current_votes: Vec<_> = (0..crds_value::MAX_VOTES)
.filter_map(|ix| r_gossip.crds.lookup(&CrdsValueLabel::Vote(ix, self.id())))
.collect();
CrdsValue::compute_vote_index(tower_index, current_votes)
};
let entry = CrdsValue::new_signed(CrdsData::Vote(vote_ix, vote), &self.keypair);
self.gossip
.write()
.unwrap()
self.time_gossip_write_lock("push_vote_process_push", &self.stats.vote_process_push)
.process_push_message(&self.id(), vec![entry], now);
}
@@ -575,9 +736,7 @@ impl ClusterInfo {
pub fn get_votes(&self, since: u64) -> (Vec<CrdsValueLabel>, Vec<Transaction>, u64) {
let mut max_ts = since;
let (labels, txs): (Vec<CrdsValueLabel>, Vec<Transaction>) = self
.gossip
.read()
.unwrap()
.time_gossip_read_lock("get_votes", &self.stats.get_votes)
.crds
.table
.iter()
@@ -594,9 +753,7 @@ impl ClusterInfo {
}
pub fn get_snapshot_hash(&self, slot: Slot) -> Vec<(Pubkey, Hash)> {
self.gossip
.read()
.unwrap()
self.time_gossip_read_lock("get_snapshot_hash", &self.stats.get_snapshot_hash)
.crds
.table
.values()
@@ -616,9 +773,7 @@ impl ClusterInfo {
where
F: FnOnce(&Vec<(Slot, Hash)>) -> Y,
{
self.gossip
.read()
.unwrap()
self.time_gossip_read_lock("get_accounts_hash", &self.stats.get_accounts_hash)
.crds
.table
.get(&CrdsValueLabel::AccountsHashes(*pubkey))
@@ -683,6 +838,18 @@ impl ClusterInfo {
(vec, max)
}
pub fn get_node_version(&self, pubkey: &Pubkey) -> Option<solana_version::Version> {
self.gossip
.read()
.unwrap()
.crds
.table
.get(&CrdsValueLabel::Version(*pubkey))
.map(|x| x.value.version())
.flatten()
.map(|version| version.version.clone())
}
/// all validators that have a valid rpc port regardless of `shred_version`.
pub fn all_rpc_peers(&self) -> Vec<ContactInfo> {
self.gossip
@@ -730,9 +897,7 @@ impl ClusterInfo {
/// all validators that have a valid tvu port regardless of `shred_version`.
pub fn all_tvu_peers(&self) -> Vec<ContactInfo> {
self.gossip
.read()
.unwrap()
self.time_gossip_read_lock("all_tvu_peers", &self.stats.all_tvu_peers)
.crds
.table
.values()
@@ -748,9 +913,7 @@ impl ClusterInfo {
/// all validators that have a valid tvu port and are on the same `shred_version`.
pub fn tvu_peers(&self) -> Vec<ContactInfo> {
self.gossip
.read()
.unwrap()
self.time_gossip_read_lock("tvu_peers", &self.stats.tvu_peers)
.crds
.table
.values()
@@ -799,9 +962,7 @@ impl ClusterInfo {
/// all peers that have a valid tvu
pub fn retransmit_peers(&self) -> Vec<ContactInfo> {
self.gossip
.read()
.unwrap()
self.time_gossip_read_lock("retransmit_peers", &self.stats.retransmit_peers)
.crds
.table
.values()
@@ -818,7 +979,8 @@ impl ClusterInfo {
/// all tvu peers with valid gossip addrs that likely have the slot being requested
pub fn repair_peers(&self, slot: Slot) -> Vec<ContactInfo> {
ClusterInfo::tvu_peers(self)
let mut time = Measure::start("repair_peers");
let ret = ClusterInfo::tvu_peers(self)
.into_iter()
.filter(|x| {
x.id != self.id()
@@ -831,7 +993,9 @@ impl ClusterInfo {
.unwrap_or_else(|| /* fallback to legacy behavior */ true)
}
})
.collect()
.collect();
self.stats.repair_peers.add_measure(&mut time);
ret
}
fn is_spy_node(contact_info: &ContactInfo) -> bool {
@@ -1120,8 +1284,12 @@ impl ClusterInfo {
false
} else {
entrypoint.wallclock = now;
let found_entrypoint =
self.gossip.read().unwrap().crds.table.iter().any(|(_, v)| {
let found_entrypoint = self
.time_gossip_read_lock("entrypoint", &self.stats.entrypoint)
.crds
.table
.iter()
.any(|(_, v)| {
v.value
.contact_info()
.map(|ci| ci.gossip == entrypoint.gossip)
@@ -1144,12 +1312,12 @@ impl ClusterInfo {
.map(|e| (e.id, e.gossip))
};
if let Some((id, gossip)) = id_and_gossip {
let r_gossip = self.gossip.read().unwrap();
let r_gossip = self.time_gossip_read_lock("entrypoint", &self.stats.entrypoint2);
let self_info = r_gossip
.crds
.lookup(&CrdsValueLabel::ContactInfo(self.id()))
.unwrap_or_else(|| panic!("self_id invalid {}", self.id()));
return r_gossip
r_gossip
.pull
.build_crds_filters(&r_gossip.crds, MAX_BLOOM_SIZE)
.into_iter()
@@ -1201,8 +1369,8 @@ impl ClusterInfo {
fn new_pull_requests(&self, stakes: &HashMap<Pubkey, u64>) -> Vec<(SocketAddr, Protocol)> {
let now = timestamp();
let mut pulls: Vec<_> = {
let r_gossip = self.gossip.read().unwrap();
let r_gossip =
self.time_gossip_read_lock("new_pull_reqs", &self.stats.new_pull_requests);
r_gossip
.new_pull_request(now, stakes, MAX_BLOOM_SIZE)
.ok()
@@ -1226,9 +1394,7 @@ impl ClusterInfo {
pulls
.into_iter()
.map(|(peer, filter, gossip, self_info)| {
self.gossip
.write()
.unwrap()
self.time_gossip_write_lock("mark_pull", &self.stats.mark_pull_request)
.mark_pull_request_creation_time(&peer, now);
(gossip, Protocol::PullRequest(filter, self_info))
})
@@ -1236,14 +1402,14 @@ impl ClusterInfo {
}
fn new_push_requests(&self) -> Vec<(SocketAddr, Protocol)> {
let self_id = self.id();
-        let (_, push_messages) = self.gossip.write().unwrap().new_push_messages(timestamp());
+        let (_, push_messages) = self
+            .time_gossip_write_lock("new_push_requests", &self.stats.new_push_requests)
+            .new_push_messages(timestamp());
push_messages
.into_iter()
.filter_map(|(peer, messages)| {
let peer_label = CrdsValueLabel::ContactInfo(peer);
-                self.gossip
-                    .read()
-                    .unwrap()
+                self.time_gossip_read_lock("push_req_lookup", &self.stats.new_push_requests2)
.crds
.lookup(&peer_label)
.and_then(CrdsValue::contact_info)
@@ -1293,6 +1459,9 @@ impl ClusterInfo {
let mut last_contact_info_trace = timestamp();
let mut adopt_shred_version = obj.my_shred_version() == 0;
let recycler = PacketsRecycler::default();
let message = CrdsData::Version(Version::new(obj.id()));
obj.push_message(CrdsValue::new_signed(message, &obj.keypair));
loop {
let start = timestamp();
thread_mem_usage::datapoint("solana-gossip");
@@ -1324,7 +1493,9 @@ impl ClusterInfo {
}
};
let timeouts = obj.gossip.read().unwrap().make_timeouts(&stakes, timeout);
-            let num_purged = obj.gossip.write().unwrap().purge(timestamp(), &timeouts);
+            let num_purged = obj
+                .time_gossip_write_lock("purge", &obj.stats.purge)
+                .purge(timestamp(), &timeouts);
inc_new_counter_info!("cluster_info-purge-count", num_purged);
let table_size = obj.gossip.read().unwrap().crds.table.len();
datapoint_debug!(
@@ -1350,6 +1521,10 @@ impl ClusterInfo {
);
obj.my_contact_info.write().unwrap().shred_version =
entrypoint.shred_version;
obj.gossip
.write()
.unwrap()
.set_shred_version(entrypoint.shred_version);
obj.insert_self();
adopt_shred_version = false;
}
@@ -1462,13 +1637,15 @@ impl ClusterInfo {
"cluster_info-prune_message-size",
data.prunes.len()
);
-            match me.gossip.write().unwrap().process_prune_msg(
-                &from,
-                &data.destination,
-                &data.prunes,
-                data.wallclock,
-                timestamp(),
-            ) {
+            match me
+                .time_gossip_write_lock("process_prune", &me.stats.process_prune)
+                .process_prune_msg(
+                    &from,
+                    &data.destination,
+                    &data.prunes,
+                    data.wallclock,
+                    timestamp(),
+                ) {
Err(CrdsGossipError::PruneMessageTimeout) => {
inc_new_counter_debug!("cluster_info-prune_message_timeout", 1)
}
@@ -1532,9 +1709,7 @@ impl ClusterInfo {
let now = timestamp();
let self_id = me.id();
let pull_responses = me
-            .gossip
-            .write()
-            .unwrap()
+            .time_gossip_write_lock("process_pull_reqs", &me.stats.process_pull_requests)
.process_pull_requests(caller_and_filters, now);
// Filter bad to addresses
@@ -1638,17 +1813,15 @@ impl ClusterInfo {
timeouts: &HashMap<Pubkey, u64>,
) {
let len = data.len();
-        let now = Instant::now();
-        let self_id = me.gossip.read().unwrap().id;
-        trace!("PullResponse me: {} from: {} len={}", self_id, from, len);
-        me.gossip
-            .write()
-            .unwrap()
+        trace!("PullResponse me: {} from: {} len={}", me.id, from, len);
+        let (_fail, timeout_count) = me
+            .time_gossip_write_lock("process_pull", &me.stats.process_pull_response)
            .process_pull_response(from, timeouts, data, timestamp());
-        inc_new_counter_debug!("cluster_info-pull_request_response", 1);
-        inc_new_counter_debug!("cluster_info-pull_request_response-size", len);
-        report_time_spent("ReceiveUpdates", &now.elapsed(), &format!(" len: {}", len));
+        me.stats.process_pull_response_count.add_relaxed(1);
+        me.stats.process_pull_response_len.add_relaxed(len as u64);
+        me.stats
+            .process_pull_response_timeout
+            .add_relaxed(timeout_count as u64);
}
fn handle_push_message(
@@ -1661,17 +1834,13 @@ impl ClusterInfo {
let self_id = me.id();
inc_new_counter_debug!("cluster_info-push_message", 1);
-        let updated: Vec<_> =
-            me.gossip
-                .write()
-                .unwrap()
-                .process_push_message(from, data, timestamp());
+        let updated: Vec<_> = me
+            .time_gossip_write_lock("process_push", &me.stats.process_push_message)
+            .process_push_message(from, data, timestamp());
let updated_labels: Vec<_> = updated.into_iter().map(|u| u.value.label()).collect();
let prunes_map: HashMap<Pubkey, HashSet<Pubkey>> = me
-            .gossip
-            .write()
-            .unwrap()
+            .time_gossip_write_lock("prune_received_cache", &me.stats.prune_received_cache)
.prune_received_cache(updated_labels, stakes);
let rsp: Vec<_> = prunes_map
@@ -1722,6 +1891,7 @@ impl ClusterInfo {
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
thread_pool: &ThreadPool,
last_print: &mut Instant,
) -> Result<()> {
//TODO cache connections
let timeout = Duration::new(1, 0);
@@ -1735,7 +1905,10 @@ impl ClusterInfo {
requests.push(more_reqs)
}
if num_requests >= MAX_GOSSIP_TRAFFIC {
-            warn!("Too much gossip traffic, ignoring some messages");
+            warn!(
+                "Too much gossip traffic, ignoring some messages (requests={}, max requests={})",
+                num_requests, MAX_GOSSIP_TRAFFIC
+            );
}
let epoch_ms;
let stakes: HashMap<_, _> = match bank_forks {
@@ -1759,8 +1932,104 @@ impl ClusterInfo {
});
});
Self::print_reset_stats(obj, last_print);
Ok(())
}
fn print_reset_stats(&self, last_print: &mut Instant) {
if last_print.elapsed().as_millis() > 1000 {
datapoint_info!(
"cluster_info_stats",
("entrypoint", self.stats.entrypoint.clear(), i64),
("entrypoint2", self.stats.entrypoint2.clear(), i64),
("push_vote_read", self.stats.push_vote_read.clear(), i64),
(
"vote_process_push",
self.stats.vote_process_push.clear(),
i64
),
("get_votes", self.stats.get_votes.clear(), i64),
(
"get_accounts_hash",
self.stats.get_accounts_hash.clear(),
i64
),
("all_tvu_peers", self.stats.all_tvu_peers.clear(), i64),
("tvu_peers", self.stats.tvu_peers.clear(), i64),
);
datapoint_info!(
"cluster_info_stats2",
("retransmit_peers", self.stats.retransmit_peers.clear(), i64),
("repair_peers", self.stats.repair_peers.clear(), i64),
(
"new_push_requests",
self.stats.new_push_requests.clear(),
i64
),
(
"new_push_requests2",
self.stats.new_push_requests2.clear(),
i64
),
("purge", self.stats.purge.clear(), i64),
(
"process_pull_resp",
self.stats.process_pull_response.clear(),
i64
),
(
"process_pull_resp_count",
self.stats.process_pull_response_count.clear(),
i64
),
);
datapoint_info!(
"cluster_info_stats3",
(
"process_pull_resp_len",
self.stats.process_pull_response_len.clear(),
i64
),
(
"process_pull_requests",
self.stats.process_pull_requests.clear(),
i64
),
("process_prune", self.stats.process_prune.clear(), i64),
(
"process_push_message",
self.stats.process_push_message.clear(),
i64
),
(
"prune_received_cache",
self.stats.prune_received_cache.clear(),
i64
),
(
"epoch_slots_lookup",
self.stats.epoch_slots_lookup.clear(),
i64
),
("epoch_slots_push", self.stats.epoch_slots_push.clear(), i64),
("push_message", self.stats.push_message.clear(), i64),
(
"new_pull_requests",
self.stats.new_pull_requests.clear(),
i64
),
(
"mark_pull_request",
self.stats.mark_pull_request.clear(),
i64
),
);
*last_print = Instant::now();
}
}
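
print_reset_stats drains each counter into a datapoint roughly once per second. A small sketch of a clear-on-report counter built from an AtomicU64; the type name and methods below are hypothetical, not the crate's own.

use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Default)]
struct ResettingCounter(AtomicU64);

impl ResettingCounter {
    fn add_relaxed(&self, v: u64) {
        self.0.fetch_add(v, Ordering::Relaxed);
    }
    // Report-and-reset: each datapoint covers only the interval since the last one.
    fn clear(&self) -> u64 {
        self.0.swap(0, Ordering::Relaxed)
    }
}

fn main() {
    let c = ResettingCounter::default();
    c.add_relaxed(5);
    c.add_relaxed(7);
    assert_eq!(c.clear(), 12);
    assert_eq!(c.clear(), 0);
}
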
pub fn listen(
me: Arc<Self>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
@@ -1777,6 +2046,7 @@ impl ClusterInfo {
.num_threads(get_thread_count())
.build()
.unwrap();
let mut last_print = Instant::now();
loop {
let e = Self::run_listen(
&me,
@@ -1785,6 +2055,7 @@ impl ClusterInfo {
&requests_receiver,
&response_sender,
&thread_pool,
&mut last_print,
);
if exit.load(Ordering::Relaxed) {
return;
@@ -1803,39 +2074,39 @@ impl ClusterInfo {
.unwrap()
}
-    pub fn gossip_contact_info(id: &Pubkey, gossip: SocketAddr) -> ContactInfo {
+    pub fn gossip_contact_info(id: &Pubkey, gossip: SocketAddr, shred_version: u16) -> ContactInfo {
ContactInfo {
id: *id,
gossip,
wallclock: timestamp(),
shred_version,
..ContactInfo::default()
}
}
-    pub fn spy_contact_info(id: &Pubkey) -> ContactInfo {
-        let dummy_addr = socketaddr_any!();
-        Self::gossip_contact_info(id, dummy_addr)
-    }
    /// An alternative to a spy node that has a valid gossip address and fully participates in gossip.
pub fn gossip_node(
id: &Pubkey,
gossip_addr: &SocketAddr,
shred_version: u16,
) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let (port, (gossip_socket, ip_echo)) =
Node::get_gossip_port(gossip_addr, VALIDATOR_PORT_RANGE, bind_ip_addr);
-        let contact_info = Self::gossip_contact_info(id, SocketAddr::new(gossip_addr.ip(), port));
+        let contact_info =
+            Self::gossip_contact_info(id, SocketAddr::new(gossip_addr.ip(), port), shred_version);
(contact_info, gossip_socket, Some(ip_echo))
}
/// A Node with dummy ports to spy on gossip via pull requests
-    pub fn spy_node(id: &Pubkey) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
+    pub fn spy_node(
+        id: &Pubkey,
+        shred_version: u16,
+    ) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let (_, gossip_socket) = bind_in_range(bind_ip_addr, VALIDATOR_PORT_RANGE).unwrap();
-        let contact_info = Self::spy_contact_info(id);
+        let contact_info = Self::gossip_contact_info(id, socketaddr_any!(), shred_version);
(contact_info, gossip_socket, None)
}
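
spy_node now builds its ContactInfo directly from gossip_contact_info with a wildcard socket address and an explicit shred_version. A self-contained illustration of that wildcard address, assuming the crate's socketaddr_any! macro amounts to an unspecified IP with port 0.

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// The dummy address a spy node advertises: unspecified IP, port 0 (assumed expansion).
fn any_addr() -> SocketAddr {
    SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)
}

fn main() {
    let addr = any_addr();
    assert!(addr.ip().is_unspecified());
    assert_eq!(addr.port(), 0);
}
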
@@ -2106,13 +2377,6 @@ impl Node {
}
}
-fn report_time_spent(label: &str, time: &Duration, extra: &str) {
-    let time_ms = duration_as_ms(time);
-    if time_ms > 100 {
-        info!("{} took: {} ms {}", label, time_ms, extra);
-    }
-}
pub fn stake_weight_peers<S: std::hash::BuildHasher>(
peers: &mut Vec<ContactInfo>,
stakes: Option<Arc<HashMap<Pubkey, u64, S>>>,
@@ -2135,10 +2399,10 @@ mod tests {
#[test]
fn test_gossip_node() {
//check that a gossip nodes always show up as spies
let (node, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand());
let (node, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand(), 0);
assert!(ClusterInfo::is_spy_node(&node));
let (node, _, _) =
ClusterInfo::gossip_node(&Pubkey::new_rand(), &"1.1.1.1:1111".parse().unwrap());
ClusterInfo::gossip_node(&Pubkey::new_rand(), &"1.1.1.1:1111".parse().unwrap(), 0);
assert!(ClusterInfo::is_spy_node(&node));
}
@@ -2146,7 +2410,7 @@ mod tests {
fn test_cluster_spy_gossip() {
//check that gossip doesn't try to push to invalid addresses
let node = Node::new_localhost();
let (spy, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand());
let (spy, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand(), 0);
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));
cluster_info.insert_info(spy);
cluster_info

@@ -1,8 +1,10 @@
use crate::{
cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
consensus::VOTE_THRESHOLD_SIZE,
crds_value::CrdsValueLabel,
poh_recorder::PohRecorder,
result::{Error, Result},
rpc_subscriptions::RpcSubscriptions,
sigverify,
verified_vote_packets::VerifiedVotePackets,
};
@@ -14,7 +16,10 @@ use log::*;
use solana_ledger::bank_forks::BankForks;
use solana_metrics::inc_new_counter_debug;
use solana_perf::packet::{self, Packets};
use solana_runtime::{bank::Bank, epoch_stakes::EpochAuthorizedVoters};
use solana_runtime::{
bank::Bank,
epoch_stakes::{EpochAuthorizedVoters, EpochStakes},
};
use solana_sdk::{
clock::{Epoch, Slot},
epoch_schedule::EpochSchedule,
@@ -43,6 +48,7 @@ pub type VerifiedVoteTransactionsReceiver = CrossbeamReceiver<Vec<Transaction>>;
pub struct SlotVoteTracker {
voted: HashSet<Arc<Pubkey>>,
updates: Option<Vec<Arc<Pubkey>>>,
total_stake: u64,
}
impl SlotVoteTracker {
@@ -203,6 +209,7 @@ impl ClusterInfoVoteListener {
poh_recorder: &Arc<Mutex<PohRecorder>>,
vote_tracker: Arc<VoteTracker>,
bank_forks: Arc<RwLock<BankForks>>,
subscriptions: Arc<RpcSubscriptions>,
) -> Self {
let exit_ = exit.clone();
@@ -244,6 +251,7 @@ impl ClusterInfoVoteListener {
verified_vote_transactions_receiver,
vote_tracker,
&bank_forks,
subscriptions,
);
})
.unwrap();
@@ -372,6 +380,7 @@ impl ClusterInfoVoteListener {
vote_txs_receiver: VerifiedVoteTransactionsReceiver,
vote_tracker: Arc<VoteTracker>,
bank_forks: &RwLock<BankForks>,
subscriptions: Arc<RpcSubscriptions>,
) -> Result<()> {
loop {
if exit.load(Ordering::Relaxed) {
@@ -380,10 +389,15 @@ impl ClusterInfoVoteListener {
let root_bank = bank_forks.read().unwrap().root_bank().clone();
vote_tracker.process_new_root_bank(&root_bank);
let epoch_stakes = root_bank.epoch_stakes(root_bank.epoch());
-            if let Err(e) =
-                Self::get_and_process_votes(&vote_txs_receiver, &vote_tracker, root_bank.slot())
-            {
+            if let Err(e) = Self::get_and_process_votes(
+                &vote_txs_receiver,
+                &vote_tracker,
+                root_bank.slot(),
+                subscriptions.clone(),
+                epoch_stakes,
+            ) {
match e {
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
return Ok(());
@@ -397,21 +411,51 @@ impl ClusterInfoVoteListener {
}
}
#[cfg(test)]
pub fn get_and_process_votes_for_tests(
vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
vote_tracker: &Arc<VoteTracker>,
last_root: Slot,
subscriptions: Arc<RpcSubscriptions>,
) -> Result<()> {
Self::get_and_process_votes(
vote_txs_receiver,
vote_tracker,
last_root,
subscriptions,
None,
)
}
fn get_and_process_votes(
vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
vote_tracker: &Arc<VoteTracker>,
last_root: Slot,
subscriptions: Arc<RpcSubscriptions>,
epoch_stakes: Option<&EpochStakes>,
) -> Result<()> {
let timer = Duration::from_millis(200);
let mut vote_txs = vote_txs_receiver.recv_timeout(timer)?;
while let Ok(new_txs) = vote_txs_receiver.try_recv() {
vote_txs.extend(new_txs);
}
-        Self::process_votes(vote_tracker, vote_txs, last_root);
+        Self::process_votes(
+            vote_tracker,
+            vote_txs,
+            last_root,
+            subscriptions,
+            epoch_stakes,
+        );
Ok(())
}
-    fn process_votes(vote_tracker: &VoteTracker, vote_txs: Vec<Transaction>, root: Slot) {
+    fn process_votes(
+        vote_tracker: &VoteTracker,
+        vote_txs: Vec<Transaction>,
+        root: Slot,
+        subscriptions: Arc<RpcSubscriptions>,
+        epoch_stakes: Option<&EpochStakes>,
+    ) {
let mut diff: HashMap<Slot, HashSet<Arc<Pubkey>>> = HashMap::new();
{
let all_slot_trackers = &vote_tracker.slot_vote_trackers;
@@ -463,7 +507,7 @@ impl ClusterInfoVoteListener {
continue;
}
for slot in vote.slots {
for &slot in vote.slots.iter() {
if slot <= root {
continue;
}
@@ -488,6 +532,8 @@ impl ClusterInfoVoteListener {
.or_default()
.insert(unduplicated_pubkey.unwrap());
}
subscriptions.notify_vote(&vote);
}
}
}
@@ -504,15 +550,35 @@ impl ClusterInfoVoteListener {
if w_slot_tracker.updates.is_none() {
w_slot_tracker.updates = Some(vec![]);
}
-                for pk in slot_diff {
-                    w_slot_tracker.voted.insert(pk.clone());
-                    w_slot_tracker.updates.as_mut().unwrap().push(pk);
+                let mut current_stake = 0;
+                for pubkey in slot_diff {
+                    Self::sum_stake(&mut current_stake, epoch_stakes, &pubkey);
+                    w_slot_tracker.voted.insert(pubkey.clone());
+                    w_slot_tracker.updates.as_mut().unwrap().push(pubkey);
}
Self::notify_for_stake_change(
current_stake,
w_slot_tracker.total_stake,
&subscriptions,
epoch_stakes,
slot,
);
w_slot_tracker.total_stake += current_stake;
} else {
-                let voted: HashSet<_> = slot_diff.into_iter().collect();
+                let mut total_stake = 0;
+                let voted: HashSet<_> = slot_diff
+                    .into_iter()
+                    .map(|pubkey| {
+                        Self::sum_stake(&mut total_stake, epoch_stakes, &pubkey);
+                        pubkey
+                    })
+                    .collect();
Self::notify_for_stake_change(total_stake, 0, &subscriptions, epoch_stakes, slot);
let new_slot_tracker = SlotVoteTracker {
voted: voted.clone(),
updates: Some(voted.into_iter().collect()),
total_stake,
};
vote_tracker
.slot_vote_trackers
@@ -522,11 +588,38 @@ impl ClusterInfoVoteListener {
}
}
}
fn notify_for_stake_change(
current_stake: u64,
previous_stake: u64,
subscriptions: &Arc<RpcSubscriptions>,
epoch_stakes: Option<&EpochStakes>,
slot: Slot,
) {
if let Some(stakes) = epoch_stakes {
let supermajority_stake = (stakes.total_stake() as f64 * VOTE_THRESHOLD_SIZE) as u64;
if previous_stake < supermajority_stake
&& (previous_stake + current_stake) > supermajority_stake
{
subscriptions.notify_gossip_subscribers(slot);
}
}
}
fn sum_stake(sum: &mut u64, epoch_stakes: Option<&EpochStakes>, pubkey: &Pubkey) {
if let Some(stakes) = epoch_stakes {
if let Some(vote_account) = stakes.stakes().vote_accounts().get(pubkey) {
*sum += vote_account.0;
}
}
}
}
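
notify_for_stake_change fires a gossip-subscriber notification only when the newly observed stake pushes a slot across the supermajority threshold. A standalone sketch of that threshold check, assuming VOTE_THRESHOLD_SIZE is the usual 2/3 fraction.

// Assumption: VOTE_THRESHOLD_SIZE is the 2/3 supermajority fraction.
const VOTE_THRESHOLD_SIZE: f64 = 2.0 / 3.0;

fn crosses_supermajority(previous_stake: u64, added_stake: u64, total_stake: u64) -> bool {
    let supermajority = (total_stake as f64 * VOTE_THRESHOLD_SIZE) as u64;
    previous_stake < supermajority && previous_stake + added_stake > supermajority
}

fn main() {
    // With 100 total stake the threshold is 66: only the 60 -> 70 step crosses it.
    assert!(crosses_supermajority(60, 10, 100));
    assert!(!crosses_supermajority(70, 10, 100)); // already above before this batch
    assert!(!crosses_supermajority(60, 5, 100)); // still below after this batch
}
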
#[cfg(test)]
mod tests {
use super::*;
use crate::commitment::BlockCommitmentCache;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_perf::packet;
use solana_runtime::{
bank::Bank,
@@ -623,7 +716,7 @@ mod tests {
#[test]
fn test_update_new_root() {
let (vote_tracker, bank, _) = setup();
let (vote_tracker, bank, _, _) = setup();
// Check outdated slots are purged with new root
let new_voter = Arc::new(Pubkey::new_rand());
@@ -664,7 +757,7 @@ mod tests {
#[test]
fn test_update_new_leader_schedule_epoch() {
let (vote_tracker, bank, _) = setup();
let (vote_tracker, bank, _, _) = setup();
// Check outdated slots are purged with new root
let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot());
@@ -706,7 +799,7 @@ mod tests {
#[test]
fn test_process_votes() {
// Create some voters at genesis
let (vote_tracker, _, validator_voting_keypairs) = setup();
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
let (votes_sender, votes_receiver) = unbounded();
let vote_slots = vec![1, 2];
@@ -725,7 +818,14 @@ mod tests {
});
// Check that all the votes were registered for each validator correctly
-        ClusterInfoVoteListener::get_and_process_votes(&votes_receiver, &vote_tracker, 0).unwrap();
+        ClusterInfoVoteListener::get_and_process_votes(
+            &votes_receiver,
+            &vote_tracker,
+            0,
+            subscriptions,
+            None,
+        )
+        .unwrap();
for vote_slot in vote_slots {
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
let r_slot_vote_tracker = slot_vote_tracker.read().unwrap();
@@ -744,7 +844,7 @@ mod tests {
#[test]
fn test_process_votes2() {
// Create some voters at genesis
let (vote_tracker, _, validator_voting_keypairs) = setup();
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
// Send some votes to process
let (votes_sender, votes_receiver) = unbounded();
@@ -769,7 +869,14 @@ mod tests {
}
// Check that all the votes were registered for each validator correctly
-        ClusterInfoVoteListener::get_and_process_votes(&votes_receiver, &vote_tracker, 0).unwrap();
+        ClusterInfoVoteListener::get_and_process_votes(
+            &votes_receiver,
+            &vote_tracker,
+            0,
+            subscriptions,
+            None,
+        )
+        .unwrap();
for (i, keyset) in validator_voting_keypairs.chunks(2).enumerate() {
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(i as u64 + 1).unwrap();
let r_slot_vote_tracker = &slot_vote_tracker.read().unwrap();
@@ -788,7 +895,7 @@ mod tests {
#[test]
fn test_get_voters_by_epoch() {
// Create some voters at genesis
let (vote_tracker, bank, validator_voting_keypairs) = setup();
let (vote_tracker, bank, validator_voting_keypairs, _) = setup();
let last_known_epoch = bank.get_leader_schedule_epoch(bank.slot());
let last_known_slot = bank
.epoch_schedule()
@@ -859,11 +966,23 @@ mod tests {
100,
);
let bank = Bank::new(&genesis_config);
let exit = Arc::new(AtomicBool::new(false));
let bank_forks = BankForks::new(0, bank);
let bank = bank_forks.get(0).unwrap().clone();
let vote_tracker = VoteTracker::new(&bank);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(bank_forks)),
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore.clone(),
))),
));
// Send a vote to process, should add a reference to the pubkey for that voter
// in the tracker
let validator0_keypairs = &validator_voting_keypairs[0];
let vote_tracker = VoteTracker::new(&bank);
let vote_tx = vec![vote_transaction::new_vote_transaction(
// Must vote > root to be processed
vec![bank.slot() + 1],
@@ -874,7 +993,13 @@ mod tests {
&validator0_keypairs.vote_keypair,
)];
-        ClusterInfoVoteListener::process_votes(&vote_tracker, vote_tx, 0);
+        ClusterInfoVoteListener::process_votes(
+            &vote_tracker,
+            vote_tx,
+            0,
+            subscriptions.clone(),
+            None,
+        );
let ref_count = Arc::strong_count(
&vote_tracker
.keys
@@ -924,7 +1049,7 @@ mod tests {
})
.collect();
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0);
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None);
let ref_count = Arc::strong_count(
&vote_tracker
@@ -938,7 +1063,12 @@ mod tests {
assert_eq!(ref_count, current_ref_count);
}
-    fn setup() -> (Arc<VoteTracker>, Arc<Bank>, Vec<ValidatorVoteKeypairs>) {
+    fn setup() -> (
+        Arc<VoteTracker>,
+        Arc<Bank>,
+        Vec<ValidatorVoteKeypairs>,
+        Arc<RpcSubscriptions>,
+    ) {
let validator_voting_keypairs: Vec<_> = (0..10)
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
.collect();
@@ -950,6 +1080,18 @@ mod tests {
);
let bank = Bank::new(&genesis_config);
let vote_tracker = VoteTracker::new(&bank);
let exit = Arc::new(AtomicBool::new(false));
let bank_forks = BankForks::new(0, bank);
let bank = bank_forks.get(0).unwrap().clone();
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(bank_forks)),
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore.clone(),
))),
));
// Integrity Checks
let current_epoch = bank.epoch();
@@ -976,8 +1118,9 @@ mod tests {
assert_eq!(*vote_tracker.current_epoch.read().unwrap(), current_epoch);
(
Arc::new(vote_tracker),
Arc::new(bank),
bank,
validator_voting_keypairs,
subscriptions,
)
}

@@ -1,7 +1,7 @@
use crate::consensus::VOTE_THRESHOLD_SIZE;
use crate::{consensus::VOTE_THRESHOLD_SIZE, rpc_subscriptions::RpcSubscriptions};
use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_info;
use solana_metrics::datapoint_info;
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
@@ -14,6 +14,14 @@ use std::{
time::Duration,
};
#[derive(Default)]
pub struct CacheSlotInfo {
pub current_slot: Slot,
pub node_root: Slot,
pub largest_confirmed_root: Slot,
pub highest_confirmed_slot: Slot,
}
pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
@@ -53,6 +61,7 @@ pub struct BlockCommitmentCache {
bank: Arc<Bank>,
blockstore: Arc<Blockstore>,
root: Slot,
highest_confirmed_slot: Slot,
}
impl std::fmt::Debug for BlockCommitmentCache {
@@ -77,6 +86,7 @@ impl BlockCommitmentCache {
bank: Arc<Bank>,
blockstore: Arc<Blockstore>,
root: Slot,
highest_confirmed_slot: Slot,
) -> Self {
Self {
block_commitment,
@@ -85,6 +95,7 @@ impl BlockCommitmentCache {
bank,
blockstore,
root,
highest_confirmed_slot,
}
}
@@ -96,6 +107,7 @@ impl BlockCommitmentCache {
bank: Arc::new(Bank::default()),
blockstore,
root: Slot::default(),
highest_confirmed_slot: Slot::default(),
}
}
@@ -123,6 +135,26 @@ impl BlockCommitmentCache {
self.root
}
pub fn highest_confirmed_slot(&self) -> Slot {
self.highest_confirmed_slot
}
fn highest_slot_with_confirmation_count(&self, confirmation_count: usize) -> Slot {
assert!(confirmation_count > 0 && confirmation_count <= MAX_LOCKOUT_HISTORY);
for slot in (self.root()..self.slot()).rev() {
if let Some(count) = self.get_confirmation_count(slot) {
if count >= confirmation_count {
return slot;
}
}
}
self.root
}
fn calculate_highest_confirmed_slot(&self) -> Slot {
self.highest_slot_with_confirmation_count(1)
}
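
highest_slot_with_confirmation_count scans backwards from the bank slot toward the root and returns the first slot whose confirmation count is high enough, falling back to the root. The same scan over a plain map, as an illustrative sketch (not the crate's API).

use std::collections::HashMap;

// Walk from the newest slot back toward the root; the first slot meeting the
// requested confirmation count wins, otherwise fall back to the root.
fn highest_slot_with_confirmations(
    counts: &HashMap<u64, usize>,
    root: u64,
    tip: u64,
    wanted: usize,
) -> u64 {
    for slot in (root..tip).rev() {
        if let Some(&count) = counts.get(&slot) {
            if count >= wanted {
                return slot;
            }
        }
    }
    root
}

fn main() {
    // Slot 1 has 2 confirmations, slot 2 has 1, slot 3 has 0 (cf. the test further below).
    let counts: HashMap<u64, usize> = vec![(1, 2), (2, 1), (3, 0)].into_iter().collect();
    assert_eq!(highest_slot_with_confirmations(&counts, 0, 5, 1), 2);
}
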
pub fn get_confirmation_count(&self, slot: Slot) -> Option<usize> {
self.get_lockout_count(slot, VOTE_THRESHOLD_SIZE)
}
@@ -159,11 +191,30 @@ impl BlockCommitmentCache {
largest_confirmed_root: Slot::default(),
bank: Arc::new(Bank::default()),
root: Slot::default(),
highest_confirmed_slot: Slot::default(),
}
}
#[cfg(test)]
pub(crate) fn set_get_largest_confirmed_root(&mut self, root: Slot) {
pub fn new_for_tests_with_blockstore_bank(
blockstore: Arc<Blockstore>,
bank: Arc<Bank>,
root: Slot,
) -> Self {
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
block_commitment.insert(0, BlockCommitment::default());
Self {
block_commitment,
blockstore,
total_stake: 42,
largest_confirmed_root: root,
bank,
root,
highest_confirmed_slot: root,
}
}
pub(crate) fn set_largest_confirmed_root(&mut self, root: Slot) {
self.largest_confirmed_root = root;
}
}
@@ -204,6 +255,7 @@ impl AggregateCommitmentService {
pub fn new(
exit: &Arc<AtomicBool>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
subscriptions: Arc<RpcSubscriptions>,
) -> (Sender<CommitmentAggregationData>, Self) {
let (sender, receiver): (
Sender<CommitmentAggregationData>,
@@ -221,7 +273,7 @@ impl AggregateCommitmentService {
}
if let Err(RecvTimeoutError::Disconnected) =
Self::run(&receiver, &block_commitment_cache, &exit_)
Self::run(&receiver, &block_commitment_cache, &subscriptions, &exit_)
{
break;
}
@@ -234,6 +286,7 @@ impl AggregateCommitmentService {
fn run(
receiver: &Receiver<CommitmentAggregationData>,
block_commitment_cache: &RwLock<BlockCommitmentCache>,
subscriptions: &Arc<RpcSubscriptions>,
exit: &Arc<AtomicBool>,
) -> Result<(), RecvTimeoutError> {
loop {
@@ -266,16 +319,30 @@ impl AggregateCommitmentService {
aggregation_data.bank,
block_commitment_cache.read().unwrap().blockstore.clone(),
aggregation_data.root,
aggregation_data.root,
);
new_block_commitment.highest_confirmed_slot =
new_block_commitment.calculate_highest_confirmed_slot();
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
aggregate_commitment_time.stop();
-            inc_new_counter_info!(
-                "aggregate-commitment-ms",
-                aggregate_commitment_time.as_ms() as usize
+            datapoint_info!(
+                "block-commitment-cache",
+                (
+                    "aggregate-commitment-ms",
+                    aggregate_commitment_time.as_ms() as i64,
+                    i64
+                )
);
subscriptions.notify_subscribers(CacheSlotInfo {
current_slot: w_block_commitment_cache.slot(),
node_root: w_block_commitment_cache.root(),
largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
});
}
}
@@ -365,7 +432,7 @@ mod tests {
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::{genesis_config::GenesisConfig, pubkey::Pubkey};
use solana_stake_program::stake_state;
use solana_vote_program::vote_state::{self, VoteStateVersions};
@@ -402,7 +469,7 @@ mod tests {
block_commitment.entry(1).or_insert(cache1.clone());
block_commitment.entry(2).or_insert(cache2.clone());
let block_commitment_cache =
BlockCommitmentCache::new(block_commitment, 0, 50, bank, blockstore, 0);
BlockCommitmentCache::new(block_commitment, 0, 50, bank, blockstore, 0, 0);
assert_eq!(block_commitment_cache.get_confirmation_count(0), Some(2));
assert_eq!(block_commitment_cache.get_confirmation_count(1), Some(1));
@@ -436,6 +503,7 @@ mod tests {
bank,
blockstore,
0,
0,
);
assert!(block_commitment_cache.is_confirmed_rooted(0));
@@ -459,6 +527,114 @@ mod tests {
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 1);
}
#[test]
fn test_highest_confirmed_slot() {
let bank = Arc::new(Bank::new(&GenesisConfig::default()));
let bank_slot_5 = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 5));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let total_stake = 50;
// Build cache with confirmation_count 2 given total_stake
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 5);
cache0.increase_confirmation_stake(2, 40);
// Build cache with confirmation_count 1 given total_stake
let mut cache1 = BlockCommitment::default();
cache1.increase_confirmation_stake(1, 40);
cache1.increase_confirmation_stake(2, 5);
// Build cache with confirmation_count 0 given total_stake
let mut cache2 = BlockCommitment::default();
cache2.increase_confirmation_stake(1, 20);
cache2.increase_confirmation_stake(2, 5);
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache0.clone()); // Slot 1, conf 2
block_commitment.entry(2).or_insert(cache1.clone()); // Slot 2, conf 1
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 2);
// Build map with multiple slots at conf 1
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache1.clone()); // Slot 1, conf 1
block_commitment.entry(2).or_insert(cache1.clone()); // Slot 2, conf 1
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 2);
// Build map with slot gaps
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache1.clone()); // Slot 1, conf 1
block_commitment.entry(3).or_insert(cache1.clone()); // Slot 3, conf 1
block_commitment.entry(5).or_insert(cache2.clone()); // Slot 5, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 3);
// Build map with no conf 1 slots, but one higher
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache0.clone()); // Slot 1, conf 2
block_commitment.entry(2).or_insert(cache2.clone()); // Slot 2, conf 0
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 1);
// Build map with no conf 1 or higher slots
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache2.clone()); // Slot 1, conf 0
block_commitment.entry(2).or_insert(cache2.clone()); // Slot 2, conf 0
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 0);
}
#[test]
fn test_aggregate_commitment_for_vote_account_1() {
let ancestors = vec![3, 4, 5, 7, 9, 11];

@@ -20,6 +20,7 @@ pub const CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS: usize = 500;
pub struct CrdsGossip {
pub crds: Crds,
pub id: Pubkey,
pub shred_version: u16,
pub push: CrdsGossipPush,
pub pull: CrdsGossipPull,
}
@@ -29,6 +30,7 @@ impl Default for CrdsGossip {
CrdsGossip {
crds: Crds::default(),
id: Pubkey::default(),
shred_version: 0,
push: CrdsGossipPush::default(),
pull: CrdsGossipPull::default(),
}
@@ -39,6 +41,9 @@ impl CrdsGossip {
pub fn set_self(&mut self, id: &Pubkey) {
self.id = *id;
}
pub fn set_shred_version(&mut self, shred_version: u16) {
self.shred_version = shred_version;
}
/// process a push message to the network
pub fn process_push_message(
@@ -122,6 +127,7 @@ impl CrdsGossip {
&self.crds,
stakes,
&self.id,
self.shred_version,
self.pull.pull_request_time.len(),
CRDS_GOSSIP_NUM_ACTIVE,
)
@@ -134,8 +140,14 @@ impl CrdsGossip {
stakes: &HashMap<Pubkey, u64>,
bloom_size: usize,
) -> Result<(Pubkey, Vec<CrdsFilter>, CrdsValue), CrdsGossipError> {
-        self.pull
-            .new_pull_request(&self.crds, &self.id, now, stakes, bloom_size)
+        self.pull.new_pull_request(
+            &self.crds,
+            &self.id,
+            self.shred_version,
+            now,
+            stakes,
+            bloom_size,
+        )
}
/// time when a request to `from` was initiated
@@ -161,7 +173,7 @@ impl CrdsGossip {
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
) -> (usize, usize) {
self.pull
.process_pull_response(&mut self.crds, from, timeouts, response, now)
}

@@ -144,11 +144,12 @@ impl CrdsGossipPull {
&self,
crds: &Crds,
self_id: &Pubkey,
self_shred_version: u16,
now: u64,
stakes: &HashMap<Pubkey, u64>,
bloom_size: usize,
) -> Result<(Pubkey, Vec<CrdsFilter>, CrdsValue), CrdsGossipError> {
let options = self.pull_options(crds, &self_id, now, stakes);
let options = self.pull_options(crds, &self_id, self_shred_version, now, stakes);
if options.is_empty() {
return Err(CrdsGossipError::NoPeers);
}
@@ -165,13 +166,20 @@ impl CrdsGossipPull {
&self,
crds: &'a Crds,
self_id: &Pubkey,
self_shred_version: u16,
now: u64,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<(f32, &'a ContactInfo)> {
crds.table
.values()
.filter_map(|v| v.value.contact_info())
-            .filter(|v| v.id != *self_id && ContactInfo::is_valid_address(&v.gossip))
+            .filter(|v| {
+                v.id != *self_id
+                    && ContactInfo::is_valid_address(&v.gossip)
+                    && (self_shred_version == 0
+                        || v.shred_version == 0
+                        || self_shred_version == v.shred_version)
+            })
.map(|item| {
let max_weight = f32::from(u16::max_value()) - 1.0;
let req_time: u64 = *self.pull_request_time.get(&item.id).unwrap_or(&0);
@@ -223,8 +231,9 @@ impl CrdsGossipPull {
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
) -> (usize, usize) {
let mut failed = 0;
let mut timeout_count = 0;
for r in response {
let owner = r.label().pubkey();
// Check if the crds value is older than the msg_timeout
@@ -244,10 +253,7 @@ impl CrdsGossipPull {
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|| now + timeout < r.wallclock()
{
-                    inc_new_counter_warn!(
-                        "cluster_info-gossip_pull_response_value_timeout",
-                        1
-                    );
+                    timeout_count += 1;
failed += 1;
continue;
}
@@ -256,10 +262,7 @@ impl CrdsGossipPull {
// Before discarding this value, check if a ContactInfo for the owner
// exists in the table. If it doesn't, that implies that this value can be discarded
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
-                    inc_new_counter_warn!(
-                        "cluster_info-gossip_pull_response_value_timeout",
-                        1
-                    );
+                    timeout_count += 1;
failed += 1;
continue;
} else {
@@ -281,7 +284,7 @@ impl CrdsGossipPull {
});
}
crds.update_record_timestamp(from, now);
failed
(failed, timeout_count)
}
// build a set of filters of the current crds table
    // num_filters - used to increase the likelihood of a value in crds being added to some filter
@@ -402,7 +405,7 @@ mod test {
stakes.insert(id, i * 100);
}
let now = 1024;
let mut options = node.pull_options(&crds, &me.label().pubkey(), now, &stakes);
let mut options = node.pull_options(&crds, &me.label().pubkey(), 0, now, &stakes);
assert!(!options.is_empty());
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
// check that the highest stake holder is also the heaviest weighted.
@@ -412,6 +415,66 @@ mod test {
);
}
#[test]
fn test_no_pulls_from_different_shred_versions() {
let mut crds = Crds::default();
let stakes = HashMap::new();
let node = CrdsGossipPull::default();
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 123,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 0,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 123,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 456,
gossip: gossip.clone(),
..ContactInfo::default()
}));
crds.insert(me.clone(), 0).unwrap();
crds.insert(spy.clone(), 0).unwrap();
crds.insert(node_123.clone(), 0).unwrap();
crds.insert(node_456.clone(), 0).unwrap();
// shred version 123 should ignore 456 nodes
let options = node
.pull_options(&crds, &me.label().pubkey(), 123, 0, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 2);
assert!(options.contains(&spy.pubkey()));
assert!(options.contains(&node_123.pubkey()));
// spy nodes will see all
let options = node
.pull_options(&crds, &spy.label().pubkey(), 0, 0, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 3);
assert!(options.contains(&me.pubkey()));
assert!(options.contains(&node_123.pubkey()));
assert!(options.contains(&node_456.pubkey()));
}
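
Both pull_options and push_options now apply the same shred-version rule, exercised by this test and the matching push test below. Factored into a standalone predicate purely for illustration.

// A shred version of 0 means "unknown/spy" and is allowed to talk to anyone.
fn shred_versions_compatible(mine: u16, theirs: u16) -> bool {
    mine == 0 || theirs == 0 || mine == theirs
}

fn main() {
    assert!(shred_versions_compatible(123, 123));
    assert!(shred_versions_compatible(123, 0)); // spy nodes are visible to everyone
    assert!(shred_versions_compatible(0, 456)); // and spies see every shred version
    assert!(!shred_versions_compatible(123, 456));
}
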
#[test]
fn test_new_pull_request() {
let mut crds = Crds::default();
@@ -422,13 +485,13 @@ mod test {
let id = entry.label().pubkey();
let node = CrdsGossipPull::default();
assert_eq!(
node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE),
node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE),
Err(CrdsGossipError::NoPeers)
);
crds.insert(entry.clone(), 0).unwrap();
assert_eq!(
node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE),
node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE),
Err(CrdsGossipError::NoPeers)
);
@@ -437,7 +500,7 @@ mod test {
0,
)));
crds.insert(new.clone(), 0).unwrap();
let req = node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE);
let req = node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE);
let (to, _, self_info) = req.unwrap();
assert_eq!(to, new.label().pubkey());
assert_eq!(self_info, entry);
@@ -472,6 +535,7 @@ mod test {
let req = node.new_pull_request(
&crds,
&node_pubkey,
0,
u64::max_value(),
&HashMap::new(),
PACKET_DATA_SIZE,
@@ -501,6 +565,7 @@ mod test {
&node_crds,
&node_pubkey,
0,
0,
&HashMap::new(),
PACKET_DATA_SIZE,
);
@@ -573,6 +638,7 @@ mod test {
&node_crds,
&node_pubkey,
0,
0,
&HashMap::new(),
PACKET_DATA_SIZE,
);
@@ -589,13 +655,15 @@ mod test {
continue;
}
assert_eq!(rsp.len(), 1);
-        let failed = node.process_pull_response(
-            &mut node_crds,
-            &node_pubkey,
-            &node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
-            rsp.pop().unwrap(),
-            1,
-        );
+        let failed = node
+            .process_pull_response(
+                &mut node_crds,
+                &node_pubkey,
+                &node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
+                rsp.pop().unwrap(),
+                1,
+            )
+            .0;
assert_eq!(failed, 0);
assert_eq!(
node_crds
@@ -756,7 +824,8 @@ mod test {
&timeouts,
vec![peer_entry.clone()],
1,
),
)
.0,
0
);
@@ -772,7 +841,8 @@ mod test {
&timeouts,
vec![peer_entry.clone(), unstaked_peer_entry],
node.msg_timeout + 100,
),
)
.0,
2
);
@@ -785,7 +855,8 @@ mod test {
&timeouts,
vec![peer_entry.clone()],
node.msg_timeout + 1,
),
)
.0,
0
);
@@ -801,7 +872,8 @@ mod test {
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
)
.0,
0
);
@@ -814,7 +886,8 @@ mod test {
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
)
.0,
1
);
}

@@ -236,13 +236,14 @@ impl CrdsGossipPush {
crds: &Crds,
stakes: &HashMap<Pubkey, u64>,
self_id: &Pubkey,
self_shred_version: u16,
network_size: usize,
ratio: usize,
) {
let need = Self::compute_need(self.num_active, self.active_set.len(), ratio);
let mut new_items = HashMap::new();
let options: Vec<_> = self.push_options(crds, &self_id, stakes);
let options: Vec<_> = self.push_options(crds, &self_id, self_shred_version, stakes);
if options.is_empty() {
return;
}
@@ -288,13 +289,20 @@ impl CrdsGossipPush {
&self,
crds: &'a Crds,
self_id: &Pubkey,
self_shred_version: u16,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<(f32, &'a ContactInfo)> {
crds.table
.values()
.filter(|v| v.value.contact_info().is_some())
.map(|v| (v.value.contact_info().unwrap(), v))
-            .filter(|(info, _)| info.id != *self_id && ContactInfo::is_valid_address(&info.gossip))
+            .filter(|(info, _)| {
+                info.id != *self_id
+                    && ContactInfo::is_valid_address(&info.gossip)
+                    && (self_shred_version == 0
+                        || info.shred_version == 0
+                        || self_shred_version == info.shred_version)
+            })
.map(|(info, value)| {
let max_weight = f32::from(u16::max_value()) - 1.0;
let last_updated: u64 = value.local_timestamp;
@@ -510,7 +518,7 @@ mod test {
)));
assert_eq!(crds.insert(value1.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
assert!(push.active_set.get(&value1.label().pubkey()).is_some());
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@@ -520,7 +528,7 @@ mod test {
assert!(push.active_set.get(&value2.label().pubkey()).is_none());
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
for _ in 0..30 {
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
if push.active_set.get(&value2.label().pubkey()).is_some() {
break;
}
@@ -533,7 +541,7 @@ mod test {
));
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
}
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
assert_eq!(push.active_set.len(), push.num_active);
}
#[test]
@@ -551,7 +559,7 @@ mod test {
crds.insert(peer.clone(), time).unwrap();
stakes.insert(id, i * 100);
}
let mut options = push.push_options(&crds, &Pubkey::default(), &stakes);
let mut options = push.push_options(&crds, &Pubkey::default(), 0, &stakes);
assert!(!options.is_empty());
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
// check that the highest stake holder is also the heaviest weighted.
@@ -560,6 +568,66 @@ mod test {
10_000_u64
);
}
#[test]
fn test_no_pushes_to_from_different_shred_versions() {
let mut crds = Crds::default();
let stakes = HashMap::new();
let node = CrdsGossipPush::default();
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 123,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 0,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 123,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 456,
gossip: gossip.clone(),
..ContactInfo::default()
}));
crds.insert(me.clone(), 0).unwrap();
crds.insert(spy.clone(), 0).unwrap();
crds.insert(node_123.clone(), 0).unwrap();
crds.insert(node_456.clone(), 0).unwrap();
// shred version 123 should ignore 456 nodes
let options = node
.push_options(&crds, &me.label().pubkey(), 123, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 2);
assert!(options.contains(&spy.pubkey()));
assert!(options.contains(&node_123.pubkey()));
// spy nodes will see all
let options = node
.push_options(&crds, &spy.label().pubkey(), 0, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 3);
assert!(options.contains(&me.pubkey()));
assert!(options.contains(&node_123.pubkey()));
assert!(options.contains(&node_456.pubkey()));
}
#[test]
fn test_new_push_messages() {
let mut crds = Crds::default();
@@ -569,7 +637,7 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
@@ -606,7 +674,7 @@ mod test {
push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), 0),
Ok(None)
);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
// push 3's contact info to 1 and 2 and 3
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@@ -628,7 +696,7 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
@@ -651,7 +719,7 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 1;

@@ -75,6 +75,7 @@ pub enum CrdsData {
SnapshotHashes(SnapshotHash),
AccountsHashes(SnapshotHash),
EpochSlots(EpochSlotsIndex, EpochSlots),
Version(Version),
}
impl Sanitize for CrdsData {
@@ -101,6 +102,7 @@ impl Sanitize for CrdsData {
}
val.sanitize()
}
CrdsData::Version(version) => version.sanitize(),
}
}
}
@@ -206,6 +208,33 @@ impl Vote {
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Version {
pub from: Pubkey,
pub wallclock: u64,
pub version: solana_version::Version,
}
impl Sanitize for Version {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
self.from.sanitize()?;
self.version.sanitize()
}
}
impl Version {
pub fn new(from: Pubkey) -> Self {
Self {
from,
wallclock: timestamp(),
version: solana_version::Version::default(),
}
}
}
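
Version values are sanitized like other CRDS data: reject an out-of-range wallclock, then sanitize the embedded fields. A reduced sketch of that wallclock bound check; MAX_WALLCLOCK below is a placeholder value, not the crate's constant.

// Assumption: MAX_WALLCLOCK stands in for the crate's actual bound.
const MAX_WALLCLOCK: u64 = 1_000_000_000_000_000;

#[derive(Debug, PartialEq)]
enum SanitizeError {
    ValueOutOfBounds,
}

fn sanitize_wallclock(wallclock: u64) -> Result<(), SanitizeError> {
    if wallclock >= MAX_WALLCLOCK {
        return Err(SanitizeError::ValueOutOfBounds);
    }
    Ok(())
}

fn main() {
    assert_eq!(sanitize_wallclock(42), Ok(()));
    assert_eq!(sanitize_wallclock(u64::MAX), Err(SanitizeError::ValueOutOfBounds));
}
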
/// Type of the replicated value
/// These are labels for values in a record that is associated with `Pubkey`
#[derive(PartialEq, Hash, Eq, Clone, Debug)]
@@ -216,6 +245,7 @@ pub enum CrdsValueLabel {
SnapshotHashes(Pubkey),
EpochSlots(EpochSlotsIndex, Pubkey),
AccountsHashes(Pubkey),
Version(Pubkey),
}
impl fmt::Display for CrdsValueLabel {
@@ -227,6 +257,7 @@ impl fmt::Display for CrdsValueLabel {
CrdsValueLabel::SnapshotHashes(_) => write!(f, "SnapshotHash({})", self.pubkey()),
CrdsValueLabel::EpochSlots(ix, _) => write!(f, "EpochSlots({}, {})", ix, self.pubkey()),
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
CrdsValueLabel::Version(_) => write!(f, "Version({})", self.pubkey()),
}
}
}
@@ -240,6 +271,7 @@ impl CrdsValueLabel {
CrdsValueLabel::SnapshotHashes(p) => *p,
CrdsValueLabel::EpochSlots(_, p) => *p,
CrdsValueLabel::AccountsHashes(p) => *p,
CrdsValueLabel::Version(p) => *p,
}
}
}
@@ -257,7 +289,7 @@ impl CrdsValue {
value.sign(keypair);
value
}
-    /// Totally unsecure unverfiable wallclock of the node that generated this message
+    /// Totally unsecure unverifiable wallclock of the node that generated this message
/// Latest wallclock is always picked.
/// This is used to time out push messages.
pub fn wallclock(&self) -> u64 {
@@ -268,6 +300,7 @@ impl CrdsValue {
CrdsData::SnapshotHashes(hash) => hash.wallclock,
CrdsData::AccountsHashes(hash) => hash.wallclock,
CrdsData::EpochSlots(_, p) => p.wallclock,
CrdsData::Version(version) => version.wallclock,
}
}
pub fn pubkey(&self) -> Pubkey {
@@ -278,6 +311,7 @@ impl CrdsValue {
CrdsData::SnapshotHashes(hash) => hash.from,
CrdsData::AccountsHashes(hash) => hash.from,
CrdsData::EpochSlots(_, p) => p.from,
CrdsData::Version(version) => version.from,
}
}
pub fn label(&self) -> CrdsValueLabel {
@@ -288,6 +322,7 @@ impl CrdsValue {
CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()),
CrdsData::AccountsHashes(_) => CrdsValueLabel::AccountsHashes(self.pubkey()),
CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
CrdsData::Version(_) => CrdsValueLabel::Version(self.pubkey()),
}
}
pub fn contact_info(&self) -> Option<&ContactInfo> {
@@ -338,6 +373,13 @@ impl CrdsValue {
}
}
pub fn version(&self) -> Option<&Version> {
match &self.data {
CrdsData::Version(version) => Some(version),
_ => None,
}
}
/// Return all the possible labels for a record identified by Pubkey.
pub fn record_labels(key: &Pubkey) -> Vec<CrdsValueLabel> {
let mut labels = vec![
@@ -345,6 +387,7 @@ impl CrdsValue {
CrdsValueLabel::LowestSlot(*key),
CrdsValueLabel::SnapshotHashes(*key),
CrdsValueLabel::AccountsHashes(*key),
CrdsValueLabel::Version(*key),
];
labels.extend((0..MAX_VOTES).map(|ix| CrdsValueLabel::Vote(ix, *key)));
labels.extend((0..MAX_EPOCH_SLOTS).map(|ix| CrdsValueLabel::EpochSlots(ix, *key)));
@@ -395,7 +438,7 @@ mod test {
#[test]
fn test_labels() {
let mut hits = [false; 4 + MAX_VOTES as usize + MAX_EPOCH_SLOTS as usize];
let mut hits = [false; 5 + MAX_VOTES as usize + MAX_EPOCH_SLOTS as usize];
// this method should cover all the possible labels
for v in &CrdsValue::record_labels(&Pubkey::default()) {
match v {
@@ -403,9 +446,10 @@ mod test {
CrdsValueLabel::LowestSlot(_) => hits[1] = true,
CrdsValueLabel::SnapshotHashes(_) => hits[2] = true,
CrdsValueLabel::AccountsHashes(_) => hits[3] = true,
-                CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 4] = true,
+                CrdsValueLabel::Version(_) => hits[4] = true,
+                CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 5] = true,
                CrdsValueLabel::EpochSlots(ix, _) => {
-                    hits[*ix as usize + MAX_VOTES as usize + 4] = true
+                    hits[*ix as usize + MAX_VOTES as usize + 5] = true
}
}
}

@@ -75,6 +75,7 @@ pub fn discover_cluster(
None,
None,
None,
0,
)
}
@@ -85,9 +86,11 @@ pub fn discover(
find_node_by_pubkey: Option<Pubkey>,
find_node_by_gossip_addr: Option<&SocketAddr>,
my_gossip_addr: Option<&SocketAddr>,
my_shred_version: u16,
) -> std::io::Result<(Vec<ContactInfo>, Vec<ContactInfo>)> {
let exit = Arc::new(AtomicBool::new(false));
-    let (gossip_service, ip_echo, spy_ref) = make_gossip_node(entrypoint, &exit, my_gossip_addr);
+    let (gossip_service, ip_echo, spy_ref) =
+        make_gossip_node(entrypoint, &exit, my_gossip_addr, my_shred_version);
let id = spy_ref.id();
info!("Entrypoint: {:?}", entrypoint);
@@ -245,12 +248,13 @@ fn make_gossip_node(
entrypoint: Option<&SocketAddr>,
exit: &Arc<AtomicBool>,
gossip_addr: Option<&SocketAddr>,
shred_version: u16,
) -> (GossipService, Option<TcpListener>, Arc<ClusterInfo>) {
let keypair = Arc::new(Keypair::new());
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr)
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr, shred_version)
} else {
ClusterInfo::spy_node(&keypair.pubkey())
ClusterInfo::spy_node(&keypair.pubkey(), shred_version)
};
let cluster_info = ClusterInfo::new(node, keypair);
if let Some(entrypoint) = entrypoint {

@@ -3,7 +3,6 @@
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
@@ -30,9 +29,8 @@ pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
-// Remove a limited number of slots at a time, so the operation
-// does not take too long and block other blockstore users.
-pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
+// Delay between purges to cooperate with other blockstore users
+pub const DEFAULT_DELAY_BETWEEN_PURGES: Duration = Duration::from_millis(500);
pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
@@ -63,6 +61,7 @@ impl LedgerCleanupService {
max_ledger_slots,
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
Some(DEFAULT_DELAY_BETWEEN_PURGES),
) {
match e {
RecvTimeoutError::Disconnected => break,
@@ -78,8 +77,8 @@ impl LedgerCleanupService {
blockstore: &Arc<Blockstore>,
root: Slot,
max_ledger_shreds: u64,
-    ) -> (u64, Slot, Slot) {
-        let mut shreds = Vec::new();
+    ) -> (bool, Slot, Slot, u64) {
+        let mut total_slots = Vec::new();
let mut iterate_time = Measure::start("iterate_time");
let mut total_shreds = 0;
let mut first_slot = 0;
@@ -90,33 +89,43 @@ impl LedgerCleanupService {
}
// Not exact since non-full slots will have holes
total_shreds += meta.received;
shreds.push((slot, meta.received));
total_slots.push((slot, meta.received));
if slot > root {
break;
}
}
iterate_time.stop();
        info!(
-            "checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
-            max_ledger_shreds,
-            shreds.len(),
+            "first_slot={} total_slots={} total_shreds={} max_ledger_shreds={}, {}",
+            first_slot,
+            total_slots.len(),
            total_shreds,
+            max_ledger_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
return (0, 0, 0);
return (false, 0, 0, total_shreds);
}
-        let mut cur_shreds = 0;
-        let mut lowest_slot_to_clean = shreds[0].0;
-        for (slot, num_shreds) in shreds.iter().rev() {
-            cur_shreds += *num_shreds as u64;
-            if cur_shreds > max_ledger_shreds {
-                lowest_slot_to_clean = *slot;
+        let mut num_shreds_to_clean = 0;
+        let mut lowest_cleanup_slot = total_slots[0].0;
+        for (slot, num_shreds) in total_slots.iter().rev() {
+            num_shreds_to_clean += *num_shreds as u64;
+            if num_shreds_to_clean > max_ledger_shreds {
+                lowest_cleanup_slot = *slot;
                break;
            }
        }
-        (cur_shreds, lowest_slot_to_clean, first_slot)
+        (true, lowest_cleanup_slot, first_slot, total_shreds)
}
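
find_slots_to_clean now reports whether a purge is needed, the lowest slot to purge up to, the first slot, and the total shred count. A standalone sketch of the reverse accumulation that picks the cutoff slot; the function and names here are illustrative, not the service's API.

// slots is (slot, shreds_in_slot) in ascending slot order, as the iterator above yields.
fn lowest_cleanup_slot(slots: &[(u64, u64)], max_shreds: u64) -> Option<u64> {
    let total: u64 = slots.iter().map(|(_, n)| *n).sum();
    if total < max_shreds {
        return None; // under the cap, nothing to purge yet
    }
    let mut kept = 0;
    let mut cutoff = slots.first()?.0;
    // Count shreds from the newest slot backwards; the slot that pushes the
    // running total over the cap is the highest slot allowed to be purged.
    for (slot, num_shreds) in slots.iter().rev() {
        kept += *num_shreds;
        if kept > max_shreds {
            cutoff = *slot;
            break;
        }
    }
    Some(cutoff)
}

fn main() {
    // Cap of 5 shreds: purging slots 0..=2 keeps slots 3 and 4 (4 shreds total).
    let slots = vec![(0, 2), (1, 2), (2, 2), (3, 2), (4, 2)];
    assert_eq!(lowest_cleanup_slot(&slots, 5), Some(2));
}
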
fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
Ok(root)
}
fn cleanup_ledger(
@@ -125,68 +134,78 @@ impl LedgerCleanupService {
max_ledger_shreds: u64,
last_purge_slot: &mut u64,
purge_interval: u64,
delay_between_purges: Option<Duration>,
) -> Result<(), RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
let root = Self::receive_new_roots(new_root_receiver)?;
if root - *last_purge_slot <= purge_interval {
return Ok(());
}
if root - *last_purge_slot > purge_interval {
let disk_utilization_pre = blockstore.storage_size();
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: last_root={}, last_purge_slot={}, purge_interval={}, disk_utilization={:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
);
*last_purge_slot = root;
let (slots_to_clean, lowest_cleanup_slot, first_slot, total_shreds) =
Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);
if slots_to_clean {
info!(
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
"purging data from slots {} to {}",
first_slot, lowest_cleanup_slot
);
*last_purge_slot = root;
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
if num_shreds_to_clean > 0 {
debug!(
"cleaning up to: {} shreds: {} first: {}",
lowest_slot_to_clean, num_shreds_to_clean, first_slot
);
loop {
let current_lowest =
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
let purge_complete = Arc::new(AtomicBool::new(false));
let blockstore = blockstore.clone();
let purge_complete1 = purge_complete.clone();
let _t_purge = Builder::new()
.name("solana-ledger-purge".to_string())
.spawn(move || {
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
slot_update_time.stop();
let mut clean_time = Measure::start("ledger_clean");
blockstore.purge_slots(first_slot, Some(current_lowest));
clean_time.stop();
debug!(
"ledger purge {} -> {}: {} {}",
first_slot, current_lowest, slot_update_time, clean_time
let mut purge_time = Measure::start("purge_slots_with_delay");
blockstore.purge_slots_with_delay(
first_slot,
lowest_cleanup_slot,
delay_between_purges,
);
first_slot += DEFAULT_PURGE_BATCH_SIZE;
if current_lowest == lowest_slot_to_clean {
break;
}
thread::sleep(Duration::from_millis(500));
purge_time.stop();
info!("{}", purge_time);
purge_complete1.store(true, Ordering::Relaxed);
})
.unwrap();
// Keep pulling roots off `new_root_receiver` while purging to avoid channel buildup
while !purge_complete.load(Ordering::Relaxed) {
if let Err(err) = Self::receive_new_roots(new_root_receiver) {
debug!("receive_new_roots: {}", err);
}
thread::sleep(Duration::from_secs(1));
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post, total_shreds);
Ok(())
}
-    fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
+    fn report_disk_metrics(
+        pre: BlockstoreResult<u64>,
+        post: BlockstoreResult<u64>,
+        total_shreds: u64,
+    ) {
        if let (Ok(pre), Ok(post)) = (pre, post) {
-            datapoint_debug!(
+            datapoint_info!(
"ledger_disk_utilization",
("disk_utilization_pre", pre as i64, i64),
("disk_utilization_post", post as i64, i64),
("disk_utilization_delta", (pre as i64 - post as i64), i64)
("disk_utilization_delta", (pre as i64 - post as i64), i64),
("total_shreds", total_shreds, i64),
);
}
}
@@ -215,8 +234,15 @@ mod tests {
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
sender.send(50).unwrap();
-        LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
-            .unwrap();
+        LedgerCleanupService::cleanup_ledger(
+            &receiver,
+            &blockstore,
+            5,
+            &mut last_purge_slot,
+            10,
+            None,
+        )
+        .unwrap();
//check that 0-40 don't exist
blockstore
@@ -269,6 +295,7 @@ mod tests {
initial_slots,
&mut last_purge_slot,
10,
None,
)
.unwrap();
time.stop();
@@ -311,6 +338,7 @@ mod tests {
max_ledger_shreds,
&mut next_purge_batch,
10,
None,
)
.unwrap();


@@ -30,15 +30,18 @@ pub mod gen_keys;
pub mod gossip_service;
pub mod ledger_cleanup_service;
pub mod local_vote_signer_service;
pub mod non_circulating_supply;
pub mod poh_recorder;
pub mod poh_service;
pub mod progress_map;
pub mod repair_response;
pub mod repair_service;
pub mod replay_stage;
mod result;
pub mod retransmit_stage;
pub mod rewards_recorder_service;
pub mod rpc;
pub mod rpc_error;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;


@@ -0,0 +1,193 @@
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
use solana_stake_program::stake_state::StakeState;
use std::{collections::HashSet, sync::Arc};
pub struct NonCirculatingSupply {
pub lamports: u64,
pub accounts: Vec<Pubkey>,
}
pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSupply {
debug!("Updating Bank supply, epoch: {}", bank.epoch());
let mut non_circulating_accounts_set: HashSet<Pubkey> = HashSet::new();
for key in non_circulating_accounts() {
non_circulating_accounts_set.insert(key);
}
let withdraw_authority_list = withdraw_authority();
let clock = bank.clock();
let stake_accounts = bank.get_program_accounts(Some(&solana_stake_program::id()));
for (pubkey, account) in stake_accounts.iter() {
let stake_account = StakeState::from(&account).unwrap_or_default();
match stake_account {
StakeState::Initialized(meta) => {
if meta.lockup.is_in_force(&clock, &HashSet::default())
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
{
non_circulating_accounts_set.insert(*pubkey);
}
}
StakeState::Stake(meta, _stake) => {
if meta.lockup.is_in_force(&clock, &HashSet::default())
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
{
non_circulating_accounts_set.insert(*pubkey);
}
}
_ => {}
}
}
let lamports = non_circulating_accounts_set
.iter()
.fold(0, |acc, pubkey| acc + bank.get_balance(&pubkey));
NonCirculatingSupply {
lamports,
accounts: non_circulating_accounts_set.into_iter().collect(),
}
}
// Mainnet-beta accounts that should be considered non-circulating
solana_sdk::pubkeys!(
non_circulating_accounts,
[
"9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA",
"GK2zqSsXLA2rwVZk347RYhh6jJpRsCA69FjLW93ZGi3B",
"HCV5dGFJXRrJ3jhDYA4DCeb9TEDTwGGYXtT3wHksu2Zr",
"25odAafVXnd63L6Hq5Cx6xGmhKqkhE2y6UrLVuqUfWZj",
"14FUT96s9swbmH7ZjpDvfEDywnAYy9zaNhv4xvezySGu",
"HbZ5FfmKWNHC7uwk6TF1hVi6TCs7dtYfdjEcuPGgzFAg",
"C7C8odR8oashR5Feyrq2tJKaXL18id1dSj2zbkDGL2C2",
"Eyr9P5XsjK2NUKNCnfu39eqpGoiLFgVAv1LSQgMZCwiQ",
"DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ",
"CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S",
"7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2",
"GdnSyH3YtwcxFvQrVVJMm1JhTS4QVX7MFsX56uJLUfiZ",
"Mc5XB47H3DKJHym5RLa9mPzWv5snERsF3KNv5AauXK8",
"7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri",
"AG3m2bAibcY8raMt4oXEGqRHwX4FWKPPJVjZxn1LySDX",
"5XdtyEDREHJXXW1CTtCsVjJRjBapAwK78ZquzvnNVRrV",
"6yKHERk8rsbmJxvMpPuwPs1ct3hRiP7xaJF2tvnGU6nK",
"CHmdL15akDcJgBkY6BP3hzs98Dqr6wbdDC5p8odvtSbq",
"FR84wZQy3Y3j2gWz6pgETUiUoJtreMEuWfbg6573UCj9",
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
"3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
]
);
// Withdraw authority for autostaked accounts on mainnet-beta
solana_sdk::pubkeys!(
withdraw_authority,
[
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
"3FFaheyqtyAXZSYxDzsr5CVKvJuvZD1WE1VEsBtDbRqB",
]
);
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::{
account::Account, epoch_schedule::EpochSchedule, genesis_config::GenesisConfig,
};
use solana_stake_program::stake_state::{Authorized, Lockup, Meta, StakeState};
use std::{collections::BTreeMap, sync::Arc};
fn new_from_parent(parent: &Arc<Bank>) -> Bank {
Bank::new_from_parent(parent, &Pubkey::default(), parent.slot() + 1)
}
#[test]
fn test_calculate_non_circulating_supply() {
let mut accounts: BTreeMap<Pubkey, Account> = BTreeMap::new();
let balance = 10;
let num_genesis_accounts = 10;
for _ in 0..num_genesis_accounts {
accounts.insert(
Pubkey::new_rand(),
Account::new(balance, 0, &Pubkey::default()),
);
}
let non_circulating_accounts = non_circulating_accounts();
let num_non_circulating_accounts = non_circulating_accounts.len() as u64;
for key in non_circulating_accounts.clone() {
accounts.insert(key, Account::new(balance, 0, &Pubkey::default()));
}
let num_stake_accounts = 3;
for _ in 0..num_stake_accounts {
let pubkey = Pubkey::new_rand();
let meta = Meta {
authorized: Authorized::auto(&pubkey),
lockup: Lockup {
epoch: 1,
..Lockup::default()
},
..Meta::default()
};
let stake_account = Account::new_data_with_space(
balance,
&StakeState::Initialized(meta),
std::mem::size_of::<StakeState>(),
&solana_stake_program::id(),
)
.unwrap();
accounts.insert(pubkey, stake_account);
}
let slots_per_epoch = 32;
let genesis_config = GenesisConfig {
accounts,
epoch_schedule: EpochSchedule::new(slots_per_epoch),
..GenesisConfig::default()
};
let mut bank = Arc::new(Bank::new(&genesis_config));
assert_eq!(
bank.capitalization(),
(num_genesis_accounts + num_non_circulating_accounts + num_stake_accounts) * balance
);
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
(num_non_circulating_accounts + num_stake_accounts) * balance
);
assert_eq!(
non_circulating_supply.accounts.len(),
num_non_circulating_accounts as usize + num_stake_accounts as usize
);
bank = Arc::new(new_from_parent(&bank));
let new_balance = 11;
for key in non_circulating_accounts {
bank.store_account(&key, &Account::new(new_balance, 0, &Pubkey::default()));
}
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
(num_non_circulating_accounts * new_balance) + (num_stake_accounts * balance)
);
assert_eq!(
non_circulating_supply.accounts.len(),
num_non_circulating_accounts as usize + num_stake_accounts as usize
);
// Advance bank an epoch, which should unlock stakes
for _ in 0..slots_per_epoch {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(bank.epoch(), 1);
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
num_non_circulating_accounts * new_balance
);
assert_eq!(
non_circulating_supply.accounts.len(),
num_non_circulating_accounts as usize
);
}
}
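Taken together, this new module exposes calculate_non_circulating_supply() as its single entry point; the short sketch below is illustrative only (the helper name is made up here) and shows how a caller that already holds an Arc<Bank> might derive the circulating figure from it.
// Illustrative helper, not part of the change: circulating = capitalization - non-circulating.
fn circulating_lamports(bank: &Arc<Bank>) -> u64 {
    let non_circulating = calculate_non_circulating_supply(bank);
    bank.capitalization().saturating_sub(non_circulating.lamports)
}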

core/src/repair_response.rs Normal file

@@ -0,0 +1,129 @@
use solana_ledger::{
blockstore::Blockstore,
shred::{Nonce, Shred, SIZE_OF_NONCE},
};
use solana_perf::packet::limited_deserialize;
use solana_sdk::{clock::Slot, packet::Packet};
use std::{io, net::SocketAddr};
pub fn repair_response_packet(
blockstore: &Blockstore,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
nonce: Option<Nonce>,
) -> Option<Packet> {
if Shred::is_nonce_unlocked(slot) && nonce.is_none()
|| !Shred::is_nonce_unlocked(slot) && nonce.is_some()
{
return None;
}
let shred = blockstore
.get_data_shred(slot, shred_index)
.expect("Blockstore could not get data shred");
shred.map(|shred| repair_response_packet_from_shred(slot, shred, dest, nonce))
}
pub fn repair_response_packet_from_shred(
slot: Slot,
shred: Vec<u8>,
dest: &SocketAddr,
nonce: Option<Nonce>,
) -> Packet {
let size_of_nonce = {
if Shred::is_nonce_unlocked(slot) {
assert!(nonce.is_some());
SIZE_OF_NONCE
} else {
assert!(nonce.is_none());
0
}
};
let mut packet = Packet::default();
packet.meta.size = shred.len() + size_of_nonce;
packet.meta.set_addr(dest);
packet.data[..shred.len()].copy_from_slice(&shred);
let mut wr = io::Cursor::new(&mut packet.data[shred.len()..]);
if let Some(nonce) = nonce {
bincode::serialize_into(&mut wr, &nonce).expect("Buffer not large enough to fit nonce");
}
packet
}
pub fn nonce(buf: &[u8]) -> Option<Nonce> {
if buf.len() < SIZE_OF_NONCE {
None
} else {
limited_deserialize(&buf[buf.len() - SIZE_OF_NONCE..]).ok()
}
}
#[cfg(test)]
mod test {
use super::*;
use solana_ledger::{
shred::{Shred, Shredder, UNLOCK_NONCE_SLOT},
sigverify_shreds::verify_shred_cpu,
};
use solana_sdk::signature::{Keypair, Signer};
use std::{
collections::HashMap,
net::{IpAddr, Ipv4Addr},
};
fn run_test_sigverify_shred_cpu_repair(slot: Slot) {
solana_logger::setup();
let mut shred = Shred::new_from_data(
slot,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
assert_eq!(shred.slot(), slot);
let keypair = Keypair::new();
Shredder::sign_shred(&keypair, &mut shred);
trace!("signature {}", shred.common_header.signature);
let nonce = if Shred::is_nonce_unlocked(slot) {
Some(9)
} else {
None
};
let mut packet = repair_response_packet_from_shred(
slot,
shred.payload,
&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080),
nonce,
);
packet.meta.repair = true;
let leader_slots = [(slot, keypair.pubkey().to_bytes())]
.iter()
.cloned()
.collect();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, Some(1));
let wrong_keypair = Keypair::new();
let leader_slots = [(slot, wrong_keypair.pubkey().to_bytes())]
.iter()
.cloned()
.collect();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, Some(0));
let leader_slots = HashMap::new();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, None);
}
#[test]
fn test_sigverify_shred_cpu_repair() {
run_test_sigverify_shred_cpu_repair(UNLOCK_NONCE_SLOT);
run_test_sigverify_shred_cpu_repair(UNLOCK_NONCE_SLOT + 1);
}
}
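On the requesting side, nonce() above reads the trailing Nonce back out of a response packet; the sketch below is a hypothetical receiver-side helper (assuming the same Packet layout used by repair_response_packet_from_shred) that separates the shred bytes from the nonce.
// Illustrative only: split a received repair response into shred payload and nonce.
fn split_shred_and_nonce(packet: &Packet) -> (&[u8], Option<Nonce>) {
    let data = &packet.data[..packet.meta.size];
    match nonce(data) {
        // Nonce-bearing responses carry the nonce in the last SIZE_OF_NONCE bytes.
        Some(n) if data.len() > SIZE_OF_NONCE => (&data[..data.len() - SIZE_OF_NONCE], Some(n)),
        _ => (data, None),
    }
}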


@@ -14,7 +14,6 @@ use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
collections::HashMap,
iter::Iterator,
net::SocketAddr,
net::UdpSocket,
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
@@ -92,7 +91,7 @@ impl RepairService {
&blockstore,
&exit,
&repair_socket,
&cluster_info,
cluster_info,
repair_strategy,
&cluster_slots,
)
@@ -106,14 +105,14 @@ impl RepairService {
blockstore: &Blockstore,
exit: &AtomicBool,
repair_socket: &UdpSocket,
cluster_info: &Arc<ClusterInfo>,
cluster_info: Arc<ClusterInfo>,
repair_strategy: RepairStrategy,
cluster_slots: &Arc<ClusterSlots>,
) {
let serve_repair = ServeRepair::new(cluster_info.clone());
let id = cluster_info.id();
if let RepairStrategy::RepairAll { .. } = repair_strategy {
Self::initialize_lowest_slot(id, blockstore, cluster_info);
Self::initialize_lowest_slot(id, blockstore, &cluster_info);
}
let mut repair_stats = RepairStats::default();
let mut last_stats = Instant::now();
@@ -122,7 +121,7 @@ impl RepairService {
..
} = repair_strategy
{
Self::initialize_epoch_slots(blockstore, cluster_info, completed_slots_receiver);
Self::initialize_epoch_slots(blockstore, &cluster_info, completed_slots_receiver);
}
loop {
if exit.load(Ordering::Relaxed) {
@@ -149,7 +148,7 @@ impl RepairService {
let lowest_slot = blockstore.lowest_slot();
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
Self::update_completed_slots(completed_slots_receiver, &cluster_info);
cluster_slots.update(new_root, cluster_info, bank_forks);
cluster_slots.update(new_root, &cluster_info, bank_forks);
Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH)
}
}
@@ -157,27 +156,19 @@ impl RepairService {
if let Ok(repairs) = repairs {
let mut cache = HashMap::new();
let reqs: Vec<((SocketAddr, Vec<u8>), RepairType)> = repairs
.into_iter()
.filter_map(|repair_request| {
serve_repair
.repair_request(
&cluster_slots,
&repair_request,
&mut cache,
&mut repair_stats,
)
.map(|result| (result, repair_request))
.ok()
})
.collect();
for ((to, req), _) in reqs {
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
info!("{} repair req send_to({}) error {:?}", id, to, e);
0
});
}
repairs.into_iter().for_each(|repair_request| {
if let Ok((to, req)) = serve_repair.repair_request(
&cluster_slots,
repair_request,
&mut cache,
&mut repair_stats,
) {
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
info!("{} repair req send_to({}) error {:?}", id, to, e);
0
});
}
});
}
if last_stats.elapsed().as_secs() > 1 {
let repair_total = repair_stats.shred.count
@@ -504,7 +495,7 @@ mod test {
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let slots: Vec<u64> = vec![1, 3, 5, 7, 8];
let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
let num_entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
for (mut slot_shreds, _) in shreds.into_iter() {


@@ -139,8 +139,11 @@ impl ReplayStage {
let mut tower = Tower::new(&my_pubkey, &vote_account, &bank_forks.read().unwrap());
// Start the replay stage loop
let (lockouts_sender, commitment_service) =
AggregateCommitmentService::new(&exit, block_commitment_cache.clone());
let (lockouts_sender, commitment_service) = AggregateCommitmentService::new(
&exit,
block_commitment_cache.clone(),
subscriptions.clone(),
);
#[allow(clippy::cognitive_complexity)]
let t_replay = Builder::new()
@@ -297,10 +300,6 @@ impl ReplayStage {
// Vote on a fork
if let Some(ref vote_bank) = vote_bank {
subscriptions.notify_subscribers(
block_commitment_cache.read().unwrap().slot(),
&bank_forks,
);
if let Some(votable_leader) =
leader_schedule_cache.slot_leader_at(vote_bank.slot(), Some(vote_bank))
{
@@ -1994,12 +1993,6 @@ pub(crate) mod tests {
);
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore.clone(),
))),
));
let mut bank_forks = BankForks::new(0, bank0);
// Insert a non-root bank so that the propagation logic will update this
@@ -2023,7 +2016,14 @@ pub(crate) mod tests {
assert!(progress.get_propagated_stats(1).unwrap().is_leader_slot);
bank1.freeze();
bank_forks.insert(bank1);
let bank_forks = RwLock::new(bank_forks);
let bank_forks = Arc::new(RwLock::new(bank_forks));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore.clone(),
))),
));
// Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS,
// chaining to slot 1
@@ -2364,6 +2364,7 @@ pub(crate) mod tests {
ShredCommonHeader::default(),
data_header,
CodingShredHeader::default(),
PACKET_DATA_SIZE,
);
bincode::serialize_into(
&mut shred.payload[SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER..],
@@ -2442,13 +2443,6 @@ pub(crate) mod tests {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let (lockouts_sender, _) = AggregateCommitmentService::new(
&Arc::new(AtomicBool::new(false)),
block_commitment_cache.clone(),
);
let leader_pubkey = Pubkey::new_rand();
let leader_lamports = 3;
@@ -2469,6 +2463,18 @@ pub(crate) mod tests {
vec![0],
)));
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
block_commitment_cache.clone(),
));
let (lockouts_sender, _) =
AggregateCommitmentService::new(&exit, block_commitment_cache.clone(), subscriptions);
assert!(block_commitment_cache
.read()
.unwrap()


@@ -17,12 +17,15 @@ use solana_ledger::{
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_error;
use solana_perf::packet::Packets;
use solana_sdk::clock::Slot;
use solana_sdk::epoch_schedule::EpochSchedule;
use solana_sdk::timing::timestamp;
use solana_streamer::streamer::PacketReceiver;
use std::{
cmp,
collections::{BTreeMap, HashMap},
net::UdpSocket,
sync::atomic::{AtomicBool, Ordering},
sync::atomic::{AtomicBool, AtomicU64, Ordering},
sync::mpsc::channel,
sync::mpsc::RecvTimeoutError,
sync::Mutex,
@@ -35,6 +38,115 @@ use std::{
// it doesn't pull up too much work.
const MAX_PACKET_BATCH_SIZE: usize = 100;
#[derive(Default)]
struct RetransmitStats {
total_packets: AtomicU64,
total_batches: AtomicU64,
total_time: AtomicU64,
repair_total: AtomicU64,
discard_total: AtomicU64,
retransmit_total: AtomicU64,
last_ts: AtomicU64,
compute_turbine_peers_total: AtomicU64,
packets_by_slot: Mutex<BTreeMap<Slot, usize>>,
packets_by_source: Mutex<BTreeMap<String, usize>>,
}
#[allow(clippy::too_many_arguments)]
fn update_retransmit_stats(
stats: &Arc<RetransmitStats>,
total_time: u64,
total_packets: usize,
retransmit_total: u64,
discard_total: u64,
repair_total: u64,
compute_turbine_peers_total: u64,
peers_len: usize,
packets_by_slot: HashMap<Slot, usize>,
packets_by_source: HashMap<String, usize>,
) {
stats.total_time.fetch_add(total_time, Ordering::Relaxed);
stats
.total_packets
.fetch_add(total_packets as u64, Ordering::Relaxed);
stats
.retransmit_total
.fetch_add(retransmit_total, Ordering::Relaxed);
stats
.repair_total
.fetch_add(repair_total, Ordering::Relaxed);
stats
.discard_total
.fetch_add(discard_total, Ordering::Relaxed);
stats
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers_total, Ordering::Relaxed);
stats.total_batches.fetch_add(1, Ordering::Relaxed);
{
let mut stats_packets_by_slot = stats.packets_by_slot.lock().unwrap();
for (slot, count) in packets_by_slot {
*stats_packets_by_slot.entry(slot).or_insert(0) += count;
}
}
{
let mut stats_packets_by_source = stats.packets_by_source.lock().unwrap();
for (source, count) in packets_by_source {
*stats_packets_by_source.entry(source).or_insert(0) += count;
}
}
let now = timestamp();
let last = stats.last_ts.load(Ordering::Relaxed);
if now - last > 2000 && stats.last_ts.compare_and_swap(last, now, Ordering::Relaxed) == last {
datapoint_info!("retransmit-num_nodes", ("count", peers_len, i64));
datapoint_info!(
"retransmit-stage",
(
"total_time",
stats.total_time.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_batches",
stats.total_batches.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_packets",
stats.total_packets.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"retransmit_total",
stats.retransmit_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"compute_turbine",
stats.compute_turbine_peers_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"repair_total",
stats.repair_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"discard_total",
stats.discard_total.swap(0, Ordering::Relaxed) as i64,
i64
),
);
let mut packets_by_slot = stats.packets_by_slot.lock().unwrap();
info!("retransmit: packets_by_slot: {:#?}", packets_by_slot);
packets_by_slot.clear();
drop(packets_by_slot);
let mut packets_by_source = stats.packets_by_source.lock().unwrap();
info!("retransmit: packets_by_source: {:#?}", packets_by_source);
packets_by_source.clear();
}
}
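The report above is intentionally rate limited: the shared counters accumulate across all retransmit threads, and only the thread that wins the compare_and_swap on last_ts flushes them, roughly once every two seconds. A minimal sketch of that idiom (names here are illustrative):
// Sketch of the throttle used above: the CAS winner is the only reporter for this window.
fn should_report(last_ts: &AtomicU64, now: u64, window_ms: u64) -> bool {
    let last = last_ts.load(Ordering::Relaxed);
    now.saturating_sub(last) > window_ms
        && last_ts.compare_and_swap(last, now, Ordering::Relaxed) == last
}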
fn retransmit(
bank_forks: &Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
@@ -42,6 +154,7 @@ fn retransmit(
r: &Arc<Mutex<PacketReceiver>>,
sock: &UdpSocket,
id: u32,
stats: &Arc<RetransmitStats>,
) -> Result<()> {
let timer = Duration::new(1, 0);
let r_lock = r.lock().unwrap();
@@ -69,6 +182,8 @@ fn retransmit(
let mut repair_total = 0;
let mut retransmit_total = 0;
let mut compute_turbine_peers_total = 0;
let mut packets_by_slot: HashMap<Slot, usize> = HashMap::new();
let mut packets_by_source: HashMap<String, usize> = HashMap::new();
for mut packets in packet_v {
for packet in packets.packets.iter_mut() {
// skip discarded packets and repair packets
@@ -103,7 +218,12 @@ fn retransmit(
let neighbors: Vec<_> = neighbors.into_iter().map(|index| &peers[index]).collect();
let children: Vec<_> = children.into_iter().map(|index| &peers[index]).collect();
compute_turbine_peers.stop();
compute_turbine_peers_total += compute_turbine_peers.as_ms();
compute_turbine_peers_total += compute_turbine_peers.as_us();
*packets_by_slot.entry(packet.meta.slot).or_insert(0) += 1;
*packets_by_source
.entry(packet.meta.addr().to_string())
.or_insert(0) += 1;
let leader =
leader_schedule_cache.slot_leader_at(packet.meta.slot, Some(r_bank.as_ref()));
@@ -115,7 +235,7 @@ fn retransmit(
ClusterInfo::retransmit_to(&children, packet, leader, sock, true)?;
}
retransmit_time.stop();
retransmit_total += retransmit_time.as_ms();
retransmit_total += retransmit_time.as_us();
}
}
timer_start.stop();
@@ -126,16 +246,19 @@ fn retransmit(
retransmit_total,
id,
);
datapoint_debug!("cluster_info-num_nodes", ("count", peers_len, i64));
datapoint_debug!(
"retransmit-stage",
("total_time", timer_start.as_ms() as i64, i64),
("total_packets", total_packets as i64, i64),
("retransmit_total", retransmit_total as i64, i64),
("compute_turbine", compute_turbine_peers_total as i64, i64),
("repair_total", i64::from(repair_total), i64),
("discard_total", i64::from(discard_total), i64),
update_retransmit_stats(
stats,
timer_start.as_us(),
total_packets,
retransmit_total,
discard_total,
repair_total,
compute_turbine_peers_total,
peers_len,
packets_by_slot,
packets_by_source,
);
Ok(())
}
@@ -154,6 +277,7 @@ pub fn retransmitter(
cluster_info: Arc<ClusterInfo>,
r: Arc<Mutex<PacketReceiver>>,
) -> Vec<JoinHandle<()>> {
let stats = Arc::new(RetransmitStats::default());
(0..sockets.len())
.map(|s| {
let sockets = sockets.clone();
@@ -161,6 +285,7 @@ pub fn retransmitter(
let leader_schedule_cache = leader_schedule_cache.clone();
let r = r.clone();
let cluster_info = cluster_info.clone();
let stats = stats.clone();
Builder::new()
.name("solana-retransmitter".to_string())
@@ -174,6 +299,7 @@ pub fn retransmitter(
&r,
&sockets[s],
s as u32,
&stats,
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,


@@ -4,22 +4,28 @@ use crate::{
cluster_info::ClusterInfo,
commitment::{BlockCommitmentArray, BlockCommitmentCache},
contact_info::ContactInfo,
non_circulating_supply::calculate_non_circulating_supply,
rpc_error::RpcCustomError,
storage_stage::StorageState,
validator::ValidatorExit,
};
use bincode::serialize;
use jsonrpc_core::{Error, ErrorCode, Metadata, Result};
use jsonrpc_core::{Error, Metadata, Result};
use jsonrpc_derive::rpc;
use solana_client::{
rpc_config::*,
rpc_request::{
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE, MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, NUM_LARGEST_ACCOUNTS,
},
rpc_response::*,
};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_ledger::{
bank_forks::BankForks, blockstore::Blockstore, blockstore_db::BlockstoreError,
};
use solana_perf::packet::PACKET_DATA_SIZE;
use solana_runtime::bank::Bank;
use solana_runtime::{accounts::AccountAddressFilter, bank::Bank};
use solana_sdk::{
clock::{Slot, UnixTimestamp},
commitment_config::{CommitmentConfig, CommitmentLevel},
@@ -36,8 +42,8 @@ use solana_transaction_status::{
};
use solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY};
use std::{
cmp::max,
collections::HashMap,
cmp::{max, min},
collections::{HashMap, HashSet},
net::{SocketAddr, UdpSocket},
str::FromStr,
sync::{Arc, RwLock},
@@ -45,8 +51,6 @@ use std::{
time::{Duration, Instant},
};
const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
type RpcResponse<T> = Result<Response<T>>;
fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
@@ -63,15 +67,6 @@ pub struct JsonRpcConfig {
pub faucet_addr: Option<SocketAddr>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureStatusConfig {
pub search_transaction_history: Option<bool>,
// DEPRECATED
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
#[derive(Clone)]
pub struct JsonRpcRequestProcessor {
bank_forks: Arc<RwLock<BankForks>>,
@@ -86,33 +81,45 @@ impl JsonRpcRequestProcessor {
fn bank(&self, commitment: Option<CommitmentConfig>) -> Result<Arc<Bank>> {
debug!("RPC commitment_config: {:?}", commitment);
let r_bank_forks = self.bank_forks.read().unwrap();
if commitment.is_some() && commitment.unwrap().commitment == CommitmentLevel::Recent {
let bank = r_bank_forks.working_bank();
debug!("RPC using working_bank: {:?}", bank.slot());
Ok(bank)
} else if commitment.is_some() && commitment.unwrap().commitment == CommitmentLevel::Root {
let slot = r_bank_forks.root();
debug!("RPC using node root: {:?}", slot);
Ok(r_bank_forks.get(slot).cloned().unwrap())
} else {
let cluster_root = self
.block_commitment_cache
.read()
.unwrap()
.largest_confirmed_root();
debug!("RPC using block: {:?}", cluster_root);
r_bank_forks
.get(cluster_root)
.cloned()
.ok_or_else(|| Error {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_0),
message: format!(
"Cluster largest_confirmed_root {} does not exist on node. Node root: {}",
match commitment {
Some(commitment_config) if commitment_config.commitment == CommitmentLevel::Recent => {
let bank = r_bank_forks.working_bank();
debug!("RPC using working_bank: {:?}", bank.slot());
Ok(bank)
}
Some(commitment_config) if commitment_config.commitment == CommitmentLevel::Root => {
let slot = r_bank_forks.root();
debug!("RPC using node root: {:?}", slot);
Ok(r_bank_forks.get(slot).cloned().unwrap())
}
Some(commitment_config)
if commitment_config.commitment == CommitmentLevel::Single
|| commitment_config.commitment == CommitmentLevel::SingleGossip =>
{
let slot = self
.block_commitment_cache
.read()
.unwrap()
.highest_confirmed_slot();
debug!("RPC using confirmed slot: {:?}", slot);
Ok(r_bank_forks.get(slot).cloned().unwrap())
}
_ => {
let cluster_root = self
.block_commitment_cache
.read()
.unwrap()
.largest_confirmed_root();
debug!("RPC using block: {:?}", cluster_root);
r_bank_forks.get(cluster_root).cloned().ok_or_else(|| {
RpcCustomError::NonexistentClusterRoot {
cluster_root,
r_bank_forks.root(),
),
data: None,
node_root: r_bank_forks.root(),
}
.into()
})
}
}
}
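Read top to bottom, the reworked bank() resolves each commitment level to a bank snapshot; the summary below is a reading aid, not new behavior.
// Recent                -> bank_forks.working_bank()
// Root                  -> bank at this node's root slot
// Single / SingleGossip -> bank at block_commitment_cache.highest_confirmed_slot()
// anything else (Max)   -> bank at largest_confirmed_root(), or a NonexistentClusterRoot
//                          RPC error if that bank is no longer present in bank_forks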
@@ -283,6 +290,54 @@ impl JsonRpcRequestProcessor {
Ok(self.bank(commitment)?.capitalization())
}
fn get_largest_accounts(
&self,
config: Option<RpcLargestAccountsConfig>,
) -> RpcResponse<Vec<RpcAccountBalance>> {
let config = config.unwrap_or_default();
let bank = self.bank(config.commitment)?;
let (addresses, address_filter) = if let Some(filter) = config.filter {
let non_circulating_supply = calculate_non_circulating_supply(&bank);
let addresses = non_circulating_supply.accounts.into_iter().collect();
let address_filter = match filter {
RpcLargestAccountsFilter::Circulating => AccountAddressFilter::Exclude,
RpcLargestAccountsFilter::NonCirculating => AccountAddressFilter::Include,
};
(addresses, address_filter)
} else {
(HashSet::new(), AccountAddressFilter::Exclude)
};
new_response(
&bank,
bank.get_largest_accounts(NUM_LARGEST_ACCOUNTS, &addresses, address_filter)
.into_iter()
.map(|(address, lamports)| RpcAccountBalance {
address: address.to_string(),
lamports,
})
.collect(),
)
}
fn get_supply(&self, commitment: Option<CommitmentConfig>) -> RpcResponse<RpcSupply> {
let bank = self.bank(commitment)?;
let non_circulating_supply = calculate_non_circulating_supply(&bank);
let total_supply = bank.capitalization();
new_response(
&bank,
RpcSupply {
total: total_supply,
circulating: total_supply - non_circulating_supply.lamports,
non_circulating: non_circulating_supply.lamports,
non_circulating_accounts: non_circulating_supply
.accounts
.iter()
.map(|pubkey| pubkey.to_string())
.collect(),
},
)
}
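By construction the three figures returned here satisfy total = circulating + non_circulating for the selected bank; a trivial, illustrative check over the RpcSupply fields populated above:
// Illustrative only: the supply decomposition reported by getSupply.
fn supply_adds_up(supply: &RpcSupply) -> bool {
    supply.total == supply.circulating + supply.non_circulating
}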
fn get_vote_accounts(
&self,
commitment: Option<CommitmentConfig>,
@@ -382,13 +437,45 @@ impl JsonRpcRequestProcessor {
}
}
fn check_slot_cleaned_up<T>(
&self,
result: &std::result::Result<T, BlockstoreError>,
slot: Slot,
) -> Result<()>
where
T: std::fmt::Debug,
{
if result.is_err() {
if let BlockstoreError::SlotCleanedUp = result.as_ref().unwrap_err() {
return Err(RpcCustomError::BlockCleanedUp {
slot,
first_available_block: self
.blockstore
.get_first_available_block()
.unwrap_or_default(),
}
.into());
}
}
Ok(())
}
pub fn get_confirmed_block(
&self,
slot: Slot,
encoding: Option<TransactionEncoding>,
) -> Result<Option<ConfirmedBlock>> {
if self.config.enable_rpc_transaction_history {
Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
if self.config.enable_rpc_transaction_history
&& slot
<= self
.block_commitment_cache
.read()
.unwrap()
.largest_confirmed_root()
{
let result = self.blockstore.get_confirmed_block(slot, encoding);
self.check_slot_cleaned_up(&result, slot)?;
Ok(result.ok())
} else {
Ok(None)
}
@@ -399,11 +486,13 @@ impl JsonRpcRequestProcessor {
start_slot: Slot,
end_slot: Option<Slot>,
) -> Result<Vec<Slot>> {
let end_slot = if let Some(end_slot) = end_slot {
end_slot
} else {
self.bank(None)?.slot()
};
let end_slot = min(
end_slot.unwrap_or(std::u64::MAX),
self.block_commitment_cache
.read()
.unwrap()
.largest_confirmed_root(),
);
if end_slot < start_slot {
return Ok(vec![]);
}
@@ -416,22 +505,30 @@ impl JsonRpcRequestProcessor {
}
pub fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
// This calculation currently assumes that bank.slots_per_year will remain unchanged after
// genesis (ie. that this bank's slot_per_year will be applicable to any rooted slot being
// queried). If these values will be variable in the future, those timing parameters will
// need to be stored persistently, and the slot_duration calculation will likely need to be
// moved upstream into blockstore. Also, an explicit commitment level will need to be set.
let bank = self.bank(None)?;
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
let epoch = bank.epoch_schedule().get_epoch(slot);
let stakes = HashMap::new();
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
if slot
<= self
.block_commitment_cache
.read()
.unwrap()
.largest_confirmed_root()
{
// This calculation currently assumes that bank.slots_per_year will remain unchanged after
// genesis (ie. that this bank's slot_per_year will be applicable to any rooted slot being
// queried). If these values will be variable in the future, those timing parameters will
// need to be stored persistently, and the slot_duration calculation will likely need to be
// moved upstream into blockstore. Also, an explicit commitment level will need to be set.
let bank = self.bank(None)?;
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
let epoch = bank.epoch_schedule().get_epoch(slot);
let stakes = HashMap::new();
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
Ok(self
.blockstore
.get_block_time(slot, slot_duration, stakes)
.ok()
.unwrap_or(None))
let result = self.blockstore.get_block_time(slot, slot_duration, stakes);
self.check_slot_cleaned_up(&result, slot)?;
Ok(result.ok().unwrap_or(None))
} else {
Ok(None)
}
}
pub fn get_signature_confirmation_status(
@@ -488,6 +585,13 @@ impl JsonRpcRequestProcessor {
self.blockstore
.get_transaction_status(signature)
.map_err(|_| Error::internal_error())?
.filter(|(slot, _status_meta)| {
slot <= &self
.block_commitment_cache
.read()
.unwrap()
.largest_confirmed_root()
})
.map(|(slot, status_meta)| {
let err = status_meta.status.clone().err();
TransactionStatus {
@@ -545,7 +649,15 @@ impl JsonRpcRequestProcessor {
Ok(self
.blockstore
.get_confirmed_transaction(signature, encoding)
.unwrap_or(None))
.unwrap_or(None)
.filter(|confirmed_transaction| {
confirmed_transaction.slot
<= self
.block_commitment_cache
.read()
.unwrap()
.largest_confirmed_root()
}))
} else {
Ok(None)
}
@@ -558,6 +670,13 @@ impl JsonRpcRequestProcessor {
end_slot: Slot,
) -> Result<Vec<Signature>> {
if self.config.enable_rpc_transaction_history {
let end_slot = min(
end_slot,
self.block_commitment_cache
.read()
.unwrap()
.largest_confirmed_root(),
);
Ok(self
.blockstore
.get_confirmed_signatures_for_address(pubkey, start_slot, end_slot)
@@ -581,11 +700,15 @@ fn get_tpu_addr(cluster_info: &ClusterInfo) -> Result<SocketAddr> {
}
fn verify_pubkey(input: String) -> Result<Pubkey> {
input.parse().map_err(|_e| Error::invalid_request())
input
.parse()
.map_err(|e| Error::invalid_params(format!("{:?}", e)))
}
fn verify_signature(input: &str) -> Result<Signature> {
input.parse().map_err(|_e| Error::invalid_request())
input
.parse()
.map_err(|e| Error::invalid_params(format!("{:?}", e)))
}
#[derive(Clone)]
@@ -732,6 +855,7 @@ pub trait RpcSol {
commitment: Option<CommitmentConfig>,
) -> Result<u64>;
// DEPRECATED
#[rpc(meta, name = "getTotalSupply")]
fn get_total_supply(
&self,
@@ -739,6 +863,20 @@ pub trait RpcSol {
commitment: Option<CommitmentConfig>,
) -> Result<u64>;
#[rpc(meta, name = "getLargestAccounts")]
fn get_largest_accounts(
&self,
meta: Self::Metadata,
config: Option<RpcLargestAccountsConfig>,
) -> RpcResponse<Vec<RpcAccountBalance>>;
#[rpc(meta, name = "getSupply")]
fn get_supply(
&self,
meta: Self::Metadata,
commitment: Option<CommitmentConfig>,
) -> RpcResponse<RpcSupply>;
#[rpc(meta, name = "requestAirdrop")]
fn request_airdrop(
&self,
@@ -751,6 +889,14 @@ pub trait RpcSol {
#[rpc(meta, name = "sendTransaction")]
fn send_transaction(&self, meta: Self::Metadata, data: String) -> Result<String>;
#[rpc(meta, name = "simulateTransaction")]
fn simulate_transaction(
&self,
meta: Self::Metadata,
data: String,
config: Option<RpcSimulateTransactionConfig>,
) -> RpcResponse<TransactionStatus>;
#[rpc(meta, name = "getSlotLeader")]
fn get_slot_leader(
&self,
@@ -954,6 +1100,9 @@ impl RpcSol for RpcSolImpl {
gossip: Some(contact_info.gossip),
tpu: valid_address_or_none(&contact_info.tpu),
rpc: valid_address_or_none(&contact_info.rpc),
version: cluster_info
.get_node_version(&contact_info.id)
.map(|v| v.to_string()),
})
} else {
None // Exclude spy nodes
@@ -1140,6 +1289,30 @@ impl RpcSol for RpcSolImpl {
.get_total_supply(commitment)
}
fn get_largest_accounts(
&self,
meta: Self::Metadata,
config: Option<RpcLargestAccountsConfig>,
) -> RpcResponse<Vec<RpcAccountBalance>> {
debug!("get_largest_accounts rpc request received");
meta.request_processor
.read()
.unwrap()
.get_largest_accounts(config)
}
fn get_supply(
&self,
meta: Self::Metadata,
commitment: Option<CommitmentConfig>,
) -> RpcResponse<RpcSupply> {
debug!("get_supply rpc request received");
meta.request_processor
.read()
.unwrap()
.get_supply(commitment)
}
fn request_airdrop(
&self,
meta: Self::Metadata,
@@ -1220,41 +1393,67 @@ impl RpcSol for RpcSolImpl {
}
fn send_transaction(&self, meta: Self::Metadata, data: String) -> Result<String> {
let data = bs58::decode(data).into_vec().unwrap();
if data.len() >= PACKET_DATA_SIZE {
info!(
"send_transaction: transaction too large: {} bytes (max: {} bytes)",
data.len(),
PACKET_DATA_SIZE
);
return Err(Error::invalid_request());
}
let tx: Transaction = bincode::config()
.limit(PACKET_DATA_SIZE as u64)
.deserialize(&data)
.map_err(|err| {
info!("send_transaction: deserialize error: {:?}", err);
Error::invalid_request()
})?;
let (wire_transaction, transaction) = deserialize_bs58_transaction(data)?;
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let tpu_addr = get_tpu_addr(&meta.cluster_info)?;
trace!("send_transaction: leader is {:?}", &tpu_addr);
transactions_socket
.send_to(&data, tpu_addr)
.send_to(&wire_transaction, tpu_addr)
.map_err(|err| {
info!("send_transaction: send_to error: {:?}", err);
Error::internal_error()
})?;
let signature = tx.signatures[0].to_string();
let signature = transaction.signatures[0].to_string();
trace!(
"send_transaction: sent {} bytes, signature={}",
data.len(),
wire_transaction.len(),
signature
);
Ok(signature)
}
fn simulate_transaction(
&self,
meta: Self::Metadata,
data: String,
config: Option<RpcSimulateTransactionConfig>,
) -> RpcResponse<TransactionStatus> {
let (_, transaction) = deserialize_bs58_transaction(data)?;
let config = config.unwrap_or(RpcSimulateTransactionConfig { sig_verify: false });
let bank = &*meta.request_processor.read().unwrap().bank(None)?;
assert!(bank.is_frozen());
let mut result = if config.sig_verify {
transaction.verify()
} else {
Ok(())
};
if result.is_ok() {
let transactions = [transaction];
let batch = bank.prepare_batch(&transactions, None);
let (
_loaded_accounts,
executed,
_retryable_transactions,
_transaction_count,
_signature_count,
) = bank.load_and_execute_transactions(&batch, solana_sdk::clock::MAX_PROCESSING_AGE);
result = executed[0].0.clone();
}
new_response(
&bank,
TransactionStatus {
slot: bank.slot(),
confirmations: Some(0),
status: result.clone(),
err: result.err(),
},
)
}
fn get_slot_leader(
&self,
meta: Self::Metadata,
@@ -1332,7 +1531,7 @@ impl RpcSol for RpcSolImpl {
fn get_version(&self, _: Self::Metadata) -> Result<RpcVersionInfo> {
Ok(RpcVersionInfo {
solana_core: solana_clap_utils::version!().to_string(),
solana_core: solana_version::Version::default().to_string(),
})
}
@@ -1424,15 +1623,39 @@ impl RpcSol for RpcSolImpl {
}
}
fn deserialize_bs58_transaction(bs58_transaction: String) -> Result<(Vec<u8>, Transaction)> {
let wire_transaction = bs58::decode(bs58_transaction)
.into_vec()
.map_err(|e| Error::invalid_params(format!("{:?}", e)))?;
if wire_transaction.len() >= PACKET_DATA_SIZE {
let err = format!(
"transaction too large: {} bytes (max: {} bytes)",
wire_transaction.len(),
PACKET_DATA_SIZE
);
info!("{}", err);
return Err(Error::invalid_params(&err));
}
bincode::config()
.limit(PACKET_DATA_SIZE as u64)
.deserialize(&wire_transaction)
.map_err(|err| {
info!("transaction deserialize error: {:?}", err);
Error::invalid_params(&err.to_string())
})
.map(|transaction| (wire_transaction, transaction))
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::{
commitment::BlockCommitment, contact_info::ContactInfo,
non_circulating_supply::non_circulating_accounts,
replay_stage::tests::create_test_transactions_and_populate_blockstore,
};
use bincode::deserialize;
use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
use jsonrpc_core::{ErrorCode, MetaIoHandler, Output, Response, Value};
use solana_ledger::{
blockstore::entries_to_test_shreds,
blockstore_processor::fill_blockstore_slot_with_ticks,
@@ -1520,6 +1743,7 @@ pub mod tests {
bank.clone(),
blockstore.clone(),
0,
0,
)));
// Add timestamp vote to blockstore
@@ -1581,6 +1805,9 @@ pub mod tests {
let blockhash = bank.confirmed_last_blockhash().0;
let tx = system_transaction::transfer(&alice, pubkey, 20, blockhash);
bank.process_transaction(&tx).expect("process transaction");
let tx =
system_transaction::transfer(&alice, &non_circulating_accounts()[0], 20, blockhash);
bank.process_transaction(&tx).expect("process transaction");
let tx = system_transaction::transfer(&alice, pubkey, std::u64::MAX, blockhash);
let _ = bank.process_transaction(&tx);
@@ -1695,7 +1922,7 @@ pub mod tests {
.expect("actual response deserialization");
let expected = format!(
r#"{{"jsonrpc":"2.0","result":[{{"pubkey": "{}", "gossip": "127.0.0.1:1235", "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:{}"}}],"id":1}}"#,
r#"{{"jsonrpc":"2.0","result":[{{"pubkey": "{}", "gossip": "127.0.0.1:1235", "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:{}", "version": null}}],"id":1}}"#,
leader_pubkey,
rpc_port::DEFAULT_RPC_PORT
);
@@ -1732,7 +1959,7 @@ pub mod tests {
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}}"#);
let res = io.handle_request_sync(&req, meta);
let expected = format!(r#"{{"jsonrpc":"2.0","result":3,"id":1}}"#);
let expected = format!(r#"{{"jsonrpc":"2.0","result":4,"id":1}}"#);
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
@@ -1780,6 +2007,94 @@ pub mod tests {
assert!(supply >= TEST_MINT_LAMPORTS);
}
#[test]
fn test_get_supply() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getSupply"}}"#);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let supply: RpcSupply = serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
assert_eq!(supply.non_circulating, 20);
assert!(supply.circulating >= TEST_MINT_LAMPORTS);
assert!(supply.total >= TEST_MINT_LAMPORTS + 20);
let expected_accounts: Vec<String> = non_circulating_accounts()
.iter()
.map(|pubkey| pubkey.to_string())
.collect();
assert_eq!(
supply.non_circulating_accounts.len(),
expected_accounts.len()
);
for address in supply.non_circulating_accounts {
assert!(expected_accounts.contains(&address));
}
}
#[test]
fn test_get_largest_accounts() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler {
io, meta, alice, ..
} = start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts"}}"#);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let largest_accounts: Vec<RpcAccountBalance> =
serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
assert_eq!(largest_accounts.len(), 19);
// Get Alice balance
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#,
alice.pubkey()
);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let alice_balance: u64 = serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
assert!(largest_accounts.contains(&RpcAccountBalance {
address: alice.pubkey().to_string(),
lamports: alice_balance,
}));
// Get Bob balance
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#,
bob_pubkey
);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let bob_balance: u64 = serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
assert!(largest_accounts.contains(&RpcAccountBalance {
address: bob_pubkey.to_string(),
lamports: bob_balance,
}));
// Test Circulating/NonCirculating Filter
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{{"filter":"circulating"}}]}}"#
);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let largest_accounts: Vec<RpcAccountBalance> =
serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
assert_eq!(largest_accounts.len(), 18);
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{{"filter":"nonCirculating"}}]}}"#
);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let largest_accounts: Vec<RpcAccountBalance> =
serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
assert_eq!(largest_accounts.len(), 1);
}
#[test]
fn test_rpc_get_minimum_balance_for_rent_exemption() {
let bob_pubkey = Pubkey::new_rand();
@@ -1988,6 +2303,133 @@ pub mod tests {
assert_eq!(expected, result);
}
#[test]
fn test_rpc_simulate_transaction() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler {
io,
meta,
blockhash,
alice,
bank,
..
} = start_rpc_handler_with_tx(&bob_pubkey);
let mut tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash);
let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string();
tx.signatures[0] = Signature::default();
let tx_badsig_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string();
bank.freeze(); // Ensure the root bank is frozen, `start_rpc_handler_with_tx()` doesn't do this
// Good signature with sigVerify=true
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#,
tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot": 0,"status":{"Ok":null},"err":null}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
// Bad signature with sigVerify=true
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#,
tx_badsig_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot":0,"status":{"Err":"SignatureFailure"},"err":"SignatureFailure"}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
// Bad signature with sigVerify=false
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": false}}]}}"#,
tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot": 0,"status":{"Ok":null},"err":null}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
// Bad signature with default sigVerify setting (false)
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}"]}}"#,
tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot": 0,"status":{"Ok":null},"err":null}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
}
#[test]
#[should_panic]
fn test_rpc_simulate_transaction_panic_on_unfrozen_bank() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler {
io,
meta,
blockhash,
alice,
bank,
..
} = start_rpc_handler_with_tx(&bob_pubkey);
let tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash);
let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string();
assert!(!bank.is_frozen());
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#,
tx_serialized_encoded,
);
// should panic because `bank` is not frozen
let _ = io.handle_request_sync(&req, meta.clone());
}
#[test]
fn test_rpc_confirm_tx() {
let bob_pubkey = Pubkey::new_rand();
@@ -2304,14 +2746,10 @@ pub mod tests {
};
let req = r#"{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["37u9WtQpcm6ULa3Vmu7ySnANv"]}"#;
let res = io.handle_request_sync(req, meta.clone());
let expected =
r#"{"jsonrpc":"2.0","error":{"code":-32600,"message":"Invalid request"},"id":1}"#;
let expected: Response =
serde_json::from_str(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
let res = io.handle_request_sync(req, meta);
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let error = &json["error"];
assert_eq!(error["code"], ErrorCode::InvalidParams.code());
}
#[test]
@@ -2332,7 +2770,7 @@ pub mod tests {
let bad_pubkey = "a1b2c3d4";
assert_eq!(
verify_pubkey(bad_pubkey.to_string()),
Err(Error::invalid_request())
Err(Error::invalid_params("WrongSize"))
);
}
@@ -2346,7 +2784,7 @@ pub mod tests {
let bad_signature = "a1b2c3d4";
assert_eq!(
verify_signature(&bad_signature.to_string()),
Err(Error::invalid_request())
Err(Error::invalid_params("WrongSize"))
);
}
@@ -2452,7 +2890,7 @@ pub mod tests {
let expected = json!({
"jsonrpc": "2.0",
"result": {
"solana-core": solana_clap_utils::version!().to_string()
"solana-core": solana_version::version!().to_string()
},
"id": 1
});
@@ -2487,6 +2925,7 @@ pub mod tests {
bank_forks.read().unwrap().working_bank(),
blockstore.clone(),
0,
0,
)));
let mut config = JsonRpcConfig::default();
@@ -2688,7 +3127,7 @@ pub mod tests {
block_commitment_cache
.write()
.unwrap()
.set_get_largest_confirmed_root(8);
.set_largest_confirmed_root(8);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0]}}"#);
@@ -2735,11 +3174,21 @@ pub mod tests {
fn test_get_block_time() {
let bob_pubkey = Pubkey::new_rand();
let base_timestamp = 1576183541;
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blockstore(
let RpcHandler {
io,
meta,
bank,
block_commitment_cache,
..
} = start_rpc_handler_with_tx_and_blockstore(
&bob_pubkey,
vec![1, 2, 3, 4, 5, 6, 7],
base_timestamp,
);
block_commitment_cache
.write()
.unwrap()
.set_largest_confirmed_root(7);
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());

45
core/src/rpc_error.rs Normal file
View File

@@ -0,0 +1,45 @@
use jsonrpc_core::{Error, ErrorCode};
use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
pub enum RpcCustomError {
NonexistentClusterRoot {
cluster_root: Slot,
node_root: Slot,
},
BlockCleanedUp {
slot: Slot,
first_available_block: Slot,
},
}
impl From<RpcCustomError> for Error {
fn from(e: RpcCustomError) -> Self {
match e {
RpcCustomError::NonexistentClusterRoot {
cluster_root,
node_root,
} => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_0),
message: format!(
"Cluster largest_confirmed_root {} does not exist on node. Node root: {}",
cluster_root, node_root,
),
data: None,
},
RpcCustomError::BlockCleanedUp {
slot,
first_available_block,
} => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_1),
message: format!(
"Block {} cleaned up, does not exist on node. First available block: {}",
slot, first_available_block,
),
data: None,
},
}
}
}
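The call sites added in rpc.rs rely on this From impl so a typed error can be returned with .into(); a minimal illustration (the function name is hypothetical):
// Illustrative only: building the typed error and handing it to the RPC layer as jsonrpc_core::Error.
fn block_cleaned_up_error(slot: Slot, first_available_block: Slot) -> Error {
    RpcCustomError::BlockCleanedUp {
        slot,
        first_available_block,
    }
    .into()
}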

View File

@@ -1,6 +1,6 @@
//! The `pubsub` module implements a threaded subscription service on client RPC request
use crate::rpc_subscriptions::{Confirmations, RpcSubscriptions, SlotInfo};
use crate::rpc_subscriptions::{RpcSubscriptions, RpcVote, SlotInfo};
use jsonrpc_core::{Error, ErrorCode, Result};
use jsonrpc_derive::rpc;
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
@@ -8,8 +8,12 @@ use solana_client::rpc_response::{
Response as RpcResponse, RpcAccount, RpcKeyedAccount, RpcSignatureResult,
};
#[cfg(test)]
use solana_ledger::blockstore::Blockstore;
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_sdk::{
clock::Slot, commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Signature,
};
#[cfg(test)]
use std::sync::RwLock;
use std::{
str::FromStr,
sync::{atomic, Arc},
@@ -35,7 +39,7 @@ pub trait RpcSolPubSub {
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
);
// Unsubscribe from account notification subscription.
@@ -59,7 +63,7 @@ pub trait RpcSolPubSub {
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
);
// Unsubscribe from account notification subscription.
@@ -83,7 +87,7 @@ pub trait RpcSolPubSub {
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
);
// Unsubscribe from signature notification subscription.
@@ -110,6 +114,18 @@ pub trait RpcSolPubSub {
)]
fn slot_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
// Get notification when vote is encountered
#[pubsub(subscription = "voteNotification", subscribe, name = "voteSubscribe")]
fn vote_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<RpcVote>);
// Unsubscribe from vote notification subscription.
#[pubsub(
subscription = "voteNotification",
unsubscribe,
name = "voteUnsubscribe"
)]
fn vote_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
// Get notification when a new root is set
#[pubsub(subscription = "rootNotification", subscribe, name = "rootSubscribe")]
fn root_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<Slot>);
@@ -135,9 +151,14 @@ impl RpcSolPubSubImpl {
}
#[cfg(test)]
fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
fn default_with_blockstore_bank_forks(
blockstore: Arc<Blockstore>,
bank_forks: Arc<RwLock<BankForks>>,
) -> Self {
let uid = Arc::new(atomic::AtomicUsize::default());
let subscriptions = Arc::new(RpcSubscriptions::default_with_blockstore(blockstore));
let subscriptions = Arc::new(RpcSubscriptions::default_with_blockstore_bank_forks(
blockstore, bank_forks,
));
Self { uid, subscriptions }
}
}
@@ -158,19 +179,15 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
) {
match param::<Pubkey>(&pubkey_str, "pubkey") {
Ok(pubkey) => {
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("account_subscribe: account={:?} id={:?}", pubkey, sub_id);
self.subscriptions.add_account_subscription(
pubkey,
confirmations,
sub_id,
subscriber,
)
self.subscriptions
.add_account_subscription(pubkey, commitment, sub_id, subscriber)
}
Err(e) => subscriber.reject(e).unwrap(),
}
@@ -198,19 +215,15 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
) {
match param::<Pubkey>(&pubkey_str, "pubkey") {
Ok(pubkey) => {
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("program_subscribe: account={:?} id={:?}", pubkey, sub_id);
self.subscriptions.add_program_subscription(
pubkey,
confirmations,
sub_id,
subscriber,
)
self.subscriptions
.add_program_subscription(pubkey, commitment, sub_id, subscriber)
}
Err(e) => subscriber.reject(e).unwrap(),
}
@@ -238,7 +251,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
) {
info!("signature_subscribe");
match param::<Signature>(&signature_str, "signature") {
@@ -249,12 +262,8 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
"signature_subscribe: signature={:?} id={:?}",
signature, sub_id
);
self.subscriptions.add_signature_subscription(
signature,
confirmations,
sub_id,
subscriber,
);
self.subscriptions
.add_signature_subscription(signature, commitment, sub_id, subscriber);
}
Err(e) => subscriber.reject(e).unwrap(),
}
@@ -298,6 +307,27 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
}
}
fn vote_subscribe(&self, _meta: Self::Metadata, subscriber: Subscriber<RpcVote>) {
info!("vote_subscribe");
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("vote_subscribe: id={:?}", sub_id);
self.subscriptions.add_vote_subscription(sub_id, subscriber);
}
fn vote_unsubscribe(&self, _meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> {
info!("vote_unsubscribe");
if self.subscriptions.remove_vote_subscription(&id) {
Ok(true)
} else {
Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Subscription id does not exist".into(),
data: None,
})
}
}
fn root_subscribe(&self, _meta: Self::Metadata, subscriber: Subscriber<Slot>) {
info!("root_subscribe");
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
@@ -324,9 +354,11 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
mod tests {
use super::*;
use crate::{
commitment::{BlockCommitment, BlockCommitmentCache},
cluster_info_vote_listener::{ClusterInfoVoteListener, VoteTracker},
commitment::{BlockCommitmentCache, CacheSlotInfo},
rpc_subscriptions::tests::robust_poll_or_panic,
};
use crossbeam_channel::unbounded;
use jsonrpc_core::{futures::sync::mpsc, Response};
use jsonrpc_pubsub::{PubSubHandler, Session};
use serial_test_derive::serial;
@@ -336,15 +368,19 @@ mod tests {
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_runtime::bank::Bank;
use solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
};
use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_program, system_transaction,
transaction::{self, Transaction},
};
use solana_vote_program::vote_transaction;
use std::{
collections::HashMap,
sync::{atomic::AtomicBool, RwLock},
thread::sleep,
time::Duration,
@@ -354,14 +390,17 @@ mod tests {
bank_forks: &Arc<RwLock<BankForks>>,
tx: &Transaction,
subscriptions: &RpcSubscriptions,
current_slot: Slot,
) -> transaction::Result<()> {
bank_forks
.write()
.unwrap()
.get(0)
.get(current_slot)
.unwrap()
.process_transaction(tx)?;
subscriptions.notify_subscribers(0, &bank_forks);
let mut cache_slot_info = CacheSlotInfo::default();
cache_slot_info.current_slot = current_slot;
subscriptions.notify_subscribers(cache_slot_info);
Ok(())
}
@@ -387,6 +426,7 @@ mod tests {
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
bank_forks.clone(),
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
@@ -401,7 +441,7 @@ mod tests {
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("signatureNotification");
rpc.signature_subscribe(session, subscriber, tx.signatures[0].to_string(), None);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 0).unwrap();
// Test signature confirmation notification
let (response, _) = robust_poll_or_panic(receiver);
@@ -430,15 +470,15 @@ mod tests {
} = create_genesis_config(10_000);
let bob_pubkey = Pubkey::new_rand();
let bank = Bank::new(&genesis_config);
let arc_bank = Arc::new(bank);
let blockhash = arc_bank.last_blockhash();
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let session = create_session();
let mut io = PubSubHandler::default();
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
io.extend_with(rpc.to_delegate());
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
@@ -493,14 +533,22 @@ mod tests {
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
bank_forks.clone(),
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
BlockCommitmentCache::new_for_tests_with_blockstore_bank(
blockstore,
bank_forks.read().unwrap().get(1).unwrap().clone(),
1,
),
)),
)),
uid: Arc::new(atomic::AtomicUsize::default()),
@@ -511,11 +559,11 @@ mod tests {
session,
subscriber,
contract_state.pubkey().to_string(),
None,
Some(CommitmentConfig::recent()),
);
let tx = system_transaction::transfer(&alice, &contract_funds.pubkey(), 51, blockhash);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
let ixs = budget_instruction::when_signed(
&contract_funds.pubkey(),
@@ -530,14 +578,14 @@ mod tests {
ixs,
blockhash,
);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
sleep(Duration::from_millis(200));
// Test signature confirmation notification #1
let expected_data = bank_forks
.read()
.unwrap()
.get(0)
.get(1)
.unwrap()
.get_account(&contract_state.pubkey())
.unwrap()
@@ -547,7 +595,7 @@ mod tests {
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 0 },
"context": { "slot": 1 },
"value": {
"owner": budget_program_id.to_string(),
"lamports": 51,
@@ -564,7 +612,7 @@ mod tests {
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
let tx = system_transaction::transfer(&alice, &witness.pubkey(), 1, blockhash);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
sleep(Duration::from_millis(200));
let ix = budget_instruction::apply_signature(
&witness.pubkey(),
@@ -572,14 +620,14 @@ mod tests {
&bob_pubkey,
);
let tx = Transaction::new_signed_instructions(&[&witness], vec![ix], blockhash);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
sleep(Duration::from_millis(200));
assert_eq!(
bank_forks
.read()
.unwrap()
.get(0)
.get(1)
.unwrap()
.get_account(&contract_state.pubkey()),
None
@@ -593,9 +641,12 @@ mod tests {
let session = create_session();
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, Bank::new(&genesis_config))));
let mut io = PubSubHandler::default();
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
let rpc =
RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks.clone());
io.extend_with(rpc.to_delegate());
@@ -630,7 +681,7 @@ mod tests {
#[test]
#[should_panic]
fn test_account_confirmations_not_fulfilled() {
fn test_account_commitment_not_fulfilled() {
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
@@ -638,15 +689,19 @@ mod tests {
} = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bank_forks = Arc::new(RwLock::new(BankForks::new(1, bank)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bob = Keypair::new();
let mut rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore.clone());
let mut rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(
blockstore.clone(),
bank_forks.clone(),
);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(
&exit,
bank_forks.clone(),
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
@@ -654,24 +709,30 @@ mod tests {
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
rpc.account_subscribe(
session,
subscriber,
bob.pubkey().to_string(),
Some(CommitmentConfig::root()),
);
let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash);
bank_forks
.write()
.unwrap()
.get(0)
.get(1)
.unwrap()
.process_transaction(&tx)
.unwrap();
rpc.subscriptions.notify_subscribers(0, &bank_forks);
rpc.subscriptions
.notify_subscribers(CacheSlotInfo::default());
// allow 200ms for notification thread to wake
std::thread::sleep(Duration::from_millis(200));
let _panic = robust_poll_or_panic(receiver);
}
#[test]
fn test_account_confirmations() {
fn test_account_commitment() {
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
@@ -680,75 +741,59 @@ mod tests {
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bob = Keypair::new();
let mut rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore.clone());
let mut rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(
blockstore.clone(),
bank_forks.clone(),
);
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore.clone()),
));
let subscriptions = RpcSubscriptions::new(&exit, block_commitment_cache.clone());
let subscriptions =
RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache.clone());
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
rpc.account_subscribe(
session,
subscriber,
bob.pubkey().to_string(),
Some(CommitmentConfig::root()),
);
let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash);
bank_forks
.write()
.unwrap()
.get(0)
.get(1)
.unwrap()
.process_transaction(&tx)
.unwrap();
rpc.subscriptions.notify_subscribers(0, &bank_forks);
let mut cache_slot_info = CacheSlotInfo::default();
cache_slot_info.current_slot = 1;
rpc.subscriptions.notify_subscribers(cache_slot_info);
let bank0 = bank_forks.read().unwrap()[0].clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bank1 = bank_forks.read().unwrap()[1].clone();
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
let mut new_block_commitment = BlockCommitmentCache::new(
block_commitment,
0,
10,
bank1.clone(),
blockstore.clone(),
0,
);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
drop(w_block_commitment_cache);
rpc.subscriptions.notify_subscribers(1, &bank_forks);
let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2);
bank_forks.write().unwrap().insert(bank2);
let bank2 = bank_forks.read().unwrap()[2].clone();
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(2, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
let mut new_block_commitment =
BlockCommitmentCache::new(block_commitment, 0, 10, bank2, blockstore.clone(), 0);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
drop(w_block_commitment_cache);
rpc.subscriptions.notify_subscribers(2, &bank_forks);
let cache_slot_info = CacheSlotInfo {
current_slot: 2,
node_root: 1,
largest_confirmed_root: 1,
highest_confirmed_slot: 1,
};
rpc.subscriptions.notify_subscribers(cache_slot_info);
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 0 },
"context": { "slot": 1 },
"value": {
"owner": system_program::id().to_string(),
"lamports": 100,
@@ -769,7 +814,10 @@ mod tests {
fn test_slot_subscribe() {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
rpc.slot_subscribe(session, subscriber);
@@ -796,7 +844,10 @@ mod tests {
fn test_slot_unsubscribe() {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
rpc.slot_subscribe(session, subscriber);
@@ -825,4 +876,97 @@ mod tests {
.slot_unsubscribe(Some(session), SubscriptionId::Number(0))
.is_ok());
}
#[test]
#[serial]
fn test_vote_subscribe() {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore.clone()),
));
let validator_voting_keypairs: Vec<_> = (0..10)
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
.collect();
let GenesisConfigInfo { genesis_config, .. } =
create_genesis_config_with_vote_accounts(10_000, &validator_voting_keypairs, 100);
let exit = Arc::new(AtomicBool::new(false));
let bank = Bank::new(&genesis_config);
let bank_forks = BankForks::new(0, bank);
let bank = bank_forks.get(0).unwrap().clone();
let bank_forks = Arc::new(RwLock::new(bank_forks));
// Setup RPC
let mut rpc =
RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks.clone());
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("voteNotification");
// Setup Subscriptions
let subscriptions =
RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache.clone());
rpc.subscriptions = Arc::new(subscriptions);
rpc.vote_subscribe(session, subscriber);
// Create some voters at genesis
let vote_tracker = VoteTracker::new(&bank);
let (votes_sender, votes_receiver) = unbounded();
let (vote_tracker, validator_voting_keypairs) =
(Arc::new(vote_tracker), validator_voting_keypairs);
let vote_slots = vec![1, 2];
validator_voting_keypairs.iter().for_each(|keypairs| {
let node_keypair = &keypairs.node_keypair;
let vote_keypair = &keypairs.vote_keypair;
let vote_tx = vote_transaction::new_vote_transaction(
vote_slots.clone(),
Hash::default(),
Hash::default(),
node_keypair,
vote_keypair,
vote_keypair,
);
votes_sender.send(vec![vote_tx]).unwrap();
});
// Process votes and check they were notified.
ClusterInfoVoteListener::get_and_process_votes_for_tests(
&votes_receiver,
&vote_tracker,
0,
rpc.subscriptions.clone(),
)
.unwrap();
let (response, _) = robust_poll_or_panic(receiver);
assert_eq!(
response,
r#"{"jsonrpc":"2.0","method":"voteNotification","params":{"result":{"hash":"11111111111111111111111111111111","slots":[1,2],"timestamp":null},"subscription":0}}"#
);
}
#[test]
#[serial]
fn test_vote_unsubscribe() {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
let session = create_session();
let (subscriber, _id_receiver, _) = Subscriber::new_test("voteNotification");
rpc.vote_subscribe(session, subscriber);
let session = create_session();
assert!(rpc
.vote_unsubscribe(Some(session), SubscriptionId::Number(42))
.is_err());
let session = create_session();
assert!(rpc
.vote_unsubscribe(Some(session), SubscriptionId::Number(0))
.is_ok());
}
}


@@ -73,7 +73,13 @@ impl PubSubService {
mod tests {
use super::*;
use crate::commitment::BlockCommitmentCache;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_ledger::{
bank_forks::BankForks,
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_runtime::bank::Bank;
use std::{
net::{IpAddr, Ipv4Addr},
sync::RwLock,
@@ -85,8 +91,12 @@ mod tests {
let exit = Arc::new(AtomicBool::new(false));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks,
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),


@@ -15,7 +15,7 @@ use solana_ledger::{
blockstore::Blockstore,
snapshot_utils,
};
use solana_sdk::{hash::Hash, pubkey::Pubkey};
use solana_sdk::{hash::Hash, native_token::lamports_to_sol, pubkey::Pubkey};
use std::{
collections::HashSet,
net::SocketAddr,
@@ -44,6 +44,7 @@ struct RpcRequestMiddleware {
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
bank_forks: Arc<RwLock<BankForks>>,
}
impl RpcRequestMiddleware {
@@ -52,6 +53,7 @@ impl RpcRequestMiddleware {
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
bank_forks: Arc<RwLock<BankForks>>,
) -> Self {
Self {
ledger_path,
@@ -60,6 +62,7 @@ impl RpcRequestMiddleware {
snapshot_config,
cluster_info,
trusted_validators,
bank_forks,
}
}
@@ -85,7 +88,7 @@ impl RpcRequestMiddleware {
.unwrap()
}
fn is_get_path(&self, path: &str) -> bool {
fn is_file_get_path(&self, path: &str) -> bool {
match path {
"/genesis.tar.bz2" => true,
_ => {
@@ -98,7 +101,7 @@ impl RpcRequestMiddleware {
}
}
fn get(&self, path: &str) -> RequestMiddlewareAction {
fn process_file_get(&self, path: &str) -> RequestMiddlewareAction {
let stem = path.split_at(1).1; // Drop leading '/' from path
let filename = {
match path {
@@ -217,8 +220,19 @@ impl RequestMiddleware for RpcRequestMiddleware {
};
}
}
if self.is_get_path(request.uri().path()) {
self.get(request.uri().path())
if let Some(result) = process_rest(&self.bank_forks, request.uri().path()) {
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
response: Box::new(jsonrpc_core::futures::future::ok(
hyper::Response::builder()
.status(hyper::StatusCode::OK)
.body(hyper::Body::from(result))
.unwrap(),
)),
}
} else if self.is_file_get_path(request.uri().path()) {
self.process_file_get(request.uri().path())
} else if request.uri().path() == "/health" {
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
@@ -238,6 +252,29 @@ impl RequestMiddleware for RpcRequestMiddleware {
}
}
fn process_rest(bank_forks: &Arc<RwLock<BankForks>>, path: &str) -> Option<String> {
match path {
"/v0/circulating-supply" => {
let r_bank_forks = bank_forks.read().unwrap();
let bank = r_bank_forks.root_bank();
let total_supply = bank.capitalization();
let non_circulating_supply =
crate::non_circulating_supply::calculate_non_circulating_supply(&bank).lamports;
Some(format!(
"{}",
lamports_to_sol(total_supply - non_circulating_supply)
))
}
"/v0/total-supply" => {
let r_bank_forks = bank_forks.read().unwrap();
let bank = r_bank_forks.root_bank();
let total_supply = bank.capitalization();
Some(format!("{}", lamports_to_sol(total_supply)))
}
_ => None,
}
}
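Aside: a quick way to exercise the two REST endpoints added above, as a self-contained sketch built on reqwest's blocking client (the same client the integration tests in this change use). The localhost:8899 address is an assumption for a locally running RPC service; both endpoints respond with a plain-text SOL amount.

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::blocking::Client::new();
    // Assumed local RPC address; point this at the node being queried.
    let base = "http://localhost:8899";
    let circulating = client
        .get(format!("{}/v0/circulating-supply", base))
        .send()?
        .text()?;
    let total = client
        .get(format!("{}/v0/total-supply", base))
        .send()?
        .text()?;
    println!("circulating supply: {} SOL", circulating);
    println!("total supply:       {} SOL", total);
    Ok(())
}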
impl JsonRpcService {
#[allow(clippy::too_many_arguments)]
pub fn new(
@@ -258,7 +295,7 @@ impl JsonRpcService {
info!("rpc configuration: {:?}", config);
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
config,
bank_forks,
bank_forks.clone(),
block_commitment_cache,
blockstore,
storage_state,
@@ -283,6 +320,7 @@ impl JsonRpcService {
snapshot_config,
cluster_info.clone(),
trusted_validators,
bank_forks.clone(),
);
let server = ServerBuilder::with_meta_extractor(
io,
@@ -412,11 +450,39 @@ mod tests {
rpc_service.join().unwrap();
}
#[test]
fn test_is_get_path() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
fn create_bank_forks() -> Arc<RwLock<BankForks>> {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)))
}
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
#[test]
fn test_process_rest_api() {
let bank_forks = create_bank_forks();
assert_eq!(None, process_rest(&bank_forks, "not-a-supported-rest-api"));
assert_eq!(
Some("0.000010127".to_string()),
process_rest(&bank_forks, "/v0/circulating-supply")
);
assert_eq!(
Some("0.000010127".to_string()),
process_rest(&bank_forks, "/v0/total-supply")
);
}
#[test]
fn test_is_file_get_path() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let bank_forks = create_bank_forks();
let rrm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
cluster_info.clone(),
None,
bank_forks.clone(),
);
let rrm_with_snapshot_config = RpcRequestMiddleware::new(
PathBuf::from("/"),
Some(SnapshotConfig {
@@ -426,33 +492,41 @@ mod tests {
}),
cluster_info,
None,
bank_forks,
);
assert!(rrm.is_get_path("/genesis.tar.bz2"));
assert!(!rrm.is_get_path("genesis.tar.bz2"));
assert!(rrm.is_file_get_path("/genesis.tar.bz2"));
assert!(!rrm.is_file_get_path("genesis.tar.bz2"));
assert!(!rrm.is_get_path("/snapshot.tar.bz2")); // This is a redirect
assert!(!rrm.is_file_get_path("/snapshot.tar.bz2")); // This is a redirect
assert!(
!rrm.is_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2")
);
assert!(rrm_with_snapshot_config
.is_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"));
assert!(!rrm.is_file_get_path(
"/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
));
assert!(rrm_with_snapshot_config.is_file_get_path(
"/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
));
assert!(!rrm.is_get_path(
assert!(!rrm.is_file_get_path(
"/snapshot-notaslotnumber-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
));
assert!(!rrm.is_get_path("/"));
assert!(!rrm.is_get_path(".."));
assert!(!rrm.is_get_path("🎣"));
assert!(!rrm.is_file_get_path("/"));
assert!(!rrm.is_file_get_path(".."));
assert!(!rrm.is_file_get_path("🎣"));
}
#[test]
fn test_health_check_with_no_trusted_validators() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
let rm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
cluster_info.clone(),
None,
create_bank_forks(),
);
assert_eq!(rm.health_check(), "ok");
}
@@ -466,6 +540,7 @@ mod tests {
None,
cluster_info.clone(),
Some(trusted_validators.clone().into_iter().collect()),
create_bank_forks(),
);
// No account hashes for this node or any trusted validators == "behind"

File diff suppressed because it is too large


@@ -2,16 +2,20 @@ use crate::{
cluster_info::{ClusterInfo, ClusterInfoError},
cluster_slots::ClusterSlots,
contact_info::ContactInfo,
repair_response,
repair_service::RepairStats,
result::{Error, Result},
weighted_shuffle::weighted_best,
};
use bincode::serialize;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::{
blockstore::Blockstore,
shred::{Nonce, Shred},
};
use solana_measure::measure::Measure;
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug};
use solana_perf::packet::{limited_deserialize, Packet, Packets, PacketsRecycler};
use solana_perf::packet::{limited_deserialize, Packets, PacketsRecycler};
use solana_sdk::{
clock::Slot,
pubkey::Pubkey,
@@ -30,6 +34,7 @@ use std::{
/// the number of slots to respond with when responding to `Orphan` requests
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
pub const DEFAULT_NONCE: u32 = 42;
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
pub enum RepairType {
@@ -65,6 +70,9 @@ enum RepairProtocol {
WindowIndex(ContactInfo, u64, u64),
HighestWindowIndex(ContactInfo, u64, u64),
Orphan(ContactInfo, u64),
WindowIndexWithNonce(ContactInfo, u64, u64, Nonce),
HighestWindowIndexWithNonce(ContactInfo, u64, u64, Nonce),
OrphanWithNonce(ContactInfo, u64, Nonce),
}
#[derive(Clone)]
@@ -107,6 +115,9 @@ impl ServeRepair {
RepairProtocol::WindowIndex(ref from, _, _) => from,
RepairProtocol::HighestWindowIndex(ref from, _, _) => from,
RepairProtocol::Orphan(ref from, _) => from,
RepairProtocol::WindowIndexWithNonce(ref from, _, _, _) => from,
RepairProtocol::HighestWindowIndexWithNonce(ref from, _, _, _) => from,
RepairProtocol::OrphanWithNonce(ref from, _, _) => from,
}
}
@@ -141,6 +152,7 @@ impl ServeRepair {
&me.read().unwrap().my_info,
*slot,
*shred_index,
None,
),
"WindowIndex",
)
@@ -155,6 +167,7 @@ impl ServeRepair {
blockstore,
*slot,
*highest_index,
None,
),
"HighestWindowIndex",
)
@@ -168,10 +181,55 @@ impl ServeRepair {
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
None,
),
"Orphan",
)
}
RepairProtocol::WindowIndexWithNonce(_, slot, shred_index, nonce) => {
stats.window_index += 1;
(
Self::run_window_request(
recycler,
from,
&from_addr,
blockstore,
&me.read().unwrap().my_info,
*slot,
*shred_index,
Some(*nonce),
),
"WindowIndexWithNonce",
)
}
RepairProtocol::HighestWindowIndexWithNonce(_, slot, highest_index, nonce) => {
stats.highest_window_index += 1;
(
Self::run_highest_window_request(
recycler,
&from_addr,
blockstore,
*slot,
*highest_index,
Some(*nonce),
),
"HighestWindowIndexWithNonce",
)
}
RepairProtocol::OrphanWithNonce(_, slot, nonce) => {
stats.orphan += 1;
(
Self::run_orphan(
recycler,
&from_addr,
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
Some(*nonce),
),
"OrphanWithNonce",
)
}
}
};
@@ -331,20 +389,47 @@ impl ServeRepair {
});
}
fn window_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
let req = RepairProtocol::WindowIndex(self.my_info.clone(), slot, shred_index);
fn window_index_request_bytes(
&self,
slot: Slot,
shred_index: u64,
nonce: Option<Nonce>,
) -> Result<Vec<u8>> {
let req = if let Some(nonce) = nonce {
RepairProtocol::WindowIndexWithNonce(self.my_info.clone(), slot, shred_index, nonce)
} else {
RepairProtocol::WindowIndex(self.my_info.clone(), slot, shred_index)
};
let out = serialize(&req)?;
Ok(out)
}
fn window_highest_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
let req = RepairProtocol::HighestWindowIndex(self.my_info.clone(), slot, shred_index);
fn window_highest_index_request_bytes(
&self,
slot: Slot,
shred_index: u64,
nonce: Option<Nonce>,
) -> Result<Vec<u8>> {
let req = if let Some(nonce) = nonce {
RepairProtocol::HighestWindowIndexWithNonce(
self.my_info.clone(),
slot,
shred_index,
nonce,
)
} else {
RepairProtocol::HighestWindowIndex(self.my_info.clone(), slot, shred_index)
};
let out = serialize(&req)?;
Ok(out)
}
fn orphan_bytes(&self, slot: Slot) -> Result<Vec<u8>> {
let req = RepairProtocol::Orphan(self.my_info.clone(), slot);
fn orphan_bytes(&self, slot: Slot, nonce: Option<Nonce>) -> Result<Vec<u8>> {
let req = if let Some(nonce) = nonce {
RepairProtocol::OrphanWithNonce(self.my_info.clone(), slot, nonce)
} else {
RepairProtocol::Orphan(self.my_info.clone(), slot)
};
let out = serialize(&req)?;
Ok(out)
}
@@ -352,24 +437,30 @@ impl ServeRepair {
pub fn repair_request(
&self,
cluster_slots: &ClusterSlots,
repair_request: &RepairType,
repair_request: RepairType,
cache: &mut RepairCache,
repair_stats: &mut RepairStats,
) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication and has the desired slot, as indicated
// by a valid tvu port location
if cache.get(&repair_request.slot()).is_none() {
let repair_peers: Vec<_> = self.cluster_info.repair_peers(repair_request.slot());
let slot = repair_request.slot();
if cache.get(&slot).is_none() {
let repair_peers: Vec<_> = self.cluster_info.repair_peers(slot);
if repair_peers.is_empty() {
return Err(ClusterInfoError::NoPeers.into());
}
let weights = cluster_slots.compute_weights(repair_request.slot(), &repair_peers);
cache.insert(repair_request.slot(), (repair_peers, weights));
let weights = cluster_slots.compute_weights(slot, &repair_peers);
cache.insert(slot, (repair_peers, weights));
}
let (repair_peers, weights) = cache.get(&repair_request.slot()).unwrap();
let (repair_peers, weights) = cache.get(&slot).unwrap();
let n = weighted_best(&weights, Pubkey::new_rand().to_bytes());
let addr = repair_peers[n].serve_repair; // send the request to the peer's serve_repair port
let out = self.map_repair_request(repair_request, repair_stats)?;
let nonce = if Shred::is_nonce_unlocked(slot) {
Some(DEFAULT_NONCE)
} else {
None
};
let out = self.map_repair_request(&repair_request, repair_stats, nonce)?;
Ok((addr, out))
}
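Aside: the nonce selection just above is the heart of the backward-compatible rollout — repair requests for slots past the activation slot carry a nonce, while older slots stay nonce-free. A stripped-down, self-contained sketch of that gate follows; UNLOCK_NONCE_SLOT here is a placeholder value rather than the real activation slot, DEFAULT_NONCE mirrors the constant added earlier in this file, and the strict `>` comparison is inferred from the tests (a request at exactly UNLOCK_NONCE_SLOT gets no nonce).

type Slot = u64;
type Nonce = u32;

// Placeholder activation slot for illustration only; the real constant lives in
// the ledger crate's shred module.
const UNLOCK_NONCE_SLOT: Slot = 1_000;
const DEFAULT_NONCE: Nonce = 42;

// Stand-in for the `Shred::is_nonce_unlocked` gate used above.
fn repair_nonce(slot: Slot) -> Option<Nonce> {
    if slot > UNLOCK_NONCE_SLOT {
        Some(DEFAULT_NONCE)
    } else {
        None
    }
}

fn main() {
    assert_eq!(repair_nonce(UNLOCK_NONCE_SLOT), None);
    assert_eq!(repair_nonce(UNLOCK_NONCE_SLOT + 1), Some(DEFAULT_NONCE));
    println!("nonce gating behaves as sketched");
}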
@@ -377,19 +468,24 @@ impl ServeRepair {
&self,
repair_request: &RepairType,
repair_stats: &mut RepairStats,
nonce: Option<Nonce>,
) -> Result<Vec<u8>> {
let slot = repair_request.slot();
if Shred::is_nonce_unlocked(slot) {
assert!(nonce.is_some());
}
match repair_request {
RepairType::Shred(slot, shred_index) => {
repair_stats.shred.update(*slot);
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
Ok(self.window_index_request_bytes(*slot, *shred_index, nonce)?)
}
RepairType::HighestShred(slot, shred_index) => {
repair_stats.highest_shred.update(*slot);
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
Ok(self.window_highest_index_request_bytes(*slot, *shred_index, nonce)?)
}
RepairType::Orphan(slot) => {
repair_stats.orphan.update(*slot);
Ok(self.orphan_bytes(*slot)?)
Ok(self.orphan_bytes(*slot, nonce)?)
}
}
}
@@ -402,12 +498,19 @@ impl ServeRepair {
me: &ContactInfo,
slot: Slot,
shred_index: u64,
nonce: Option<Nonce>,
) -> Option<Packets> {
if let Some(blockstore) = blockstore {
// Try to find the requested index in one of the slots
let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
let packet = repair_response::repair_response_packet(
blockstore,
slot,
shred_index,
from_addr,
nonce,
);
if let Ok(Some(packet)) = packet {
if let Some(packet) = packet {
inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
return Some(Packets::new_with_recycler_data(
recycler,
@@ -435,15 +538,20 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
highest_index: u64,
nonce: Option<Nonce>,
) -> Option<Packets> {
let blockstore = blockstore?;
// Try to find the requested index in one of the slots
let meta = blockstore.meta(slot).ok()??;
if meta.received > highest_index {
// meta.received must be at least 1 by this point
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
.ok()??;
let packet = repair_response::repair_response_packet(
blockstore,
slot,
meta.received - 1,
from_addr,
nonce,
)?;
return Some(Packets::new_with_recycler_data(
recycler,
"run_highest_window_request",
@@ -459,6 +567,7 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
mut slot: Slot,
max_responses: usize,
nonce: Option<Nonce>,
) -> Option<Packets> {
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blockstore) = blockstore {
@@ -467,9 +576,19 @@ impl ServeRepair {
if meta.received == 0 {
break;
}
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
if let Ok(Some(packet)) = packet {
let nonce = if Shred::is_nonce_unlocked(slot) {
nonce
} else {
None
};
let packet = repair_response::repair_response_packet(
blockstore,
slot,
meta.received - 1,
from_addr,
nonce,
);
if let Some(packet) = packet {
res.packets.push(packet);
}
if meta.is_parent_set() && res.packets.len() <= max_responses {
@@ -484,41 +603,31 @@ impl ServeRepair {
}
Some(res)
}
fn get_data_shred_as_packet(
blockstore: &Arc<Blockstore>,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
) -> Result<Option<Packet>> {
let data = blockstore.get_data_shred(slot, shred_index)?;
Ok(data.map(|data| {
let mut packet = Packet::default();
packet.meta.size = data.len();
packet.meta.set_addr(dest);
packet.data.copy_from_slice(&data);
packet
}))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::result::Error;
use crate::{repair_response, result::Error};
use solana_ledger::get_tmp_ledger_path;
use solana_ledger::{
blockstore::make_many_slot_entries,
blockstore_processor::fill_blockstore_slot_with_ticks,
shred::{
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
NONCE_SHRED_PAYLOAD_SIZE, UNLOCK_NONCE_SLOT,
},
};
use solana_sdk::{hash::Hash, pubkey::Pubkey, timing::timestamp};
/// test run_window_request responds with the right shred, and do not overrun
#[test]
fn run_highest_window_request() {
fn test_run_highest_window_request() {
run_highest_window_request(UNLOCK_NONCE_SLOT + 3, 3, Some(9));
run_highest_window_request(UNLOCK_NONCE_SLOT, 3, None);
}
/// test run_window_request responds with the right shred, and do not overrun
fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Option<Nonce>) {
let recycler = PacketsRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
@@ -530,41 +639,51 @@ mod tests {
Some(&blockstore),
0,
0,
nonce,
);
assert!(rv.is_none());
let _ = fill_blockstore_slot_with_ticks(
&blockstore,
max_ticks_per_n_shreds(1) + 1,
2,
1,
max_ticks_per_n_shreds(1, None) + 1,
slot,
slot - num_slots + 1,
Hash::default(),
);
let index = 1;
let rv = ServeRepair::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
2,
1,
);
slot,
index,
nonce,
)
.expect("packets");
let rv: Vec<Shred> = rv
.expect("packets")
.packets
.into_iter()
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.filter_map(|b| {
if nonce.is_some() {
assert_eq!(repair_response::nonce(&b.data[..]), nonce);
}
Shred::new_from_serialized_shred(b.data.to_vec()).ok()
})
.collect();
assert!(!rv.is_empty());
let index = blockstore.meta(2).unwrap().unwrap().received - 1;
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
assert_eq!(rv[0].index(), index as u32);
assert_eq!(rv[0].slot(), 2);
assert_eq!(rv[0].slot(), slot);
let rv = ServeRepair::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
2,
slot,
index + 1,
nonce,
);
assert!(rv.is_none());
}
@@ -572,9 +691,14 @@ mod tests {
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
/// test window requests respond with the right shred, and do not overrun
#[test]
fn run_window_request() {
fn test_run_window_request() {
run_window_request(UNLOCK_NONCE_SLOT + 1, Some(9));
run_window_request(UNLOCK_NONCE_SLOT - 3, None);
}
/// test window requests respond with the right shred, and do not overrun
fn run_window_request(slot: Slot, nonce: Option<Nonce>) {
let recycler = PacketsRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
@@ -601,12 +725,13 @@ mod tests {
&socketaddr_any!(),
Some(&blockstore),
&me,
slot,
0,
0,
nonce,
);
assert!(rv.is_none());
let mut common_header = ShredCommonHeader::default();
common_header.slot = 2;
common_header.slot = slot;
common_header.index = 1;
let mut data_header = DataShredHeader::default();
data_header.parent_offset = 1;
@@ -614,30 +739,37 @@ mod tests {
common_header,
data_header,
CodingShredHeader::default(),
NONCE_SHRED_PAYLOAD_SIZE,
);
blockstore
.insert_shreds(vec![shred_info], None, false)
.expect("Expect successful ledger write");
let index = 1;
let rv = ServeRepair::run_window_request(
&recycler,
&me,
&socketaddr_any!(),
Some(&blockstore),
&me,
2,
1,
);
assert!(!rv.is_none());
slot,
index,
nonce,
)
.expect("packets");
let rv: Vec<Shred> = rv
.expect("packets")
.packets
.into_iter()
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.filter_map(|b| {
if nonce.is_some() {
assert_eq!(repair_response::nonce(&b.data[..]), nonce);
}
Shred::new_from_serialized_shred(b.data.to_vec()).ok()
})
.collect();
assert_eq!(rv[0].index(), 1);
assert_eq!(rv[0].slot(), 2);
assert_eq!(rv[0].slot(), slot);
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
@@ -651,7 +783,7 @@ mod tests {
let serve_repair = ServeRepair::new(cluster_info.clone());
let rv = serve_repair.repair_request(
&cluster_slots,
&RepairType::Shred(0, 0),
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
);
@@ -677,7 +809,7 @@ mod tests {
let rv = serve_repair
.repair_request(
&cluster_slots,
&RepairType::Shred(0, 0),
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
)
@@ -709,7 +841,7 @@ mod tests {
let rv = serve_repair
.repair_request(
&cluster_slots,
&RepairType::Shred(0, 0),
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
)
@@ -725,52 +857,85 @@ mod tests {
}
#[test]
fn run_orphan() {
fn test_run_orphan() {
run_orphan(UNLOCK_NONCE_SLOT + 1, 3, Some(9));
// Test where the response will be for some slots <= UNLOCK_NONCE_SLOT,
// and some of the response will be for some slots > UNLOCK_NONCE_SLOT.
// Should not panic.
run_orphan(UNLOCK_NONCE_SLOT, 3, None);
run_orphan(UNLOCK_NONCE_SLOT, 3, Some(9));
}
fn run_orphan(slot: Slot, num_slots: u64, nonce: Option<Nonce>) {
solana_logger::setup();
let recycler = PacketsRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
let rv = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot,
0,
nonce,
);
assert!(rv.is_none());
// Create slots 1, 2, 3 with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(1, 3, 5);
// Create slots [slot, slot + num_slots) with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(slot, num_slots, 5);
blockstore
.insert_shreds(shreds, None, false)
.expect("Expect successful ledger write");
// We don't have slot 4, so we don't know how to service this requeset
let rv =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
// We don't have slot `slot + num_slots`, so we don't know how to service this request
let rv = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot + num_slots,
5,
nonce,
);
assert!(rv.is_none());
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
// for this request
let rv: Vec<_> =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
let expected: Vec<_> = (1..=3)
// For a orphan request for `slot + num_slots - 1`, we should return the highest shreds
// from slots in the range [slot, slot + num_slots - 1]
let rv: Vec<_> = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot + num_slots - 1,
5,
nonce,
)
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
// Verify responses
let expected: Vec<_> = (slot..slot + num_slots)
.rev()
.map(|slot| {
.filter_map(|slot| {
let nonce = if Shred::is_nonce_unlocked(slot) {
nonce
} else {
None
};
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
ServeRepair::get_data_shred_as_packet(
repair_response::repair_response_packet(
&blockstore,
slot,
index,
&socketaddr_any!(),
nonce,
)
.unwrap()
.unwrap()
})
.collect();
assert_eq!(rv, expected)
assert_eq!(rv, expected);
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");


@@ -22,12 +22,23 @@ use std::time::Instant;
pub type ShredsReceived = HashMap<Slot, BitVec<u64>>;
#[derive(Default)]
struct ShredFetchStats {
index_overrun: usize,
shred_count: usize,
index_bad_deserialize: usize,
index_out_of_bounds: usize,
slot_bad_deserialize: usize,
duplicate_shred: usize,
slot_out_of_range: usize,
}
pub struct ShredFetchStage {
thread_hdls: Vec<JoinHandle<()>>,
}
impl ShredFetchStage {
fn get_slot_index(p: &Packet, index_overrun: &mut usize) -> Option<(u64, u32)> {
fn get_slot_index(p: &Packet, stats: &mut ShredFetchStats) -> Option<(u64, u32)> {
let index_start = OFFSET_OF_SHRED_INDEX;
let index_end = index_start + SIZE_OF_SHRED_INDEX;
let slot_start = OFFSET_OF_SHRED_SLOT;
@@ -38,11 +49,17 @@ impl ShredFetchStage {
if index < MAX_DATA_SHREDS_PER_SLOT as u32 && slot_end <= p.meta.size {
if let Ok(slot) = limited_deserialize::<Slot>(&p.data[slot_start..slot_end]) {
return Some((slot, index));
} else {
stats.slot_bad_deserialize += 1;
}
} else {
stats.index_out_of_bounds += 1;
}
} else {
stats.index_bad_deserialize += 1;
}
} else {
*index_overrun += 1;
stats.index_overrun += 1;
}
None
}
@@ -50,7 +67,7 @@ impl ShredFetchStage {
fn process_packet<F>(
p: &mut Packet,
shreds_received: &mut ShredsReceived,
index_overrun: &mut usize,
stats: &mut ShredFetchStats,
last_root: Slot,
last_slot: Slot,
slots_per_epoch: u64,
@@ -59,7 +76,7 @@ impl ShredFetchStage {
F: Fn(&mut Packet),
{
p.meta.discard = true;
if let Some((slot, index)) = Self::get_slot_index(p, index_overrun) {
if let Some((slot, index)) = Self::get_slot_index(p, stats) {
// Seems reasonable to limit shreds to 2 epochs away
if slot > last_root && slot < (last_slot + 2 * slots_per_epoch) {
// Shred filter
@@ -70,7 +87,11 @@ impl ShredFetchStage {
p.meta.discard = false;
modify(p);
slot_received.set(index.into(), true);
} else {
stats.duplicate_shred += 1;
}
} else {
stats.slot_out_of_range += 1;
}
}
}
@@ -80,6 +101,7 @@ impl ShredFetchStage {
recvr: PacketReceiver,
sendr: PacketSender,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
modify: F,
) where
F: Fn(&mut Packet),
@@ -92,6 +114,9 @@ impl ShredFetchStage {
let mut last_slot = std::u64::MAX;
let mut slots_per_epoch = 0;
let mut last_stats = Instant::now();
let mut stats = ShredFetchStats::default();
while let Some(mut p) = recvr.iter().next() {
if last_cleared.elapsed().as_millis() > 200 {
shreds_received.clear();
@@ -105,22 +130,32 @@ impl ShredFetchStage {
slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch());
}
}
let mut index_overrun = 0;
let mut shred_count = 0;
stats.shred_count += p.packets.len();
p.packets.iter_mut().for_each(|mut packet| {
shred_count += 1;
Self::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
&modify,
);
});
inc_new_counter_warn!("shred_fetch_stage-shred_index_overrun", index_overrun);
inc_new_counter_info!("shred_fetch_stage-shred_count", shred_count);
if last_stats.elapsed().as_millis() > 1000 {
datapoint_info!(
name,
("index_overrun", stats.index_overrun, i64),
("shred_count", stats.shred_count, i64),
("slot_bad_deserialize", stats.slot_bad_deserialize, i64),
("index_bad_deserialize", stats.index_bad_deserialize, i64),
("index_out_of_bounds", stats.index_out_of_bounds, i64),
("slot_out_of_range", stats.slot_out_of_range, i64),
("duplicate_shred", stats.duplicate_shred, i64),
);
stats = ShredFetchStats::default();
last_stats = Instant::now();
}
if sendr.send(p).is_err() {
break;
}
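Aside: the hunk above replaces per-batch counters with a ShredFetchStats struct that accumulates across batches and is flushed to a single datapoint roughly once per second. A minimal std-only sketch of that accumulate-then-flush pattern; println! stands in for datapoint_info!, and the batch sizes and timings are arbitrary.

use std::time::{Duration, Instant};

#[derive(Default, Debug)]
struct ShredFetchStats {
    shred_count: usize,
    index_overrun: usize,
}

fn main() {
    let mut stats = ShredFetchStats::default();
    let mut last_stats = Instant::now();
    for batch in 0..8u32 {
        // Pretend each received batch held ten packets, some of them malformed.
        stats.shred_count += 10;
        if batch % 3 == 0 {
            stats.index_overrun += 1;
        }
        if last_stats.elapsed() > Duration::from_secs(1) {
            // The real code emits a datapoint_info!(...) here instead.
            println!("flushing {:?}", stats);
            stats = ShredFetchStats::default();
            last_stats = Instant::now();
        }
        std::thread::sleep(Duration::from_millis(300));
    }
}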
@@ -133,6 +168,7 @@ impl ShredFetchStage {
sender: PacketSender,
recycler: Recycler<PinnedVec<Packet>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
modify: F,
) -> (Vec<JoinHandle<()>>, JoinHandle<()>)
where
@@ -154,7 +190,7 @@ impl ShredFetchStage {
let modifier_hdl = Builder::new()
.name("solana-tvu-fetch-stage-packet-modifier".to_string())
.spawn(move || Self::modify_packets(packet_receiver, sender, bank_forks, modify))
.spawn(move || Self::modify_packets(packet_receiver, sender, bank_forks, name, modify))
.unwrap();
(streamers, modifier_hdl)
}
@@ -185,6 +221,7 @@ impl ShredFetchStage {
sender.clone(),
recycler.clone(),
bank_forks.clone(),
"shred_fetch_tvu_forwards",
|p| p.meta.forward = true,
);
@@ -194,6 +231,7 @@ impl ShredFetchStage {
sender.clone(),
recycler.clone(),
bank_forks,
"shred_fetch_repair",
|p| p.meta.repair = true,
);
@@ -225,7 +263,7 @@ mod tests {
solana_logger::setup();
let mut shreds_received = ShredsReceived::default();
let mut packet = Packet::default();
let mut index_overrun = 0;
let mut stats = ShredFetchStats::default();
let last_root = 0;
let last_slot = 100;
let slots_per_epoch = 10;
@@ -233,13 +271,13 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
&|_p| {},
);
assert_eq!(index_overrun, 1);
assert_eq!(stats.index_overrun, 1);
assert!(packet.meta.discard);
let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
shred.copy_to_packet(&mut packet);
@@ -248,7 +286,7 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
3,
last_slot,
slots_per_epoch,
@@ -260,7 +298,7 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
@@ -272,7 +310,7 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
@@ -287,7 +325,7 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
@@ -301,7 +339,7 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
@@ -315,10 +353,10 @@ mod tests {
let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
let mut packet = Packet::default();
shred.copy_to_packet(&mut packet);
let mut index_overrun = 0;
let mut stats = ShredFetchStats::default();
assert_eq!(
Some((1, 3)),
ShredFetchStage::get_slot_index(&packet, &mut index_overrun)
ShredFetchStage::get_slot_index(&packet, &mut stats)
);
}
}


@@ -8,6 +8,7 @@ use crate::{
cluster_info_vote_listener::{ClusterInfoVoteListener, VoteTracker},
fetch_stage::FetchStage,
poh_recorder::{PohRecorder, WorkingBankEntry},
rpc_subscriptions::RpcSubscriptions,
sigverify::TransactionSigVerifier,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
};
@@ -44,6 +45,7 @@ impl Tpu {
tpu_forwards_sockets: Vec<UdpSocket>,
broadcast_sockets: Vec<UdpSocket>,
sigverify_disabled: bool,
subscriptions: &Arc<RpcSubscriptions>,
transaction_status_sender: Option<TransactionStatusSender>,
blockstore: &Arc<Blockstore>,
broadcast_type: &BroadcastStageType,
@@ -79,6 +81,7 @@ impl Tpu {
&poh_recorder,
vote_tracker,
bank_forks,
subscriptions.clone(),
);
let banking_stage = BankingStage::new(


@@ -259,6 +259,7 @@ pub mod tests {
use solana_runtime::bank::Bank;
use std::sync::atomic::Ordering;
#[ignore]
#[test]
#[serial]
fn test_tvu_exit() {
@@ -292,11 +293,12 @@ pub mod tests {
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let (retransmit_slots_sender, _retransmit_slots_receiver) = unbounded();
let bank_forks = Arc::new(RwLock::new(bank_forks));
let tvu = Tvu::new(
&vote_keypair.pubkey(),
vec![Arc::new(vote_keypair)],
&storage_keypair,
&Arc::new(RwLock::new(bank_forks)),
&bank_forks,
&cref1,
{
Sockets {
@@ -309,7 +311,11 @@ pub mod tests {
blockstore,
&StorageState::default(),
l_receiver,
&Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone())),
&Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
block_commitment_cache.clone(),
)),
&poh_recorder,
&leader_schedule_cache,
&exit,


@@ -241,7 +241,11 @@ impl Validator {
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone()));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
block_commitment_cache.clone(),
));
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
@@ -314,6 +318,13 @@ impl Validator {
);
if config.dev_halt_at_slot.is_some() {
// Simulate a confirmed root to avoid RPC errors with CommitmentConfig::max() and
// to ensure RPC endpoints like getConfirmedBlock, which require a confirmed root, work
block_commitment_cache
.write()
.unwrap()
.set_largest_confirmed_root(bank_forks.read().unwrap().root());
// Park with the RPC service running, ready for inspection!
warn!("Validator halted");
std::thread::park();
@@ -459,6 +470,7 @@ impl Validator {
node.sockets.tpu_forwards,
node.sockets.broadcast,
config.dev_sigverify_disabled,
&subscriptions,
transaction_status_sender,
&blockstore,
&config.broadcast_stage_type,


@@ -4,8 +4,10 @@
use crate::{
cluster_info::ClusterInfo,
cluster_slots::ClusterSlots,
repair_response,
repair_service::{RepairService, RepairStrategy},
result::{Error, Result},
serve_repair::DEFAULT_NONCE,
};
use crossbeam_channel::{
unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Sender as CrossbeamSender,
@@ -13,24 +15,25 @@ use crossbeam_channel::{
use rayon::iter::IntoParallelRefMutIterator;
use rayon::iter::ParallelIterator;
use rayon::ThreadPool;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blockstore::{
self, Blockstore, BlockstoreInsertionMetrics, MAX_DATA_SHREDS_PER_SLOT,
use solana_ledger::{
bank_forks::BankForks,
blockstore::{self, Blockstore, BlockstoreInsertionMetrics, MAX_DATA_SHREDS_PER_SLOT},
leader_schedule_cache::LeaderScheduleCache,
shred::{Nonce, Shred},
};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
use solana_perf::packet::Packets;
use solana_rayon_threadlimit::get_thread_count;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::duration_as_ms;
use solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms};
use solana_streamer::streamer::PacketSender;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::{Duration, Instant};
use std::{
net::{SocketAddr, UdpSocket},
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
if shred.is_data() {
@@ -107,8 +110,15 @@ fn run_check_duplicate(
Ok(())
}
fn verify_repair(_shred: &Shred, repair_info: &Option<RepairMeta>) -> bool {
repair_info
.as_ref()
.map(|repair_info| repair_info.nonce == DEFAULT_NONCE)
.unwrap_or(true)
}
fn run_insert<F>(
shred_receiver: &CrossbeamReceiver<Vec<Shred>>,
shred_receiver: &CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
handle_duplicate: F,
@@ -118,12 +128,16 @@ where
F: Fn(Shred) -> (),
{
let timer = Duration::from_millis(200);
let mut shreds = shred_receiver.recv_timeout(timer)?;
while let Ok(mut more_shreds) = shred_receiver.try_recv() {
shreds.append(&mut more_shreds)
let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?;
while let Ok((more_shreds, more_repair_infos)) = shred_receiver.try_recv() {
shreds.extend(more_shreds);
repair_infos.extend(more_repair_infos);
}
assert_eq!(shreds.len(), repair_infos.len());
let mut i = 0;
shreds.retain(|shred| (verify_repair(&shred, &repair_infos[i]), i += 1).0);
blockstore.insert_shreds_handle_duplicate(
shreds,
Some(leader_schedule_cache),
@@ -136,7 +150,7 @@ where
fn recv_window<F>(
blockstore: &Arc<Blockstore>,
insert_shred_sender: &CrossbeamSender<Vec<Shred>>,
insert_shred_sender: &CrossbeamSender<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
my_pubkey: &Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
retransmit: &PacketSender,
@@ -160,7 +174,7 @@ where
inc_new_counter_debug!("streamer-recv_window-recv", total_packets);
let last_root = blockstore.last_root();
let shreds: Vec<_> = thread_pool.install(|| {
let (shreds, repair_infos): (Vec<_>, Vec<_>) = thread_pool.install(|| {
packets
.par_iter_mut()
.flat_map(|packets| {
@@ -169,34 +183,59 @@ where
.iter_mut()
.filter_map(|packet| {
if packet.meta.discard {
inc_new_counter_debug!("streamer-recv_window-invalid_signature", 1);
inc_new_counter_debug!(
"streamer-recv_window-invalid_or_unnecessary_packet",
1
);
None
} else if let Ok(shred) =
Shred::new_from_serialized_shred(packet.data.to_vec())
{
if shred_filter(&shred, last_root) {
// Mark slot as dead if the current shred is on the boundary
// of max shreds per slot. However, let the current shred
// get retransmitted. It'll allow peer nodes to see this shred
// and trigger them to mark the slot as dead.
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
let _ = blockstore.set_dead_slot(shred.slot());
} else {
// shred fetch stage should be sending packets
// with sufficiently large buffers. Needed to ensure
// call to `new_from_serialized_shred` is safe.
assert_eq!(packet.data.len(), PACKET_DATA_SIZE);
let serialized_shred = packet.data.to_vec();
if let Ok(shred) = Shred::new_from_serialized_shred(serialized_shred) {
let repair_info = {
if packet.meta.repair && Shred::is_nonce_unlocked(shred.slot())
{
if let Some(nonce) = repair_response::nonce(&packet.data) {
let repair_info = RepairMeta {
_from_addr: packet.meta.addr(),
nonce,
};
Some(repair_info)
} else {
// If can't parse the nonce, dump the packet
return None;
}
} else {
None
}
};
if shred_filter(&shred, last_root) {
// Mark slot as dead if the current shred is on the boundary
// of max shreds per slot. However, let the current shred
// get retransmitted. It'll allow peer nodes to see this shred
// and trigger them to mark the slot as dead.
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
let _ = blockstore.set_dead_slot(shred.slot());
}
packet.meta.slot = shred.slot();
packet.meta.seed = shred.seed();
Some((shred, repair_info))
} else {
packet.meta.discard = true;
None
}
packet.meta.slot = shred.slot();
packet.meta.seed = shred.seed();
Some(shred)
} else {
packet.meta.discard = true;
None
}
} else {
packet.meta.discard = true;
None
}
})
.collect::<Vec<_>>()
})
.collect()
.unzip()
});
trace!("{:?} shreds from packets", shreds.len());
@@ -210,7 +249,7 @@ where
}
}
insert_shred_sender.send(shreds)?;
insert_shred_sender.send((shreds, repair_infos))?;
trace!(
"Elapsed processing time in recv_window(): {}",
@@ -220,6 +259,11 @@ where
Ok(())
}
struct RepairMeta {
_from_addr: SocketAddr,
nonce: Nonce,
}
// Implement a destructor for the window_service thread to signal it exited
// even on panics
struct Finalizer {
@@ -343,7 +387,7 @@ impl WindowService {
exit: &Arc<AtomicBool>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
insert_receiver: CrossbeamReceiver<Vec<Shred>>,
insert_receiver: CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
duplicate_sender: CrossbeamSender<Shred>,
) -> JoinHandle<()> {
let exit = exit.clone();
@@ -393,7 +437,7 @@ impl WindowService {
id: Pubkey,
exit: &Arc<AtomicBool>,
blockstore: &Arc<Blockstore>,
insert_sender: CrossbeamSender<Vec<Shred>>,
insert_sender: CrossbeamSender<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
shred_filter: F,
bank_forks: Option<Arc<RwLock<BankForks>>>,
@@ -495,13 +539,12 @@ mod test {
cluster_info::ClusterInfo, contact_info::ContactInfo, repair_service::RepairSlotRange,
};
use rand::thread_rng;
use solana_ledger::shred::DataShredHeader;
use solana_ledger::{
blockstore::{make_many_slot_entries, Blockstore},
entry::{create_ticks, Entry},
genesis_utils::create_genesis_config_with_leader,
get_tmp_ledger_path,
shred::Shredder,
shred::{DataShredHeader, Shredder, NONCE_SHRED_PAYLOAD_SIZE},
};
use solana_perf::packet::Packet;
use solana_sdk::{
@@ -573,8 +616,12 @@ mod test {
// If it's a coding shred, test that slot >= root
let (common, coding) = Shredder::new_coding_shred_header(5, 5, 5, 6, 6, 0, 0);
let mut coding_shred =
Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
let mut coding_shred = Shred::new_empty_from_header(
common,
DataShredHeader::default(),
coding,
NONCE_SHRED_PAYLOAD_SIZE,
);
Shredder::sign_shred(&leader_keypair, &mut coding_shred);
assert_eq!(
should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 0, 0),


@@ -19,6 +19,7 @@ mod tests {
};
use solana_sdk::{
clock::Slot,
genesis_config::GenesisConfig,
hash::hashv,
pubkey::Pubkey,
signature::{Keypair, Signer},
@@ -90,6 +91,7 @@ mod tests {
snapshot_package_output_path,
&(old_last_bank.slot(), old_last_bank.get_accounts_hash()),
),
&GenesisConfig::default(),
)
.unwrap();


@@ -6,7 +6,13 @@ use solana_core::{
commitment::BlockCommitmentCache, rpc_pubsub_service::PubSubService,
rpc_subscriptions::RpcSubscriptions, validator::TestValidator,
};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_ledger::{
bank_forks::BankForks,
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_runtime::bank::Bank;
use solana_sdk::{
commitment_config::CommitmentConfig, pubkey::Pubkey, rpc_port, signature::Signer,
system_transaction,
@@ -40,7 +46,7 @@ fn test_rpc_client() {
assert_eq!(
client.get_version().unwrap().solana_core,
solana_clap_utils::version!()
solana_version::version!()
);
assert!(client.get_account(&bob_pubkey).is_err());
@@ -88,8 +94,12 @@ fn test_slot_subscription() {
let exit = Arc::new(AtomicBool::new(false));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks,
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore,
))),

View File

@@ -455,7 +455,8 @@ fn network_run_pull(
overhead += node
.lock()
.unwrap()
.process_pull_response(&from, &timeouts, rsp, now);
.process_pull_response(&from, &timeouts, rsp, now)
.0;
});
(bytes, msgs, overhead)
})

View File

@@ -9,7 +9,7 @@ use reqwest::{self, header::CONTENT_TYPE};
use serde_json::{json, Value};
use solana_client::{
rpc_client::{get_rpc_request_str, RpcClient},
rpc_response::{Response, RpcSignatureResult},
rpc_response::{Response, RpcAccount, RpcSignatureResult},
};
use solana_core::{rpc_pubsub::gen_client::Client as PubsubClient, validator::TestValidator};
use solana_sdk::{
@@ -147,7 +147,7 @@ fn test_rpc_invalid_requests() {
.unwrap();
let json: Value = serde_json::from_str(&response.text().unwrap()).unwrap();
let the_error = json["error"]["message"].as_str().unwrap();
assert_eq!(the_error, "Invalid request");
assert_eq!(the_error, "Invalid");
// test invalid get_account_info request
let client = reqwest::blocking::Client::new();
@@ -167,7 +167,7 @@ fn test_rpc_invalid_requests() {
.unwrap();
let json: Value = serde_json::from_str(&response.text().unwrap()).unwrap();
let the_error = json["error"]["message"].as_str().unwrap();
assert_eq!(the_error, "Invalid request");
assert_eq!(the_error, "Invalid");
// test invalid get_account_info request
let client = reqwest::blocking::Client::new();
@@ -217,9 +217,15 @@ fn test_rpc_subscriptions() {
.iter()
.map(|tx| tx.signatures[0].to_string())
.collect();
let account_set: HashSet<String> = transactions
.iter()
.map(|tx| tx.message.account_keys[1].to_string())
.collect();
// Track when subscriptions are ready
let (ready_sender, ready_receiver) = channel::<()>();
// Track account notifications are received
let (account_sender, account_receiver) = channel::<Response<RpcAccount>>();
// Track when status notifications are received
let (status_sender, status_receiver) = channel::<(String, Response<RpcSignatureResult>)>();
@@ -262,6 +268,22 @@ fn test_rpc_subscriptions() {
eprintln!("slot sub err: {:#?}", err);
}),
);
for pubkey in account_set {
let account_sender = account_sender.clone();
tokio::spawn(
client
.account_subscribe(pubkey, None)
.and_then(move |account_stream| {
account_stream.for_each(move |result| {
account_sender.send(result).unwrap();
future::ok(())
})
})
.map_err(|err| {
eprintln!("acct sub err: {:#?}", err);
}),
);
}
future::ok(())
})
.map_err(|_| ())
@@ -296,7 +318,7 @@ fn test_rpc_subscriptions() {
}
// Wait for all signature subscriptions
let deadline = Instant::now() + Duration::from_secs(5);
let deadline = Instant::now() + Duration::from_secs(7);
while !signature_set.is_empty() {
let timeout = deadline.saturating_duration_since(Instant::now());
match status_receiver.recv_timeout(timeout) {
@@ -315,6 +337,26 @@ fn test_rpc_subscriptions() {
}
}
let deadline = Instant::now() + Duration::from_secs(5);
let mut account_notifications = transactions.len();
while account_notifications > 0 {
let timeout = deadline.saturating_duration_since(Instant::now());
match account_receiver.recv_timeout(timeout) {
Ok(result) => {
assert_eq!(result.value.lamports, 1);
account_notifications -= 1;
}
Err(_err) => {
assert!(
false,
"recv_timeout, {}/{} accounts remaining",
account_notifications,
transactions.len()
);
}
}
}
rt.shutdown_now().wait().unwrap();
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.1.8"
version = "1.1.15"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

(Two binary image files added, 245 KiB and 190 KiB, not shown in this diff.)

View File

@@ -31,6 +31,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
* [getIdentity](jsonrpc-api.md#getidentity)
* [getInflation](jsonrpc-api.md#getinflation)
* [getLargestAccounts](jsonrpc-api.md#getlargestaccounts)
* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
* [getProgramAccounts](jsonrpc-api.md#getprogramaccounts)
@@ -42,13 +43,14 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getStoragePubkeysForSlot](jsonrpc-api.md#getstoragepubkeysforslot)
* [getStorageTurn](jsonrpc-api.md#getstorageturn)
* [getStorageTurnRate](jsonrpc-api.md#getstorageturnrate)
* [getSupply](jsonrpc-api.md#getsupply)
* [getTransactionCount](jsonrpc-api.md#gettransactioncount)
* [getTotalSupply](jsonrpc-api.md#gettotalsupply)
* [getVersion](jsonrpc-api.md#getversion)
* [getVoteAccounts](jsonrpc-api.md#getvoteaccounts)
* [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot)
* [requestAirdrop](jsonrpc-api.md#requestairdrop)
* [sendTransaction](jsonrpc-api.md#sendtransaction)
* [simulateTransaction](jsonrpc-api.md#simulatetransaction)
* [setLogFilter](jsonrpc-api.md#setlogfilter)
* [validatorExit](jsonrpc-api.md#validatorexit)
* [Subscription Websocket](jsonrpc-api.md#subscription-websocket)
@@ -97,7 +99,8 @@ Solana nodes choose which bank state to query based on a commitment requirement
set by the client. Clients may specify either:
* `{"commitment":"max"}` - the node will query the most recent bank confirmed by the cluster as having reached `MAX_LOCKOUT_HISTORY` confirmations
* `{"commitment":"root"}` - the node will query the most recent bank having reached `MAX_LOCKOUT_HISTORY` confirmations on this node
* `{"commitment":"recent"}` - the node will query its most recent bank state
* `{"commitment":"single"}` - the node will query the most recent bank having reached 1 confirmation
* `{"commitment":"recent"}` - the node will query its most recent bank
The commitment parameter should be included as the last element in the `params` array:
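As a hedged illustration, here is a minimal Rust sketch (assuming the `solana-client` and `solana-sdk` crates from this repository; the address below is only an example) in which the client serializes the chosen commitment configuration as that final `params` element:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey};
use std::str::FromStr;

fn main() {
    let client = RpcClient::new("http://localhost:8899".to_string());
    let pubkey = Pubkey::from_str("CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12").unwrap();

    // The commitment config becomes the last element of the JSON-RPC `params`
    // array, e.g. ["CM78...", {"commitment":"recent"}].
    let balance = client
        .get_balance_with_commitment(&pubkey, CommitmentConfig::recent())
        .expect("rpc request failed");

    println!("balance at slot {}: {}", balance.context.slot, balance.value);
}
```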
@@ -258,7 +261,8 @@ The result field will be an array of JSON objects, each with the following sub f
* `pubkey: <string>` - Node public key, as base-58 encoded string
* `gossip: <string>` - Gossip network address for the node
* `tpu: <string>` - TPU network address for the node
* `rpc: <string>` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
* `rpc: <string>|null` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
* `version: <string>|null` - The software version of the node, or `null` if the version information is not available
#### Example:
@@ -267,7 +271,7 @@ The result field will be an array of JSON objects, each with the following sub f
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getClusterNodes"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"},"version":"1.0.0 c375ce1f"],"id":1}
```
### getConfirmedBlock
@@ -634,6 +638,34 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"storage":0.1,"taper":0.15,"terminal":0.015},"id":1}
```
### getLargestAccounts
Returns the 20 largest accounts, by lamport balance
#### Parameters:
* `<object>` - (optional) Configuration object containing the following optional fields:
* (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
* (optional) `filter: <string>` - filter results by account type; currently supported: `circulating|nonCirculating`
#### Results:
The result will be an RpcResponse JSON object with `value` equal to an array of:
* `<object>` - a JSON object containing:
* `address: <string>`, base-58 encoded address of the account
* `lamports: <u64>`, number of lamports in the account, as a u64
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLargestAccounts"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":54},"value":[{"lamports":999974,"address":"99P8ZgtJYe1buSK8JXkvpLh8xPsCFuLYhz9hQFNw93WJ"},{"lamports":42,"address":"uPwWLo16MVehpyWqsLkK3Ka8nLowWvAHbBChqv2FZeL"},{"lamports":42,"address":"aYJCgU7REfu3XF8b3QhkqgqQvLizx8zxuLBHA25PzDS"},{"lamports":42,"address":"CTvHVtQ4gd4gUcw3bdVgZJJqApXE9nCbbbP4VTS5wE1D"},{"lamports":20,"address":"4fq3xJ6kfrh9RkJQsmVd5gNMvJbuSHfErywvEjNQDPxu"},{"lamports":4,"address":"AXJADheGVp9cruP8WYu46oNkRbeASngN5fPCMVGQqNHa"},{"lamports":2,"address":"8NT8yS6LiwNprgW4yM1jPPow7CwRUotddBVkrkWgYp24"},{"lamports":1,"address":"SysvarEpochSchedu1e111111111111111111111111"},{"lamports":1,"address":"11111111111111111111111111111111"},{"lamports":1,"address":"Stake11111111111111111111111111111111111111"},{"lamports":1,"address":"SysvarC1ock11111111111111111111111111111111"},{"lamports":1,"address":"StakeConfig11111111111111111111111111111111"},{"lamports":1,"address":"SysvarRent111111111111111111111111111111111"},{"lamports":1,"address":"Config1111111111111111111111111111111111111"},{"lamports":1,"address":"SysvarStakeHistory1111111111111111111111111"},{"lamports":1,"address":"SysvarRecentB1ockHashes11111111111111111111"},{"lamports":1,"address":"SysvarFees111111111111111111111111111111111"},{"lamports":1,"address":"Vote111111111111111111111111111111111111111"}]},"id":1}
```
### getLeaderSchedule
Returns the leader schedule for an epoch
@@ -917,6 +949,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":1024,"id":1}
```
### getSupply
Returns information about the current supply.
#### Parameters:
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
The result will be an RpcResponse JSON object with `value` equal to a JSON object containing:
* `total: <u64>` - Total supply in lamports
* `circulating: <u64>` - Circulating supply in lamports
* `nonCirculating: <u64>` - Non-circulating supply in lamports
* `nonCirculatingAccounts: <array>` - an array of account addresses of non-circulating accounts, as strings
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSupply"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":{"circulating":16000,"nonCirculating":1000000,"nonCirculatingAccounts":["FEy8pTbP5fEoqMV1GdTz83byuA8EKByqYat1PKDgVAq5","9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA","3mi1GmwEE3zo2jmfDuzvjSX9ovRXsDUKHvsntpkhuLJ9","BYxEJTDerkaRWBem3XgnVcdhppktBXa2HbkHPKj2Ui4Z],total:1016000}},"id":1}
```
### getTransactionCount
Returns the current Transaction count from the ledger
@@ -939,28 +997,6 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":268,"id":1}
```
### getTotalSupply
Returns the current total supply in lamports
#### Parameters:
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
* `<u64>` - Total supply
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTotalSupply"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":10126,"id":1}
```
### getVersion
Returns the current solana versions running on the node
@@ -981,7 +1017,7 @@ The result field will be a JSON object with the following fields:
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"solana-core": "1.1.8"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "1.1.15"},"id":1}
```
### getVoteAccounts
@@ -1077,10 +1113,34 @@ Creates new transaction
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["3gKEMTuxvm3DKEJc4UyiyoNz1sxwdVRW2pyDDXqaCvUjGApnsazGh2y4W92zuaSSdJhBbWLYAkZokBt4N5oW27R7zCVaLLpLxvATL2GgheEh9DmmDR1P9r1ZqirVXM2fF3z5cafmc4EtwWc1UErFdCWj1qYvy4bDGMLXRYLURxaKytEEqrxz6JXj8rUHhDpjTZeFxmC6iAW3hZr6cmaAzewQCQfiEv2HfydriwHDtN95u3Y1EF6SuXxcRqox2aTjGye2Ln9zFj4XbnAtjCmkZhR"]}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b","id":1}
{"jsonrpc":"2.0","result":"2id3YC2jK9G5Wo2phDx4gJVAew8DcY5NAojnVuao8rkxwPYPe8cSwE5GzhEgJA2y8fVjDEo6iR6ykBvDxrTQrtpb","id":1}
```
### simulateTransaction
Simulate sending a transaction
#### Parameters:
* `<string>` - Transaction, as base-58 encoded string. The transaction must have a valid blockhash, but is not required to be signed.
* `<object>` - (optional) Configuration object containing the following field:
* `sigVerify: <bool>` - if true the transaction signatures will be verified (default: false)
#### Results:
An RpcResponse containing a TransactionStatus object
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"simulateTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":218},"value":{"confirmations":0,"err":null,"slot":218,"status":{"Ok":null}}},"id":1}
```
### setLogFilter
@@ -1129,25 +1189,11 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
### Subscription Websocket
After connect to the RPC PubSub websocket at `ws://<ADDRESS>/`:
After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/`:
* Submit subscription requests to the websocket using the methods below
* Multiple subscriptions may be active at once
* All subscriptions take an optional `confirmations` parameter, which defines
how many confirmed blocks the node should wait before sending a notification.
The greater the number, the more likely the notification is to represent
consensus across the cluster, and the less likely it is to be affected by
forking or rollbacks. If unspecified, the default value is 0; the node will
send a notification as soon as it witnesses the event. The maximum
`confirmations` wait length is the cluster's `MAX_LOCKOUT_HISTORY`, which
represents the economic finality of the chain.
* Many subscriptions take the optional [`commitment` parameter](jsonrpc-api.md#configuring-state-commitment), defining how finalized a change should be to trigger a notification. For subscriptions, if commitment is unspecified, the default value is `"single"`.
### accountSubscribe
@@ -1156,9 +1202,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
#### Parameters:
* `<string>` - account Pubkey, as base-58 encoded string
* `<u64>` - optional, number of confirmed blocks to wait before notification.
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@@ -1170,7 +1214,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", 15]}
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", {"commitment": "single"}]}
// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
@@ -1179,7 +1223,25 @@ Subscribe to an account to receive notifications when the lamports or data for a
#### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF","rentEpoch":28},"subscription":0}}
{
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"context": {
"slot": 5199307
},
"value": {
"data": "9qRxMDwy1ntDhBBoiy4Na9uDLbRTSzUS989mpwz",
"executable": false,
"lamports": 33594,
"owner": "H9oaJujXETwkmjyweuqKPFtk2no4SumoU9A3hi3dC8U6",
"rentEpoch": 635
}
},
"subscription": 23784
}
}
```
### accountUnsubscribe
@@ -1211,9 +1273,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
#### Parameters:
* `<string>` - program\_id Pubkey, as base-58 encoded string
* `<u64>` - optional, number of confirmed blocks to wait before notification.
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@@ -1223,9 +1283,9 @@ Subscribe to a program to receive notifications when the lamports or data for a
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV"]}
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["7BwE8yitxiWkD8jVPFvPmV7rs2Znzi4NHzJGLu2dzpUq"]}
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV", 15]}
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["7BwE8yitxiWkD8jVPFvPmV7rs2Znzi4NHzJGLu2dzpUq", {"commitment": "single"}]}
// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
@@ -1233,12 +1293,30 @@ Subscribe to a program to receive notifications when the lamports or data for a
#### Notification Format:
* `<string>` - account Pubkey, as base-58 encoded string
* `<object>` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
```bash
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":"9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV","data":"4SZWhnbSt3njU4QHVgPrWeekz1BudU4ttmdr9ezmrL4X6XeLeL83xVAo6ZdxwU3oXgHNeF2q6tWZbnVnBXmvNyeLVEGt8ZQ4ZmgjHfVNCEwBtzh2aDrHgQSjBFLYAdmM3uwBhcm1EyHJLeUiFqpsoAUhn6Vphwrpf44dWRAGsAJZbzvVrUW9bfucpR7xudHHg2MxQ2CdqsfS3TfWUJY3vaf2A4AUNzfAmNPHBGi99nU2hYubGSVSPcpVPpdRWQkydgqasBmTosd","rentEpoch":28}],"subscription":0}}
```
```bash
{
"jsonrpc": "2.0",
"method": "programNotification",
"params": {
"result": {
"context": {
"slot": 5208469
},
"value": {
"pubkey": "H4vnBqifaSACnKa7acsxstsY1iV1bvJNxsCY7enrd1hq"
"account": {
"data": "9qRxMDwy1ntDhBBoiy4Na9uDLbRTSzUS989m",
"executable": false,
"lamports": 33594,
"owner": "7BwE8yitxiWkD8jVPFvPmV7rs2Znzi4NHzJGLu2dzpUq",
"rentEpoch": 636
}
}
},
"subscription": 24040
}
}
```
### programUnsubscribe
@@ -1269,7 +1347,7 @@ Subscribe to a transaction signature to receive notification when the transactio
#### Parameters:
* `<string>` - Transaction Signature, as base-58 encoded string
* `<integer>` - optional, number of confirmed blocks to wait before notification.
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
@@ -1283,7 +1361,7 @@ Subscribe to a transaction signature to receive notification when the transactio
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b"]}
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b", 15]}
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b", {"commitment": "max"}]}
// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
@@ -1292,7 +1370,21 @@ Subscribe to a transaction signature to receive notification when the transactio
#### Notification Format:
```bash
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": {"err": null}, "subscription":0}}
{
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": {
"slot": 5207624
},
"value": {
"err": null
}
},
"subscription": 24006
}
}
```
### signatureUnsubscribe
@@ -1342,7 +1434,18 @@ None
#### Notification Format:
```bash
{"jsonrpc": "2.0","method": "slotNotification", "params": {"result":{"parent":75,"root":44,"slot":76},"subscription":0}}
{
"jsonrpc": "2.0",
"method": "slotNotification",
"params": {
"result": {
"parent": 75,
"root": 44,
"slot": 76
},
"subscription": 0
}
}
```
### slotUnsubscribe
@@ -1394,7 +1497,14 @@ None
The result is the latest root slot number.
```bash
{"jsonrpc": "2.0","method": "rootNotification", "params": {"result":42,"subscription":0}}
{
"jsonrpc": "2.0",
"method": "rootNotification",
"params": {
"result": 42,
"subscription": 0
}
}
```
### rootUnsubscribe
@@ -1418,3 +1528,57 @@ Unsubscribe from root notifications
// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```
### voteSubscribe
Subscribe to receive a notification any time a new vote is observed in gossip.
These votes are pre-consensus, so there is no guarantee they will enter the
ledger.
#### Parameters:
None
#### Results:
* `integer` - subscription id \(needed to unsubscribe\)
#### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"voteSubscribe"}
// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```
#### Notification Format:
The result is the latest vote, containing its hash, a list of voted slots, and an optional timestamp.
```bash
{"jsonrpc":"2.0","method":"voteNotification","params":{"result":{"hash":"8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM","slots":[1,2],"timestamp":null},"subscription":0}}
```
### voteUnsubscribe
Unsubscribe from vote notifications
#### Parameters:
* `<integer>` - subscription id to cancel
#### Results:
* `<bool>` - unsubscribe success message
#### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"voteUnsubscribe", "params":[0]}
// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```

View File

@@ -1,6 +1,18 @@
# Install the Solana Tool Suite
Install the Solana release
There are multiple ways to install the Solana tools on your computer
depending on your preferred workflow:
- [Use Solana's Install Tool (Simplest option)](#use-solanas-install-tool)
- [Download Prebuilt Binaries](#download-prebuilt-binaries)
- [Build from Source](#build-from-source)
## Use Solana's Install Tool
### MacOS & Linux
- Open your favorite Terminal application
- Install the Solana release
[LATEST_SOLANA_RELEASE_VERSION](https://github.com/solana-labs/solana/releases/tag/LATEST_SOLANA_RELEASE_VERSION) on your
machine by running:
@@ -8,11 +20,11 @@ machine by running:
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/LATEST_SOLANA_RELEASE_VERSION/install/solana-install-init.sh | sh -s - LATEST_SOLANA_RELEASE_VERSION
```
If you are connecting to a different testnet, you can replace `LATEST_SOLANA_RELEASE_VERSION` with the
- If you are connecting to a different testnet, you can replace `LATEST_SOLANA_RELEASE_VERSION` with the
release tag matching the software version of your desired testnet, or replace it
with the named channel `stable`, `beta`, or `edge`.
The following output indicates a successful update:
- The following output indicates a successful update:
```text
looking for latest release
@@ -24,8 +36,59 @@ Active release directory: /home/solana/.local/share/solana/install/active_releas
Update successful
```
After a successful install, `solana-install update` may be used to easily update
the cluster software to a newer version at any time.
- Depending on your system, the end of the installer messaging may prompt you
to
```bash
Please update your PATH environment variable to include the solana programs:
```
- If you get the above message, copy and paste the recommended command below
it to update `PATH`
- Confirm you have the desired version of `solana` installed by running:
```bash
solana --version
```
- After a successful install, `solana-install update` may be used to easily
update the Solana software to a newer version at any time.
***
### Windows
- Open a Command Prompt (`cmd.exe`) as an Administrator
- Search for Command Prompt in the Windows search bar. When the Command
Prompt app appears, right-click and select “Open as Administrator”.
If you are prompted by a pop-up window asking “Do you want to allow this app to
make changes to your device?”, click Yes.
- Copy and paste the following command, then press Enter to download the Solana
installer into a temporary directory:
```bash
curl http://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/solana-install-init-x86_64-pc-windows-gnu.exe --output C:\solana-install-tmp\solana-install-init.exe --create-dirs
```
- Copy and paste the following command, then press Enter to install the latest
version of Solana. If you see a security pop-up by your system, please select
to allow the program to run.
```bash
C:\solana-install-tmp\solana-install-init.exe LATEST_SOLANA_RELEASE_VERSION
```
- When the installer is finished, press Enter.
- Close the command prompt window and re-open a new command prompt window as a
normal user
- Search for "Command Prompt" in the search bar, then left click on the
Command Prompt app icon (no need to run as Administrator)
- Confirm you have the desired version of `solana` installed by entering:
```bash
solana --version
```
- After a successful install, `solana-install update` may be used to easily
update the Solana software to a newer version at any time.
## Download Prebuilt Binaries
@@ -45,7 +108,7 @@ cd solana-release/
export PATH=$PWD/bin:$PATH
```
### macOS
### MacOS
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
@@ -58,6 +121,21 @@ cd solana-release/
export PATH=$PWD/bin:$PATH
```
### Windows
- Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86\_64-pc-windows-gnu.tar.bz2**, then extract the
archive using WinZip or similar.
- Open a Command Prompt and navigate to the directory into which you extracted
the binaries and run:
```bash
cd solana-release/
set PATH=%cd%/bin;%PATH%
```
## Build From Source
If you are unable to use the prebuilt binaries or prefer to build it yourself

View File

@@ -6,7 +6,7 @@ Solana takes a very different approach, which it calls _Proof of History_ or _Po
Solana technically never sends a _block_, but uses the term to describe the sequence of entries that validators vote on to achieve _confirmation_. In that way, Solana's confirmation times can be compared apples to apples to block-based systems. The current implementation sets block time to 800ms.
What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimisic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.85.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash.
What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimistic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.155.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash.
## Relationship to VDFs

View File

@@ -78,7 +78,7 @@ $ solana-validator \
--dynamic-port-range 8000-8010 \
--entrypoint 35.203.170.30:8001 \
--expected-genesis-hash 4uhcVJyU9pJkvQyS88uRDiswHXSCkY3zQawwpjk2NsNY \
--expected-shred-version 28769 \
--expected-shred-version 56096 \
--limit-ledger-size
```
@@ -114,6 +114,6 @@ $ solana-validator \
--dynamic-port-range 8000-8010 \
--entrypoint mainnet-beta.solana.com:8001 \
--expected-genesis-hash 5eykt4UsFv8P8NJdTREpY1vzqKqZKvdpKuc147dw2N9d \
--expected-shred-version 54208 \
--expected-shred-version 64864 \
--limit-ledger-size
```

View File

@@ -18,7 +18,7 @@ hardware wallet.
The Solana CLI supports the following hardware wallets:
- [Ledger Nano S](ledger.md)
## Specify a Hardware Wallet Key
## Specify a Keypair URL
Solana defines a keypair URL format to uniquely locate any Solana keypair on a
hardware wallet connected to your computer.
@@ -36,89 +36,15 @@ usb://<MANUFACTURER>[/<WALLET_ID>][?key=<DERIVATION_PATH>]
The path has the form `<ACCOUNT>[/<CHANGE>]`, where each `ACCOUNT` and `CHANGE`
are positive integers.
All derivation paths implicitly include the prefix `44'/501'`, which indicates
the path follows the [BIP44 specifications](https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki)
and that any derived keys are Solana keys (Coin type 501). The single quote
indicates a "hardened" derivation. Because Solana uses Ed25519 keypairs, all
derivations are hardened and therefore adding the quote is optional and
unnecessary.
For example, a fully qualified URL for a Ledger device might be:
```text
usb://ledger/BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK?key=0/0
```
### Multiple Addresses on a Single Hardware Wallet
You can derive as many wallet addresses as you like. To view them, simply
iterate the `ACCOUNT` and/or `CHANGE` number when specifying the URL path.
Multiple wallet addresses can be useful if you want to transfer tokens between
your own accounts for different purposes.
For example, a first address can be viewed with:
```bash
solana-keygen pubkey usb://ledger?key=0
```
A second address can be viewed with:
```bash
solana-keygen pubkey usb://ledger?key=1
```
A third address:
```bash
solana-keygen pubkey usb://ledger?key=2
```
...and so on.
## Manage Multiple Hardware Wallets
It is sometimes useful to sign a transaction with keys from multiple hardware
wallets. Signing with multiple wallets requires *fully qualified keypair URLs*.
When the URL is not fully qualified, the Solana CLI will prompt you with
the fully qualified URLs of all connected hardware wallets, and ask you to
choose which wallet to use for each signature.
Instead of using the interactive prompts, you can generate fully qualified
URLs using the Solana CLI `resolve-signer` command. For example, try
connecting a Ledger Nano-S to USB, unlock it with your pin, and running the
following command:
```text
solana resolve-signer usb://ledger?key=0/0
```
You will see output similar to:
```text
usb://ledger/BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK?key=0/0
```
but where `BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK` is your `WALLET_ID`.
With your fully qualified URL, you can connect multiple hardware wallets to
the same computer and uniquely identify a keypair from any of them.
## Troubleshooting
### Keypair URL parameters are ignored in zsh
The question mark character is a special character in zsh. If that's not a
feature you use, add the following line to your `~/.zshrc` to treat it as a
normal character:
```bash
unsetopt nomatch
```
Then either restart your shell window or run `~/.zshrc`:
```bash
source ~/.zshrc
```
If you would prefer not to disable zsh's special handling of the question mark
character, you can escape it explicitly with a backslash in your keypair URLs.
For example:
```bash
solana-keygen pubkey usb://ledger\?key=0
```
All derivation paths implicitly include the prefix `44'/501'`, which indicates
the path follows the [BIP44 specifications](https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki)
and that any derived keys are Solana keys (Coin type 501). The single quote
indicates a "hardened" derivation. Because Solana uses Ed25519 keypairs, all
derivations are hardened and therefore adding the quote is optional and
unnecessary.

View File

@@ -9,14 +9,17 @@ secure transaction signing.
- [Set up a Ledger Nano S with the Solana App](../wallet-guide/ledger-live.md)
- [Install the Solana command-line tools](../cli/install-solana-cli-tools.md)
## Use Ledger Device with Solana CLI
## Use Ledger Nano S with Solana CLI
1. Ensure the Ledger Live application is closed
2. Plug your Ledger device into your computer's USB port
3. Enter your pin and start the Solana app on the Ledger device
4. Press both buttons to advance past the "Pending Ledger review" screen
5. Ensure the screen reads "Application is ready"
6. On your computer, run:
### View your Wallet ID
On your computer, run:
```bash
solana-keygen pubkey usb://ledger
@@ -25,39 +28,172 @@ solana-keygen pubkey usb://ledger
This confirms your Ledger device is connected properly and in the correct state
to interact with the Solana CLI. The command returns your Ledger's unique
*wallet ID*. When you have multiple Nano S devices connected to the same
computer, you can use your wallet key to specify which Ledger hardware wallet
you want to use. Run the same command again, but this time, with its fully
qualified URL:
computer, you can use your wallet ID to specify which Ledger hardware wallet
you want to use. If you only plan to use a single Nano S on your computer
at a time, you don't need to include the wallet ID. For information on
using the wallet ID to use a specific Ledger, see
[Manage Multiple Hardware Wallets](#manage-multiple-hardware-wallets).
### View your Wallet Addresses
Your Nano S supports an arbitrary number of valid wallet addresses and signers.
To view any address, use the `solana-keygen pubkey` command, as shown below,
followed by a valid [keypair URL](README.md#specify-a-keypair-url).
Multiple wallet addresses can be useful if you want to transfer tokens between
your own accounts for different purposes, or use different keypairs on the
device as signing authorities for a stake account, for example.
All of the following commands will display different addresses, associated with
the keypair path given. Try them out!
```bash
solana-keygen pubkey usb://ledger/<WALLET_ID>
solana-keygen pubkey usb://ledger
solana-keygen pubkey usb://ledger?key=0
solana-keygen pubkey usb://ledger?key=1
solana-keygen pubkey usb://ledger?key=2
```
where you replace `<WALLET_ID>` with the output of the first command.
Confirm it prints the same wallet ID as before.
You can use other values for the number after `key=` as well.
Any of the addresses displayed by these commands are valid Solana wallet
addresses. The private portion associated with each address is stored securely
on the Nano S device, and is used to sign transactions from this address.
Just make a note of which keypair URL you used to derive any address you will be
using to receive tokens.
To learn more about keypair URLs, see
[Specify A Hardware Wallet Key](README.md#specify-a-hardware-wallet-key)
If you are only planning to use a single address/keypair on your device, a good
easy-to-remember path might be to use the address at `key=0`. View this address
with:
```bash
solana-keygen pubkey usb://ledger?key=0
```
Read more about [sending and receiving tokens](../cli/transfer-tokens.md) and
[delegating stake](../cli/delegate-stake.md). You can use your Ledger keypair URL
anywhere you see an option or argument that accepts a `<KEYPAIR>`.
Now that you have a wallet address (or multiple addresses), you can share any of
these addresses publicly to act as a receiving address, and you can use the
associated keypair URL as the signer for transactions from that address.
### View your Balance
To view the balance of any account, regardless of which wallet it uses, use the
`solana balance` command:
```bash
solana balance SOME_WALLET_ADDRESS
```
For example, if your address is `7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri`,
then enter the following command to view the balance:
```bash
solana balance 7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri
```
You can also view the balance of any account address on the Accounts tab in the
[Explorer](https://explorer.solana.com/accounts):
paste the address into the box to view the balance in your web browser.
Note: Any address with a balance of 0 SOL, such as a newly created one on your
Ledger, will show as "Not Found" in the explorer. Empty accounts and non-existent
accounts are treated the same in Solana. This will change when your account
address has some SOL in it.
### Send SOL from a Ledger Nano S
To send some tokens from an address controlled by your Nano S device, you will
need to use the device to sign a transaction, using the same keypair URL you
used to derive the address. To do this, make sure your Nano S is plugged in,
unlocked with the PIN, Ledger Live is not running, and the Solana App is open
on the device, showing "Application is Ready".
The `solana transfer` command specifies which address to send tokens to and how
many tokens to send, and uses the `--keypair` argument to specify which keypair
is sending the tokens. That keypair signs the transaction, and the balance of
its associated address will decrease.
```bash
solana transfer RECIPIENT_ADDRESS AMOUNT --keypair KEYPAIR_URL_OF_SENDER
```
Below is a full example. First, an address is viewed at a certain keypair URL.
Second, the balance of that address is checked. Lastly, a transfer transaction
is entered to send `1` SOL to the recipient address `7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri`.
When you hit Enter for a transfer command, you will be prompted to approve the
transaction details on your Ledger device. On the device, use the right and
left buttons to review the transaction details. If they look correct, click
both buttons on the "Approve" screen, otherwise push both buttons on the "Reject"
screen.
```bash
~$ solana-keygen pubkey usb://ledger?key=42
CjeqzArkZt6xwdnZ9NZSf8D1CNJN1rjeFiyd8q7iLWAV
~$ solana balance CjeqzArkZt6xwdnZ9NZSf8D1CNJN1rjeFiyd8q7iLWAV
1.000005 SOL
~$ solana transfer 7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri 1 --keypair usb://ledger?key=42
Waiting for your approval on Ledger hardware wallet usb://ledger/2JT2Xvy6T8hSmT8g6WdeDbHUgoeGdj6bE2VueCZUJmyN
✅ Approved
Signature: kemu9jDEuPirKNRKiHan7ycybYsZp7pFefAdvWZRq5VRHCLgXTXaFVw3pfh87MQcWX4kQY4TjSBmESrwMApom1V
```
After approving the transaction on your device, the program will display the
transaction signature, and wait for the maximum number of confirmations (32)
before returning. This only takes a few seconds, and then the transaction is
finalized on the Solana network. You can view details of this or any other
transaction by going to the Transaction tab in the
[Explorer](https://explorer.solana.com/transactions)
and pasting in the transaction signature.
## Advanced Operations
### Manage Multiple Hardware Wallets
It is sometimes useful to sign a transaction with keys from multiple hardware
wallets. Signing with multiple wallets requires *fully qualified keypair URLs*.
When the URL is not fully qualified, the Solana CLI will prompt you with
the fully qualified URLs of all connected hardware wallets, and ask you to
choose which wallet to use for each signature.
Instead of using the interactive prompts, you can generate fully qualified
URLs using the Solana CLI `resolve-signer` command. For example, try
connecting a Ledger Nano-S to USB, unlock it with your pin, and running the
following command:
```text
solana resolve-signer usb://ledger?key=0/0
```
You will see output similar to:
```text
usb://ledger/BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK?key=0/0
```
but where `BsNsvfXqQTtJnagwFWdBS7FBXgnsK8VZ5CmuznN85swK` is your `WALLET_ID`.
With your fully qualified URL, you can connect multiple hardware wallets to
the same computer and uniquely identify a keypair from any of them.
Use the output from the `resolve-signer` command anywhere a `solana` command
expects a `<KEYPAIR>` entry to use that resolved path as the signer for that
part of the given transaction.
### Install the Solana Beta App
You're invited to help us test the latest pre-release version of our Ledger app
on one of the public testnets.
You can use the command-line to install the latest Solana Ledger app release before it has been validated by
You can use the command-line to install the latest Solana Ledger app release
before it has been validated by
the Ledger team and made available via Ledger Live. Note that because the app
is not installed via Ledger Live, you will need to approve installation from an
"unsafe" manager, as well as see the message, "This app is not genuine" each
time you open the app. Once the app is available on Ledger Live, you can
reinstall the app from there, and the message will no longer be displayed.
**WARNING:** Installing an unsigned Ledger app reduces the security of your Ledger device.
If your client is compromised, an attacker will be able to trick you into signing arbitrary
transactions with arbitrary derivation paths. Only use this installation method if you understand
**WARNING:** Installing an unsigned Ledger app reduces the security of your
Ledger device.
If your client is compromised, an attacker will be able to trick you into
signing arbitrary transactions with arbitrary derivation paths.
Only use this installation method if you understand
the security implications. We strongly recommend that you use a separate
Ledger device, with no other wallets/apps sharing the same seed phrase.
@@ -71,6 +207,8 @@ Ledger device, with no other wallets/apps sharing the same seed phrase.
5. An installation window appears and your device will display "Processing..."
6. The app installation is confirmed
#### Installing the Solana Beta App returns an error
If you encounter the following error:
```text
@@ -93,6 +231,40 @@ To fix, check the following:
2. Ensure your Ledger device is unlocked and not waiting for you to enter your pin
3. Ensure the Ledger Live application is not open
## Troubleshooting
### Keypair URL parameters are ignored in zsh
The question mark character is a special character in zsh. If that's not a
feature you use, add the following line to your `~/.zshrc` to treat it as a
normal character:
```bash
unsetopt nomatch
```
Then either restart your shell window or run `~/.zshrc`:
```bash
source ~/.zshrc
```
If you would prefer not to disable zsh's special handling of the question mark
character, you can escape it explicitly with a backslash in your keypair URLs.
For example:
```bash
solana-keygen pubkey usb://ledger\?key=0
```
## Support
Check out our [Wallet Support Page](../wallet-guide/support.md) for ways to get help.
Check out our [Wallet Support Page](../wallet-guide/support.md)
for ways to get help.
Read more about [sending and receiving tokens](../cli/transfer-tokens.md) and
[delegating stake](../cli/delegate-stake.md). You can use your Ledger keypair URL
anywhere you see an option or argument that accepts a `<KEYPAIR>`.

View File

@@ -6,9 +6,9 @@ Solana is an open source project implementing a new, high-performance, permissio
## Why Solana?
It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.85.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.155.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.81.1078)
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.151.1078)
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you would use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
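To make the hash-chain idea concrete, here is a minimal Rust sketch (not the repository's implementation; it assumes the `sha2` crate, version 0.9 or later) of a SHA-256 chain in which empty iterations only prove that time has passed and an entry mixes transaction data into the chain:

```rust
use sha2::{Digest, Sha256};

/// Extend the chain by one step, optionally mixing in data (a "tick" mixes
/// in nothing; an entry mixes in the transactions it timestamps).
fn next_hash(prev: &[u8], mixin: Option<&[u8]>) -> Vec<u8> {
    let mut hasher = Sha256::new();
    hasher.update(prev);
    if let Some(data) = mixin {
        hasher.update(data);
    }
    hasher.finalize().to_vec()
}

fn main() {
    let mut hash = vec![0u8; 32]; // illustrative seed, not a real genesis hash

    // Ticks: each hash can only be produced after the previous one, so the
    // chain is a verifiable record that real time elapsed between entries.
    for tick in 0..5 {
        hash = next_hash(&hash, None);
        println!("tick {}: {:02x?}", tick, &hash[..8]);
    }

    // An entry: mixing in the data proves it existed no later than this point.
    hash = next_hash(&hash, Some(b"serialized transactions"));
    println!("entry: {:02x?}", &hash[..8]);
}
```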

View File

@@ -40,7 +40,24 @@ line tools](../hardware-wallets/ledger.md).
- You may be prompted on the Nano S to confirm the install of Solana App
- "Solana" should now show as "Installed" in the Ledger Live Manager
![Installed Solana App in Manager](../.gitbook/assets/ledger-live-install-solana-app.png)
![Installed Solana App in Manager](../.gitbook/assets/ledger-live-latest-version-installed.png)
## Upgrade to the latest version of the Solana App
To make sure you have the latest functionality, if you are using an older version
of the Solana App, please upgrade to version v0.2.2 by following these steps.
- Connect your Nano S to your computer and unlock it by entering your PIN on the
device
- Open Ledger Live and click on "Manager" in the left pane
- On your Nano S, click both buttons when prompted to "Allow Manager"
- Click the "Update All" button to update the Solana app to the latest version
(v0.2.2)
![Upgrade All button in Manager](../.gitbook/assets/ledger-live-update-available-v0.2.2.png)
- Once the upgrade is finished, confirm v0.2.2 is installed under "Apps Installed"
![Upgrade complete](../.gitbook/assets/ledger-live-latest-version-installed.png)
## Interact with Solana network
- To interact with your Ledger wallet on our live network, please see our

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-dos"
version = "1.1.8"
version = "1.1.15"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,10 +13,10 @@ clap = "2.33.0"
log = "0.4.8"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-core = { path = "../core", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-net-utils = { path = "../net-utils", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
version = "1.1.8"
version = "1.1.15"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ console = "0.10.0"
indicatif = "0.14.0"
log = "0.4.8"
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-ledger = { path = "../ledger", version = "1.1.15" }
tar = "0.4.26"
[lib]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.1.8"
version = "1.1.15"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.105"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.15" }
solana-logger = { path = "../logger", version = "1.1.15" }
solana-metrics = { path = "../metrics", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
tokio = "0.1"
tokio-codec = "0.1"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "1.1.8"
version = "1.1.15"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,13 +10,13 @@ edition = "2018"
[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.8" }
solana-budget-program = { path = "../programs/budget", version = "1.1.8" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-storage-program = { path = "../programs/storage", version = "1.1.8" }
solana-vest-program = { path = "../programs/vest", version = "1.1.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.15" }
solana-budget-program = { path = "../programs/budget", version = "1.1.15" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.15" }
solana-runtime = { path = "../runtime", version = "1.1.15" }
solana-sdk = { path = "../sdk", version = "1.1.15" }
solana-storage-program = { path = "../programs/storage", version = "1.1.15" }
solana-vest-program = { path = "../programs/vest", version = "1.1.15" }
[lib]
crate-type = ["lib"]

View File

@@ -26,7 +26,17 @@ pub fn get_inflation(operating_mode: OperatingMode, epoch: Epoch) -> Option<Infl
None
}
}
OperatingMode::Stable | OperatingMode::Preview => {
OperatingMode::Preview => {
if epoch == 0 {
// No inflation at epoch 0
Some(Inflation::new_disabled())
} else if epoch == 44 {
Some(Inflation::default())
} else {
None
}
}
OperatingMode::Stable => {
if epoch == 0 {
// No inflation at epoch 0
Some(Inflation::new_disabled())
@@ -62,9 +72,7 @@ pub fn get_programs(operating_mode: OperatingMode, epoch: Epoch) -> Option<Vec<(
}
}
OperatingMode::Stable => {
if epoch == std::u64::MAX - 1 {
// The epoch of std::u64::MAX - 1 is a placeholder and is expected to be reduced in
// a future hard fork.
if epoch == 34 {
Some(vec![solana_bpf_loader_program!()])
} else if epoch == std::u64::MAX {
// The epoch of std::u64::MAX is a placeholder and is expected to be reduced in a
@@ -153,7 +161,7 @@ mod tests {
#[test]
fn test_softlaunch_programs() {
assert_eq!(get_programs(OperatingMode::Stable, 1), None);
assert!(get_programs(OperatingMode::Stable, std::u64::MAX - 1).is_some());
assert!(get_programs(OperatingMode::Stable, 34).is_some());
assert!(get_programs(OperatingMode::Stable, std::u64::MAX).is_some());
}
}

Some files were not shown because too many files have changed in this diff.