Compare commits

...

98 Commits

SHA1 Message Date
66f006108c CLI: Don't hide errors when fees are disabled (#8204)
automerge

(cherry picked from commit ed87229cec)
2020-02-11 23:33:49 -07:00
47f887bda0 The getConfirmedBlock RPC API is now disabled by default (#8230)
automerge
2020-02-11 22:18:22 -08:00
bb64c73aa2 set_read_timeout() can fail, don't expect() it not to
(cherry picked from commit 36c0cb052b)
2020-02-11 21:08:07 -07:00
1f30d1e77a solana-install init edge when "edge" is not currently installed now works
(cherry picked from commit ed58bcda4c)
2020-02-11 21:08:07 -07:00
04dab9b274 Fix RPC pub sub unsubscribe (#8208) (#8228)
automerge
2020-02-11 18:40:21 -08:00
fb4e102670 Report validator rewards in getConfirmedBlock JSON RPC (#8226)
automerge
2020-02-11 18:20:16 -08:00
67e0ba0356 Add method to sign raw data, enabling easier device app testing (#8221) (#8225)
automerge
2020-02-11 17:59:08 -08:00
22bb4e6462 Factor repair from gossip (#8044) (#8220)
automerge
2020-02-11 14:18:45 -08:00
79035bdbed Upgrade to rust 1.41.0 (bp #8202) (#8219)
automerge
2020-02-11 13:56:58 -08:00
70089a5258 Fixup sign_transaction; pass derivation_path by reference (#8194) (#8217)
automerge
2020-02-11 12:38:31 -08:00
34238d5f1e Reliably track proc macro & build.rs code coverage (#8210) (#8213)
automerge
2020-02-11 09:13:21 -08:00
cab6917cbd Fix nightly clippy warnings (#8199) (#8212)
automerge
2020-02-11 08:43:12 -08:00
2951ee5b1d Channel installs no longer re-download the same release. (#8211)
automerge
2020-02-11 08:24:17 -08:00
fb16a15900 CLI: Add fee-payer param to stake-split subcommand (#8201) (#8205)
automerge
2020-02-11 01:32:07 -08:00
76b52f4c5d CLI: transfer fix checks pubkeys (#8198) (#8203)
automerge
2020-02-11 00:26:56 -08:00
21a2e643c2 CLI: Harden offline signing and tests (#8052) (#8197)
automerge
2020-02-10 19:23:22 -08:00
733d9cb026 Remove repairman as its spamming cluster with unwanted repairs (#8193) (#8195)
automerge
2020-02-10 17:56:45 -08:00
2f54f57b7a Fix larger than necessary allocations in streamer (#8187) (#8192)
automerge
2020-02-10 13:06:28 -08:00
7bd95019ef Minor logging improvements (bp #8140) (#8190)
automerge
2020-02-10 11:22:26 -08:00
33557c3271 Check for AVX512 at runtime to avoid invalid opcode trap (#8166)
automerge

(cherry picked from commit ef5fb6fa46)
2020-02-07 17:07:10 -07:00
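
The change above gates AVX-512 code paths behind a runtime check instead of assuming compile-time support. A minimal sketch of that idea, assuming a plain CPUID probe rather than whatever detection helper the patch actually uses:

```rust
// Hedged sketch (not the actual patch): probe CPUID for AVX-512F at runtime and
// fall back to a portable path, so a binary containing AVX-512 code never
// executes it on CPUs that would raise an invalid-opcode trap.
#[cfg(target_arch = "x86_64")]
fn has_avx512f() -> bool {
    use std::arch::x86_64::{__cpuid, __cpuid_count};
    // Leaf 7 must be supported before querying it.
    if unsafe { __cpuid(0) }.eax < 7 {
        return false;
    }
    // AVX-512 Foundation is reported in CPUID leaf 7, sub-leaf 0, EBX bit 16.
    (unsafe { __cpuid_count(7, 0) }.ebx >> 16) & 1 == 1
}

#[cfg(not(target_arch = "x86_64"))]
fn has_avx512f() -> bool {
    false
}

fn main() {
    if has_avx512f() {
        println!("AVX-512F detected: the optimized kernels are safe to run");
    } else {
        println!("AVX-512F not detected: using the portable fallback");
    }
}
```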
c65b9cd88d Filter old CrdsValues received via Pull Responses in Gossip (#8150) (#8171)
automerge
2020-02-07 14:11:48 -08:00
038db8167f CLI: Implement transfer command (#8108) (#8170)
automerge
2020-02-07 13:18:35 -08:00
030498ced5 Ledger hardware wallet integration (#8068) (#8169)
automerge
2020-02-07 12:14:41 -08:00
28eb8b662a Remove unwanted println 2020-02-07 12:59:44 -07:00
de752eaf80 Lock snapshot version to 0.23.2 (#8167)
automerge
2020-02-07 11:35:54 -08:00
9c5ef19d80 Surface shred version more in tools (#8163) (#8165)
automerge
2020-02-07 10:10:00 -08:00
235bd0a46b CLI: Support offline fee payers (#8009) (#8164)
automerge
2020-02-07 09:41:35 -08:00
465d71a3a3 De-replicode Tower constructors (#8153) (#8154)
automerge
2020-02-06 19:38:47 -08:00
14e6029fae Add libudev-dev to docker image to build remote-wallet (#8149) (#8152)
automerge
2020-02-06 16:51:44 -08:00
75434158ee Ignore flaky test_exchange_local_cluster (#8146) (#8147)
automerge
2020-02-06 12:24:42 -08:00
1cae9fd893 Better surface bank hash verification failures (#8134)
automerge
2020-02-05 12:04:34 -08:00
bea34a812c CLI cosmetic: make config get and verbose prints consistent (#8119) (#8133)
automerge
2020-02-05 11:31:29 -08:00
41a28d7322 Bump version to 0.23.3 2020-02-03 21:10:30 -07:00
235158d2bc CLI: Expose sign-only reply parsing helper (#8107) (#8110)
automerge
2020-02-03 19:55:45 -08:00
521238f7d7 Delete uptime command, report total credits in solana validators instead
(cherry picked from commit 4c0420b884)
2020-02-03 17:15:09 -07:00
384f52a607 Fix consensus threshold when new root is created (#8093)
When a new root is created, the oldest slot is popped off
but when the logic checks for identical slots, it assumes
that any difference means a slot was popped off the front.
2020-02-03 16:54:48 -07:00
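
A minimal sketch of the scenario this commit describes, using a hypothetical helper rather than the actual consensus code: once a new root pops the oldest slot off the local list, a peer's list may still carry that slot at the front, so the comparison has to tolerate that rather than treating any difference as a pop.

```rust
// Illustrative only: skip leading slots older than our first slot, then
// require the remainder to match exactly.
fn slots_agree_after_root(local: &[u64], received: &[u64]) -> bool {
    let first_local = match local.first() {
        Some(&slot) => slot,
        None => return received.is_empty(),
    };
    let trimmed: Vec<u64> = received
        .iter()
        .copied()
        .filter(|&slot| slot >= first_local)
        .collect();
    trimmed.as_slice() == local
}

fn main() {
    // Slot 10 was rooted locally and popped off the front of the list.
    let local = [11, 12, 13];
    let received = [10, 11, 12, 13];
    assert!(slots_agree_after_root(&local, &received));
    println!("lists agree once the popped root slot is accounted for");
}
```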
49f2d912ab Add split-stake command (#8092)
automerge
2020-02-03 11:04:21 -08:00
8652fe30ce Update book release version 2020-02-03 11:36:19 -07:00
899a14ba51 Disable windows update as windows build artifacts are turned off 2020-02-01 22:25:47 -07:00
466c7dafb3 Bump version to v0.23.2 2020-02-01 21:46:34 -07:00
293bb63ed8 Reduce rpc client pre-flight requests by setting max-age header (#8082) (#8083)
automerge
2020-02-01 08:48:40 -08:00
8f8fb720af CLI: Fix stake-account auth withdrawer output (#8071)
automerge

(cherry picked from commit 9739be9ecf)
2020-02-01 08:58:13 -07:00
19f414d843 Use solana-cli config keypair in solana-keygen (bp #8074) (#8080)
* Use solana-cli config keypair in solana-keygen (#8074)

* Use solana-cli config keypair in solana-keygen

* s/infile/keypair for consistency across modules and more generality across access methods

* Move config into separate crate

(cherry picked from commit fab8ef379f)

# Conflicts:
#	Cargo.lock
#	cli/Cargo.toml
#	keygen/Cargo.toml

* Fixup version numbers for backport

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-01-31 23:08:08 -07:00
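
A hedged sketch of the idea behind this backport: solana-keygen loads the keypair path from the same YAML config file that solana-cli writes, via the new cli-config crate that appears in the diffs below. The struct and field name here are assumptions for illustration, not the crate's actual API; the example assumes serde's derive feature and serde_yaml, both of which appear in the cli-config dependency list below.

```rust
use serde::Deserialize;

// Hypothetical mirror of a CLI config entry; field name assumed for illustration.
#[derive(Debug, Deserialize)]
struct CliConfig {
    keypair_path: String,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let yaml = "keypair_path: /home/solana/.config/solana/id.json\n";
    let config: CliConfig = serde_yaml::from_str(yaml)?;
    println!("using keypair at {}", config.keypair_path);
    Ok(())
}
```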
eaca1c3170 Add new colo test cases using reduced node count (#8078) (#8079)
automerge
2020-01-31 19:06:36 -08:00
9fc75925f9 CLI: De-replicode SigningAuthority instantiation (#8076) (#8077)
automerge
2020-01-31 17:42:15 -08:00
b5098ac87c Filter repairman peers based on shred_version (#8069) (#8073)
automerge
2020-01-31 15:29:30 -08:00
e23aec9728 Update key (#8062) (#8066)
automerge
2020-01-31 12:55:49 -08:00
57d490c84f Minor cli fixes (bp #8061) (#8065)
automerge
2020-01-31 12:36:35 -08:00
aa8c9f6a98 Remove asteroids and pacman from QA/dev testnet availability (#8050) (#8063)
automerge
2020-01-31 11:28:33 -08:00
57772dc73d s/mint/faucet 2020-01-31 12:15:20 -07:00
21706108e8 Don't exit early if add. validators not found during gce.sh config
(cherry picked from commit 9adf0d4ee0)
2020-01-31 08:36:03 -07:00
50d0caf00f Remove support for 0.22.3 snapshots (#8058)
automerge
2020-01-31 00:15:44 -08:00
2739332306 Fix stale gossip entrypoint (#8053) (#8057)
automerge
2020-01-30 23:13:26 -08:00
c85c4699aa validator: add --private-rpc flag (bp #8037) (#8054)
automerge
2020-01-30 20:44:53 -08:00
81add4d6bf Make tds slots-per-epoch configurable 2020-01-30 21:38:39 -07:00
8e31eeb696 Dial testnet down to a single node 2020-01-30 21:17:38 -07:00
e1ce8b37ff Minor --expected-shred fix, clean up shred-related gossip log messages (#8041) (#8045)
automerge
2020-01-30 14:41:21 -08:00
3f831c05f5 Add different shred test to test_tvu_peers_and_stakes
(cherry picked from commit 0c55b37976)
2020-01-30 11:28:45 -07:00
f0d7ce6bb6 CLI: Disallow blockhash/fee-calc lookups when offline (#7981)
* CLI: Add BlockhashSpec to tighten control over --blockhash

* Use BlockhashSpec

* Add a matches-free constructor

* More descriptive naming

(cherry picked from commit 966d077431)
2020-01-30 09:39:04 -07:00
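
A minimal sketch of the behavior this change enforces, with hypothetical types rather than the CLI's actual ones: when signing offline, the blockhash must be supplied explicitly instead of being looked up on the cluster.

```rust
// Hedged sketch, not the CLI's real BlockhashQuery/BlockhashSpec type.
#[derive(Debug)]
enum BlockhashSource {
    /// Fetch a recent blockhash from the cluster at submit time.
    Cluster,
    /// Use a blockhash supplied explicitly via --blockhash.
    Provided(String),
}

fn resolve_blockhash(source: &BlockhashSource, sign_only: bool) -> Result<String, String> {
    match (source, sign_only) {
        (BlockhashSource::Provided(hash), _) => Ok(hash.clone()),
        (BlockhashSource::Cluster, false) => Ok("<recent blockhash from RPC>".to_string()),
        (BlockhashSource::Cluster, true) => {
            Err("offline signing requires an explicit --blockhash".to_string())
        }
    }
}

fn main() {
    // Offline signing without an explicit blockhash is rejected.
    assert!(resolve_blockhash(&BlockhashSource::Cluster, true).is_err());
    let provided = BlockhashSource::Provided("4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM".to_string());
    println!("{:?}", resolve_blockhash(&provided, true));
}
```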
6ba95b2545 Ignore slow archiver tests (#8032)
automerge

(cherry picked from commit 400412d76c)
2020-01-30 09:38:49 -07:00
6818e68542 Add shred version filters to Crds Accessors (#8027)
* Add shred version filters to Crds Accessors

* Adopt entrypoint shred_version if one isn't provided

(cherry picked from commit 64c42e28dc)
2020-01-30 08:58:36 -07:00
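
A small sketch of the second bullet, with placeholder types rather than the real gossip structures: a node that starts without a shred version adopts the entrypoint's value so its gossip traffic isn't filtered out.

```rust
// Illustrative only; the real ContactInfo carries many more fields.
struct ContactInfo {
    shred_version: u16,
}

fn adopt_shred_version(mine: &mut ContactInfo, entrypoint: &ContactInfo) {
    // Zero means "not set"; take the entrypoint's value in that case.
    if mine.shred_version == 0 {
        mine.shred_version = entrypoint.shred_version;
    }
}

fn main() {
    let entrypoint = ContactInfo { shred_version: 27 };
    let mut me = ContactInfo { shred_version: 0 };
    adopt_shred_version(&mut me, &entrypoint);
    assert_eq!(me.shred_version, 27);
    println!("adopted shred version {}", me.shred_version);
}
```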
43659d7deb Remove support for stake redelegation (#7995) (#8024)
automerge
2020-01-29 23:46:42 -08:00
f24d8e7d2d Add set_lockup to stake (#7997)
(cherry picked from commit 0d6c233747)
2020-01-29 23:22:04 -07:00
e10fe5e125 Update and fix transaction error documentation (#7998)
(cherry picked from commit fed3817ed3)
2020-01-29 23:20:32 -07:00
0f8c9ab1c4 Various fixes/improvements resulting from SLP 1.1 restart debug (bp #8019) (#8026)
automerge
2020-01-29 20:11:23 -08:00
8a9a9cb991 Log solana-validator args on startup to aid debugging
(cherry picked from commit effe6e3ff3)
2020-01-29 09:40:33 -07:00
44208ffa67 refactored 2020-01-28 20:29:56 -07:00
5df0478fa3 refactored the thread loop
a thread will break if the atomic bool is true
2020-01-28 20:29:56 -07:00
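
A minimal sketch of the pattern described here and in the AtomicU64 commit further down the list (illustrative only, not the keygen grind code): worker threads share an atomic counter and stop as soon as a shared flag flips to true.

```rust
use std::sync::{
    atomic::{AtomicBool, AtomicU64, Ordering},
    Arc,
};
use std::thread;

fn main() {
    let done = Arc::new(AtomicBool::new(false));
    let attempts = Arc::new(AtomicU64::new(0));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let done = Arc::clone(&done);
            let attempts = Arc::clone(&attempts);
            thread::spawn(move || {
                // Each worker exits its loop as soon as the shared flag is set.
                while !done.load(Ordering::Relaxed) {
                    let n = attempts.fetch_add(1, Ordering::Relaxed);
                    // Stand-in for "keypair matches the requested prefix/suffix".
                    if n >= 1_000_000 {
                        done.store(true, Ordering::Relaxed);
                    }
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
    println!("stopped after {} attempts", attempts.load(Ordering::Relaxed));
}
```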
d52567933e refactored grind_parse_args and grind_print_info 2020-01-28 20:29:56 -07:00
a32cdb9f4d updated to slice 2020-01-28 20:29:56 -07:00
eacd8d986c put some logic into functions 2020-01-28 20:29:56 -07:00
1d32603b49 taking care of errors from ./test-check.sh 2020-01-28 20:29:56 -07:00
8c6f7ee5a4 ran cargo fmt 2020-01-28 20:29:56 -07:00
be482eed3f removed whitespace 2020-01-28 20:29:56 -07:00
6e1c53cb0f simplified messaging and if blocks 2020-01-28 20:29:56 -07:00
af92f205cf simplified messaging 2020-01-28 20:29:56 -07:00
87047b08c8 removed found and changed count to AtomicU64 2020-01-28 20:29:56 -07:00
e282161872 updated bs58 decode check 2020-01-28 20:29:56 -07:00
01b1e287ed fixed prefix typo 2020-01-28 20:29:56 -07:00
d7fd1fa467 added informative print statements 2020-01-28 20:29:56 -07:00
bfa34cd494 it works
need to add print out to inform user
2020-01-28 20:29:56 -07:00
915835e224 this command works but won't exit right when the 6th key is found
cargo run grind --starts-with hj:2 --ends-with jk:2 --starts-and-ends-with n:m:2
2020-01-28 20:29:56 -07:00
659332e7ac progress on storing parameters 2020-01-28 20:29:56 -07:00
272986c6ac validator methods work 2020-01-28 20:29:56 -07:00
4d8ab45c56 removed includes
added ends-with and starts-and-ends-with
updated help messages
added expected number of values
updated .value_name for each option
2020-01-28 20:29:56 -07:00
932ae86d47 CLI: Fix tests. sign_only requires a blockhash (#8005) (#8007)
automerge
2020-01-28 19:07:47 -08:00
756e6334b0 Add lock to make sure slot-based locktree calls are safe (#7993) (#7999)
automerge
2020-01-28 14:57:37 -08:00
4e6eca9748 Update cargo files to 0.23.1 (#7994)
automerge
2020-01-27 20:44:44 -08:00
d9e37eb30c Fix compute_shred_version() (#7989)
automerge

(cherry picked from commit fd7d5cbe0d)
2020-01-27 19:06:20 -07:00
04d1b35926 Consensus fix, don't consider threshold check if.. (#7948) (#7991)
automerge
2020-01-27 17:52:48 -08:00
d13d609050 Reduce epoch duration from 2 weeks to 2 days (#7987)
automerge
2020-01-27 10:24:20 -08:00
20426cf251 Specify where VM images are coming from across GCE projects (#7985) (#7986)
automerge
2020-01-27 09:02:05 -08:00
4a220d7c8e Remove show- prefix 2020-01-26 21:01:18 -07:00
436eab41ca Remove stray key 2020-01-26 14:35:50 -07:00
c8472d0a96 CLI: --sign-only and --signer require --blockhash (#7982) (#7983)
automerge
2020-01-26 10:19:04 -08:00
1a7db9c17e CLI: Consolidate offline arg declarations (#7979) (#7980)
automerge
2020-01-26 01:24:01 -08:00
b468d9f17c CLI: Deterministic dummy keypair generation for SigningAuthority::Offline (#7971) (#7978)
automerge
2020-01-26 00:13:06 -08:00
41cf1d7d23 s/dervied/derived/ 2020-01-25 23:22:55 -07:00
189 changed files with 7181 additions and 4507 deletions

Cargo.lock (generated, 742 lines changed): diff suppressed because it is too large.

View File

@ -7,6 +7,7 @@ members = [
"chacha",
"chacha-cuda",
"chacha-sys",
"cli-config",
"client",
"core",
"faucet",
@ -42,6 +43,7 @@ members = [
"archiver",
"archiver-lib",
"archiver-utils",
"remote-wallet",
"runtime",
"sdk",
"sdk-c",

View File

@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "0.23.0"
version = "0.23.3"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -15,22 +15,22 @@ ed25519-dalek = "=1.0.0-pre.1"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-client = { path = "../client", version = "0.23.0" }
solana-storage-program = { path = "../programs/storage", version = "0.23.0" }
solana-client = { path = "../client", version = "0.23.3" }
solana-storage-program = { path = "../programs/storage", version = "0.23.3" }
thiserror = "1.0"
serde = "1.0.104"
serde_json = "1.0.44"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "0.23.0" }
solana-chacha = { path = "../chacha", version = "0.23.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.0" }
solana-ledger = { path = "../ledger", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-perf = { path = "../perf", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-core = { path = "../core", version = "0.23.0" }
solana-archiver-utils = { path = "../archiver-utils", version = "0.23.0" }
solana-metrics = { path = "../metrics", version = "0.23.0" }
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
solana-chacha = { path = "../chacha", version = "0.23.3" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.3" }
solana-ledger = { path = "../ledger", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-perf = { path = "../perf", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-core = { path = "../core", version = "0.23.3" }
solana-archiver-utils = { path = "../archiver-utils", version = "0.23.3" }
solana-metrics = { path = "../metrics", version = "0.23.3" }
[dev-dependencies]
hex = "0.4.0"

View File

@ -16,6 +16,7 @@ use solana_core::{
packet::{limited_deserialize, PACKET_DATA_SIZE},
repair_service,
repair_service::{RepairService, RepairSlotRange, RepairStrategy},
serve_repair::ServeRepair,
shred_fetch_stage::ShredFetchStage,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
storage_stage::NUM_STORAGE_SAMPLES,
@ -195,13 +196,7 @@ impl Archiver {
Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"),
);
let gossip_service = GossipService::new(
&cluster_info,
Some(blockstore.clone()),
None,
node.sockets.gossip,
&exit,
);
let gossip_service = GossipService::new(&cluster_info, None, node.sockets.gossip, &exit);
info!("Connecting to the cluster via {:?}", cluster_entrypoint);
let (nodes, _) =
@ -522,6 +517,8 @@ impl Archiver {
let mut contact_info = node_info.clone();
contact_info.tvu = "0.0.0.0:0".parse().unwrap();
contact_info.wallclock = timestamp();
// copy over the adopted shred_version from the entrypoint
contact_info.shred_version = cluster_info.read().unwrap().my_data().shred_version;
{
let mut cluster_info_w = cluster_info.write().unwrap();
cluster_info_w.insert_self(contact_info);
@ -701,7 +698,7 @@ impl Archiver {
) -> Result<u64> {
let rpc_peers = {
let cluster_info = cluster_info.read().unwrap();
cluster_info.rpc_peers()
cluster_info.all_rpc_peers()
};
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
@ -757,7 +754,7 @@ impl Archiver {
loop {
let rpc_peers = {
let cluster_info = cluster_info.read().unwrap();
cluster_info.rpc_peers()
cluster_info.all_rpc_peers()
};
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
@ -812,7 +809,7 @@ impl Archiver {
/// It is recommended to use a temporary blockstore for this since the download will not verify
/// shreds received and might impact the chaining of shreds across slots
pub fn download_from_archiver(
cluster_info: &Arc<RwLock<ClusterInfo>>,
serve_repair: &ServeRepair,
archiver_info: &ContactInfo,
blockstore: &Arc<Blockstore>,
slots_per_segment: u64,
@ -832,10 +829,10 @@ impl Archiver {
Recycler::default(),
"archiver_reeciver",
);
let id = cluster_info.read().unwrap().id();
let id = serve_repair.keypair().pubkey();
info!(
"Sending repair requests from: {} to: {}",
cluster_info.read().unwrap().my_data().id,
serve_repair.my_info().id,
archiver_info.gossip
);
let repair_slot_range = RepairSlotRange {
@ -855,9 +852,7 @@ impl Archiver {
let reqs: Vec<_> = repairs
.into_iter()
.filter_map(|repair_request| {
cluster_info
.read()
.unwrap()
serve_repair
.map_repair_request(&repair_request)
.map(|result| ((archiver_info.gossip, result), repair_request))
.ok()

View File

@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "0.23.0"
version = "0.23.3"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,12 +12,12 @@ edition = "2018"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha = { path = "../chacha", version = "0.23.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.0" }
solana-ledger = { path = "../ledger", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-perf = { path = "../perf", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-chacha = { path = "../chacha", version = "0.23.3" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.3" }
solana-ledger = { path = "../ledger", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-perf = { path = "../perf", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
[dev-dependencies]
hex = "0.4.0"

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "0.23.0"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -10,11 +10,11 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.1"
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-core = { path = "../core", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-metrics = { path = "../metrics", version = "0.23.0" }
solana-archiver-lib = { path = "../archiver-lib", version = "0.23.0" }
solana-net-utils = { path = "../net-utils", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-core = { path = "../core", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-metrics = { path = "../metrics", version = "0.23.3" }
solana-archiver-lib = { path = "../archiver-lib", version = "0.23.3" }
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "0.23.0"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -10,11 +10,11 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "0.23.0" }
solana-ledger = { path = "../ledger", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-runtime = { path = "../runtime", version = "0.23.0" }
solana-measure = { path = "../measure", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-core = { path = "../core", version = "0.23.3" }
solana-ledger = { path = "../ledger", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-runtime = { path = "../runtime", version = "0.23.3" }
solana-measure = { path = "../measure", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
rand = "0.6.5"
crossbeam-channel = "0.3"

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.23.0"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -23,19 +23,19 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-core = { path = "../core", version = "0.23.0" }
solana-genesis = { path = "../genesis", version = "0.23.0" }
solana-client = { path = "../client", version = "0.23.0" }
solana-faucet = { path = "../faucet", version = "0.23.0" }
solana-exchange-program = { path = "../programs/exchange", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-metrics = { path = "../metrics", version = "0.23.0" }
solana-net-utils = { path = "../net-utils", version = "0.23.0" }
solana-runtime = { path = "../runtime", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-core = { path = "../core", version = "0.23.3" }
solana-genesis = { path = "../genesis", version = "0.23.3" }
solana-client = { path = "../client", version = "0.23.3" }
solana-faucet = { path = "../faucet", version = "0.23.3" }
solana-exchange-program = { path = "../programs/exchange", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-metrics = { path = "../metrics", version = "0.23.3" }
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
solana-runtime = { path = "../runtime", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
untrusted = "0.7.0"
ws = "0.9.1"
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "0.23.0" }
solana-local-cluster = { path = "../local-cluster", version = "0.23.3" }

View File

@ -16,6 +16,7 @@ use std::sync::mpsc::channel;
use std::time::Duration;
#[test]
#[ignore]
fn test_exchange_local_cluster() {
solana_logger::setup();

View File

@ -2,14 +2,14 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.23.0"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-core = { path = "../core", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-net-utils = { path = "../net-utils", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-core = { path = "../core", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-net-utils = { path = "../net-utils", version = "0.23.3" }

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.23.0"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -16,24 +16,24 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-core = { path = "../core", version = "0.23.0" }
solana-genesis = { path = "../genesis", version = "0.23.0" }
solana-client = { path = "../client", version = "0.23.0" }
solana-faucet = { path = "../faucet", version = "0.23.0" }
solana-librapay = { path = "../programs/librapay", version = "0.23.0", optional = true }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-metrics = { path = "../metrics", version = "0.23.0" }
solana-measure = { path = "../measure", version = "0.23.0" }
solana-net-utils = { path = "../net-utils", version = "0.23.0" }
solana-runtime = { path = "../runtime", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.23.0", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-core = { path = "../core", version = "0.23.3" }
solana-genesis = { path = "../genesis", version = "0.23.3" }
solana-client = { path = "../client", version = "0.23.3" }
solana-faucet = { path = "../faucet", version = "0.23.3" }
solana-librapay = { path = "../programs/librapay", version = "0.23.3", optional = true }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-metrics = { path = "../metrics", version = "0.23.3" }
solana-measure = { path = "../measure", version = "0.23.3" }
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
solana-runtime = { path = "../runtime", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.23.3", optional = true }
[dev-dependencies]
serial_test = "0.3.2"
serial_test_derive = "0.3.1"
solana-local-cluster = { path = "../local-cluster", version = "0.23.0" }
solana-local-cluster = { path = "../local-cluster", version = "0.23.3" }
[features]
move = ["solana-librapay", "solana-move-loader-program"]

View File

@ -303,6 +303,9 @@ The result field will be an object with the following fields:
* `fee: <u64>` - fee this transaction was charged, as u64 integer
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
* `rewards: <array>` - an array of JSON objects containing:
* `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
* `lamports: <i64>` - number of reward lamports credited or debited by the account, as an i64
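The worked example under the heading below is not part of this diff hunk. A hedged client-side sketch of the new `rewards` entry, assuming serde/serde_json as dependencies (both already appear in the workspace manifests) and a placeholder pubkey, could look like:

```rust
use serde::Deserialize;

// Illustrative mirror of the documented fields, not the RPC crate's own type.
#[derive(Debug, Deserialize)]
struct Reward {
    /// Base-58 encoded public key of the account that received the reward.
    pubkey: String,
    /// Reward lamports credited (positive) or debited (negative).
    lamports: i64,
}

fn main() {
    let json = r#"[{"pubkey":"11111111111111111111111111111111","lamports":495}]"#;
    let rewards: Vec<Reward> = serde_json::from_str(json).expect("valid rewards JSON");
    println!("{:?}", rewards);
}
```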
#### Example:
@ -827,7 +830,7 @@ The result field will be a JSON object with the following fields:
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"solana-core": "0.23.0"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "0.23.3"},"id":1}
```
### getVoteAccounts

View File

@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
## Usage
### solana-cli
```text
solana-cli 0.23.0 [channel=unknown commit=unknown]
solana-cli 0.23.3 [channel=unknown commit=unknown]
Blockchain, Rebuilt for Scale
USAGE:
@ -212,7 +212,7 @@ SUBCOMMANDS:
cluster-version Get the version of the cluster entrypoint
config Solana command-line tool configuration settings
confirm Confirm transaction by signature
create-address-with-seed Generate a dervied account address with a seed
create-address-with-seed Generate a derived account address with a seed
create-archiver-storage-account Create an archiver storage account
create-nonce-account Create a nonce account
create-stake-account Create a stake account
@ -241,7 +241,6 @@ SUBCOMMANDS:
stakes Show stake account information
storage-account Show the contents of a storage account
transaction-count Get current transaction count
uptime Show the uptime of a validator, based on epoch voting history
validator-info Publish/get Validator info on Solana
validators Show summary information about the current validators
vote-account Show the contents of a vote account
@ -254,7 +253,7 @@ SUBCOMMANDS:
#### solana-account
```text
solana-account
solana-account
Show the contents of an account
USAGE:
@ -283,7 +282,7 @@ ARGS:
#### solana-address
```text
solana-address
solana-address
Get your public key
USAGE:
@ -307,7 +306,7 @@ OPTIONS:
#### solana-airdrop
```text
solana-airdrop
solana-airdrop
Request lamports
USAGE:
@ -337,7 +336,7 @@ ARGS:
#### solana-authorize-nonce-account
```text
solana-authorize-nonce-account
solana-authorize-nonce-account
Assign account authority to a new entity
USAGE:
@ -373,7 +372,7 @@ ARGS:
#### solana-balance
```text
solana-balance
solana-balance
Get your balance
USAGE:
@ -401,7 +400,7 @@ ARGS:
#### solana-block-production
```text
solana-block-production
solana-block-production
Show information about block production
USAGE:
@ -428,7 +427,7 @@ OPTIONS:
#### solana-block-time
```text
solana-block-time
solana-block-time
Get estimated production time of a block
USAGE:
@ -455,7 +454,7 @@ ARGS:
#### solana-cancel
```text
solana-cancel
solana-cancel
Cancel a transfer
USAGE:
@ -482,7 +481,7 @@ ARGS:
#### solana-catchup
```text
solana-catchup
solana-catchup
Wait for a validator to catch up to the cluster
USAGE:
@ -509,7 +508,7 @@ ARGS:
#### solana-claim-storage-reward
```text
solana-claim-storage-reward
solana-claim-storage-reward
Redeem storage reward credits
USAGE:
@ -537,7 +536,7 @@ ARGS:
#### solana-cluster-version
```text
solana-cluster-version
solana-cluster-version
Get the version of the cluster entrypoint
USAGE:
@ -561,7 +560,7 @@ OPTIONS:
#### solana-config
```text
solana-config
solana-config
Solana command-line tool configuration settings
USAGE:
@ -590,7 +589,7 @@ SUBCOMMANDS:
#### solana-confirm
```text
solana-confirm
solana-confirm
Confirm transaction by signature
USAGE:
@ -617,8 +616,8 @@ ARGS:
#### solana-create-address-with-seed
```text
solana-create-address-with-seed
Generate a dervied account address with a seed
solana-create-address-with-seed
Generate a derived account address with a seed
USAGE:
solana create-address-with-seed [FLAGS] [OPTIONS] <SEED_STRING> <PROGRAM_ID>
@ -641,13 +640,13 @@ OPTIONS:
ARGS:
<SEED_STRING> The seed. Must not take more than 32 bytes to encode as utf-8
<PROGRAM_ID> The program_id that the address will ultimately be used for,
<PROGRAM_ID> The program_id that the address will ultimately be used for,
or one of STAKE, VOTE, and STORAGE keywords
```
#### solana-create-archiver-storage-account
```text
solana-create-archiver-storage-account
solana-create-archiver-storage-account
Create an archiver storage account
USAGE:
@ -669,13 +668,13 @@ OPTIONS:
-k, --keypair <PATH> /path/to/id.json
ARGS:
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT>
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT>
```
#### solana-create-nonce-account
```text
solana-create-nonce-account
solana-create-nonce-account
Create a nonce account
USAGE:
@ -705,7 +704,7 @@ ARGS:
#### solana-create-stake-account
```text
solana-create-stake-account
solana-create-stake-account
Create a stake account
USAGE:
@ -741,7 +740,7 @@ ARGS:
#### solana-create-validator-storage-account
```text
solana-create-validator-storage-account
solana-create-validator-storage-account
Create a validator storage account
USAGE:
@ -763,13 +762,13 @@ OPTIONS:
-k, --keypair <PATH> /path/to/id.json
ARGS:
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT>
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT>
```
#### solana-create-vote-account
```text
solana-create-vote-account
solana-create-vote-account
Create a vote account
USAGE:
@ -802,7 +801,7 @@ ARGS:
#### solana-deactivate-stake
```text
solana-deactivate-stake
solana-deactivate-stake
Deactivate the delegated stake from the stake account
USAGE:
@ -827,9 +826,9 @@ OPTIONS:
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
@ -843,7 +842,7 @@ ARGS:
#### solana-delegate-stake
```text
solana-delegate-stake
solana-delegate-stake
Delegate stake to a vote account
USAGE:
@ -868,9 +867,9 @@ OPTIONS:
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
@ -885,7 +884,7 @@ ARGS:
#### solana-deploy
```text
solana-deploy
solana-deploy
Deploy a program
USAGE:
@ -912,7 +911,7 @@ ARGS:
#### solana-epoch-info
```text
solana-epoch-info
solana-epoch-info
Get information about the current epoch
USAGE:
@ -937,7 +936,7 @@ OPTIONS:
#### solana-fees
```text
solana-fees
solana-fees
Display current cluster fees
USAGE:
@ -961,7 +960,7 @@ OPTIONS:
#### solana-genesis-hash
```text
solana-genesis-hash
solana-genesis-hash
Get the genesis hash
USAGE:
@ -985,7 +984,7 @@ OPTIONS:
#### solana-gossip
```text
solana-gossip
solana-gossip
Show the current gossip network nodes
USAGE:
@ -1009,7 +1008,7 @@ OPTIONS:
#### solana-help
```text
solana-help
solana-help
Prints this message or the help of the given subcommand(s)
USAGE:
@ -1021,7 +1020,7 @@ ARGS:
#### solana-new-nonce
```text
solana-new-nonce
solana-new-nonce
Generate a new nonce, rendering the existing nonce useless
USAGE:
@ -1053,7 +1052,7 @@ ARGS:
#### solana-nonce
```text
solana-nonce
solana-nonce
Get the current nonce value
USAGE:
@ -1080,7 +1079,7 @@ ARGS:
#### solana-nonce-account
```text
solana-nonce-account
solana-nonce-account
Show the contents of a nonce account
USAGE:
@ -1108,14 +1107,14 @@ ARGS:
#### solana-pay
```text
solana-pay
solana-pay
Send a payment
USAGE:
solana pay [FLAGS] [OPTIONS] <TO PUBKEY> <AMOUNT> [--] [UNIT]
FLAGS:
--cancelable
--cancelable
-h, --help Prints help information
--sign-only Sign the transaction offline
--skip-seed-phrase-validation Skip validation of seed phrases. Use this if your phrase does not use the BIP39
@ -1134,9 +1133,9 @@ OPTIONS:
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
@ -1154,7 +1153,7 @@ ARGS:
#### solana-ping
```text
solana-ping
solana-ping
Submit transactions sequentially
USAGE:
@ -1183,7 +1182,7 @@ OPTIONS:
#### solana-send-signature
```text
solana-send-signature
solana-send-signature
Send a signature to authorize a transfer
USAGE:
@ -1211,7 +1210,7 @@ ARGS:
#### solana-send-timestamp
```text
solana-send-timestamp
solana-send-timestamp
Send a timestamp to unlock a transfer
USAGE:
@ -1240,7 +1239,7 @@ ARGS:
#### solana-show-stake-account
```text
solana-show-stake-account
solana-show-stake-account
Show the contents of a stake account
USAGE:
@ -1268,7 +1267,7 @@ ARGS:
#### solana-slot
```text
solana-slot
solana-slot
Get current slot
USAGE:
@ -1293,7 +1292,7 @@ OPTIONS:
#### solana-stake-authorize-staker
```text
solana-stake-authorize-staker
solana-stake-authorize-staker
Authorize a new stake signing keypair for the given stake account
USAGE:
@ -1318,9 +1317,9 @@ OPTIONS:
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
@ -1335,7 +1334,7 @@ ARGS:
#### solana-stake-authorize-withdrawer
```text
solana-stake-authorize-withdrawer
solana-stake-authorize-withdrawer
Authorize a new withdraw signing keypair for the given stake account
USAGE:
@ -1360,9 +1359,9 @@ OPTIONS:
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
@ -1377,7 +1376,7 @@ ARGS:
#### solana-stake-history
```text
solana-stake-history
solana-stake-history
Show the stake history
USAGE:
@ -1402,7 +1401,7 @@ OPTIONS:
#### solana-stakes
```text
solana-stakes
solana-stakes
Show stake account information
USAGE:
@ -1430,7 +1429,7 @@ ARGS:
#### solana-storage-account
```text
solana-storage-account
solana-storage-account
Show the contents of a storage account
USAGE:
@ -1457,7 +1456,7 @@ ARGS:
#### solana-transaction-count
```text
solana-transaction-count
solana-transaction-count
Get current transaction count
USAGE:
@ -1480,38 +1479,9 @@ OPTIONS:
-k, --keypair <PATH> /path/to/id.json
```
#### solana-uptime
```text
solana-uptime
Show the uptime of a validator, based on epoch voting history
USAGE:
solana uptime [FLAGS] [OPTIONS] <VOTE ACCOUNT PUBKEY>
FLAGS:
--aggregate Aggregate uptime data across span
-h, --help Prints help information
--skip-seed-phrase-validation Skip validation of seed phrases. Use this if your phrase does not use the BIP39
official English word list
-V, --version Prints version information
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Recover a keypair using a seed phrase and optional passphrase [possible
values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--span <NUM OF EPOCHS> Number of recent epochs to examine
ARGS:
<VOTE ACCOUNT PUBKEY> Vote account pubkey
```
#### solana-validator-info
```text
solana-validator-info
solana-validator-info
Publish/get Validator info on Solana
USAGE:
@ -1540,7 +1510,7 @@ SUBCOMMANDS:
#### solana-validators
```text
solana-validators
solana-validators
Show summary information about the current validators
USAGE:
@ -1565,7 +1535,7 @@ OPTIONS:
#### solana-vote-account
```text
solana-vote-account
solana-vote-account
Show the contents of a vote account
USAGE:
@ -1593,7 +1563,7 @@ ARGS:
#### solana-vote-authorize-voter
```text
solana-vote-authorize-voter
solana-vote-authorize-voter
Authorize a new vote signing keypair for the given vote account
USAGE:
@ -1621,7 +1591,7 @@ ARGS:
#### solana-vote-authorize-withdrawer
```text
solana-vote-authorize-withdrawer
solana-vote-authorize-withdrawer
Authorize a new withdraw signing keypair for the given vote account
USAGE:
@ -1649,7 +1619,7 @@ ARGS:
#### solana-vote-update-validator
```text
solana-vote-update-validator
solana-vote-update-validator
Update the vote account's validator identity
USAGE:
@ -1678,7 +1648,7 @@ ARGS:
#### solana-withdraw-from-nonce-account
```text
solana-withdraw-from-nonce-account
solana-withdraw-from-nonce-account
Withdraw lamports from the nonce account
USAGE:
@ -1713,7 +1683,7 @@ ARGS:
#### solana-withdraw-stake
```text
solana-withdraw-stake
solana-withdraw-stake
Withdraw the unstaked lamports from the stake account
USAGE:

View File

@ -94,12 +94,13 @@ The Stakes and the RewardsPool are accounts that are owned by the same `Stake` p
### StakeInstruction::DelegateStake
The Stake account is moved from Initialized to StakeState::Stake form. This is how stakers choose their initial delegate validator node and activate their stake account lamports. The transaction must be signed by the stake's `authorized_staker`. If the stake account is already StakeState::Stake \(i.e. already activated\), the stake is re-delegated. Stakes may be re-delegated at any time, and updated stakes are reflected immediately, but only one re-delegation is permitted per epoch.
The Stake account is moved from Initialized to StakeState::Stake form, or from a deactivated (i.e. fully cooled-down) StakeState::Stake to activated StakeState::Stake. This is how stakers choose the vote account and validator node to which their stake account lamports are delegated. The transaction must be signed by the stake's `authorized_staker`.
* `account[0]` - RW - The StakeState::Stake instance. `StakeState::Stake::credits_observed` is initialized to `VoteState::credits`, `StakeState::Stake::voter_pubkey` is initialized to `account[1]`. If this is the initial delegation of stake, `StakeState::Stake::stake` is initialized to the account's balance in lamports, `StakeState::Stake::activated` is initialized to the current Bank epoch, and `StakeState::Stake::deactivated` is initialized to std::u64::MAX
* `account[1]` - R - The VoteState instance.
* `account[2]` - R - sysvar::clock account, carries information about current Bank epoch
* `account[3]` - R - stake::Config account, carries warmup, cooldown, and slashing configuration
* `account[3]` - R - sysvar::stakehistory account, carries information about stake history
* `account[4]` - R - stake::Config account, carries warmup, cooldown, and slashing configuration
### StakeInstruction::Authorize\(Pubkey, StakeAuthorize\)

View File

@ -2,7 +2,7 @@
Follow this guide to setup Solana's key generation tool called `solana-keygen`
{% hint style="warn" %}
After installation, ensure your version is `0.21.1` or higher by running `solana-keygen -V`
After installation, ensure your version is `0.23.1` or higher by running `solana-keygen -V`
{% endhint %}
## Download

View File

@ -1,14 +1,14 @@
# Installing the Validator Software
Install the Solana release
[v0.21.0](https://github.com/solana-labs/solana/releases/tag/v0.21.0) on your
[v0.23.1](https://github.com/solana-labs/solana/releases/tag/v0.23.1) on your
machine by running:
```bash
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.21.0/install/solana-install-init.sh | sh -s - 0.21.0
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.23.1/install/solana-install-init.sh | sh -s - 0.23.1
```
If you are connecting to a different testnet, you can replace `0.21.0` with the
If you are connecting to a different testnet, you can replace `0.23.1` with the
release tag matching the software version of your desired testnet, or replace it
with the named channel `stable`, `beta`, or `edge`.
@ -16,11 +16,11 @@ The following output indicates a successful update:
```text
looking for latest release
downloading v0.21.0 installer
downloading v0.23.1 installer
Configuration: /home/solana/.config/solana/install/config.yml
Active release directory: /home/solana/.local/share/solana/install/active_release
* Release version: 0.21.0
* Release URL: https://github.com/solana-labs/solana/releases/download/v0.21.0/solana-release-x86_64-unknown-linux-gnu.tar.bz2
* Release version: 0.23.1
* Release URL: https://github.com/solana-labs/solana/releases/download/v0.23.1/solana-release-x86_64-unknown-linux-gnu.tar.bz2
Update successful
```

View File

@ -83,7 +83,6 @@ To monitor your validator during its warmup period:
* View your vote account:`solana vote-account ~/validator-vote-keypair.json` This displays the current state of all the votes the validator has submitted to the network.
* View your stake account, the delegation preference and details of your stake:`solana stake-account ~/validator-stake-keypair.json`
* `solana uptime ~/validator-vote-keypair.json` will display the voting history \(aka, uptime\) of your validator over recent Epochs
* `solana validators` displays the current active stake of all validators, including yours
* `solana stake-history ` shows the history of stake warming up and cooling down over recent epochs
* Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] <VALIDATOR_IDENTITY_PUBKEY> voted and reset PoH at tick height ####. My next leader slot is ####`

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "0.23.0"
version = "0.23.3"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,12 +10,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "0.23.0" }
solana-chacha = { path = "../chacha", version = "0.23.0" }
solana-ledger = { path = "../ledger", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-perf = { path = "../perf", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-archiver-utils = { path = "../archiver-utils", version = "0.23.3" }
solana-chacha = { path = "../chacha", version = "0.23.3" }
solana-ledger = { path = "../ledger", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-perf = { path = "../perf", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
[dev-dependencies]
hex-literal = "0.2.1"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "0.23.0"
version = "0.23.3"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "0.23.0"
version = "0.23.3"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,11 +12,11 @@ edition = "2018"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.0" }
solana-ledger = { path = "../ledger", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-perf = { path = "../perf", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.3" }
solana-ledger = { path = "../ledger", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-perf = { path = "../perf", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
[dev-dependencies]
hex-literal = "0.2.1"

View File

@ -1,4 +1,4 @@
FROM solanalabs/rust:1.40.0
FROM solanalabs/rust:1.41.0
ARG date
RUN set -x \

View File

@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.40.0
FROM rust:1.41.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
@ -17,6 +17,7 @@ RUN set -x \
clang-7 \
cmake \
lcov \
libudev-dev \
libclang-common-7-dev \
mscgen \
net-tools \

View File

@ -16,13 +16,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.40.0
stable_version=1.41.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2019-12-19
nightly_version=2020-02-06
fi

View File

@ -378,7 +378,7 @@ deploy() {
(
set -x
ci/testnet-deploy.sh -p testnet-solana-com -C gce -z us-west1-b \
-t "$CHANNEL_OR_TAG" -n 1 -c 0 -u -P \
-t "$CHANNEL_OR_TAG" -n 0 -c 0 -u -P \
-a testnet-solana-com --letsencrypt testnet.solana.com \
--limit-ledger-size \
${skipCreate:+-e} \
@ -389,7 +389,7 @@ deploy() {
(
echo "--- net.sh update"
set -x
time net/net.sh update -t "$CHANNEL_OR_TAG" --platform linux --platform osx --platform windows
time net/net.sh update -t "$CHANNEL_OR_TAG" --platform linux --platform osx #--platform windows
)
;;
testnet-perf)
@ -455,6 +455,10 @@ deploy() {
TDS_CLIENT_COUNT="1"
fi
if [[ -n $TDS_SLOTS_PER_EPOCH ]]; then
maybeSlotsPerEpoch=(--slots-per-epoch "$TDS_SLOTS_PER_EPOCH")
fi
if [[ -z $ENABLE_GPU ]]; then
maybeGpu=(-G "--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100")
elif [[ $ENABLE_GPU == skip ]]; then
@ -540,7 +544,7 @@ deploy() {
${maybeInternalNodesLamports} \
${maybeExternalAccountsFile} \
--target-lamports-per-signature 0 \
--slots-per-epoch 4096 \
"${maybeSlotsPerEpoch[@]}" \
${maybeAdditionalDisk}
)
;;

View File

@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "0.23.0"
version = "0.23.3"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,8 @@ edition = "2018"
clap = "2.33.0"
rpassword = "4.0"
semver = "0.9.0"
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
tiny-bip39 = "0.7.0"
url = "2.1.0"
chrono = "0.4"

View File

@ -1,6 +1,7 @@
use crate::keypair::{keypair_from_seed_phrase, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG};
use chrono::DateTime;
use clap::ArgMatches;
use solana_remote_wallet::remote_wallet::DerivationPath;
use solana_sdk::{
clock::UnixTimestamp,
native_token::sol_to_lamports,
@ -100,6 +101,16 @@ pub fn amount_of(matches: &ArgMatches<'_>, name: &str, unit: &str) -> Option<u64
}
}
pub fn derivation_of(matches: &ArgMatches<'_>, name: &str) -> Option<DerivationPath> {
matches.value_of(name).map(|derivation_str| {
let derivation_str = derivation_str.replace("'", "");
let mut parts = derivation_str.split('/');
let account = parts.next().unwrap().parse::<u16>().unwrap();
let change = parts.next().map(|change| change.parse::<u16>().unwrap());
DerivationPath { account, change }
})
}
#[cfg(test)]
mod tests {
use super::*;
@ -277,4 +288,40 @@ mod tests {
.get_matches_from(vec!["test", "--single", "1.5", "--unit", "lamports"]);
assert_eq!(amount_of(&matches, "single", "unit"), None);
}
#[test]
fn test_derivation_of() {
let matches = app()
.clone()
.get_matches_from(vec!["test", "--single", "2/3"]);
assert_eq!(
derivation_of(&matches, "single"),
Some(DerivationPath {
account: 2,
change: Some(3)
})
);
assert_eq!(derivation_of(&matches, "another"), None);
let matches = app()
.clone()
.get_matches_from(vec!["test", "--single", "2"]);
assert_eq!(
derivation_of(&matches, "single"),
Some(DerivationPath {
account: 2,
change: None
})
);
assert_eq!(derivation_of(&matches, "another"), None);
let matches = app()
.clone()
.get_matches_from(vec!["test", "--single", "2'/3'"]);
assert_eq!(
derivation_of(&matches, "single"),
Some(DerivationPath {
account: 2,
change: Some(3)
})
);
}
}

View File

@ -1,8 +1,10 @@
use crate::keypair::ASK_KEYWORD;
use chrono::DateTime;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{read_keypair_file, Signature};
use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{read_keypair_file, Signature},
};
use std::str::FromStr;
// Return an error if a pubkey cannot be parsed.
@ -141,3 +143,47 @@ pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
.map(|_| ())
.map_err(|e| format!("{:?}", e))
}
pub fn is_derivation(value: String) -> Result<(), String> {
let value = value.replace("'", "");
let mut parts = value.split('/');
let account = parts.next().unwrap();
account
.parse::<u16>()
.map_err(|e| {
format!(
"Unable to parse derivation, provided: {}, err: {:?}",
account, e
)
})
.and_then(|_| {
if let Some(change) = parts.next() {
change.parse::<u16>().map_err(|e| {
format!(
"Unable to parse derivation, provided: {}, err: {:?}",
change, e
)
})
} else {
Ok(0)
}
})
.map(|_| ())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_is_derivation() {
assert_eq!(is_derivation("2".to_string()), Ok(()));
assert_eq!(is_derivation("0".to_string()), Ok(()));
assert_eq!(is_derivation("0/2".to_string()), Ok(()));
assert_eq!(is_derivation("0'/2'".to_string()), Ok(()));
assert!(is_derivation("a".to_string()).is_err());
assert!(is_derivation("65537".to_string()).is_err());
assert!(is_derivation("a/b".to_string()).is_err());
assert!(is_derivation("0/65537".to_string()).is_err());
}
}

View File

@ -32,8 +32,8 @@ pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {
#[derive(Debug, PartialEq)]
pub enum Source {
File,
Generated,
Path,
SeedPhrase,
}
@ -131,7 +131,12 @@ pub fn keypair_input(
keypair_from_seed_phrase(keypair_name, skip_validation, true)
.map(|keypair| KeypairWithSource::new(keypair, Source::SeedPhrase))
} else if let Some(keypair_file) = matches.value_of(keypair_match_name) {
read_keypair_file(keypair_file).map(|keypair| KeypairWithSource::new(keypair, Source::File))
if keypair_file.starts_with("usb://") {
Ok(KeypairWithSource::new(Keypair::new(), Source::Path))
} else {
read_keypair_file(keypair_file)
.map(|keypair| KeypairWithSource::new(keypair, Source::Path))
}
} else {
Ok(KeypairWithSource::new(Keypair::new(), Source::Generated))
}

cli-config/Cargo.toml (new file, 16 lines)
View File

@ -0,0 +1,16 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
dirs = "2.0.2"
lazy_static = "1.4.0"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"

View File

@ -1,8 +1,10 @@
// Wallet settings that can be configured for long-term use
use serde_derive::{Deserialize, Serialize};
use std::fs::{create_dir_all, File};
use std::io::{self, Write};
use std::path::Path;
use std::{
fs::{create_dir_all, File},
io::{self, Write},
path::Path,
};
lazy_static! {
pub static ref CONFIG_FILE: Option<String> = {

cli-config/src/lib.rs (new file, 4 lines)
View File

@ -0,0 +1,4 @@
#[macro_use]
extern crate lazy_static;
pub mod config;

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "0.23.0"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -17,7 +17,6 @@ criterion-stats = "0.3.0"
ctrlc = { version = "3.1.3", features = ["termination"] }
console = "0.9.1"
dirs = "2.0.2"
lazy_static = "1.4.0"
log = "0.4.8"
indicatif = "0.13.0"
humantime = "2.0.0"
@ -27,25 +26,27 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-budget-program = { path = "../programs/budget", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-client = { path = "../client", version = "0.23.0" }
solana-config-program = { path = "../programs/config", version = "0.23.0" }
solana-faucet = { path = "../faucet", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-net-utils = { path = "../net-utils", version = "0.23.0" }
solana-runtime = { path = "../runtime", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-stake-program = { path = "../programs/stake", version = "0.23.0" }
solana-storage-program = { path = "../programs/storage", version = "0.23.0" }
solana-vote-program = { path = "../programs/vote", version = "0.23.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.23.0" }
solana-budget-program = { path = "../programs/budget", version = "0.23.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-cli-config = { path = "../cli-config", version = "0.23.3" }
solana-client = { path = "../client", version = "0.23.3" }
solana-config-program = { path = "../programs/config", version = "0.23.3" }
solana-faucet = { path = "../faucet", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "0.23.3" }
solana-runtime = { path = "../runtime", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-stake-program = { path = "../programs/stake", version = "0.23.3" }
solana-storage-program = { path = "../programs/storage", version = "0.23.3" }
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.23.3" }
titlecase = "1.1.0"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "0.23.0" }
solana-budget-program = { path = "../programs/budget", version = "0.23.0" }
solana-core = { path = "../core", version = "0.23.3" }
solana-budget-program = { path = "../programs/budget", version = "0.23.3" }
tempfile = "3.1.0"
[[bin]]

File diff suppressed because it is too large.

View File

@ -14,7 +14,7 @@ use solana_sdk::{
account_utils::StateMut,
clock::{self, Slot},
commitment_config::CommitmentConfig,
epoch_schedule::{Epoch, EpochSchedule},
epoch_schedule::Epoch,
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
@ -67,6 +67,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.help("Slot number of the block to query")
)
)
.subcommand(SubCommand::with_name("leader-schedule").about("Display leader schedule"))
.subcommand(
SubCommand::with_name("epoch-info")
.about("Get information about the current epoch")
@ -320,20 +321,6 @@ fn new_spinner_progress_bar() -> ProgressBar {
progress_bar
}
/// Aggregate epoch credit stats and return (total credits, total slots, total epochs)
pub fn aggregate_epoch_credits(
epoch_credits: &[(Epoch, u64, u64)],
epoch_schedule: &EpochSchedule,
) -> (u64, u64, u64) {
epoch_credits
.iter()
.fold((0, 0, 0), |acc, (epoch, credits, prev_credits)| {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
(acc.0 + credits_earned, acc.1 + slots_in_epoch, acc.2 + 1)
})
}
pub fn process_catchup(rpc_client: &RpcClient, node_pubkey: &Pubkey) -> ProcessResult {
let cluster_nodes = rpc_client.get_cluster_nodes()?;
@ -406,6 +393,41 @@ pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult {
))
}
pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info()?;
let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index;
let leader_schedule = rpc_client.get_leader_schedule(Some(first_slot_in_epoch))?;
if leader_schedule.is_none() {
return Err(format!(
"Unable to fetch leader schedule for slot {}",
first_slot_in_epoch
)
.into());
}
let leader_schedule = leader_schedule.unwrap();
let mut leader_per_slot_index = Vec::new();
for (pubkey, leader_slots) in leader_schedule.iter() {
for slot_index in leader_slots.iter() {
if *slot_index >= leader_per_slot_index.len() {
leader_per_slot_index.resize(*slot_index + 1, "?");
}
leader_per_slot_index[*slot_index] = pubkey;
}
}
for (slot_index, leader) in leader_per_slot_index.iter().enumerate() {
println!(
" {:<15} {:<44}",
first_slot_in_epoch + slot_index as u64,
leader
);
}
Ok("".to_string())
}
pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResult {
let timestamp = rpc_client.get_block_time(slot)?;
Ok(timestamp.to_string())
@ -429,11 +451,11 @@ pub fn process_get_epoch_info(
let start_slot = epoch_info.absolute_slot - epoch_info.slot_index;
let end_slot = start_slot + epoch_info.slots_in_epoch;
println_name_value(
"Epoch slot range:",
"Epoch Slot Range:",
&format!("[{}..{})", start_slot, end_slot),
);
println_name_value(
"Epoch completed percent:",
"Epoch Completed Percent:",
&format!(
"{:>3.3}%",
epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100_f64
@ -441,14 +463,14 @@ pub fn process_get_epoch_info(
);
let remaining_slots_in_epoch = epoch_info.slots_in_epoch - epoch_info.slot_index;
println_name_value(
"Epoch completed slots:",
"Epoch Completed Slots:",
&format!(
"{}/{} ({} remaining)",
epoch_info.slot_index, epoch_info.slots_in_epoch, remaining_slots_in_epoch
),
);
println_name_value(
"Epoch completed time:",
"Epoch Completed Time:",
&format!(
"{}/{} ({} remaining)",
slot_to_human_time(epoch_info.slot_index),
@ -673,8 +695,8 @@ pub fn process_ping(
) -> ProcessResult {
let to = Keypair::new().pubkey();
println_name_value("Source account:", &config.keypair.pubkey().to_string());
println_name_value("Destination account:", &to.to_string());
println_name_value("Source Account:", &config.keypair.pubkey().to_string());
println_name_value("Destination Account:", &to.to_string());
println!();
let (signal_sender, signal_receiver) = std::sync::mpsc::channel();
@ -864,7 +886,7 @@ pub fn process_show_stakes(
}
pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool) -> ProcessResult {
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let epoch_info = rpc_client.get_epoch_info()?;
let vote_accounts = rpc_client.get_vote_accounts()?;
let total_active_stake = vote_accounts
.current
@ -913,7 +935,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
"Commission",
"Last Vote",
"Root Block",
"Uptime",
"Credits",
"Active Stake",
))
.bold()
@ -921,7 +943,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
fn print_vote_account(
vote_account: RpcVoteAccountInfo,
epoch_schedule: &EpochSchedule,
current_epoch: Epoch,
total_active_stake: f64,
use_lamports_unit: bool,
delinquent: bool,
@ -934,17 +956,6 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
}
}
fn uptime(epoch_credits: Vec<(Epoch, u64, u64)>, epoch_schedule: &EpochSchedule) -> String {
let (total_credits, total_slots, _) =
aggregate_epoch_credits(&epoch_credits, &epoch_schedule);
if total_slots > 0 {
let total_uptime = 100_f64 * total_credits as f64 / total_slots as f64;
format!("{:.2}%", total_uptime)
} else {
"-".into()
}
}
println!(
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
if delinquent {
@ -957,7 +968,15 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
vote_account.commission,
non_zero_or_dash(vote_account.last_vote),
non_zero_or_dash(vote_account.root_slot),
uptime(vote_account.epoch_credits, epoch_schedule),
vote_account
.epoch_credits
.iter()
.find_map(|(epoch, credits, _)| if *epoch == current_epoch {
Some(*credits)
} else {
None
})
.unwrap_or(0),
if vote_account.activated_stake > 0 {
format!(
"{} ({:.2}%)",
@ -973,7 +992,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
for vote_account in vote_accounts.current.into_iter() {
print_vote_account(
vote_account,
&epoch_schedule,
epoch_info.epoch,
total_active_stake,
use_lamports_unit,
false,
@ -982,7 +1001,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
for vote_account in vote_accounts.delinquent.into_iter() {
print_vote_account(
vote_account,
&epoch_schedule,
epoch_info.epoch,
total_active_stake,
use_lamports_unit,
true,

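The new leader-schedule output above is produced by inverting the RPC's "leader pubkey to slot indices" map into one leader per slot index. A standalone sketch of that inversion, with a plain HashMap standing in for the RPC response type used by process_leader_schedule:

use std::collections::HashMap;

// Invert "leader pubkey -> slot indices" into "slot index -> leader",
// filling any unassigned slots with "?" as the command above does.
fn leaders_by_slot_index(schedule: &HashMap<String, Vec<usize>>) -> Vec<&str> {
    let mut leader_per_slot_index: Vec<&str> = Vec::new();
    for (pubkey, leader_slots) in schedule.iter() {
        for slot_index in leader_slots.iter() {
            if *slot_index >= leader_per_slot_index.len() {
                leader_per_slot_index.resize(*slot_index + 1, "?");
            }
            leader_per_slot_index[*slot_index] = pubkey.as_str();
        }
    }
    leader_per_slot_index
}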
View File

@ -1,11 +1,8 @@
#[macro_use]
extern crate lazy_static;
pub mod cli;
pub mod cluster_query;
pub mod config;
pub mod display;
pub mod nonce;
pub mod offline;
pub mod stake;
pub mod storage;
pub mod validator_info;

View File

@ -2,7 +2,8 @@ use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches
use console::style;
use solana_clap_utils::{
input_validators::is_url,
input_parsers::derivation_of,
input_validators::{is_derivation, is_url},
keypair::{
self, keypair_input, KeypairWithSource, ASK_SEED_PHRASE_ARG,
SKIP_SEED_PHRASE_VALIDATION_ARG,
@ -10,9 +11,9 @@ use solana_clap_utils::{
};
use solana_cli::{
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliError},
config::{self, Config},
display::{println_name_value, println_name_value_or},
};
use solana_cli_config::config::{Config, CONFIG_FILE};
use solana_sdk::signature::read_keypair_file;
use std::error;
@ -24,21 +25,25 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
if let Some(config_file) = matches.value_of("config_file") {
let config = Config::load(config_file).unwrap_or_default();
if let Some(field) = subcommand_matches.value_of("specific_setting") {
let (value, default_value) = match field {
"url" => (config.url, CliConfig::default_json_rpc_url()),
"keypair" => (config.keypair_path, CliConfig::default_keypair_path()),
let (field_name, value, default_value) = match field {
"url" => ("RPC URL", config.url, CliConfig::default_json_rpc_url()),
"keypair" => (
"Key Path",
config.keypair_path,
CliConfig::default_keypair_path(),
),
_ => unreachable!(),
};
println_name_value_or(&format!("* {}:", field), &value, &default_value);
println_name_value_or(&format!("{}:", field_name), &value, &default_value);
} else {
println_name_value("Wallet Config:", config_file);
println_name_value("Config File:", config_file);
println_name_value_or(
"* url:",
"RPC URL:",
&config.url,
&CliConfig::default_json_rpc_url(),
);
println_name_value_or(
"* keypair:",
"Keypair Path:",
&config.keypair_path,
&CliConfig::default_keypair_path(),
);
@ -61,9 +66,9 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
config.keypair_path = keypair.to_string();
}
config.save(config_file)?;
println_name_value("Wallet Config Updated:", config_file);
println_name_value("* url:", &config.url);
println_name_value("* keypair:", &config.keypair_path);
println_name_value("Config File:", config_file);
println_name_value("RPC URL:", &config.url);
println_name_value("Keypair Path:", &config.keypair_path);
} else {
println!(
"{} Either provide the `--config` arg or ensure home directory exists to use the default config location",
@ -102,7 +107,7 @@ pub fn parse_args(matches: &ArgMatches<'_>) -> Result<CliConfig, Box<dyn error::
let (keypair, keypair_path) = if require_keypair {
let KeypairWithSource { keypair, source } = keypair_input(&matches, "keypair")?;
match source {
keypair::Source::File => (
keypair::Source::Path => (
keypair,
Some(matches.value_of("keypair").unwrap().to_string()),
),
@ -122,12 +127,16 @@ pub fn parse_args(matches: &ArgMatches<'_>) -> Result<CliConfig, Box<dyn error::
default_keypair_path
};
let keypair = read_keypair_file(&keypair_path).or_else(|err| {
Err(CliError::BadParameter(format!(
"{}: Unable to open keypair file: {}",
err, keypair_path
)))
})?;
let keypair = if keypair_path.starts_with("usb://") {
keypair
} else {
read_keypair_file(&keypair_path).or_else(|err| {
Err(CliError::BadParameter(format!(
"{}: Unable to open keypair file: {}",
err, keypair_path
)))
})?
};
(keypair, Some(keypair_path))
}
@ -142,6 +151,7 @@ pub fn parse_args(matches: &ArgMatches<'_>) -> Result<CliConfig, Box<dyn error::
json_rpc_url,
keypair,
keypair_path,
derivation_path: derivation_of(matches, "derivation_path"),
rpc_client: None,
verbose: matches.is_present("verbose"),
})
@ -162,7 +172,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.takes_value(true)
.global(true)
.help("Configuration file to use");
if let Some(ref config_file) = *config::CONFIG_FILE {
if let Some(ref config_file) = *CONFIG_FILE {
arg.default_value(&config_file)
} else {
arg
@ -185,7 +195,15 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.value_name("PATH")
.global(true)
.takes_value(true)
.help("/path/to/id.json"),
.help("/path/to/id.json or usb://remote/wallet/path"),
)
.arg(
Arg::with_name("derivation_path")
.long("derivation-path")
.value_name("ACCOUNT or ACCOUNT/CHANGE")
.takes_value(true)
.validator(is_derivation)
.help("Derivation path to use: m/44'/501'/ACCOUNT'/CHANGE'; default key is device base pubkey: m/44'/501'/0'")
)
.arg(
Arg::with_name("verbose")

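The keypair handling above now special-cases hardware wallets: a --keypair value beginning with usb:// is deferred to the remote-wallet flow (optionally refined by --derivation-path), while any other value is still read from disk. A reduced sketch of that branch, assuming only the read_keypair_file helper already used in main.rs; None stands for "resolve via the remote wallet later":

use solana_sdk::signature::{read_keypair_file, Keypair};

fn load_local_keypair(keypair_path: &str) -> Result<Option<Keypair>, Box<dyn std::error::Error>> {
    if keypair_path.starts_with("usb://") {
        // Hardware-wallet paths are not keypair files; leave them to the
        // remote-wallet integration.
        Ok(None)
    } else {
        let keypair = read_keypair_file(keypair_path)
            .map_err(|err| format!("{}: Unable to open keypair file: {}", err, keypair_path))?;
        Ok(Some(keypair))
    }
}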
View File

@ -3,6 +3,7 @@ use crate::cli::{
log_instruction_custom_error, required_lamports_from, CliCommand, CliCommandInfo, CliConfig,
CliError, ProcessResult, SigningAuthority,
};
use crate::offline::BLOCKHASH_ARG;
use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant};
use solana_client::rpc_client::RpcClient;
@ -55,7 +56,7 @@ pub fn nonce_arg<'a, 'b>() -> Arg<'a, 'b> {
.long(NONCE_ARG.long)
.takes_value(true)
.value_name("PUBKEY")
.requires("blockhash")
.requires(BLOCKHASH_ARG.name)
.validator(is_pubkey)
.help(NONCE_ARG.help)
}
@ -233,15 +234,8 @@ impl NonceSubCommands for App<'_, '_> {
pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let new_authority = pubkey_of(matches, "new_authority").unwrap();
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
let nonce_authority =
SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, None)?;
Ok(CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
@ -281,15 +275,8 @@ pub fn parse_get_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliEr
pub fn parse_new_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
let nonce_authority =
SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, None)?;
Ok(CliCommandInfo {
command: CliCommand::NewNonce {
@ -319,15 +306,8 @@ pub fn parse_withdraw_from_nonce_account(
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
let lamports = required_lamports_from(matches, "amount", "unit")?;
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
let nonce_authority =
SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, None)?;
Ok(CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
@ -560,11 +540,11 @@ pub fn process_show_nonce_account(
}
let print_account = |data: Option<(Meta, Hash)>| {
println!(
"balance: {}",
"Balance: {}",
build_balance_message(nonce_account.lamports, use_lamports_unit, true)
);
println!(
"minimum balance required: {}",
"Minimum Balance Required: {}",
build_balance_message(
rpc_client.get_minimum_balance_for_rent_exemption(NonceState::size())?,
use_lamports_unit,
@ -573,12 +553,12 @@ pub fn process_show_nonce_account(
);
match data {
Some((meta, hash)) => {
println!("nonce: {}", hash);
println!("authority: {}", meta.nonce_authority);
println!("Nonce: {}", hash);
println!("Authority: {}", meta.nonce_authority);
}
None => {
println!("nonce: uninitialized");
println!("authority: uninitialized");
println!("Nonce: uninitialized");
println!("Authority: uninitialized");
}
}
Ok("".to_string())

cli/src/offline.rs (new file, 271 lines)
View File

@ -0,0 +1,271 @@
use clap::{App, Arg, ArgMatches};
use serde_json::Value;
use solana_clap_utils::{
input_parsers::value_of,
input_validators::{is_hash, is_pubkey_sig},
ArgConstant,
};
use solana_client::rpc_client::RpcClient;
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, signature::Signature};
use std::str::FromStr;
pub const BLOCKHASH_ARG: ArgConstant<'static> = ArgConstant {
name: "blockhash",
long: "blockhash",
help: "Use the supplied blockhash",
};
pub const SIGN_ONLY_ARG: ArgConstant<'static> = ArgConstant {
name: "sign_only",
long: "sign-only",
help: "Sign the transaction offline",
};
pub const SIGNER_ARG: ArgConstant<'static> = ArgConstant {
name: "signer",
long: "signer",
help: "Provid a public-key/signature pair for the transaction",
};
#[derive(Clone, Debug, PartialEq)]
pub enum BlockhashQuery {
None(Hash, FeeCalculator),
FeeCalculator(Hash),
All,
}
impl BlockhashQuery {
pub fn new(blockhash: Option<Hash>, sign_only: bool) -> Self {
match blockhash {
Some(hash) if sign_only => Self::None(hash, FeeCalculator::default()),
Some(hash) if !sign_only => Self::FeeCalculator(hash),
None if !sign_only => Self::All,
_ => panic!("Cannot resolve blockhash"),
}
}
pub fn new_from_matches(matches: &ArgMatches<'_>) -> Self {
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
BlockhashQuery::new(blockhash, sign_only)
}
pub fn get_blockhash_fee_calculator(
&self,
rpc_client: &RpcClient,
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
let (hash, fee_calc) = match self {
BlockhashQuery::None(hash, fee_calc) => (Some(hash), Some(fee_calc)),
BlockhashQuery::FeeCalculator(hash) => (Some(hash), None),
BlockhashQuery::All => (None, None),
};
if None == fee_calc {
let (cluster_hash, fee_calc) = rpc_client.get_recent_blockhash()?;
Ok((*hash.unwrap_or(&cluster_hash), fee_calc))
} else {
Ok((*hash.unwrap(), fee_calc.unwrap().clone()))
}
}
}
impl Default for BlockhashQuery {
fn default() -> Self {
BlockhashQuery::All
}
}
fn blockhash_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(BLOCKHASH_ARG.name)
.long(BLOCKHASH_ARG.long)
.takes_value(true)
.value_name("BLOCKHASH")
.validator(is_hash)
.help(BLOCKHASH_ARG.help)
}
fn sign_only_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGN_ONLY_ARG.name)
.long(SIGN_ONLY_ARG.long)
.takes_value(false)
.requires(BLOCKHASH_ARG.name)
.help(SIGN_ONLY_ARG.help)
}
fn signer_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGNER_ARG.name)
.long(SIGNER_ARG.long)
.takes_value(true)
.value_name("BASE58_PUBKEY=BASE58_SIG")
.validator(is_pubkey_sig)
.requires(BLOCKHASH_ARG.name)
.multiple(true)
.help(SIGNER_ARG.help)
}
pub trait OfflineArgs {
fn offline_args(self) -> Self;
}
impl OfflineArgs for App<'_, '_> {
fn offline_args(self) -> Self {
self.arg(blockhash_arg())
.arg(sign_only_arg())
.arg(signer_arg())
}
}
pub fn parse_sign_only_reply_string(reply: &str) -> (Hash, Vec<(Pubkey, Signature)>) {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
(blockhash, signers)
}
#[cfg(test)]
mod tests {
use super::*;
use clap::App;
use serde_json::{self, json, Value};
use solana_client::{
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext},
};
use solana_sdk::{fee_calculator::FeeCalculator, hash::hash};
use std::collections::HashMap;
#[test]
fn test_blockhashspec_new_ok() {
let blockhash = hash(&[1u8]);
assert_eq!(
BlockhashQuery::new(Some(blockhash), true),
BlockhashQuery::None(blockhash, FeeCalculator::default()),
);
assert_eq!(
BlockhashQuery::new(Some(blockhash), false),
BlockhashQuery::FeeCalculator(blockhash),
);
assert_eq!(BlockhashQuery::new(None, false), BlockhashQuery::All,);
}
#[test]
#[should_panic]
fn test_blockhashspec_new_fail() {
BlockhashQuery::new(None, true);
}
#[test]
fn test_blockhashspec_new_from_matches_ok() {
let test_commands = App::new("blockhashspec_test").offline_args();
let blockhash = hash(&[1u8]);
let blockhash_string = blockhash.to_string();
let matches = test_commands.clone().get_matches_from(vec![
"blockhashspec_test",
"--blockhash",
&blockhash_string,
"--sign-only",
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::None(blockhash, FeeCalculator::default()),
);
let matches = test_commands.clone().get_matches_from(vec![
"blockhashspec_test",
"--blockhash",
&blockhash_string,
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::FeeCalculator(blockhash),
);
let matches = test_commands
.clone()
.get_matches_from(vec!["blockhashspec_test"]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::All,
);
}
#[test]
#[should_panic]
fn test_blockhashspec_new_from_matches_fail() {
let test_commands = App::new("blockhashspec_test")
.arg(blockhash_arg())
// We can really only hit this case if the arg requirements
// are broken, so unset the requires() to recreate that condition
.arg(sign_only_arg().requires(""));
let matches = test_commands
.clone()
.get_matches_from(vec!["blockhashspec_test", "--sign-only"]);
BlockhashQuery::new_from_matches(&matches);
}
#[test]
fn test_blockhashspec_get_blockhash_fee_calc() {
let test_blockhash = hash(&[0u8]);
let rpc_blockhash = hash(&[1u8]);
let rpc_fee_calc = FeeCalculator::new(42, 42);
let get_recent_blockhash_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!((
Value::String(rpc_blockhash.to_string()),
serde_json::to_value(rpc_fee_calc.clone()).unwrap()
)),
});
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::All
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(rpc_blockhash, rpc_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::FeeCalculator(test_blockhash)
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(test_blockhash, rpc_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::None(test_blockhash, FeeCalculator::default())
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(test_blockhash, FeeCalculator::default()),
);
let rpc_client = RpcClient::new_mock("fails".to_string());
assert!(BlockhashQuery::All
.get_blockhash_fee_calculator(&rpc_client)
.is_err());
}
}
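
A condensed sketch of how the three BlockhashQuery variants above map onto the CLI flags, using the solana_cli::offline API added in this diff; the panic mirrors BlockhashQuery::new's behavior when --sign-only is supplied without --blockhash:

use solana_cli::offline::BlockhashQuery;
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash};

// --blockhash plus --sign-only: no RPC calls at all.
// --blockhash alone:            keep the hash, fetch only the fee calculator.
// neither flag:                 fetch both from the cluster.
fn blockhash_query_for(blockhash: Option<Hash>, sign_only: bool) -> BlockhashQuery {
    match (blockhash, sign_only) {
        (Some(hash), true) => BlockhashQuery::None(hash, FeeCalculator::default()),
        (Some(hash), false) => BlockhashQuery::FeeCalculator(hash),
        (None, false) => BlockhashQuery::All,
        (None, true) => panic!("--sign-only requires --blockhash"),
    }
}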

File diff suppressed because it is too large

View File

@ -251,7 +251,7 @@ pub fn process_show_storage_account(
CliError::RpcRequestError(format!("Unable to deserialize storage account: {:?}", err))
})?;
println!("{:#?}", storage_contract);
println!("account lamports: {}", account.lamports);
println!("Account Lamports: {}", account.lamports);
Ok("".to_string())
}

View File

@ -22,8 +22,8 @@ use solana_sdk::{
signature::{Keypair, KeypairUtil},
transaction::Transaction,
};
use std::error;
use titlecase::titlecase;
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
@ -390,9 +390,12 @@ pub fn process_get_validator_info(rpc_client: &RpcClient, pubkey: Option<Pubkey>
parse_validator_info(&validator_info_pubkey, &validator_info_account)?;
println!();
println_name_value("Validator Identity Pubkey:", &validator_pubkey.to_string());
println_name_value(" info pubkey:", &validator_info_pubkey.to_string());
println_name_value(" Info Pubkey:", &validator_info_pubkey.to_string());
for (key, value) in validator_info.iter() {
println_name_value(&format!(" {}:", key), &value.as_str().unwrap_or("?"));
println_name_value(
&format!(" {}:", titlecase(key)),
&value.as_str().unwrap_or("?"),
);
}
}

View File

@ -1,10 +1,6 @@
use crate::{
cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult,
},
cluster_query::aggregate_epoch_credits,
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*};
@ -176,31 +172,6 @@ impl VoteSubCommands for App<'_, '_> {
.help("Display balance in lamports instead of SOL"),
),
)
.subcommand(
SubCommand::with_name("uptime")
.about("Show the uptime of a validator, based on epoch voting history")
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
.value_name("VOTE ACCOUNT PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("Vote account pubkey"),
)
.arg(
Arg::with_name("span")
.long("span")
.value_name("NUM OF EPOCHS")
.takes_value(true)
.help("Number of recent epochs to examine"),
)
.arg(
Arg::with_name("aggregate")
.long("aggregate")
.help("Aggregate uptime data across span"),
),
)
}
}
@ -271,24 +242,6 @@ pub fn parse_vote_get_account_command(
})
}
pub fn parse_vote_uptime_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
let aggregate = matches.is_present("aggregate");
let span = if matches.is_present("span") {
Some(value_t_or_exit!(matches, "span", u64))
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::Uptime {
pubkey: vote_account_pubkey,
aggregate,
span,
},
require_keypair: false,
})
}
pub fn process_create_vote_account(
rpc_client: &RpcClient,
config: &CliConfig,
@ -476,25 +429,25 @@ pub fn process_show_vote_account(
let epoch_schedule = rpc_client.get_epoch_schedule()?;
println!(
"account balance: {}",
"Account Balance: {}",
build_balance_message(vote_account.lamports, use_lamports_unit, true)
);
println!("validator identity: {}", vote_state.node_pubkey);
println!("authorized voter: {}", vote_state.authorized_voter);
println!("Validator Identity: {}", vote_state.node_pubkey);
println!("Authorized Voter: {}", vote_state.authorized_voter);
println!(
"authorized withdrawer: {}",
"Authorized Withdrawer: {}",
vote_state.authorized_withdrawer
);
println!("credits: {}", vote_state.credits());
println!("commission: {}%", vote_state.commission);
println!("Credits: {}", vote_state.credits());
println!("Commission: {}%", vote_state.commission);
println!(
"root slot: {}",
"Root Slot: {}",
match vote_state.root_slot {
Some(slot) => slot.to_string(),
None => "~".to_string(),
}
);
println!("recent timestamp: {:?}", vote_state.last_timestamp);
println!("Recent Timestamp: {:?}", vote_state.last_timestamp);
if !vote_state.votes.is_empty() {
println!("recent votes:");
for vote in &vote_state.votes {
@ -504,7 +457,7 @@ pub fn process_show_vote_account(
);
}
println!("epoch voting history:");
println!("Epoch Voting History:");
for (epoch, credits, prev_credits) in vote_state.epoch_credits() {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
@ -517,60 +470,6 @@ pub fn process_show_vote_account(
Ok("".to_string())
}
pub fn process_uptime(
rpc_client: &RpcClient,
_config: &CliConfig,
vote_account_pubkey: &Pubkey,
aggregate: bool,
span: Option<u64>,
) -> ProcessResult {
let (_vote_account, vote_state) = get_vote_account(rpc_client, vote_account_pubkey)?;
let epoch_schedule = rpc_client.get_epoch_schedule()?;
println!("validator identity: {}", vote_state.node_pubkey);
println!("authorized voter: {}", vote_state.authorized_voter);
if !vote_state.votes.is_empty() {
println!("uptime:");
let epoch_credits: Vec<(u64, u64, u64)> = if let Some(x) = span {
vote_state
.epoch_credits()
.iter()
.rev()
.take(x as usize)
.cloned()
.collect()
} else {
vote_state.epoch_credits().iter().rev().cloned().collect()
};
if aggregate {
let (total_credits, total_slots, epochs) =
aggregate_epoch_credits(&epoch_credits, &epoch_schedule);
if total_slots > 0 {
let total_uptime = 100_f64 * total_credits as f64 / total_slots as f64;
println!("{:.2}% over {} epochs", total_uptime, epochs);
} else {
println!("Insufficient voting history available");
}
} else {
for (epoch, credits, prev_credits) in epoch_credits {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch);
let uptime = credits_earned as f64 / slots_in_epoch as f64;
println!("- epoch: {} {:.2}% uptime", epoch, uptime * 100_f64,);
}
}
if let Some(x) = span {
if x > vote_state.epoch_credits().len() as u64 {
println!("(span longer than available epochs)");
}
}
}
Ok("".to_string())
}
#[cfg(test)]
mod tests {
use super::*;
@ -741,27 +640,5 @@ mod tests {
require_keypair: true
}
);
// Test Uptime Subcommand
let pubkey = Pubkey::new_rand();
let matches = test_commands.clone().get_matches_from(vec![
"test",
"uptime",
&pubkey.to_string(),
"--span",
"4",
"--aggregate",
]);
assert_eq!(
parse_command(&matches).unwrap(),
CliCommandInfo {
command: CliCommand::Uptime {
pubkey,
aggregate: true,
span: Some(4)
},
require_keypair: false
}
);
}
}
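
With the uptime reporting removed above, voting activity is taken directly from the vote state's epoch credits. A small sketch of the arithmetic behind the "Epoch Voting History:" lines in process_show_vote_account, where each entry is an (epoch, credits, prev_credits) tuple:

// Credits earned within an epoch is the difference of the running totals.
fn credits_earned_per_epoch(epoch_credits: &[(u64, u64, u64)]) -> Vec<(u64, u64)> {
    epoch_credits
        .iter()
        .map(|(epoch, credits, prev_credits)| (*epoch, credits - prev_credits))
        .collect()
}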

View File

@ -1,19 +1,19 @@
use chrono::prelude::*;
use serde_json::Value;
use solana_cli::cli::{
process_command, request_and_confirm_airdrop, CliCommand, CliConfig, PayCommand,
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig, PayCommand},
offline::{parse_sign_only_reply_string, BlockhashQuery},
};
use solana_client::rpc_client::RpcClient;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
hash::Hash,
fee_calculator::FeeCalculator,
nonce_state::NonceState,
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair, Keypair, KeypairUtil, Signature},
signature::{read_keypair_file, write_keypair, Keypair, KeypairUtil},
};
use std::fs::remove_dir_all;
use std::str::FromStr;
use std::sync::mpsc::channel;
#[cfg(test)]
@ -289,9 +289,11 @@ fn test_offline_pay_tx() {
check_balance(50, &rpc_client, &config_offline.keypair.pubkey());
check_balance(50, &rpc_client, &config_online.keypair.pubkey());
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
config_offline.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
sign_only: true,
..PayCommand::default()
});
@ -301,24 +303,12 @@ fn test_offline_pay_tx() {
check_balance(50, &rpc_client, &config_online.keypair.pubkey());
check_balance(0, &rpc_client, &bob_pubkey);
let object: Value = serde_json::from_str(&sig_response).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers: Vec<_> = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
config_online.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
signers: Some(signers),
blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
..PayCommand::default()
});
process_command(&config_online).unwrap();
@ -389,7 +379,7 @@ fn test_nonced_pay_tx() {
config.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
blockhash: Some(nonce_hash),
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
..PayCommand::default()
});
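
The pay tests above now rely on the shared parse_sign_only_reply_string helper instead of hand-parsing JSON. A sketch of the reply shape that helper expects, built from default (all-zero) values purely for illustration and assuming the usual base58 Display/FromStr round-trip for these types:

use solana_cli::offline::parse_sign_only_reply_string;
use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Signature};

// The sign-only reply is a JSON object with a "blockhash" string and
// "signers" entries of the form "PUBKEY=SIGNATURE", both base58-encoded.
fn example_reply_roundtrip() {
    let reply = format!(
        r#"{{"blockhash":"{}","signers":["{}={}"]}}"#,
        Hash::default(),
        Pubkey::default(),
        Signature::default(),
    );
    let (blockhash, signers) = parse_sign_only_reply_string(&reply);
    assert_eq!(blockhash, Hash::default());
    assert_eq!(signers, vec![(Pubkey::default(), Signature::default())]);
}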

View File

@ -1,22 +1,25 @@
use serde_json::Value;
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
offline::{parse_sign_only_reply_string, BlockhashQuery},
};
use solana_client::rpc_client::RpcClient;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
hash::Hash,
fee_calculator::FeeCalculator,
nonce_state::NonceState,
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair, Keypair, KeypairUtil, Signature},
signature::{keypair_from_seed, read_keypair_file, write_keypair, Keypair, KeypairUtil},
system_instruction::create_address_with_seed,
};
use solana_stake_program::stake_state::{Lockup, StakeAuthorize, StakeState};
use std::fs::remove_dir_all;
use std::str::FromStr;
use std::sync::mpsc::channel;
#[cfg(test)]
use solana_core::validator::new_validator_for_tests;
use solana_core::validator::{
new_validator_for_tests, new_validator_for_tests_ex, new_validator_for_tests_with_vote_pubkey,
};
use std::thread::sleep;
use std::time::Duration;
@ -40,28 +43,89 @@ fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
});
}
fn parse_sign_only_reply_string(reply: &str) -> (Hash, Vec<(Pubkey, Signature)>) {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
(blockhash, signers)
#[test]
fn test_stake_delegation_force() {
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(leader_data.rpc);
let mut config = CliConfig::default();
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
.unwrap();
// Create vote account
let vote_keypair = Keypair::new();
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&vote_keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateVoteAccount {
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
seed: None,
node_pubkey: config.keypair.pubkey(),
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
};
process_command(&config).unwrap();
// Create stake account
let stake_keypair = Keypair::new();
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&stake_keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateStakeAccount {
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
seed: None,
staker: None,
withdrawer: None,
lockup: Lockup::default(),
lamports: 50_000,
};
process_command(&config).unwrap();
// Delegate stake fails (vote account had never voted)
config.command = CliCommand::DelegateStake {
stake_account_pubkey: stake_keypair.pubkey(),
vote_account_pubkey: vote_keypair.pubkey(),
stake_authority: None,
force: false,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config).unwrap_err();
// But if we force it, it works anyway!
config.command = CliCommand::DelegateStake {
stake_account_pubkey: stake_keypair.pubkey(),
vote_account_pubkey: vote_keypair.pubkey(),
stake_authority: None,
force: true,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config).unwrap();
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
#[test]
fn test_seed_stake_delegation_and_deactivation() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (server, leader_data, alice, ledger_path, vote_pubkey) =
new_validator_for_tests_with_vote_pubkey();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
@ -75,12 +139,6 @@ fn test_seed_stake_delegation_and_deactivation() {
let (validator_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_validator.keypair, tmp_file.as_file_mut()).unwrap();
let mut config_vote = CliConfig::default();
config_vote.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_vote.keypair, tmp_file.as_file_mut()).unwrap();
let mut config_stake = CliConfig::default();
config_stake.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
@ -94,17 +152,6 @@ fn test_seed_stake_delegation_and_deactivation() {
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.keypair.pubkey());
// Create vote account
config_validator.command = CliCommand::CreateVoteAccount {
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
seed: None,
node_pubkey: config_validator.keypair.pubkey(),
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
};
process_command(&config_validator).unwrap();
let stake_address = create_address_with_seed(
&config_validator.keypair.pubkey(),
"hi there",
@ -127,14 +174,15 @@ fn test_seed_stake_delegation_and_deactivation() {
// Delegate stake
config_validator.command = CliCommand::DelegateStake {
stake_account_pubkey: stake_address,
vote_account_pubkey: config_vote.keypair.pubkey(),
vote_account_pubkey: vote_pubkey,
stake_authority: None,
force: true,
force: false,
sign_only: false,
signers: None,
blockhash: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config_validator).unwrap();
@ -144,9 +192,10 @@ fn test_seed_stake_delegation_and_deactivation() {
stake_authority: None,
sign_only: false,
signers: None,
blockhash: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config_validator).unwrap();
@ -158,7 +207,8 @@ fn test_seed_stake_delegation_and_deactivation() {
fn test_stake_delegation_and_deactivation() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (server, leader_data, alice, ledger_path, vote_pubkey) =
new_validator_for_tests_with_vote_pubkey();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
@ -169,12 +219,6 @@ fn test_stake_delegation_and_deactivation() {
config_validator.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let mut config_vote = CliConfig::default();
config_vote.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_vote.keypair, tmp_file.as_file_mut()).unwrap();
let mut config_stake = CliConfig::default();
config_stake.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
@ -190,17 +234,6 @@ fn test_stake_delegation_and_deactivation() {
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.keypair.pubkey());
// Create vote account
config_validator.command = CliCommand::CreateVoteAccount {
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
seed: None,
node_pubkey: config_validator.keypair.pubkey(),
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
};
process_command(&config_validator).unwrap();
// Create stake account
config_validator.command = CliCommand::CreateStakeAccount {
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
@ -215,14 +248,15 @@ fn test_stake_delegation_and_deactivation() {
// Delegate stake
config_validator.command = CliCommand::DelegateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
vote_account_pubkey: config_vote.keypair.pubkey(),
vote_account_pubkey: vote_pubkey,
stake_authority: None,
force: true,
force: false,
sign_only: false,
signers: None,
blockhash: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config_validator).unwrap();
@ -232,9 +266,10 @@ fn test_stake_delegation_and_deactivation() {
stake_authority: None,
sign_only: false,
signers: None,
blockhash: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config_validator).unwrap();
@ -246,7 +281,8 @@ fn test_stake_delegation_and_deactivation() {
fn test_offline_stake_delegation_and_deactivation() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (server, leader_data, alice, ledger_path, vote_pubkey) =
new_validator_for_tests_with_vote_pubkey();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
@ -261,18 +297,18 @@ fn test_offline_stake_delegation_and_deactivation() {
config_payer.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let mut config_vote = CliConfig::default();
config_vote.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_vote.keypair, tmp_file.as_file_mut()).unwrap();
let mut config_stake = CliConfig::default();
config_stake.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_stake.keypair, tmp_file.as_file_mut()).unwrap();
let mut config_offline = CliConfig::default();
config_offline.json_rpc_url = String::default();
config_offline.command = CliCommand::ClusterVersion;
// Verify that we cannot reach the cluster
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
@ -282,22 +318,20 @@ fn test_offline_stake_delegation_and_deactivation() {
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.keypair.pubkey());
// Create vote account
config_validator.command = CliCommand::CreateVoteAccount {
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
seed: None,
node_pubkey: config_validator.keypair.pubkey(),
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
};
process_command(&config_validator).unwrap();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&config_offline.keypair.pubkey(),
100_000,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_offline.keypair.pubkey());
// Create stake account
config_validator.command = CliCommand::CreateStakeAccount {
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
seed: None,
staker: None,
staker: Some(config_offline.keypair.pubkey().into()),
withdrawer: None,
lockup: Lockup::default(),
lamports: 50_000,
@ -305,56 +339,58 @@ fn test_offline_stake_delegation_and_deactivation() {
process_command(&config_validator).unwrap();
// Delegate stake offline
config_validator.command = CliCommand::DelegateStake {
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
config_offline.command = CliCommand::DelegateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
vote_account_pubkey: config_vote.keypair.pubkey(),
vote_account_pubkey: vote_pubkey,
stake_authority: None,
force: true,
force: false,
sign_only: true,
signers: None,
blockhash: None,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
let sig_response = process_command(&config_validator).unwrap();
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
// Delegate stake online
config_payer.command = CliCommand::DelegateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
vote_account_pubkey: config_vote.keypair.pubkey(),
stake_authority: None,
force: true,
vote_account_pubkey: vote_pubkey,
stake_authority: Some(config_offline.keypair.pubkey().into()),
force: false,
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash),
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
nonce_account: None,
nonce_authority: None,
fee_payer: Some(config_offline.keypair.pubkey().into()),
};
process_command(&config_payer).unwrap();
// Deactivate stake offline
config_validator.command = CliCommand::DeactivateStake {
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
config_offline.command = CliCommand::DeactivateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
stake_authority: None,
sign_only: true,
signers: None,
blockhash: None,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
let sig_response = process_command(&config_validator).unwrap();
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
// Deactivate stake online
config_payer.command = CliCommand::DeactivateStake {
stake_account_pubkey: config_stake.keypair.pubkey(),
stake_authority: None,
stake_authority: Some(config_offline.keypair.pubkey().into()),
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: None,
nonce_authority: None,
fee_payer: Some(config_offline.keypair.pubkey().into()),
};
process_command(&config_payer).unwrap();
@ -366,7 +402,8 @@ fn test_offline_stake_delegation_and_deactivation() {
fn test_nonced_stake_delegation_and_deactivation() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
let (server, leader_data, alice, ledger_path, vote_pubkey) =
new_validator_for_tests_with_vote_pubkey();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
@ -383,20 +420,6 @@ fn test_nonced_stake_delegation_and_deactivation() {
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
.unwrap();
// Create vote account
let vote_keypair = Keypair::new();
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&vote_keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateVoteAccount {
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
seed: None,
node_pubkey: config.keypair.pubkey(),
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
};
process_command(&config).unwrap();
// Create stake account
let stake_keypair = Keypair::new();
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
@ -434,14 +457,15 @@ fn test_nonced_stake_delegation_and_deactivation() {
// Delegate stake
config.command = CliCommand::DelegateStake {
stake_account_pubkey: stake_keypair.pubkey(),
vote_account_pubkey: vote_keypair.pubkey(),
vote_account_pubkey: vote_pubkey,
stake_authority: None,
force: true,
force: false,
sign_only: false,
signers: None,
blockhash: Some(nonce_hash),
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: None,
fee_payer: None,
};
process_command(&config).unwrap();
@ -460,9 +484,10 @@ fn test_nonced_stake_delegation_and_deactivation() {
stake_authority: None,
sign_only: false,
signers: None,
blockhash: Some(nonce_hash),
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: Some(config_keypair.into()),
fee_payer: None,
};
process_command(&config).unwrap();
@ -487,6 +512,20 @@ fn test_stake_authorize() {
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
.unwrap();
let mut config_offline = CliConfig::default();
config_offline.json_rpc_url = String::default();
config_offline.command = CliCommand::ClusterVersion;
// Verify that we cannot reach the cluster
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&config_offline.keypair.pubkey(),
100_000,
)
.unwrap();
// Create stake account, identity is authority
let stake_keypair = Keypair::new();
let stake_account_pubkey = stake_keypair.pubkey();
@ -514,9 +553,10 @@ fn test_stake_authorize() {
authority: None,
sign_only: false,
signers: None,
blockhash: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
@ -528,10 +568,9 @@ fn test_stake_authorize() {
assert_eq!(current_authority, online_authority_pubkey);
// Assign new offline stake authority
let offline_authority = Keypair::new();
let offline_authority_pubkey = offline_authority.pubkey();
let offline_authority_pubkey = config_offline.keypair.pubkey();
let (offline_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&offline_authority, tmp_file.as_file_mut()).unwrap();
write_keypair(&config_offline.keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: offline_authority_pubkey,
@ -539,9 +578,10 @@ fn test_stake_authorize() {
authority: Some(read_keypair_file(&online_authority_file).unwrap().into()),
sign_only: false,
signers: None,
blockhash: None,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
@ -557,18 +597,20 @@ fn test_stake_authorize() {
let nonced_authority_pubkey = nonced_authority.pubkey();
let (nonced_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&nonced_authority, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::StakeAuthorize {
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
config_offline.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: nonced_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(read_keypair_file(&offline_authority_file).unwrap().into()),
sign_only: true,
signers: None,
blockhash: None,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
let sign_reply = process_command(&config).unwrap();
let sign_reply = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -577,9 +619,10 @@ fn test_stake_authorize() {
authority: Some(offline_authority_pubkey.into()),
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: None,
nonce_authority: None,
fee_payer: Some(offline_authority_pubkey.into()),
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
@ -600,7 +643,7 @@ fn test_stake_authorize() {
config.command = CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
seed: None,
nonce_authority: Some(config.keypair.pubkey()),
nonce_authority: Some(config_offline.keypair.pubkey()),
lamports: minimum_nonce_balance,
};
process_command(&config).unwrap();
@ -618,18 +661,19 @@ fn test_stake_authorize() {
let online_authority_pubkey = online_authority.pubkey();
let (_online_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&online_authority, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::StakeAuthorize {
config_offline.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: online_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(read_keypair_file(&nonced_authority_file).unwrap().into()),
sign_only: true,
signers: None,
blockhash: Some(nonce_hash),
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: None,
fee_payer: None,
};
let sign_reply = process_command(&config).unwrap();
let sign_reply = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
assert_eq!(blockhash, nonce_hash);
config.command = CliCommand::StakeAuthorize {
@ -639,9 +683,10 @@ fn test_stake_authorize() {
authority: Some(nonced_authority_pubkey.into()),
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: None,
nonce_authority: Some(offline_authority_pubkey.into()),
fee_payer: Some(offline_authority_pubkey.into()),
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
@ -658,6 +703,272 @@ fn test_stake_authorize() {
_ => panic!("Nonce is not initialized"),
};
assert_ne!(nonce_hash, new_nonce_hash);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
#[test]
fn test_stake_authorize_with_fee_payer() {
solana_logger::setup();
const SIG_FEE: u64 = 42;
let (server, leader_data, alice, ledger_path, _voter) =
new_validator_for_tests_ex(SIG_FEE, 42_000);
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(leader_data.rpc);
let mut config = CliConfig::default();
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let mut config_payer = CliConfig::default();
config_payer.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let payer_pubkey = config_payer.keypair.pubkey();
let (payer_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_payer.keypair, tmp_file.as_file_mut()).unwrap();
let mut config_offline = CliConfig::default();
let offline_pubkey = config_offline.keypair.pubkey();
let (_offline_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_offline.keypair, tmp_file.as_file_mut()).unwrap();
// Verify we're offline
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 100_000)
.unwrap();
check_balance(100_000, &rpc_client, &config.keypair.pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &payer_pubkey, 100_000).unwrap();
check_balance(100_000, &rpc_client, &payer_pubkey);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
let stake_keypair = Keypair::new();
let stake_account_pubkey = stake_keypair.pubkey();
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&stake_keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateStakeAccount {
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
seed: None,
staker: None,
withdrawer: None,
lockup: Lockup::default(),
lamports: 50_000,
};
process_command(&config).unwrap();
// `config` balance should be 50,000 - 1 stake account sig - 1 fee sig
check_balance(
50_000 - SIG_FEE - SIG_FEE,
&rpc_client,
&config.keypair.pubkey(),
);
// Assign authority with separate fee payer
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: offline_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: None,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::All,
nonce_account: None,
nonce_authority: None,
fee_payer: Some(read_keypair_file(&payer_keypair_file).unwrap().into()),
};
process_command(&config).unwrap();
// `config` balance has not changed, despite submitting the TX
check_balance(
50_000 - SIG_FEE - SIG_FEE,
&rpc_client,
&config.keypair.pubkey(),
);
// `config_payer` however has paid `config`'s authority sig
// and `config_payer`'s fee sig
check_balance(
100_000 - SIG_FEE - SIG_FEE,
&rpc_client,
&config_payer.keypair.pubkey(),
);
// Assign authority with offline fee payer
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
config_offline.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: payer_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: None,
sign_only: true,
signers: None,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
let sign_reply = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: payer_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(offline_pubkey.into()),
sign_only: false,
signers: Some(signers),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: None,
nonce_authority: None,
fee_payer: Some(offline_pubkey.into()),
};
process_command(&config).unwrap();
// `config`'s balance again has not changed
check_balance(
50_000 - SIG_FEE - SIG_FEE,
&rpc_client,
&config.keypair.pubkey(),
);
// `config_offline` however has paid 1 sig due to being both authority
// and fee payer
check_balance(
100_000 - SIG_FEE,
&rpc_client,
&config_offline.keypair.pubkey(),
);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
#[test]
fn test_stake_split() {
solana_logger::setup();
let (server, leader_data, alice, ledger_path, _voter) = new_validator_for_tests_ex(1, 42_000);
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(leader_data.rpc);
let mut config = CliConfig::default();
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let mut config_offline = CliConfig::default();
let offline_pubkey = config_offline.keypair.pubkey();
let (_offline_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&config_offline.keypair, tmp_file.as_file_mut()).unwrap();
// Verify we're offline
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &config.keypair.pubkey(), 500_000)
.unwrap();
check_balance(500_000, &rpc_client, &config.keypair.pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
let minimum_stake_balance = rpc_client
.get_minimum_balance_for_rent_exemption(std::mem::size_of::<StakeState>())
.unwrap();
println!("stake min: {}", minimum_stake_balance);
let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
let stake_account_pubkey = stake_keypair.pubkey();
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&stake_keypair, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateStakeAccount {
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
seed: None,
staker: Some(offline_pubkey),
withdrawer: Some(offline_pubkey),
lockup: Lockup::default(),
lamports: 10 * minimum_stake_balance,
};
process_command(&config).unwrap();
check_balance(
10 * minimum_stake_balance,
&rpc_client,
&stake_account_pubkey,
);
// Create nonce account
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
println!("nonce min: {}", minimum_nonce_balance);
let nonce_account = keypair_from_seed(&[1u8; 32]).unwrap();
let nonce_account_pubkey = nonce_account.pubkey();
let (nonce_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&nonce_account, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
seed: None,
nonce_authority: Some(offline_pubkey),
lamports: minimum_nonce_balance,
};
process_command(&config).unwrap();
check_balance(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey);
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account_pubkey).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
// Nonced offline split
let split_account = keypair_from_seed(&[2u8; 32]).unwrap();
let (split_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&split_account, tmp_file.as_file_mut()).unwrap();
check_balance(0, &rpc_client, &split_account.pubkey());
config_offline.command = CliCommand::SplitStake {
stake_account_pubkey: stake_account_pubkey,
stake_authority: None,
sign_only: true,
signers: None,
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
nonce_account: Some(nonce_account_pubkey.into()),
nonce_authority: None,
split_stake_account: read_keypair_file(&split_keypair_file).unwrap().into(),
seed: None,
lamports: 2 * minimum_stake_balance,
fee_payer: None,
};
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
config.command = CliCommand::SplitStake {
stake_account_pubkey: stake_account_pubkey,
stake_authority: Some(offline_pubkey.into()),
sign_only: false,
signers: Some(signers),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: Some(nonce_account_pubkey.into()),
nonce_authority: Some(offline_pubkey.into()),
split_stake_account: read_keypair_file(&split_keypair_file).unwrap().into(),
seed: None,
lamports: 2 * minimum_stake_balance,
fee_payer: Some(offline_pubkey.into()),
};
process_command(&config).unwrap();
check_balance(
8 * minimum_stake_balance,
&rpc_client,
&stake_account_pubkey,
);
check_balance(
2 * minimum_stake_balance,
&rpc_client,
&split_account.pubkey(),
);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}

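The nonce-hash lookup used in these tests (fetch the account, deserialize its `NonceState`, and match on `Initialized`) is repeated several times below. A minimal helper sketch of that pattern follows; the function name `fetch_nonce_hash` is hypothetical, and error handling is simplified to panics to match the tests' style.

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{account_utils::StateMut, hash::Hash, nonce_state::NonceState, pubkey::Pubkey};

// Hypothetical helper: read a nonce account and return its stored blockhash.
fn fetch_nonce_hash(rpc_client: &RpcClient, nonce_pubkey: &Pubkey) -> Hash {
    let account = rpc_client.get_account(nonce_pubkey).unwrap();
    let nonce_state: NonceState = account.state().unwrap();
    match nonce_state {
        NonceState::Initialized(_meta, hash) => hash,
        _ => panic!("Nonce is not initialized"),
    }
}
```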
cli/tests/transfer.rs (new file, 215 lines)
View File

@ -0,0 +1,215 @@
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
offline::{parse_sign_only_reply_string, BlockhashQuery},
};
use solana_client::rpc_client::RpcClient;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
fee_calculator::FeeCalculator,
nonce_state::NonceState,
pubkey::Pubkey,
signature::{keypair_from_seed, read_keypair_file, write_keypair, KeypairUtil},
};
use std::fs::remove_dir_all;
use std::sync::mpsc::channel;
#[cfg(test)]
use solana_core::validator::new_validator_for_tests_ex;
use std::thread::sleep;
use std::time::Duration;
use tempfile::NamedTempFile;
fn make_tmp_file() -> (String, NamedTempFile) {
let tmp_file = NamedTempFile::new().unwrap();
(String::from(tmp_file.path().to_str().unwrap()), tmp_file)
}
fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
(0..5).for_each(|tries| {
let balance = client.retry_get_balance(pubkey, 1).unwrap().unwrap();
if balance == expected_balance {
return;
}
if tries == 4 {
assert_eq!(balance, expected_balance);
}
sleep(Duration::from_millis(500));
});
}
#[test]
fn test_transfer() {
let (server, leader_data, mint_keypair, ledger_path, _) = new_validator_for_tests_ex(1, 42_000);
let (sender, receiver) = channel();
run_local_faucet(mint_keypair, sender, None);
let faucet_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(leader_data.rpc);
let mut config = CliConfig::default();
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let sender_pubkey = config.keypair.pubkey();
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
println!("sender: {:?}", sender_pubkey);
println!("recipient: {:?}", recipient_pubkey);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &sender_pubkey, 50_000).unwrap();
check_balance(50_000, &rpc_client, &sender_pubkey);
check_balance(0, &rpc_client, &recipient_pubkey);
// Plain ole transfer
config.command = CliCommand::Transfer {
lamports: 10,
to: recipient_pubkey,
from: None,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::All,
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
process_command(&config).unwrap();
check_balance(49_989, &rpc_client, &sender_pubkey);
check_balance(10, &rpc_client, &recipient_pubkey);
let mut offline = CliConfig::default();
offline.json_rpc_url = String::default();
// Verify we cannot contact the cluster
offline.command = CliCommand::ClusterVersion;
process_command(&offline).unwrap_err();
let offline_pubkey = offline.keypair.pubkey();
println!("offline: {:?}", offline_pubkey);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 50).unwrap();
check_balance(50, &rpc_client, &offline_pubkey);
// Offline transfer
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
offline.command = CliCommand::Transfer {
lamports: 10,
to: recipient_pubkey,
from: None,
sign_only: true,
signers: None,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
nonce_account: None,
nonce_authority: None,
fee_payer: None,
};
let sign_only_reply = process_command(&offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_only_reply);
config.command = CliCommand::Transfer {
lamports: 10,
to: recipient_pubkey,
from: Some(offline_pubkey.into()),
sign_only: false,
signers: Some(signers),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: None,
nonce_authority: None,
fee_payer: Some(offline_pubkey.into()),
};
process_command(&config).unwrap();
check_balance(39, &rpc_client, &offline_pubkey);
check_balance(20, &rpc_client, &recipient_pubkey);
// Create nonce account
let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap();
let (nonce_account_file, mut tmp_file) = make_tmp_file();
write_keypair(&nonce_account, tmp_file.as_file_mut()).unwrap();
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
config.command = CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&nonce_account_file).unwrap().into(),
seed: None,
nonce_authority: None,
lamports: minimum_nonce_balance,
};
process_command(&config).unwrap();
check_balance(49_987 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
// Nonced transfer
config.command = CliCommand::Transfer {
lamports: 10,
to: recipient_pubkey,
from: None,
sign_only: false,
signers: None,
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: None,
fee_payer: None,
};
process_command(&config).unwrap();
check_balance(49_976 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
check_balance(30, &rpc_client, &recipient_pubkey);
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let new_nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
assert_ne!(nonce_hash, new_nonce_hash);
// Assign nonce authority to offline
config.command = CliCommand::AuthorizeNonceAccount {
nonce_account: nonce_account.pubkey(),
nonce_authority: None,
new_authority: offline_pubkey,
};
process_command(&config).unwrap();
check_balance(49_975 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
// Offline, nonced transfer
offline.command = CliCommand::Transfer {
lamports: 10,
to: recipient_pubkey,
from: None,
sign_only: true,
signers: None,
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: None,
fee_payer: None,
};
let sign_only_reply = process_command(&offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_only_reply);
config.command = CliCommand::Transfer {
lamports: 10,
to: recipient_pubkey,
from: Some(offline_pubkey.into()),
sign_only: false,
signers: Some(signers),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: Some(offline_pubkey.into()),
fee_payer: Some(offline_pubkey.into()),
};
process_command(&config).unwrap();
check_balance(28, &rpc_client, &offline_pubkey);
check_balance(40, &rpc_client, &recipient_pubkey);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}

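In both test files above, `check_balance` polls via `(0..5).for_each`, where `return` only exits the closure for that iteration rather than the whole helper. A loop-based sketch of the same behavior is below; it is an illustrative alternative, not part of this change, and keeps the tests' retry count and delay.

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;
use std::{thread::sleep, time::Duration};

// Poll until the balance matches, asserting on the final attempt.
fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
    for tries in 0..5 {
        let balance = client.retry_get_balance(pubkey, 1).unwrap().unwrap();
        if balance == expected_balance {
            return; // exits the function, not just one iteration
        }
        if tries == 4 {
            assert_eq!(balance, expected_balance);
        }
        sleep(Duration::from_millis(500));
    }
}
```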
View File

@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "0.23.0"
version = "0.23.3"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -19,11 +19,11 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
solana-net-utils = { path = "../net-utils", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
[dev-dependencies]
assert_matches = "1.3.0"
jsonrpc-core = "14.0.5"
jsonrpc-http-server = "14.0.5"
solana-logger = { path = "../logger", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.3" }

View File

@ -60,13 +60,10 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
Value::Null
}
}
RpcRequest::GetBalance => {
let n = if self.url == "airdrop" { 0 } else { 50 };
serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: Value::Number(Number::from(n)),
})?
}
RpcRequest::GetBalance => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: Value::Number(Number::from(50)),
})?,
RpcRequest::GetRecentBlockhash => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: (

View File

@ -177,7 +177,7 @@ impl RpcClient {
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetVoteAccounts parse failure: {}", err),
format!("GetVoteAccounts parse failure: {:?}", err),
)
})
}
@ -196,7 +196,7 @@ impl RpcClient {
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetClusterNodes parse failure: {}", err),
format!("GetClusterNodes parse failure: {:?}", err),
)
})
}
@ -215,7 +215,7 @@ impl RpcClient {
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetConfirmedBlock parse failure: {}", err),
format!("GetConfirmedBlock parse failure: {:?}", err),
)
})
}
@ -242,7 +242,7 @@ impl RpcClient {
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetConfirmedBlocks parse failure: {}", err),
format!("GetConfirmedBlocks parse failure: {:?}", err),
)
})
}
@ -293,7 +293,7 @@ impl RpcClient {
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetEpochInfo parse failure: {}", err),
format!("GetEpochInfo parse failure: {:?}", err),
)
})
}
@ -324,7 +324,7 @@ impl RpcClient {
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetLeaderSchedule failure: {}", err),
format!("GetLeaderSchedule failure: {:?}", err),
)
})
}
@ -343,7 +343,7 @@ impl RpcClient {
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetEpochSchedule parse failure: {}", err),
format!("GetEpochSchedule parse failure: {:?}", err),
)
})
}
@ -381,7 +381,7 @@ impl RpcClient {
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetVersion parse failure: {}", err),
format!("GetVersion parse failure: {:?}", err),
)
})
}
@ -400,7 +400,7 @@ impl RpcClient {
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("MinimumLedgerSlot parse failure: {}", err),
format!("MinimumLedgerSlot parse failure: {:?}", err),
)
})
}
@ -612,7 +612,7 @@ impl RpcClient {
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("AccountNotFound: pubkey={}: {}", pubkey, err),
format!("AccountNotFound: pubkey={}: {:?}", pubkey, err),
)
})?
}
@ -698,7 +698,7 @@ impl RpcClient {
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("AccountNotFound: pubkey={}: {}", pubkey, err),
format!("AccountNotFound: pubkey={}: {:?}", pubkey, err),
)
})?;
@ -749,7 +749,7 @@ impl RpcClient {
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetTransactionCount parse failure: {}", err),
format!("GetTransactionCount parse failure: {:?}", err),
)
})
}

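The hunks above switch the parse-failure messages from Display (`{}`) to Debug (`{:?}`) formatting, so the wrapped `serde_json` error surfaces its debug representation, which can carry more structure than its Display form, inside the resulting `io::Error`. A minimal illustration of the wrapping pattern, reusing one of the message strings shown above:

```rust
use std::io;

// Sketch of the map_err pattern used above: wrap a serde_json parse
// failure into an io::Error, using Debug formatting for detail.
fn parse_count(response: serde_json::Value) -> io::Result<u64> {
    serde_json::from_value(response).map_err(|err| {
        io::Error::new(
            io::ErrorKind::Other,
            format!("GetTransactionCount parse failure: {:?}", err),
        )
    })
}
```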
View File

@ -15,10 +15,7 @@ pub struct RpcClientRequest {
impl RpcClientRequest {
pub fn new(url: String) -> Self {
Self {
client: reqwest::blocking::Client::new(),
url,
}
Self::new_with_timeout(url, Duration::from_secs(20))
}
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {

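The change above makes `RpcClientRequest::new` delegate to `new_with_timeout` with a 20-second default instead of building the reqwest client directly. A generic sketch of that constructor-delegation pattern (the `Client` type here is illustrative, not the crate's):

```rust
use std::time::Duration;

struct Client {
    url: String,
    timeout: Duration,
}

impl Client {
    // The default constructor delegates to the more general one,
    // so the default timeout lives in exactly one place.
    fn new(url: String) -> Self {
        Self::new_with_timeout(url, Duration::from_secs(20))
    }

    fn new_with_timeout(url: String, timeout: Duration) -> Self {
        Self { url, timeout }
    }
}
```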
View File

@ -32,6 +32,14 @@ pub struct RpcBlockCommitment<T> {
pub total_stake: u64,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct RpcReward {
pub pubkey: String,
pub lamports: i64,
}
pub type RpcRewards = Vec<RpcReward>;
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedBlock {
@ -39,6 +47,7 @@ pub struct RpcConfirmedBlock {
pub blockhash: String,
pub parent_slot: Slot,
pub transactions: Vec<RpcTransactionWithStatusMeta>,
pub rewards: RpcRewards,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]

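`RpcConfirmedBlock` now carries a `rewards` vector of the `RpcReward` entries added above. A self-contained sketch of how one such entry serializes, mirroring the derives locally rather than importing the crate type (the pubkey string is a placeholder):

```rust
use serde::{Deserialize, Serialize};

// Local mirror of the struct added above, for illustration only.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct RpcReward {
    pubkey: String,
    lamports: i64,
}

fn main() {
    let reward = RpcReward {
        pubkey: "ExampleValidatorPubkey111111111111111111111".to_string(),
        lamports: 42,
    };
    // Prints {"pubkey":"ExampleValidatorPubkey111111111111111111111","lamports":42}
    println!("{}", serde_json::to_string(&reward).unwrap());
}
```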
View File

@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "0.23.0"
version = "0.23.3"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@ -40,26 +40,26 @@ rayon = "1.2.0"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
solana-budget-program = { path = "../programs/budget", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-client = { path = "../client", version = "0.23.0" }
solana-faucet = { path = "../faucet", version = "0.23.0" }
solana-budget-program = { path = "../programs/budget", version = "0.23.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-client = { path = "../client", version = "0.23.3" }
solana-faucet = { path = "../faucet", version = "0.23.3" }
ed25519-dalek = "=1.0.0-pre.1"
solana-ledger = { path = "../ledger", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.23.0" }
solana-metrics = { path = "../metrics", version = "0.23.0" }
solana-measure = { path = "../measure", version = "0.23.0" }
solana-net-utils = { path = "../net-utils", version = "0.23.0" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "0.23.0" }
solana-perf = { path = "../perf", version = "0.23.0" }
solana-runtime = { path = "../runtime", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-stake-program = { path = "../programs/stake", version = "0.23.0" }
solana-storage-program = { path = "../programs/storage", version = "0.23.0" }
solana-vote-program = { path = "../programs/vote", version = "0.23.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.23.0" }
solana-sys-tuner = { path = "../sys-tuner", version = "0.23.0" }
solana-ledger = { path = "../ledger", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.23.3" }
solana-metrics = { path = "../metrics", version = "0.23.3" }
solana-measure = { path = "../measure", version = "0.23.3" }
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "0.23.3" }
solana-perf = { path = "../perf", version = "0.23.3" }
solana-runtime = { path = "../runtime", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-stake-program = { path = "../programs/stake", version = "0.23.3" }
solana-storage-program = { path = "../programs/storage", version = "0.23.3" }
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.23.3" }
solana-sys-tuner = { path = "../sys-tuner", version = "0.23.3" }
symlink = "0.1.0"
sys-info = "0.5.8"
tempfile = "3.1.0"
@ -69,7 +69,7 @@ tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
untrusted = "0.7.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.23.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.23.3" }
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
[dev-dependencies]

View File

@ -58,31 +58,35 @@ impl BlockstreamService {
let timeout = Duration::new(1, 0);
let (slot, slot_leader) = slot_full_receiver.recv_timeout(timeout)?;
let entries = blockstore.get_slot_entries(slot, 0, None).unwrap();
let blockstore_meta = blockstore.meta(slot).unwrap().unwrap();
let _parent_slot = if slot == 0 {
None
} else {
Some(blockstore_meta.parent_slot)
};
let ticks_per_slot = entries.iter().filter(|entry| entry.is_tick()).count() as u64;
let mut tick_height = ticks_per_slot * slot;
// Slot might not exist due to LedgerCleanupService, check first
let blockstore_meta = blockstore.meta(slot).unwrap();
if let Some(blockstore_meta) = blockstore_meta {
// Return error to main loop. Thread won't exit, will just log the error
let entries = blockstore.get_slot_entries(slot, 0, None)?;
let _parent_slot = if slot == 0 {
None
} else {
Some(blockstore_meta.parent_slot)
};
let ticks_per_slot = entries.iter().filter(|entry| entry.is_tick()).count() as u64;
let mut tick_height = ticks_per_slot * slot;
for (i, entry) in entries.iter().enumerate() {
if entry.is_tick() {
tick_height += 1;
}
blockstream
.emit_entry_event(slot, tick_height, &slot_leader, &entry)
.unwrap_or_else(|e| {
debug!("Blockstream error: {:?}, {:?}", e, blockstream.output);
});
if i == entries.len() - 1 {
for (i, entry) in entries.iter().enumerate() {
if entry.is_tick() {
tick_height += 1;
}
blockstream
.emit_block_event(slot, tick_height, &slot_leader, entry.hash)
.emit_entry_event(slot, tick_height, &slot_leader, &entry)
.unwrap_or_else(|e| {
debug!("Blockstream error: {:?}, {:?}", e, blockstream.output);
});
if i == entries.len() - 1 {
blockstream
.emit_block_event(slot, tick_height, &slot_leader, entry.hash)
.unwrap_or_else(|e| {
debug!("Blockstream error: {:?}, {:?}", e, blockstream.output);
});
}
}
}
Ok(())

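The rewrite above stops unwrapping `blockstore.meta(slot)` and `get_slot_entries`, since LedgerCleanupService may have purged the slot between notification and lookup; missing slots are skipped, and other errors bubble up to the service loop via `?`, which logs them without killing the thread. A distilled sketch of that shape, with hypothetical `lookup_meta`/`process` functions standing in for the blockstore calls:

```rust
// Hypothetical stand-in for blockstore.meta(): the slot may already be gone.
fn lookup_meta(slot: u64) -> Result<Option<String>, std::io::Error> {
    Ok(if slot % 2 == 0 { Some(format!("meta for {}", slot)) } else { None })
}

fn process(slot: u64) -> Result<(), std::io::Error> {
    // Slot might have been cleaned up: check for Some before doing work.
    if let Some(meta) = lookup_meta(slot)? {
        // Errors returned here go to the caller (the service loop),
        // which logs them and keeps running instead of panicking.
        println!("processing slot {}: {}", slot, meta);
    }
    Ok(())
}

fn main() {
    for slot in 0..4 {
        process(slot).unwrap_or_else(|e| eprintln!("error: {:?}", e));
    }
}
```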
View File

@ -21,7 +21,6 @@ use crate::{
crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
crds_value::{self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, Vote},
packet::{Packet, PACKET_DATA_SIZE},
repair_service::RepairType,
result::{Error, Result},
sendmmsg::{multicast, send_mmsg},
weighted_shuffle::{weighted_best, weighted_shuffle},
@ -29,8 +28,7 @@ use crate::{
use bincode::{serialize, serialized_size};
use core::cmp;
use itertools::Itertools;
use rand::{thread_rng, Rng};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore, staking_utils};
use solana_ledger::{bank_forks::BankForks, staking_utils};
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
use solana_net_utils::{
@ -63,15 +61,12 @@ pub const VALIDATOR_PORT_RANGE: PortRange = (8000, 10_000);
pub const DATA_PLANE_FANOUT: usize = 200;
/// milliseconds we sleep for between gossip requests
pub const GOSSIP_SLEEP_MILLIS: u64 = 100;
/// the number of slots to respond with when responding to `Orphan` requests
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
/// The maximum size of a bloom filter
pub const MAX_BLOOM_SIZE: usize = 1028;
pub const MAX_BLOOM_SIZE: usize = 1018;
/// The maximum size of a protocol payload
const MAX_PROTOCOL_PAYLOAD_SIZE: u64 = PACKET_DATA_SIZE as u64 - MAX_PROTOCOL_HEADER_SIZE;
/// The largest protocol header size
const MAX_PROTOCOL_HEADER_SIZE: u64 = 204;
const MAX_PROTOCOL_HEADER_SIZE: u64 = 214;
#[derive(Debug, PartialEq, Eq)]
pub enum ClusterInfoError {
@ -174,12 +169,6 @@ enum Protocol {
PullResponse(Pubkey, Vec<CrdsValue>),
PushMessage(Pubkey, Vec<CrdsValue>),
PruneMessage(Pubkey, PruneData),
/// Window protocol messages
/// TODO: move this message to a different module
RequestWindowIndex(ContactInfo, u64, u64),
RequestHighestWindowIndex(ContactInfo, u64, u64),
RequestOrphan(ContactInfo, u64),
}
impl ClusterInfo {
@ -272,7 +261,7 @@ impl ClusterInfo {
let ip_addr = node.gossip.ip();
format!(
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5} | {:5}| {:5} | {:5}| {:5} | {:5}| {:5}| v{}\n",
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5} | {:5}| {:5} | {:5}| {:5} | {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
ip_addr.to_string()
} else {
@ -405,7 +394,8 @@ impl ClusterInfo {
.map(|x| x.value.contact_info().unwrap())
}
pub fn rpc_peers(&self) -> Vec<ContactInfo> {
/// all validators that have a valid rpc port regardless of `shred_version`.
pub fn all_rpc_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
@ -440,13 +430,15 @@ impl ClusterInfo {
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| x.id != me)
/* shred_version not considered for gossip peers (i.e., spy nodes do not set
shred_version) */
.filter(|x| ContactInfo::is_valid_address(&x.gossip))
.cloned()
.collect()
}
/// all validators that have a valid tvu port.
pub fn tvu_peers(&self) -> Vec<ContactInfo> {
/// all validators that have a valid tvu port regardless of `shred_version`.
pub fn all_tvu_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
@ -460,7 +452,37 @@ impl ClusterInfo {
.collect()
}
/// all peers that have a valid storage addr
/// all validators that have a valid tvu port and are on the same `shred_version`.
pub fn tvu_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.tvu))
.filter(|x| !ClusterInfo::is_archiver(x))
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.cloned()
.collect()
}
/// all peers that have a valid storage addr regardless of `shred_version`.
pub fn all_storage_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.storage_addr))
.filter(|x| x.id != me.id)
.cloned()
.collect()
}
/// all peers that have a valid storage addr and are on the same `shred_version`.
pub fn storage_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
@ -470,6 +492,7 @@ impl ClusterInfo {
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.storage_addr))
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.cloned()
.collect()
}
@ -483,6 +506,7 @@ impl ClusterInfo {
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.filter(|x| ContactInfo::is_valid_address(&x.tvu))
.filter(|x| ContactInfo::is_valid_address(&x.tvu_forwards))
.cloned()
@ -490,11 +514,12 @@ impl ClusterInfo {
}
/// all tvu peers with valid gossip addrs that likely have the slot being requested
fn repair_peers(&self, slot: Slot) -> Vec<ContactInfo> {
pub fn repair_peers(&self, slot: Slot) -> Vec<ContactInfo> {
let me = self.my_data();
ClusterInfo::tvu_peers(self)
.into_iter()
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.filter(|x| ContactInfo::is_valid_address(&x.gossip))
.filter(|x| {
self.get_epoch_state_for_node(&x.id, None)
@ -830,61 +855,6 @@ impl ClusterInfo {
Ok(())
}
pub fn window_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
let req = Protocol::RequestWindowIndex(self.my_data(), slot, shred_index);
let out = serialize(&req)?;
Ok(out)
}
fn window_highest_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
let req = Protocol::RequestHighestWindowIndex(self.my_data(), slot, shred_index);
let out = serialize(&req)?;
Ok(out)
}
fn orphan_bytes(&self, slot: Slot) -> Result<Vec<u8>> {
let req = Protocol::RequestOrphan(self.my_data(), slot);
let out = serialize(&req)?;
Ok(out)
}
pub fn repair_request(&self, repair_request: &RepairType) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication and has the desired slot, as indicated
// by a valid tvu port location
let valid: Vec<_> = self.repair_peers(repair_request.slot());
if valid.is_empty() {
return Err(ClusterInfoError::NoPeers.into());
}
let n = thread_rng().gen::<usize>() % valid.len();
let addr = valid[n].gossip; // send the request to the peer's gossip port
let out = self.map_repair_request(repair_request)?;
Ok((addr, out))
}
pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
match repair_request {
RepairType::Shred(slot, shred_index) => {
datapoint_debug!(
"cluster_info-repair",
("repair-slot", *slot, i64),
("repair-ix", *shred_index, i64)
);
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
}
RepairType::HighestShred(slot, shred_index) => {
datapoint_debug!(
"cluster_info-repair_highest",
("repair-highest-slot", *slot, i64),
("repair-highest-ix", *shred_index, i64)
);
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
}
RepairType::Orphan(slot) => {
datapoint_debug!("cluster_info-repair_orphan", ("repair-orphan", *slot, i64));
Ok(self.orphan_bytes(*slot)?)
}
}
}
// If the network entrypoint hasn't been discovered yet, add it to the crds table
fn add_entrypoint(&mut self, pulls: &mut Vec<(Pubkey, CrdsFilter, SocketAddr, CrdsValue)>) {
let pull_from_entrypoint = if let Some(entrypoint) = &mut self.entrypoint {
@ -1057,6 +1027,7 @@ impl ClusterInfo {
.spawn(move || {
let mut last_push = timestamp();
let mut last_contact_info_trace = timestamp();
let mut adopt_shred_version = obj.read().unwrap().my_data().shred_version == 0;
let recycler = PacketsRecycler::default();
loop {
let start = timestamp();
@ -1094,9 +1065,32 @@ impl ClusterInfo {
let table_size = obj.read().unwrap().gossip.crds.table.len();
datapoint_debug!(
"cluster_info-purge",
("tabel_size", table_size as i64, i64),
("table_size", table_size as i64, i64),
("purge_stake_timeout", timeout as i64, i64)
);
// Adopt the entrypoint's `shred_version` if ours is unset
if adopt_shred_version {
// If gossip was given an entrypoint, lookup its id
let entrypoint_id = obj.read().unwrap().entrypoint.as_ref().map(|e| e.id);
if let Some(entrypoint_id) = entrypoint_id {
// If a pull from the entrypoint was successful, it should exist in the crds table
let entrypoint = obj.read().unwrap().lookup(&entrypoint_id).cloned();
if let Some(entrypoint) = entrypoint {
let mut self_info = obj.read().unwrap().my_data();
if entrypoint.shred_version == 0 {
info!("Unable to adopt entrypoint's shred version");
} else {
info!(
"Setting shred version to {:?} from entrypoint {:?}",
entrypoint.shred_version, entrypoint.id
);
self_info.shred_version = entrypoint.shred_version;
obj.write().unwrap().insert_self(self_info);
adopt_shred_version = false;
}
}
}
}
//TODO: possibly tune this parameter
//we saw a deadlock passing an obj.read().unwrap().timeout into sleep
if start - last_push > CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2 {
@ -1113,124 +1107,18 @@ impl ClusterInfo {
.unwrap()
}
fn get_data_shred_as_packet(
blockstore: &Arc<Blockstore>,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
) -> Result<Option<Packet>> {
let data = blockstore.get_data_shred(slot, shred_index)?;
Ok(data.map(|data| {
let mut packet = Packet::default();
packet.meta.size = data.len();
packet.meta.set_addr(dest);
packet.data.copy_from_slice(&data);
packet
}))
}
fn run_window_request(
recycler: &PacketsRecycler,
from: &ContactInfo,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
me: &ContactInfo,
slot: Slot,
shred_index: u64,
) -> Option<Packets> {
if let Some(blockstore) = blockstore {
// Try to find the requested index in one of the slots
let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
if let Ok(Some(packet)) = packet {
inc_new_counter_debug!("cluster_info-window-request-ledger", 1);
return Some(Packets::new_with_recycler_data(
recycler,
"run_window_request",
vec![packet],
));
}
}
inc_new_counter_debug!("cluster_info-window-request-fail", 1);
trace!(
"{}: failed RequestWindowIndex {} {} {}",
me.id,
from.id,
slot,
shred_index,
);
None
}
fn run_highest_window_request(
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
highest_index: u64,
) -> Option<Packets> {
let blockstore = blockstore?;
// Try to find the requested index in one of the slots
let meta = blockstore.meta(slot).ok()??;
if meta.received > highest_index {
// meta.received must be at least 1 by this point
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
.ok()??;
return Some(Packets::new_with_recycler_data(
recycler,
"run_highest_window_request",
vec![packet],
));
}
None
}
fn run_orphan(
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
mut slot: Slot,
max_responses: usize,
) -> Option<Packets> {
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blockstore) = blockstore {
// Try to find the next "n" parent slots of the input slot
while let Ok(Some(meta)) = blockstore.meta(slot) {
if meta.received == 0 {
break;
}
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
if let Ok(Some(packet)) = packet {
res.packets.push(packet);
}
if meta.is_parent_set() && res.packets.len() <= max_responses {
slot = meta.parent_slot;
} else {
break;
}
}
}
if res.is_empty() {
return None;
}
Some(res)
}
fn handle_packets(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
blockstore: Option<&Arc<Blockstore>>,
stakes: &HashMap<Pubkey, u64>,
packets: Packets,
response_sender: &PacketSender,
epoch_ms: u64,
) {
// iter over the packets, collect pulls separately and process everything else
let allocated = thread_mem_usage::Allocatedp::default();
let mut gossip_pull_data: Vec<PullData> = vec![];
let timeouts = me.read().unwrap().gossip.make_timeouts(&stakes, epoch_ms);
packets.packets.iter().for_each(|packet| {
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size])
@ -1272,7 +1160,7 @@ impl ClusterInfo {
}
ret
});
Self::handle_pull_response(me, &from, data);
Self::handle_pull_response(me, &from, data, &timeouts);
datapoint_debug!(
"solana-gossip-listen-memory",
("pull_response", (allocated.get() - start) as i64, i64),
@ -1330,13 +1218,6 @@ impl ClusterInfo {
("prune_message", (allocated.get() - start) as i64, i64),
);
}
_ => {
let rsp =
Self::handle_repair(me, recycler, &from_addr, blockstore, request);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
}
}
})
});
// process the collected pulls together
@ -1391,7 +1272,12 @@ impl ClusterInfo {
Some(packets)
}
fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
fn handle_pull_response(
me: &Arc<RwLock<Self>>,
from: &Pubkey,
data: Vec<CrdsValue>,
timeouts: &HashMap<Pubkey, u64>,
) {
let len = data.len();
let now = Instant::now();
let self_id = me.read().unwrap().gossip.id;
@ -1399,7 +1285,7 @@ impl ClusterInfo {
me.write()
.unwrap()
.gossip
.process_pull_response(from, data, timestamp());
.process_pull_response(from, timeouts, data, timestamp());
inc_new_counter_debug!("cluster_info-pull_request_response", 1);
inc_new_counter_debug!("cluster_info-pull_request_response-size", len);
@ -1464,104 +1350,10 @@ impl ClusterInfo {
}
}
fn get_repair_sender(request: &Protocol) -> &ContactInfo {
match request {
Protocol::RequestWindowIndex(ref from, _, _) => from,
Protocol::RequestHighestWindowIndex(ref from, _, _) => from,
Protocol::RequestOrphan(ref from, _) => from,
_ => panic!("Not a repair request"),
}
}
fn handle_repair(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
request: Protocol,
) -> Option<Packets> {
let now = Instant::now();
//TODO this doesn't depend on cluster_info module, could be moved
//but we are using the listen thread to service these requests
//TODO verify from is signed
let self_id = me.read().unwrap().gossip.id;
let from = Self::get_repair_sender(&request);
if from.id == me.read().unwrap().gossip.id {
warn!(
"{}: Ignored received repair request from ME {}",
self_id, from.id,
);
inc_new_counter_debug!("cluster_info-handle-repair--eq", 1);
return None;
}
me.write()
.unwrap()
.gossip
.crds
.update_record_timestamp(&from.id, timestamp());
let my_info = me.read().unwrap().my_data();
let (res, label) = {
match &request {
Protocol::RequestWindowIndex(from, slot, shred_index) => {
inc_new_counter_debug!("cluster_info-request-window-index", 1);
(
Self::run_window_request(
recycler,
from,
&from_addr,
blockstore,
&my_info,
*slot,
*shred_index,
),
"RequestWindowIndex",
)
}
Protocol::RequestHighestWindowIndex(_, slot, highest_index) => {
inc_new_counter_debug!("cluster_info-request-highest-window-index", 1);
(
Self::run_highest_window_request(
recycler,
&from_addr,
blockstore,
*slot,
*highest_index,
),
"RequestHighestWindowIndex",
)
}
Protocol::RequestOrphan(_, slot) => {
inc_new_counter_debug!("cluster_info-request-orphan", 1);
(
Self::run_orphan(
recycler,
&from_addr,
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
),
"RequestOrphan",
)
}
_ => panic!("Not a repair request"),
}
};
trace!("{}: received repair request: {:?}", self_id, request);
report_time_spent(label, &now.elapsed(), "");
res
}
/// Process messages from the network
fn run_listen(
obj: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
blockstore: Option<&Arc<Blockstore>>,
bank_forks: Option<&Arc<RwLock<BankForks>>>,
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
@ -1569,19 +1361,27 @@ impl ClusterInfo {
//TODO cache connections
let timeout = Duration::new(1, 0);
let reqs = requests_receiver.recv_timeout(timeout)?;
let epoch_ms;
let stakes: HashMap<_, _> = match bank_forks {
Some(ref bank_forks) => {
staking_utils::staked_nodes(&bank_forks.read().unwrap().working_bank())
let bank = bank_forks.read().unwrap().working_bank();
let epoch = bank.epoch();
let epoch_schedule = bank.epoch_schedule();
epoch_ms = epoch_schedule.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT;
staking_utils::staked_nodes(&bank)
}
None => {
inc_new_counter_info!("cluster_info-purge-no_working_bank", 1);
epoch_ms = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
HashMap::new()
}
None => HashMap::new(),
};
Self::handle_packets(obj, &recycler, blockstore, &stakes, reqs, response_sender);
Self::handle_packets(obj, &recycler, &stakes, reqs, response_sender, epoch_ms);
Ok(())
}
pub fn listen(
me: Arc<RwLock<Self>>,
blockstore: Option<Arc<Blockstore>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
requests_receiver: PacketReceiver,
response_sender: PacketSender,
@ -1595,7 +1395,6 @@ impl ClusterInfo {
let e = Self::run_listen(
&me,
&recycler,
blockstore.as_ref(),
bank_forks.as_ref(),
&requests_receiver,
&response_sender,
@ -1630,6 +1429,7 @@ impl ClusterInfo {
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
timestamp(),
)
}
@ -1710,6 +1510,7 @@ pub struct Sockets {
pub repair: UdpSocket,
pub retransmit_sockets: Vec<UdpSocket>,
pub storage: Option<UdpSocket>,
pub serve_repair: UdpSocket,
}
#[derive(Debug)]
@ -1730,9 +1531,10 @@ impl Node {
let storage = UdpSocket::bind("127.0.0.1:0").unwrap();
let empty = "0.0.0.0:0".parse().unwrap();
let repair = UdpSocket::bind("127.0.0.1:0").unwrap();
let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()];
let retransmit = UdpSocket::bind("0.0.0.0:0").unwrap();
let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap();
let info = ContactInfo::new(
pubkey,
gossip.local_addr().unwrap(),
@ -1744,6 +1546,7 @@ impl Node {
storage.local_addr().unwrap(),
empty,
empty,
serve_repair.local_addr().unwrap(),
timestamp(),
);
@ -1758,6 +1561,7 @@ impl Node {
broadcast,
repair,
retransmit_sockets: vec![retransmit],
serve_repair,
storage: Some(storage),
ip_echo: None,
},
@ -1780,6 +1584,7 @@ impl Node {
let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()];
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let storage = UdpSocket::bind("0.0.0.0:0").unwrap();
let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap();
let info = ContactInfo::new(
pubkey,
gossip_addr,
@ -1791,6 +1596,7 @@ impl Node {
storage.local_addr().unwrap(),
rpc_addr,
rpc_pubsub_addr,
serve_repair.local_addr().unwrap(),
timestamp(),
);
Node {
@ -1806,6 +1612,7 @@ impl Node {
repair,
retransmit_sockets: vec![retransmit_socket],
storage: None,
serve_repair,
},
}
}
@ -1848,6 +1655,8 @@ impl Node {
multi_bind_in_range(port_range, 8).expect("retransmit multi_bind");
let (repair_port, repair) = Self::bind(port_range);
let (serve_repair_port, serve_repair) = Self::bind(port_range);
let (_, broadcast) = multi_bind_in_range(port_range, 4).expect("broadcast multi_bind");
let info = ContactInfo::new(
@ -1858,6 +1667,7 @@ impl Node {
SocketAddr::new(gossip_addr.ip(), repair_port),
SocketAddr::new(gossip_addr.ip(), tpu_port),
SocketAddr::new(gossip_addr.ip(), tpu_forwards_port),
SocketAddr::new(gossip_addr.ip(), serve_repair_port),
socketaddr_any!(),
socketaddr_any!(),
socketaddr_any!(),
@ -1877,6 +1687,7 @@ impl Node {
repair,
retransmit_sockets,
storage: None,
serve_repair,
ip_echo: Some(ip_echo),
},
}
@ -1913,18 +1724,8 @@ fn report_time_spent(label: &str, time: &Duration, extra: &str) {
mod tests {
use super::*;
use crate::crds_value::CrdsValueLabel;
use crate::repair_service::RepairType;
use crate::result::Error;
use rayon::prelude::*;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_processor::fill_blockstore_slot_with_ticks;
use solana_ledger::get_tmp_ledger_path;
use solana_ledger::shred::{
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
};
use solana_perf::test_tx::test_tx;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::collections::HashSet;
use std::net::{IpAddr, Ipv4Addr};
@ -1995,242 +1796,6 @@ mod tests {
let label = CrdsValueLabel::ContactInfo(d.id);
assert!(cluster_info.gossip.crds.lookup(&label).is_none());
}
#[test]
fn window_index_request() {
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(me);
let rv = cluster_info.repair_request(&RepairType::Shred(0, 0));
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
let gossip_addr = socketaddr!([127, 0, 0, 1], 1234);
let nxt = ContactInfo::new(
&Pubkey::new_rand(),
gossip_addr,
socketaddr!([127, 0, 0, 1], 1235),
socketaddr!([127, 0, 0, 1], 1236),
socketaddr!([127, 0, 0, 1], 1237),
socketaddr!([127, 0, 0, 1], 1238),
socketaddr!([127, 0, 0, 1], 1239),
socketaddr!([127, 0, 0, 1], 1240),
socketaddr!([127, 0, 0, 1], 1241),
socketaddr!([127, 0, 0, 1], 1242),
0,
);
cluster_info.insert_info(nxt.clone());
let rv = cluster_info
.repair_request(&RepairType::Shred(0, 0))
.unwrap();
assert_eq!(nxt.gossip, gossip_addr);
assert_eq!(rv.0, nxt.gossip);
let gossip_addr2 = socketaddr!([127, 0, 0, 2], 1234);
let nxt = ContactInfo::new(
&Pubkey::new_rand(),
gossip_addr2,
socketaddr!([127, 0, 0, 1], 1235),
socketaddr!([127, 0, 0, 1], 1236),
socketaddr!([127, 0, 0, 1], 1237),
socketaddr!([127, 0, 0, 1], 1238),
socketaddr!([127, 0, 0, 1], 1239),
socketaddr!([127, 0, 0, 1], 1240),
socketaddr!([127, 0, 0, 1], 1241),
socketaddr!([127, 0, 0, 1], 1242),
0,
);
cluster_info.insert_info(nxt);
let mut one = false;
let mut two = false;
while !one || !two {
//this randomly picks an option, so eventually it should pick both
let rv = cluster_info
.repair_request(&RepairType::Shred(0, 0))
.unwrap();
if rv.0 == gossip_addr {
one = true;
}
if rv.0 == gossip_addr2 {
two = true;
}
}
assert!(one && two);
}
/// test window requests respond with the right shred, and do not overrun
#[test]
fn run_window_request() {
let recycler = PacketsRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let me = ContactInfo::new(
&Pubkey::new_rand(),
socketaddr!("127.0.0.1:1234"),
socketaddr!("127.0.0.1:1235"),
socketaddr!("127.0.0.1:1236"),
socketaddr!("127.0.0.1:1237"),
socketaddr!("127.0.0.1:1238"),
socketaddr!("127.0.0.1:1239"),
socketaddr!("127.0.0.1:1240"),
socketaddr!("127.0.0.1:1241"),
socketaddr!("127.0.0.1:1242"),
0,
);
let rv = ClusterInfo::run_window_request(
&recycler,
&me,
&socketaddr_any!(),
Some(&blockstore),
&me,
0,
0,
);
assert!(rv.is_none());
let mut common_header = ShredCommonHeader::default();
common_header.slot = 2;
common_header.index = 1;
let mut data_header = DataShredHeader::default();
data_header.parent_offset = 1;
let shred_info = Shred::new_empty_from_header(
common_header,
data_header,
CodingShredHeader::default(),
);
blockstore
.insert_shreds(vec![shred_info], None, false)
.expect("Expect successful ledger write");
let rv = ClusterInfo::run_window_request(
&recycler,
&me,
&socketaddr_any!(),
Some(&blockstore),
&me,
2,
1,
);
assert!(!rv.is_none());
let rv: Vec<Shred> = rv
.expect("packets")
.packets
.into_iter()
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.collect();
assert_eq!(rv[0].index(), 1);
assert_eq!(rv[0].slot(), 2);
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
/// test that run_highest_window_request responds with the right shred, and does not overrun
#[test]
fn run_highest_window_request() {
let recycler = PacketsRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv = ClusterInfo::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
0,
0,
);
assert!(rv.is_none());
let _ = fill_blockstore_slot_with_ticks(
&blockstore,
max_ticks_per_n_shreds(1) + 1,
2,
1,
Hash::default(),
);
let rv = ClusterInfo::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
2,
1,
);
let rv: Vec<Shred> = rv
.expect("packets")
.packets
.into_iter()
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.collect();
assert!(!rv.is_empty());
let index = blockstore.meta(2).unwrap().unwrap().received - 1;
assert_eq!(rv[0].index(), index as u32);
assert_eq!(rv[0].slot(), 2);
let rv = ClusterInfo::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
2,
index + 1,
);
assert!(rv.is_none());
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn run_orphan() {
solana_logger::setup();
let recycler = PacketsRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
assert!(rv.is_none());
// Create slots 1, 2, 3 with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(1, 3, 5);
blockstore
.insert_shreds(shreds, None, false)
.expect("Expect successful ledger write");
// We don't have slot 4, so we don't know how to service this request
let rv =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
assert!(rv.is_none());
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
// for this request
let rv: Vec<_> =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
let expected: Vec<_> = (1..=3)
.rev()
.map(|slot| {
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
ClusterInfo::get_data_shred_as_packet(
&blockstore,
slot,
index,
&socketaddr_any!(),
)
.unwrap()
.unwrap()
})
.collect();
assert_eq!(rv, expected)
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
fn assert_in_range(x: u16, range: (u16, u16)) {
assert!(x >= range.0);
@ -2537,10 +2102,12 @@ mod tests {
let entrypoint_crdsvalue =
CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
let cluster_info = Arc::new(RwLock::new(cluster_info));
let timeouts = cluster_info.read().unwrap().gossip.make_timeouts_test();
ClusterInfo::handle_pull_response(
&cluster_info,
&entrypoint_pubkey,
vec![entrypoint_crdsvalue],
&timeouts,
);
let pulls = cluster_info
.write()
@ -2611,13 +2178,16 @@ mod tests {
}
fn test_split_messages(value: CrdsValue) {
const NUM_VALUES: usize = 30;
const NUM_VALUES: u64 = 30;
let value_size = value.size();
let expected_len = NUM_VALUES / (MAX_PROTOCOL_PAYLOAD_SIZE / value_size).max(1) as usize;
let msgs = vec![value; NUM_VALUES];
let num_values_per_payload = (MAX_PROTOCOL_PAYLOAD_SIZE / value_size).max(1);
// Expected len is the ceiling of the division
let expected_len = (NUM_VALUES + num_values_per_payload - 1) / num_values_per_payload;
let msgs = vec![value; NUM_VALUES as usize];
let split = ClusterInfo::split_gossip_messages(msgs);
assert!(split.len() <= expected_len);
assert!(split.len() as u64 <= expected_len);
}
#[test]
@ -2663,6 +2233,14 @@ mod tests {
cluster_info.insert_info(contact_info);
stakes.insert(id3, 10);
// normal but with different shred version
let id4 = Pubkey::new(&[4u8; 32]);
let mut contact_info = ContactInfo::new_localhost(&id4, timestamp());
contact_info.shred_version = 1;
assert_ne!(contact_info.shred_version, d.shred_version);
cluster_info.insert_info(contact_info.clone());
stakes.insert(id4, 10);
let stakes = Arc::new(stakes);
let (peers, peers_and_stakes) = cluster_info.sorted_tvu_peers_and_stakes(Some(stakes));
assert_eq!(peers.len(), 2);
@ -2782,25 +2360,6 @@ mod tests {
- serialized_size(&PruneData::default()).unwrap(),
);
// make sure repairs are always smaller than the gossip messages
assert!(
max_protocol_size
> serialized_size(&Protocol::RequestWindowIndex(ContactInfo::default(), 0, 0))
.unwrap()
);
assert!(
max_protocol_size
> serialized_size(&Protocol::RequestHighestWindowIndex(
ContactInfo::default(),
0,
0
))
.unwrap()
);
assert!(
max_protocol_size
> serialized_size(&Protocol::RequestOrphan(ContactInfo::default(), 0)).unwrap()
);
// finally assert the header size estimation is correct
assert_eq!(MAX_PROTOCOL_HEADER_SIZE, max_protocol_size);
}

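A recurring theme in the cluster_info changes above is splitting peer queries into `all_*` variants that ignore `shred_version` and same-version variants used for data-plane traffic (tvu, retransmit, repair). A small self-contained sketch of that filtering split, with a stripped-down `Peer` stand-in for `ContactInfo` (fields hypothetical):

```rust
use solana_sdk::pubkey::Pubkey;

// Stripped-down stand-in for ContactInfo, for illustration only.
#[derive(Clone)]
struct Peer {
    id: Pubkey,
    shred_version: u16,
}

// All peers other than ourselves, regardless of shred_version
// (e.g. RPC discovery, or spy nodes that never set one).
fn all_peers(me: &Peer, table: &[Peer]) -> Vec<Peer> {
    table.iter().filter(|p| p.id != me.id).cloned().collect()
}

// Only peers on our shred version, for data-plane traffic.
fn same_version_peers(me: &Peer, table: &[Peer]) -> Vec<Peer> {
    table
        .iter()
        .filter(|p| p.id != me.id)
        .filter(|p| p.shred_version == me.shred_version)
        .cloned()
        .collect()
}
```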
File diff suppressed because it is too large Load Diff

View File

@ -37,7 +37,6 @@ impl StakeLockout {
}
}
#[derive(Default)]
pub struct Tower {
node_pubkey: Pubkey,
threshold_depth: usize,
@ -47,15 +46,24 @@ pub struct Tower {
last_timestamp: BlockTimestamp,
}
impl Tower {
pub fn new(node_pubkey: &Pubkey, vote_account_pubkey: &Pubkey, bank_forks: &BankForks) -> Self {
let mut tower = Self {
node_pubkey: *node_pubkey,
impl Default for Tower {
fn default() -> Self {
Self {
node_pubkey: Pubkey::default(),
threshold_depth: VOTE_THRESHOLD_DEPTH,
threshold_size: VOTE_THRESHOLD_SIZE,
lockouts: VoteState::default(),
last_vote: Vote::default(),
last_timestamp: BlockTimestamp::default(),
}
}
}
impl Tower {
pub fn new(node_pubkey: &Pubkey, vote_account_pubkey: &Pubkey, bank_forks: &BankForks) -> Self {
let mut tower = Self {
node_pubkey: *node_pubkey,
..Tower::default()
};
tower.initialize_lockouts_from_bank_forks(&bank_forks, vote_account_pubkey);
@ -321,12 +329,21 @@ impl Tower {
if let Some(fork_stake) = stake_lockouts.get(&vote.slot) {
let lockout = fork_stake.stake as f64 / total_staked as f64;
trace!(
"fork_stake {} {} {} {}",
"fork_stake slot: {} lockout: {} fork_stake: {} total_stake: {}",
slot,
lockout,
fork_stake.stake,
total_staked
);
if vote.confirmation_count as usize > self.threshold_depth {
for old_vote in &self.lockouts.votes {
if old_vote.slot == vote.slot
&& old_vote.confirmation_count == vote.confirmation_count
{
return true;
}
}
}
lockout > self.threshold_size
} else {
false
@ -542,6 +559,24 @@ mod test {
assert!(tower.check_vote_stake_threshold(0, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_no_skip_lockout_with_new_root() {
solana_logger::setup();
let mut tower = Tower::new_for_tests(4, 0.67);
let mut stakes = HashMap::new();
for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
stakes.insert(
i,
StakeLockout {
stake: 1,
lockout: 8,
},
);
tower.record_vote(i, Hash::default());
}
assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2));
}
#[test]
fn test_is_slot_confirmed_not_enough_stake_failure() {
let tower = Tower::new_for_tests(1, 0.67);
@ -742,6 +777,34 @@ mod test {
assert!(!tower.check_vote_stake_threshold(1, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_lockouts_not_updated() {
solana_logger::setup();
let mut tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![
(
0,
StakeLockout {
stake: 1,
lockout: 8,
},
),
(
1,
StakeLockout {
stake: 2,
lockout: 8,
},
),
]
.into_iter()
.collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
assert!(tower.check_vote_stake_threshold(6, &stakes, 2));
}
#[test]
fn test_lockout_is_updated_for_entire_branch() {
let mut stake_lockouts = HashMap::new();

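The Tower change above replaces `#[derive(Default)]` with an explicit `Default` impl so the threshold fields get meaningful defaults, letting `Tower::new` set only `node_pubkey` via struct-update syntax. A generic sketch of that pattern with an illustrative `Tracker` type (not the crate's):

```rust
#[derive(Debug, PartialEq)]
struct Tracker {
    name: String,
    threshold_depth: usize,
    threshold_size: f64,
}

impl Default for Tracker {
    fn default() -> Self {
        Self {
            name: String::new(),
            // Meaningful defaults live here once, instead of being
            // repeated in every constructor.
            threshold_depth: 8,
            threshold_size: 0.67,
        }
    }
}

impl Tracker {
    fn new(name: &str) -> Self {
        Self {
            name: name.to_string(),
            // Struct-update syntax pulls the rest from Default.
            ..Tracker::default()
        }
    }
}

fn main() {
    let t = Tracker::new("validator");
    assert_eq!(t.threshold_depth, 8);
    println!("{:?}", t);
}
```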
View File

@ -17,7 +17,7 @@ pub struct ContactInfo {
pub tvu: SocketAddr,
/// address to forward shreds to
pub tvu_forwards: SocketAddr,
/// address to send repairs to
/// address to send repair responses to
pub repair: SocketAddr,
/// transactions address
pub tpu: SocketAddr,
@ -29,6 +29,8 @@ pub struct ContactInfo {
pub rpc: SocketAddr,
/// websocket for JSON-RPC push notifications
pub rpc_pubsub: SocketAddr,
/// address to send repair requests to
pub serve_repair: SocketAddr,
/// latest wallclock picked
pub wallclock: u64,
/// node shred version
@ -85,6 +87,7 @@ impl Default for ContactInfo {
storage_addr: socketaddr_any!(),
rpc: socketaddr_any!(),
rpc_pubsub: socketaddr_any!(),
serve_repair: socketaddr_any!(),
wallclock: 0,
shred_version: 0,
}
@ -104,6 +107,7 @@ impl ContactInfo {
storage_addr: SocketAddr,
rpc: SocketAddr,
rpc_pubsub: SocketAddr,
serve_repair: SocketAddr,
now: u64,
) -> Self {
Self {
@ -117,6 +121,7 @@ impl ContactInfo {
storage_addr,
rpc,
rpc_pubsub,
serve_repair,
wallclock: now,
shred_version: 0,
}
@ -134,6 +139,7 @@ impl ContactInfo {
socketaddr!("127.0.0.1:1240"),
socketaddr!("127.0.0.1:1241"),
socketaddr!("127.0.0.1:1242"),
socketaddr!("127.0.0.1:1243"),
now,
)
}
@ -154,6 +160,7 @@ impl ContactInfo {
addr,
addr,
addr,
addr,
0,
)
}
@ -174,6 +181,7 @@ impl ContactInfo {
let repair = next_port(&bind_addr, 5);
let rpc_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
let rpc_pubsub_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
let serve_repair = next_port(&bind_addr, 6);
Self::new(
pubkey,
gossip_addr,
@ -185,6 +193,7 @@ impl ContactInfo {
"0.0.0.0:0".parse().unwrap(),
rpc_addr,
rpc_pubsub_addr,
serve_repair,
timestamp(),
)
}
@ -209,6 +218,7 @@ impl ContactInfo {
daddr,
daddr,
daddr,
daddr,
timestamp(),
)
}
@ -267,6 +277,7 @@ mod tests {
assert!(ci.rpc_pubsub.ip().is_unspecified());
assert!(ci.tpu.ip().is_unspecified());
assert!(ci.storage_addr.ip().is_unspecified());
assert!(ci.serve_repair.ip().is_unspecified());
}
#[test]
fn test_multicast() {
@ -278,6 +289,7 @@ mod tests {
assert!(ci.rpc_pubsub.ip().is_multicast());
assert!(ci.tpu.ip().is_multicast());
assert!(ci.storage_addr.ip().is_multicast());
assert!(ci.serve_repair.ip().is_multicast());
}
#[test]
fn test_entry_point() {
@ -290,6 +302,7 @@ mod tests {
assert!(ci.rpc_pubsub.ip().is_unspecified());
assert!(ci.tpu.ip().is_unspecified());
assert!(ci.storage_addr.ip().is_unspecified());
assert!(ci.serve_repair.ip().is_unspecified());
}
#[test]
fn test_socketaddr() {
@ -299,10 +312,12 @@ mod tests {
assert_eq!(ci.gossip.port(), 11);
assert_eq!(ci.tvu.port(), 12);
assert_eq!(ci.tpu_forwards.port(), 13);
assert_eq!(ci.rpc.port(), 8899);
assert_eq!(ci.rpc_pubsub.port(), 8900);
assert_eq!(ci.rpc.port(), rpc_port::DEFAULT_RPC_PORT);
assert_eq!(ci.rpc_pubsub.port(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
assert!(ci.storage_addr.ip().is_unspecified());
assert_eq!(ci.serve_repair.port(), 16);
}
#[test]
fn replayed_data_new_with_socketaddr_with_pubkey() {
let keypair = Keypair::new();
@ -315,8 +330,17 @@ mod tests {
assert_eq!(d1.tvu, socketaddr!("127.0.0.1:1236"));
assert_eq!(d1.tpu_forwards, socketaddr!("127.0.0.1:1237"));
assert_eq!(d1.tpu, socketaddr!("127.0.0.1:1234"));
assert_eq!(d1.rpc, socketaddr!("127.0.0.1:8899"));
assert_eq!(d1.rpc_pubsub, socketaddr!("127.0.0.1:8900"));
assert_eq!(
d1.rpc,
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PORT))
);
assert_eq!(
d1.rpc_pubsub,
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PUBSUB_PORT))
);
assert_eq!(d1.tvu_forwards, socketaddr!("127.0.0.1:1238"));
assert_eq!(d1.repair, socketaddr!("127.0.0.1:1239"));
assert_eq!(d1.serve_repair, socketaddr!("127.0.0.1:1240"));
}
#[test]

View File

@ -156,11 +156,12 @@ impl CrdsGossip {
pub fn process_pull_response(
&mut self,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
self.pull
.process_pull_response(&mut self.crds, from, response, now)
.process_pull_response(&mut self.crds, from, timeouts, response, now)
}
pub fn make_timeouts_test(&self) -> HashMap<Pubkey, u64> {

View File

@ -25,6 +25,8 @@ use std::collections::HashMap;
use std::collections::VecDeque;
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
// The maximum age of a value received over pull responses
pub const CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS: u64 = 60000;
pub const FALSE_RATE: f64 = 0.1f64;
pub const KEYS: f64 = 8f64;
@ -117,6 +119,7 @@ pub struct CrdsGossipPull {
/// hash and insert time
purged_values: VecDeque<(Hash, u64)>,
pub crds_timeout: u64,
pub msg_timeout: u64,
}
impl Default for CrdsGossipPull {
@ -125,6 +128,7 @@ impl Default for CrdsGossipPull {
purged_values: VecDeque::new(),
pull_request_time: HashMap::new(),
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
}
}
}
@ -210,12 +214,56 @@ impl CrdsGossipPull {
&mut self,
crds: &mut Crds,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
let mut failed = 0;
for r in response {
let owner = r.label().pubkey();
// Check if the crds value is older than the msg_timeout
if now
> r.wallclock()
.checked_add(self.msg_timeout)
.unwrap_or_else(|| 0)
|| now + self.msg_timeout < r.wallclock()
{
match &r.label() {
CrdsValueLabel::ContactInfo(_) => {
// Check if this ContactInfo is actually too old; it's possible that it has
// stake and so might have a longer effective timeout
let timeout = *timeouts
.get(&owner)
.unwrap_or_else(|| timeouts.get(&Pubkey::default()).unwrap());
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|| now + timeout < r.wallclock()
{
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
continue;
}
}
_ => {
// Before discarding this value, check if a ContactInfo for the owner
// exists in the table. If it doesn't, that implies that this value can be discarded
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
continue;
} else {
// Silently insert this old value without bumping record timestamps
failed += crds.insert(r, now).is_err() as usize;
continue;
}
}
}
}
let old = crds.insert(r, now);
failed += old.is_err() as usize;
old.ok().map(|opt| {
@ -322,8 +370,9 @@ impl CrdsGossipPull {
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use crate::crds_value::CrdsData;
use crate::crds_value::{CrdsData, Vote};
use itertools::Itertools;
use solana_perf::test_tx::test_tx;
use solana_sdk::hash::hash;
use solana_sdk::packet::PACKET_DATA_SIZE;
@ -534,8 +583,13 @@ mod test {
continue;
}
assert_eq!(rsp.len(), 1);
let failed =
node.process_pull_response(&mut node_crds, &node_pubkey, rsp.pop().unwrap(), 1);
let failed = node.process_pull_response(
&mut node_crds,
&node_pubkey,
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
rsp.pop().unwrap(),
1,
);
assert_eq!(failed, 0);
assert_eq!(
node_crds
@ -675,4 +729,87 @@ mod test {
.collect();
assert_eq!(masks.len(), 2u64.pow(mask_bits) as usize)
}
#[test]
fn test_process_pull_response() {
let mut node_crds = Crds::default();
let mut node = CrdsGossipPull::default();
let peer_pubkey = Pubkey::new_rand();
let peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&peer_pubkey, 0),
));
let mut timeouts = HashMap::new();
timeouts.insert(Pubkey::default(), node.crds_timeout);
timeouts.insert(peer_pubkey, node.msg_timeout + 1);
// inserting a fresh value should be fine.
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_entry.clone()],
1,
),
0
);
let mut node_crds = Crds::default();
let unstaked_peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&peer_pubkey, 0),
));
// check that old contact infos fail if they are too old, regardless of "timeouts"
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_entry.clone(), unstaked_peer_entry],
node.msg_timeout + 100,
),
2
);
let mut node_crds = Crds::default();
// check that old contact infos can still land as long as they have a "timeouts" entry
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_entry.clone()],
node.msg_timeout + 1,
),
0
);
// construct something that's not a contact info
let peer_vote =
CrdsValue::new_unsigned(CrdsData::Vote(0, Vote::new(&peer_pubkey, test_tx(), 0)));
// check that older CrdsValues (non-ContactInfos) pass even if they are too old,
// but a recent contact info (inserted above) exists
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
0
);
let mut node_crds = Crds::default();
// without a contact info, inserting an old value should fail
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
1
);
}
}
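A minimal standalone sketch (not part of the diff) of the age check that process_pull_response applies above; the function name and the example values are illustrative only:

fn value_is_too_old(now: u64, wallclock: u64, timeout: u64) -> bool {
    // A value is rejected when its wallclock plus the timeout is already in the past,
    // or when it claims a wallclock more than `timeout` ahead of `now`.
    now > wallclock.checked_add(timeout).unwrap_or(0) || now + timeout < wallclock
}

// With timeout = CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS (60_000 ms):
// value_is_too_old(100_000, 30_000, 60_000) == true   (stamped 70 s ago)
// value_is_too_old(100_000, 50_000, 60_000) == false  (stamped 50 s ago)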

View File

@ -30,7 +30,10 @@ use std::collections::{HashMap, HashSet};
pub const CRDS_GOSSIP_NUM_ACTIVE: usize = 30;
pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 5000;
// With a fanout of 6, a 1000-node cluster should only take ~4 hops to converge.
// However, since pushes are stake-weighted, some trailing nodes
// might need more time to receive values. 30 seconds should be plenty.
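// (Illustrative arithmetic, not from the original comment: ceil(log_6(1000)) = ceil(ln 1000 / ln 6) ≈ ceil(3.86) = 4 hops.)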
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
@ -135,7 +138,12 @@ impl CrdsGossipPush {
value: CrdsValue,
now: u64,
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
if now > value.wallclock() + self.msg_timeout {
if now
> value
.wallclock()
.checked_add(self.msg_timeout)
.unwrap_or_else(|| 0)
{
return Err(CrdsGossipError::PushMessageTimeout);
}
if now + self.msg_timeout < value.wallclock() {

View File

@ -6,7 +6,6 @@ use crate::streamer;
use rand::{thread_rng, Rng};
use solana_client::thin_client::{create_client, ThinClient};
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blockstore::Blockstore;
use solana_perf::recycler::Recycler;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
@ -24,7 +23,6 @@ pub struct GossipService {
impl GossipService {
pub fn new(
cluster_info: &Arc<RwLock<ClusterInfo>>,
blockstore: Option<Arc<Blockstore>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
gossip_socket: UdpSocket,
exit: &Arc<AtomicBool>,
@ -47,7 +45,6 @@ impl GossipService {
let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
let t_listen = ClusterInfo::listen(
cluster_info.clone(),
blockstore,
bank_forks.clone(),
request_receiver,
response_sender.clone(),
@ -197,10 +194,10 @@ fn spy(
tvu_peers = spy_ref
.read()
.unwrap()
.tvu_peers()
.all_tvu_peers()
.into_iter()
.collect::<Vec<_>>();
archivers = spy_ref.read().unwrap().storage_peers();
archivers = spy_ref.read().unwrap().all_storage_peers();
if let Some(num) = num_nodes {
if tvu_peers.len() + archivers.len() >= num {
if let Some(gossip_addr) = find_node_by_gossip_addr {
@ -283,8 +280,7 @@ fn make_gossip_node(
cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
}
let cluster_info = Arc::new(RwLock::new(cluster_info));
let gossip_service =
GossipService::new(&cluster_info.clone(), None, None, gossip_socket, &exit);
let gossip_service = GossipService::new(&cluster_info.clone(), None, gossip_socket, &exit);
(gossip_service, ip_echo, cluster_info)
}
@ -303,7 +299,7 @@ mod tests {
let tn = Node::new_localhost();
let cluster_info = ClusterInfo::new_with_invalid_keypair(tn.info.clone());
let c = Arc::new(RwLock::new(cluster_info));
let d = GossipService::new(&c, None, None, tn.sockets.gossip, &exit);
let d = GossipService::new(&c, None, tn.sockets.gossip, &exit);
exit.store(true, Ordering::Relaxed);
d.join().unwrap();
}

View File

@ -68,9 +68,13 @@ impl LedgerCleanupService {
let disk_utilization_pre = blockstore.storage_size();
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Notify blockstore of impending purge
if root > *next_purge_batch {
//cleanup
blockstore.purge_slots(0, Some(root - max_ledger_slots));
let lowest_slot = root - max_ledger_slots;
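// Publish the cut-off before purging so blockstore readers can tell a cleaned-up slot
// from missing data (an assumption about how lowest_cleanup_slot is consumed; see the
// replay-stage comment about purged slots further down in this comparison).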
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_slot;
blockstore.purge_slots(0, Some(lowest_slot));
*next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
}

View File

@ -15,7 +15,6 @@ pub mod contact_info;
pub mod blockstream;
pub mod blockstream_service;
pub mod cluster_info;
pub mod cluster_info_repair_listener;
pub mod consensus;
pub mod crds;
pub mod crds_gossip;
@ -30,7 +29,6 @@ pub mod gossip_service;
pub mod ledger_cleanup_service;
pub mod local_vote_signer_service;
pub mod packet;
pub mod partition_cfg;
pub mod poh_recorder;
pub mod poh_service;
pub mod recvmmsg;
@ -38,12 +36,15 @@ pub mod repair_service;
pub mod replay_stage;
mod result;
pub mod retransmit_stage;
pub mod rewards_recorder_service;
pub mod rpc;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;
pub mod rpc_subscriptions;
pub mod sendmmsg;
pub mod serve_repair;
pub mod serve_repair_service;
pub mod sigverify;
pub mod sigverify_shreds;
pub mod sigverify_stage;

View File

@ -15,10 +15,9 @@ pub struct LocalVoteSignerService {
impl LocalVoteSignerService {
#[allow(clippy::new_ret_no_self)]
pub fn new(port_range: PortRange) -> (Self, SocketAddr) {
let addr = match solana_net_utils::find_available_port_in_range(port_range) {
Ok(port) => SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port),
Err(_e) => panic!("Failed to find an available port for local vote signer service"),
};
let addr = solana_net_utils::find_available_port_in_range(port_range)
.map(|port| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port))
.expect("Failed to find an available port for local vote signer service");
let exit = Arc::new(AtomicBool::new(false));
let thread_exit = exit.clone();
let thread = Builder::new()

View File

@ -9,7 +9,7 @@ use solana_metrics::inc_new_counter_debug;
pub use solana_sdk::packet::{Meta, Packet, PACKET_DATA_SIZE};
use std::{io::Result, net::UdpSocket, time::Instant};
pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: usize) -> Result<usize> {
let mut i = 0;
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
@ -20,9 +20,11 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
socket.set_nonblocking(false)?;
trace!("receiving on {}", socket.local_addr().unwrap());
let start = Instant::now();
let mut total_size = 0;
loop {
obj.packets.resize(i + NUM_RCVMMSGS, Packet::default());
obj.packets.resize(
std::cmp::min(i + NUM_RCVMMSGS, PACKETS_PER_BATCH),
Packet::default(),
);
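// (The resize above grows the buffer NUM_RCVMMSGS at a time but never past
// PACKETS_PER_BATCH, so a single call cannot allocate an unbounded batch.)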
match recv_mmsg(socket, &mut obj.packets[i..]) {
Err(_) if i > 0 => {
if start.elapsed().as_millis() > 1 {
@ -33,16 +35,15 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
trace!("recv_from err {:?}", e);
return Err(e);
}
Ok((size, npkts)) => {
Ok((_, npkts)) => {
if i == 0 {
socket.set_nonblocking(true)?;
}
trace!("got {} packets", npkts);
i += npkts;
total_size += size;
// Try to batch into big enough buffers
// will cause less re-shuffling later on.
if start.elapsed().as_millis() > 1 || total_size >= PACKETS_BATCH_SIZE {
if start.elapsed().as_millis() > max_wait_ms as u128 || i >= PACKETS_PER_BATCH {
break;
}
}
@ -95,7 +96,7 @@ mod tests {
}
send_to(&p, &send_socket).unwrap();
let recvd = recv_from(&mut p, &recv_socket).unwrap();
let recvd = recv_from(&mut p, &recv_socket, 1).unwrap();
assert_eq!(recvd, p.packets.len());
@ -127,4 +128,32 @@ mod tests {
p2.data[0] = 4;
assert!(p1 != p2);
}
#[test]
fn test_packet_resize() {
solana_logger::setup();
let recv_socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = recv_socket.local_addr().unwrap();
let send_socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
let mut p = Packets::default();
p.packets.resize(PACKETS_PER_BATCH, Packet::default());
// Should only get PACKETS_PER_BATCH packets per iteration even
// if a lot more were sent, and regardless of packet size
for _ in 0..2 * PACKETS_PER_BATCH {
let mut p = Packets::default();
p.packets.resize(1, Packet::default());
for m in p.packets.iter_mut() {
m.meta.set_addr(&addr);
m.meta.size = 1;
}
send_to(&p, &send_socket).unwrap();
}
let recvd = recv_from(&mut p, &recv_socket, 100).unwrap();
// Check we only got PACKETS_PER_BATCH packets
assert_eq!(recvd, PACKETS_PER_BATCH);
assert_eq!(p.packets.capacity(), PACKETS_PER_BATCH);
}
}

View File

@ -1,92 +0,0 @@
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::RwLock;
///Configure a partition in the retransmit stage
#[derive(Debug, Clone)]
pub struct Partition {
pub num_partitions: usize,
pub my_partition: usize,
pub start_ts: u64,
pub end_ts: u64,
leaders: Arc<RwLock<Vec<Pubkey>>>,
}
impl Default for Partition {
fn default() -> Self {
Self {
num_partitions: 0,
my_partition: 0,
start_ts: 0,
end_ts: 0,
leaders: Arc::new(RwLock::new(vec![])),
}
}
}
#[derive(Default, Debug, Clone)]
pub struct PartitionCfg {
partitions: Vec<Partition>,
}
impl PartitionCfg {
pub fn new(partitions: Vec<Partition>) -> Self {
Self { partitions }
}
pub fn is_connected(
&self,
bank: &Option<Arc<Bank>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
shred: &Shred,
) -> bool {
if bank.is_none() {
return true;
}
let bank = bank.as_ref().unwrap().clone();
let slot_leader_pubkey = leader_schedule_cache.slot_leader_at(shred.slot(), Some(&bank));
let slot_leader_pubkey = slot_leader_pubkey.unwrap_or_default();
let time = timestamp();
for p in &self.partitions {
let is_time = (p.start_ts <= time) && (time < p.end_ts);
if !is_time {
continue;
}
trace!("PARTITION_TEST partition time! {}", p.my_partition);
if p.num_partitions == 0 {
continue;
}
if p.leaders.read().unwrap().is_empty() {
let mut leader_vec = p.leaders.write().unwrap();
let mut leaders: Vec<Pubkey> = bank.vote_accounts().keys().cloned().collect();
leaders.sort();
*leader_vec = leaders;
warn!("PARTITION_TEST partition enabled {}", p.my_partition);
}
let is_connected: bool = {
let leaders = p.leaders.read().unwrap();
let start = p.my_partition * leaders.len() / p.num_partitions;
let partition_size = leaders.len() / p.num_partitions;
let end = start + partition_size;
let end = if leaders.len() - end < partition_size {
leaders.len()
} else {
end
};
let my_leaders: HashSet<_> = leaders[start..end].iter().collect();
my_leaders.contains(&slot_leader_pubkey)
};
if is_connected {
trace!("PARTITION_TEST connected {}", p.my_partition);
continue;
}
trace!("PARTITION_TEST not connected {}", p.my_partition);
return false;
}
trace!("PARTITION_TEST connected");
true
}
}

View File

@ -1,8 +1,9 @@
//! The `repair_service` module implements the tools necessary to generate a thread which
//! regularly finds missing shreds in the ledger and sends repair requests for those shreds
use crate::{
cluster_info::ClusterInfo, cluster_info_repair_listener::ClusterInfoRepairListener,
cluster_info::ClusterInfo,
result::Result,
serve_repair::{RepairType, ServeRepair},
};
use solana_ledger::{
bank_forks::BankForks,
@ -33,23 +34,6 @@ pub enum RepairStrategy {
},
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
pub enum RepairType {
Orphan(Slot),
HighestShred(Slot, u64),
Shred(Slot, u64),
}
impl RepairType {
pub fn slot(&self) -> Slot {
match self {
RepairType::Orphan(slot) => *slot,
RepairType::HighestShred(slot, _) => *slot,
RepairType::Shred(slot, _) => *slot,
}
}
}
pub struct RepairSlotRange {
pub start: Slot,
pub end: Slot,
@ -66,7 +50,6 @@ impl Default for RepairSlotRange {
pub struct RepairService {
t_repair: JoinHandle<()>,
cluster_info_repair_listener: Option<ClusterInfoRepairListener>,
}
impl RepairService {
@ -77,19 +60,6 @@ impl RepairService {
cluster_info: Arc<RwLock<ClusterInfo>>,
repair_strategy: RepairStrategy,
) -> Self {
let cluster_info_repair_listener = match repair_strategy {
RepairStrategy::RepairAll {
ref epoch_schedule, ..
} => Some(ClusterInfoRepairListener::new(
&blockstore,
&exit,
cluster_info.clone(),
*epoch_schedule,
)),
_ => None,
};
let t_repair = Builder::new()
.name("solana-repair-service".to_string())
.spawn(move || {
@ -103,10 +73,7 @@ impl RepairService {
})
.unwrap();
RepairService {
t_repair,
cluster_info_repair_listener,
}
RepairService { t_repair }
}
fn run(
@ -116,6 +83,7 @@ impl RepairService {
cluster_info: &Arc<RwLock<ClusterInfo>>,
repair_strategy: RepairStrategy,
) {
let serve_repair = ServeRepair::new(cluster_info.clone());
let mut epoch_slots: BTreeSet<Slot> = BTreeSet::new();
let id = cluster_info.read().unwrap().id();
let mut current_root = 0;
@ -173,9 +141,7 @@ impl RepairService {
let reqs: Vec<_> = repairs
.into_iter()
.filter_map(|repair_request| {
cluster_info
.read()
.unwrap()
serve_repair
.repair_request(&repair_request)
.map(|result| (result, repair_request))
.ok()
@ -391,14 +357,7 @@ impl RepairService {
}
pub fn join(self) -> thread::Result<()> {
let mut results = vec![self.t_repair.join()];
if let Some(cluster_info_repair_listener) = self.cluster_info_repair_listener {
results.push(cluster_info_repair_listener.join());
}
for r in results {
r?;
}
Ok(())
self.t_repair.join()
}
}

View File

@ -6,6 +6,7 @@ use crate::{
consensus::{StakeLockout, Tower},
poh_recorder::PohRecorder,
result::Result,
rewards_recorder_service::RewardsRecorderSender,
rpc_subscriptions::RpcSubscriptions,
};
use solana_ledger::{
@ -79,6 +80,7 @@ pub struct ReplayStageConfig {
pub snapshot_package_sender: Option<SnapshotPackageSender>,
pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
pub transaction_status_sender: Option<TransactionStatusSender>,
pub rewards_recorder_sender: Option<RewardsRecorderSender>,
}
pub struct ReplayStage {
@ -181,6 +183,7 @@ impl ReplayStage {
snapshot_package_sender,
block_commitment_cache,
transaction_status_sender,
rewards_recorder_sender,
} = config;
let (root_bank_sender, root_bank_receiver) = channel();
@ -221,6 +224,7 @@ impl ReplayStage {
&bank_forks,
&leader_schedule_cache,
&subscriptions,
rewards_recorder_sender.clone(),
);
datapoint_debug!(
"replay_stage-memory",
@ -361,6 +365,7 @@ impl ReplayStage {
&poh_recorder,
&leader_schedule_cache,
&subscriptions,
rewards_recorder_sender.clone(),
);
if let Some(bank) = poh_recorder.lock().unwrap().bank() {
@ -434,6 +439,7 @@ impl ReplayStage {
poh_recorder: &Arc<Mutex<PohRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
subscriptions: &Arc<RpcSubscriptions>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
) {
// all the individual calls to poh_recorder.lock() are designed to
// increase granularity, decrease contention
@ -499,6 +505,7 @@ impl ReplayStage {
.unwrap()
.insert(Bank::new_from_parent(&parent, my_pubkey, poh_slot));
Self::record_rewards(&tpu_bank, &rewards_recorder_sender);
poh_recorder.lock().unwrap().set_bank(&tpu_bank);
} else {
error!("{} No next leader found", my_pubkey);
@ -527,6 +534,9 @@ impl ReplayStage {
let tx_count = tx_count_after - tx_count_before;
confirm_result.map_err(|err| {
// LedgerCleanupService should not be cleaning up anything
// that comes after the root, so we should not see any
// errors related to the slot being purged
let slot = bank.slot();
warn!("Fatal replay error in slot: {}, err: {:?}", slot, err);
datapoint_error!(
@ -931,6 +941,7 @@ impl ReplayStage {
forks_lock: &RwLock<BankForks>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
subscriptions: &Arc<RpcSubscriptions>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
) {
// Find the next slot that chains to the old slot
let forks = forks_lock.read().unwrap();
@ -966,10 +977,10 @@ impl ReplayStage {
forks.root()
);
subscriptions.notify_slot(child_slot, parent_slot, forks.root());
new_banks.insert(
child_slot,
Bank::new_from_parent(&parent_bank, &leader, child_slot),
);
let child_bank = Bank::new_from_parent(&parent_bank, &leader, child_slot);
Self::record_rewards(&child_bank, &rewards_recorder_sender);
new_banks.insert(child_slot, child_bank);
}
}
drop(forks);
@ -980,6 +991,16 @@ impl ReplayStage {
}
}
fn record_rewards(bank: &Bank, rewards_recorder_sender: &Option<RewardsRecorderSender>) {
if let Some(rewards_recorder_sender) = rewards_recorder_sender {
if let Some(ref rewards) = bank.rewards {
rewards_recorder_sender
.send((bank.slot(), rewards.iter().copied().collect()))
.unwrap_or_else(|err| warn!("rewards_recorder_sender failed: {:?}", err));
}
}
}
pub fn join(self) -> thread::Result<()> {
self.commitment_service.join()?;
self.t_replay.join().map(|_| ())
@ -1326,6 +1347,7 @@ pub(crate) mod tests {
&bank_forks,
&leader_schedule_cache,
&subscriptions,
None,
);
assert!(bank_forks.read().unwrap().get(1).is_some());
@ -1338,6 +1360,7 @@ pub(crate) mod tests {
&bank_forks,
&leader_schedule_cache,
&subscriptions,
None,
);
assert!(bank_forks.read().unwrap().get(1).is_some());
assert!(bank_forks.read().unwrap().get(2).is_some());

View File

@ -3,7 +3,6 @@
use crate::{
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
packet::Packets,
partition_cfg::PartitionCfg,
repair_service::RepairStrategy,
result::{Error, Result},
streamer::PacketReceiver,
@ -22,7 +21,7 @@ use solana_sdk::epoch_schedule::EpochSchedule;
use std::{
cmp,
net::UdpSocket,
sync::atomic::AtomicBool,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::channel,
sync::mpsc::RecvTimeoutError,
sync::Mutex,
@ -213,7 +212,7 @@ impl RetransmitStage {
exit: &Arc<AtomicBool>,
completed_slots_receiver: CompletedSlotsReceiver,
epoch_schedule: EpochSchedule,
cfg: Option<PartitionCfg>,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();
@ -245,7 +244,7 @@ impl RetransmitStage {
move |id, shred, working_bank, last_root| {
let is_connected = cfg
.as_ref()
.map(|x| x.is_connected(&working_bank, &leader_schedule_cache, shred))
.map(|x| x.load(Ordering::Relaxed))
.unwrap_or(true);
let rv = should_retransmit_and_persist(
shred,
@ -332,7 +331,7 @@ mod tests {
// it should send this over the sockets.
retransmit_sender.send(packets).unwrap();
let mut packets = Packets::new(vec![]);
packet::recv_from(&mut packets, &me_retransmit).unwrap();
packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert_eq!(packets.packets[0].meta.repair, false);
@ -348,7 +347,7 @@ mod tests {
let packets = Packets::new(vec![repair, Packet::default()]);
retransmit_sender.send(packets).unwrap();
let mut packets = Packets::new(vec![]);
packet::recv_from(&mut packets, &me_retransmit).unwrap();
packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert_eq!(packets.packets[0].meta.repair, false);
}

View File

@ -0,0 +1,67 @@
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_client::rpc_response::RpcReward;
use solana_ledger::blockstore::Blockstore;
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use std::{
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{self, Builder, JoinHandle},
time::Duration,
};
pub type RewardsRecorderReceiver = Receiver<(Slot, Vec<(Pubkey, i64)>)>;
pub type RewardsRecorderSender = Sender<(Slot, Vec<(Pubkey, i64)>)>;
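// The channel payload above is the slot plus each (recipient pubkey, lamport reward)
// the bank recorded for that slot.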
pub struct RewardsRecorderService {
thread_hdl: JoinHandle<()>,
}
impl RewardsRecorderService {
#[allow(clippy::new_ret_no_self)]
pub fn new(
rewards_receiver: RewardsRecorderReceiver,
blockstore: Arc<Blockstore>,
exit: &Arc<AtomicBool>,
) -> Self {
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("solana-rewards-writer".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
if let Err(RecvTimeoutError::Disconnected) =
Self::write_rewards(&rewards_receiver, &blockstore)
{
break;
}
})
.unwrap();
Self { thread_hdl }
}
fn write_rewards(
rewards_receiver: &RewardsRecorderReceiver,
blockstore: &Arc<Blockstore>,
) -> Result<(), RecvTimeoutError> {
let (slot, rewards) = rewards_receiver.recv_timeout(Duration::from_secs(1))?;
let rpc_rewards = rewards
.into_iter()
.map(|(pubkey, lamports)| RpcReward {
pubkey: pubkey.to_string(),
lamports,
})
.collect();
blockstore
.write_rewards(slot, rpc_rewards)
.expect("Expect database write to succeed");
Ok(())
}
pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}

View File

@ -45,21 +45,13 @@ fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
Ok(Response { context, value })
}
#[derive(Debug, Clone)]
#[derive(Debug, Default, Clone)]
pub struct JsonRpcConfig {
pub enable_validator_exit: bool, // Enable the 'validatorExit' command
pub enable_validator_exit: bool,
pub enable_get_confirmed_block: bool,
pub faucet_addr: Option<SocketAddr>,
}
impl Default for JsonRpcConfig {
fn default() -> Self {
Self {
enable_validator_exit: false,
faucet_addr: None,
}
}
}
#[derive(Clone)]
pub struct JsonRpcRequestProcessor {
bank_forks: Arc<RwLock<BankForks>>,
@ -345,7 +337,11 @@ impl JsonRpcRequestProcessor {
slot: Slot,
encoding: Option<RpcTransactionEncoding>,
) -> Result<Option<RpcConfirmedBlock>> {
Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
if self.config.enable_get_confirmed_block {
Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
} else {
Ok(None)
}
}
pub fn get_confirmed_blocks(
@ -383,7 +379,11 @@ impl JsonRpcRequestProcessor {
let stakes = HashMap::new();
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
Ok(self.blockstore.get_block_time(slot, slot_duration, stakes))
Ok(self
.blockstore
.get_block_time(slot, slot_duration, stakes)
.ok()
.unwrap_or(None))
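// `.ok().unwrap_or(None)` flattens the Result<Option<_>> returned by get_block_time
// into an Option, dropping blockstore errors.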
}
}
@ -724,11 +724,14 @@ impl RpcSol for RpcSolImpl {
None
}
}
let shred_version = cluster_info.my_data().shred_version;
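// Only report peers that share this node's shred version and expose a routable gossip address.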
Ok(cluster_info
.all_peers()
.iter()
.filter_map(|(contact_info, _)| {
if ContactInfo::is_valid_address(&contact_info.gossip) {
if shred_version == contact_info.shred_version
&& ContactInfo::is_valid_address(&contact_info.gossip)
{
Some(RpcContactInfo {
pubkey: contact_info.id.to_string(),
gossip: Some(contact_info.gossip),
@ -1117,6 +1120,7 @@ pub mod tests {
fee_calculator::DEFAULT_BURN_PERCENT,
hash::{hash, Hash},
instruction::InstructionError,
rpc_port,
signature::{Keypair, KeypairUtil},
system_transaction,
transaction::TransactionError,
@ -1247,7 +1251,10 @@ pub mod tests {
let _ = bank.process_transaction(&tx);
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
JsonRpcConfig::default(),
JsonRpcConfig {
enable_get_confirmed_block: true,
..JsonRpcConfig::default()
},
bank_forks.clone(),
block_commitment_cache.clone(),
blockstore,
@ -1355,8 +1362,9 @@ pub mod tests {
.expect("actual response deserialization");
let expected = format!(
r#"{{"jsonrpc":"2.0","result":[{{"pubkey": "{}", "gossip": "127.0.0.1:1235", "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:8899"}}],"id":1}}"#,
r#"{{"jsonrpc":"2.0","result":[{{"pubkey": "{}", "gossip": "127.0.0.1:1235", "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:{}"}}],"id":1}}"#,
leader_pubkey,
rpc_port::DEFAULT_RPC_PORT
);
let expected: Response =

View File

@ -131,6 +131,7 @@ impl JsonRpcService {
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
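// 86 400 s = 24 h: allow browsers to cache the CORS preflight response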
.cors_max_age(86400)
.request_middleware(RpcRequestMiddleware::new(ledger_path))
.start_http(&rpc_addr);
if let Err(e) = server {

View File

@ -95,10 +95,11 @@ where
let mut found = false;
subscriptions.retain(|_, v| {
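// Keep every entry except the one whose key matches `sub_id`; the previous
// `!found` return value also dropped every entry after the first match.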
v.retain(|k, _| {
if *k == *sub_id {
let retain = *k != *sub_id;
if !retain {
found = true;
}
!found
retain
});
!v.is_empty()
});
@ -622,6 +623,7 @@ pub(crate) mod tests {
.unwrap()
.contains_key(&solana_budget_program::id()));
}
#[test]
fn test_check_signature_subscribe() {
let GenesisConfigInfo {
@ -675,6 +677,7 @@ pub(crate) mod tests {
.unwrap()
.contains_key(&signature));
}
#[test]
fn test_check_slot_subscribe() {
let (subscriber, _id_receiver, transport_receiver) =
@ -713,4 +716,49 @@ pub(crate) mod tests {
.unwrap()
.contains_key(&sub_id));
}
#[test]
fn test_add_and_remove_subscription() {
let (subscriber, _id_receiver, _transport_receiver) = Subscriber::new_test("notification");
let sink = subscriber
.assign_id(SubscriptionId::String("test".to_string()))
.unwrap();
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, (Sink<()>, Confirmations)>> =
HashMap::new();
let num_keys = 5;
let mut next_id: u64 = 0;
for _ in 0..num_keys {
let key = next_id;
let sub_id = SubscriptionId::Number(next_id);
add_subscription(&mut subscriptions, &key, None, &sub_id, &sink.clone());
next_id += 1;
}
// Add another subscription to the "0" key
let sub_id = SubscriptionId::Number(next_id);
add_subscription(&mut subscriptions, &0, None, &sub_id, &sink.clone());
assert_eq!(subscriptions.len(), num_keys);
assert_eq!(subscriptions.get(&0).unwrap().len(), 2);
assert_eq!(subscriptions.get(&1).unwrap().len(), 1);
assert_eq!(
remove_subscription(&mut subscriptions, &SubscriptionId::Number(0)),
true
);
assert_eq!(subscriptions.len(), num_keys);
assert_eq!(subscriptions.get(&0).unwrap().len(), 1);
assert_eq!(
remove_subscription(&mut subscriptions, &SubscriptionId::Number(0)),
false
);
assert_eq!(
remove_subscription(&mut subscriptions, &SubscriptionId::Number(next_id)),
true
);
assert_eq!(subscriptions.len(), num_keys - 1);
assert!(subscriptions.get(&0).is_none());
}
}

core/src/serve_repair.rs (new file, 676 lines added)
View File

@ -0,0 +1,676 @@
use crate::packet::limited_deserialize;
use crate::streamer::{PacketReceiver, PacketSender};
use crate::{
cluster_info::{ClusterInfo, ClusterInfoError},
contact_info::ContactInfo,
packet::Packet,
result::Result,
};
use bincode::serialize;
use rand::{thread_rng, Rng};
use solana_ledger::blockstore::Blockstore;
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug};
use solana_perf::packet::{Packets, PacketsRecycler};
use solana_sdk::{
clock::Slot,
signature::{Keypair, KeypairUtil},
timing::duration_as_ms,
};
use std::{
net::SocketAddr,
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
thread::{Builder, JoinHandle},
time::{Duration, Instant},
};
/// the number of slots to respond with when responding to `Orphan` requests
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
pub enum RepairType {
Orphan(Slot),
HighestShred(Slot, u64),
Shred(Slot, u64),
}
impl RepairType {
pub fn slot(&self) -> Slot {
match self {
RepairType::Orphan(slot) => *slot,
RepairType::HighestShred(slot, _) => *slot,
RepairType::Shred(slot, _) => *slot,
}
}
}
/// Window protocol messages
#[derive(Serialize, Deserialize, Debug)]
enum RepairProtocol {
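// Every variant carries the requester's ContactInfo; WindowIndex and
// HighestWindowIndex also carry (slot, shred index), Orphan carries only a slot.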
WindowIndex(ContactInfo, u64, u64),
HighestWindowIndex(ContactInfo, u64, u64),
Orphan(ContactInfo, u64),
}
#[derive(Clone)]
pub struct ServeRepair {
/// set the keypair that will be used to sign repair responses
keypair: Arc<Keypair>,
my_info: ContactInfo,
cluster_info: Arc<RwLock<ClusterInfo>>,
}
impl ServeRepair {
/// Without a valid keypair gossip will not function. Only useful for tests.
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
Self::new(Arc::new(RwLock::new(
ClusterInfo::new_with_invalid_keypair(contact_info),
)))
}
pub fn new(cluster_info: Arc<RwLock<ClusterInfo>>) -> Self {
let (keypair, my_info) = {
let r_cluster_info = cluster_info.read().unwrap();
(r_cluster_info.keypair.clone(), r_cluster_info.my_data())
};
Self {
keypair,
my_info,
cluster_info,
}
}
pub fn my_info(&self) -> &ContactInfo {
&self.my_info
}
pub fn keypair(&self) -> &Arc<Keypair> {
&self.keypair
}
fn get_repair_sender(request: &RepairProtocol) -> &ContactInfo {
match request {
RepairProtocol::WindowIndex(ref from, _, _) => from,
RepairProtocol::HighestWindowIndex(ref from, _, _) => from,
RepairProtocol::Orphan(ref from, _) => from,
}
}
fn handle_repair(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
request: RepairProtocol,
) -> Option<Packets> {
let now = Instant::now();
//TODO verify from is signed
let my_id = me.read().unwrap().keypair.pubkey();
let from = Self::get_repair_sender(&request);
if from.id == my_id {
warn!(
"{}: Ignored received repair request from ME {}",
my_id, from.id,
);
inc_new_counter_debug!("serve_repair-handle-repair--eq", 1);
return None;
}
let (res, label) = {
match &request {
RepairProtocol::WindowIndex(from, slot, shred_index) => {
inc_new_counter_debug!("serve_repair-request-window-index", 1);
(
Self::run_window_request(
recycler,
from,
&from_addr,
blockstore,
&me.read().unwrap().my_info,
*slot,
*shred_index,
),
"WindowIndex",
)
}
RepairProtocol::HighestWindowIndex(_, slot, highest_index) => {
inc_new_counter_debug!("serve_repair-request-highest-window-index", 1);
(
Self::run_highest_window_request(
recycler,
&from_addr,
blockstore,
*slot,
*highest_index,
),
"HighestWindowIndex",
)
}
RepairProtocol::Orphan(_, slot) => {
inc_new_counter_debug!("serve_repair-request-orphan", 1);
(
Self::run_orphan(
recycler,
&from_addr,
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
),
"Orphan",
)
}
}
};
trace!("{}: received repair request: {:?}", my_id, request);
Self::report_time_spent(label, &now.elapsed(), "");
res
}
fn report_time_spent(label: &str, time: &Duration, extra: &str) {
let count = duration_as_ms(time);
if count > 5 {
info!("{} took: {} ms {}", label, count, extra);
}
}
/// Process messages from the network
fn run_listen(
obj: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
blockstore: Option<&Arc<Blockstore>>,
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
) -> Result<()> {
//TODO cache connections
let timeout = Duration::new(1, 0);
let reqs = requests_receiver.recv_timeout(timeout)?;
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
Ok(())
}
pub fn listen(
me: Arc<RwLock<Self>>,
blockstore: Option<Arc<Blockstore>>,
requests_receiver: PacketReceiver,
response_sender: PacketSender,
exit: &Arc<AtomicBool>,
) -> JoinHandle<()> {
let exit = exit.clone();
let recycler = PacketsRecycler::default();
Builder::new()
.name("solana-repair-listen".to_string())
.spawn(move || loop {
let e = Self::run_listen(
&me,
&recycler,
blockstore.as_ref(),
&requests_receiver,
&response_sender,
);
if exit.load(Ordering::Relaxed) {
return;
}
if e.is_err() {
info!("repair listener error: {:?}", e);
}
thread_mem_usage::datapoint("solana-repair-listen");
})
.unwrap()
}
fn handle_packets(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
blockstore: Option<&Arc<Blockstore>>,
packets: Packets,
response_sender: &PacketSender,
) {
// iterate over the packets; deserialize each repair request and respond when a response can be produced
let allocated = thread_mem_usage::Allocatedp::default();
packets.packets.iter().for_each(|packet| {
let start = allocated.get();
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size])
.into_iter()
.for_each(|request| {
let rsp = Self::handle_repair(me, recycler, &from_addr, blockstore, request);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
}
});
datapoint_debug!(
"solana-serve-repair-memory",
("serve_repair", (allocated.get() - start) as i64, i64),
);
});
}
fn window_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
let req = RepairProtocol::WindowIndex(self.my_info.clone(), slot, shred_index);
let out = serialize(&req)?;
Ok(out)
}
fn window_highest_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
let req = RepairProtocol::HighestWindowIndex(self.my_info.clone(), slot, shred_index);
let out = serialize(&req)?;
Ok(out)
}
fn orphan_bytes(&self, slot: Slot) -> Result<Vec<u8>> {
let req = RepairProtocol::Orphan(self.my_info.clone(), slot);
let out = serialize(&req)?;
Ok(out)
}
pub fn repair_request(&self, repair_request: &RepairType) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication and has the desired slot, as indicated
// by a valid tvu port location
let valid: Vec<_> = self
.cluster_info
.read()
.unwrap()
.repair_peers(repair_request.slot());
if valid.is_empty() {
return Err(ClusterInfoError::NoPeers.into());
}
let n = thread_rng().gen::<usize>() % valid.len();
let addr = valid[n].serve_repair; // send the request to the peer's serve_repair port
let out = self.map_repair_request(repair_request)?;
Ok((addr, out))
}
pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
match repair_request {
RepairType::Shred(slot, shred_index) => {
datapoint_debug!(
"serve_repair-repair",
("repair-slot", *slot, i64),
("repair-ix", *shred_index, i64)
);
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
}
RepairType::HighestShred(slot, shred_index) => {
datapoint_debug!(
"serve_repair-repair_highest",
("repair-highest-slot", *slot, i64),
("repair-highest-ix", *shred_index, i64)
);
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
}
RepairType::Orphan(slot) => {
datapoint_debug!("serve_repair-repair_orphan", ("repair-orphan", *slot, i64));
Ok(self.orphan_bytes(*slot)?)
}
}
}
fn run_window_request(
recycler: &PacketsRecycler,
from: &ContactInfo,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
me: &ContactInfo,
slot: Slot,
shred_index: u64,
) -> Option<Packets> {
if let Some(blockstore) = blockstore {
// Try to find the requested index in one of the slots
let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
if let Ok(Some(packet)) = packet {
inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
return Some(Packets::new_with_recycler_data(
recycler,
"run_window_request",
vec![packet],
));
}
}
inc_new_counter_debug!("serve_repair-window-request-fail", 1);
trace!(
"{}: failed WindowIndex {} {} {}",
me.id,
from.id,
slot,
shred_index,
);
None
}
fn run_highest_window_request(
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
highest_index: u64,
) -> Option<Packets> {
let blockstore = blockstore?;
// Try to find the requested index in one of the slots
let meta = blockstore.meta(slot).ok()??;
if meta.received > highest_index {
// meta.received must be at least 1 by this point
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
.ok()??;
return Some(Packets::new_with_recycler_data(
recycler,
"run_highest_window_request",
vec![packet],
));
}
None
}
fn run_orphan(
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
mut slot: Slot,
max_responses: usize,
) -> Option<Packets> {
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blockstore) = blockstore {
// Try to find the next "n" parent slots of the input slot
while let Ok(Some(meta)) = blockstore.meta(slot) {
if meta.received == 0 {
break;
}
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
if let Ok(Some(packet)) = packet {
res.packets.push(packet);
}
if meta.is_parent_set() && res.packets.len() <= max_responses {
slot = meta.parent_slot;
} else {
break;
}
}
}
if res.is_empty() {
return None;
}
Some(res)
}
fn get_data_shred_as_packet(
blockstore: &Arc<Blockstore>,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
) -> Result<Option<Packet>> {
let data = blockstore.get_data_shred(slot, shred_index)?;
Ok(data.map(|data| {
let mut packet = Packet::default();
packet.meta.size = data.len();
packet.meta.set_addr(dest);
packet.data.copy_from_slice(&data);
packet
}))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::result::Error;
use solana_ledger::get_tmp_ledger_path;
use solana_ledger::{
blockstore::make_many_slot_entries,
blockstore_processor::fill_blockstore_slot_with_ticks,
shred::{
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
},
};
use solana_sdk::{hash::Hash, pubkey::Pubkey, timing::timestamp};
/// test that run_highest_window_request responds with the right shred, and does not overrun
#[test]
fn run_highest_window_request() {
let recycler = PacketsRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv = ServeRepair::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
0,
0,
);
assert!(rv.is_none());
let _ = fill_blockstore_slot_with_ticks(
&blockstore,
max_ticks_per_n_shreds(1) + 1,
2,
1,
Hash::default(),
);
let rv = ServeRepair::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
2,
1,
);
let rv: Vec<Shred> = rv
.expect("packets")
.packets
.into_iter()
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.collect();
assert!(!rv.is_empty());
let index = blockstore.meta(2).unwrap().unwrap().received - 1;
assert_eq!(rv[0].index(), index as u32);
assert_eq!(rv[0].slot(), 2);
let rv = ServeRepair::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
2,
index + 1,
);
assert!(rv.is_none());
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
/// test window requests respond with the right shred, and do not overrun
#[test]
fn run_window_request() {
let recycler = PacketsRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let me = ContactInfo::new(
&Pubkey::new_rand(),
socketaddr!("127.0.0.1:1234"),
socketaddr!("127.0.0.1:1235"),
socketaddr!("127.0.0.1:1236"),
socketaddr!("127.0.0.1:1237"),
socketaddr!("127.0.0.1:1238"),
socketaddr!("127.0.0.1:1239"),
socketaddr!("127.0.0.1:1240"),
socketaddr!("127.0.0.1:1241"),
socketaddr!("127.0.0.1:1242"),
socketaddr!("127.0.0.1:1243"),
0,
);
let rv = ServeRepair::run_window_request(
&recycler,
&me,
&socketaddr_any!(),
Some(&blockstore),
&me,
0,
0,
);
assert!(rv.is_none());
let mut common_header = ShredCommonHeader::default();
common_header.slot = 2;
common_header.index = 1;
let mut data_header = DataShredHeader::default();
data_header.parent_offset = 1;
let shred_info = Shred::new_empty_from_header(
common_header,
data_header,
CodingShredHeader::default(),
);
blockstore
.insert_shreds(vec![shred_info], None, false)
.expect("Expect successful ledger write");
let rv = ServeRepair::run_window_request(
&recycler,
&me,
&socketaddr_any!(),
Some(&blockstore),
&me,
2,
1,
);
assert!(!rv.is_none());
let rv: Vec<Shred> = rv
.expect("packets")
.packets
.into_iter()
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.collect();
assert_eq!(rv[0].index(), 1);
assert_eq!(rv[0].slot(), 2);
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn window_index_request() {
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(me)));
let serve_repair = ServeRepair::new(cluster_info.clone());
let rv = serve_repair.repair_request(&RepairType::Shred(0, 0));
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
let serve_repair_addr = socketaddr!([127, 0, 0, 1], 1243);
let nxt = ContactInfo::new(
&Pubkey::new_rand(),
socketaddr!([127, 0, 0, 1], 1234),
socketaddr!([127, 0, 0, 1], 1235),
socketaddr!([127, 0, 0, 1], 1236),
socketaddr!([127, 0, 0, 1], 1237),
socketaddr!([127, 0, 0, 1], 1238),
socketaddr!([127, 0, 0, 1], 1239),
socketaddr!([127, 0, 0, 1], 1240),
socketaddr!([127, 0, 0, 1], 1241),
socketaddr!([127, 0, 0, 1], 1242),
serve_repair_addr,
0,
);
cluster_info.write().unwrap().insert_info(nxt.clone());
let rv = serve_repair
.repair_request(&RepairType::Shred(0, 0))
.unwrap();
assert_eq!(nxt.serve_repair, serve_repair_addr);
assert_eq!(rv.0, nxt.serve_repair);
let serve_repair_addr2 = socketaddr!([127, 0, 0, 2], 1243);
let nxt = ContactInfo::new(
&Pubkey::new_rand(),
socketaddr!([127, 0, 0, 1], 1234),
socketaddr!([127, 0, 0, 1], 1235),
socketaddr!([127, 0, 0, 1], 1236),
socketaddr!([127, 0, 0, 1], 1237),
socketaddr!([127, 0, 0, 1], 1238),
socketaddr!([127, 0, 0, 1], 1239),
socketaddr!([127, 0, 0, 1], 1240),
socketaddr!([127, 0, 0, 1], 1241),
socketaddr!([127, 0, 0, 1], 1242),
serve_repair_addr2,
0,
);
cluster_info.write().unwrap().insert_info(nxt);
let mut one = false;
let mut two = false;
while !one || !two {
//this randomly picks an option, so eventually it should pick both
let rv = serve_repair
.repair_request(&RepairType::Shred(0, 0))
.unwrap();
if rv.0 == serve_repair_addr {
one = true;
}
if rv.0 == serve_repair_addr2 {
two = true;
}
}
assert!(one && two);
}
#[test]
fn run_orphan() {
solana_logger::setup();
let recycler = PacketsRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
assert!(rv.is_none());
// Create slots 1, 2, 3 with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(1, 3, 5);
blockstore
.insert_shreds(shreds, None, false)
.expect("Expect successful ledger write");
// We don't have slot 4, so we don't know how to service this request
let rv =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
assert!(rv.is_none());
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
// for this request
let rv: Vec<_> =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
let expected: Vec<_> = (1..=3)
.rev()
.map(|slot| {
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
ServeRepair::get_data_shred_as_packet(
&blockstore,
slot,
index,
&socketaddr_any!(),
)
.unwrap()
.unwrap()
})
.collect();
assert_eq!(rv, expected)
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
}

View File

@ -0,0 +1,57 @@
use crate::serve_repair::ServeRepair;
use crate::streamer;
use solana_ledger::blockstore::Blockstore;
use solana_perf::recycler::Recycler;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::{self, JoinHandle};
pub struct ServeRepairService {
thread_hdls: Vec<JoinHandle<()>>,
}
impl ServeRepairService {
pub fn new(
serve_repair: &Arc<RwLock<ServeRepair>>,
blockstore: Option<Arc<Blockstore>>,
serve_repair_socket: UdpSocket,
exit: &Arc<AtomicBool>,
) -> Self {
let (request_sender, request_receiver) = channel();
let serve_repair_socket = Arc::new(serve_repair_socket);
trace!(
"ServeRepairService: id: {}, listening on: {:?}",
&serve_repair.read().unwrap().my_info().id,
serve_repair_socket.local_addr().unwrap()
);
let t_receiver = streamer::receiver(
serve_repair_socket.clone(),
&exit,
request_sender,
Recycler::default(),
"serve_repair_receiver",
);
let (response_sender, response_receiver) = channel();
let t_responder =
streamer::responder("serve-repairs", serve_repair_socket, response_receiver);
let t_listen = ServeRepair::listen(
serve_repair.clone(),
blockstore,
request_receiver,
response_sender,
exit,
);
let thread_hdls = vec![t_receiver, t_responder, t_listen];
Self { thread_hdls }
}
pub fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls {
thread_hdl.join()?;
}
Ok(())
}
}

View File

@ -49,7 +49,7 @@ fn recv_loop(
if exit.load(Ordering::Relaxed) {
return Ok(());
}
if let Ok(len) = packet::recv_from(&mut msgs, sock) {
if let Ok(len) = packet::recv_from(&mut msgs, sock, 1) {
if len == NUM_RCVMMSGS {
num_max_received += 1;
}

View File

@ -6,10 +6,10 @@ use crate::{
cluster_info::ClusterInfo,
commitment::BlockCommitmentCache,
ledger_cleanup_service::LedgerCleanupService,
partition_cfg::PartitionCfg,
poh_recorder::PohRecorder,
replay_stage::{ReplayStage, ReplayStageConfig},
retransmit_stage::RetransmitStage,
rewards_recorder_service::RewardsRecorderSender,
rpc_subscriptions::RpcSubscriptions,
shred_fetch_stage::ShredFetchStage,
sigverify_shreds::ShredSigVerifier,
@ -84,9 +84,10 @@ impl Tvu {
completed_slots_receiver: CompletedSlotsReceiver,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
sigverify_disabled: bool,
cfg: Option<PartitionCfg>,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
transaction_status_sender: Option<TransactionStatusSender>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
) -> Self {
let keypair: Arc<Keypair> = cluster_info
.read()
@ -171,6 +172,7 @@ impl Tvu {
snapshot_package_sender,
block_commitment_cache,
transaction_status_sender,
rewards_recorder_sender,
};
let (replay_stage, root_bank_receiver) = ReplayStage::new(
@ -313,6 +315,7 @@ pub mod tests {
None,
0,
None,
None,
);
exit.store(true, Ordering::Relaxed);
tvu.join().unwrap();

View File

@ -6,13 +6,15 @@ use crate::{
commitment::BlockCommitmentCache,
contact_info::ContactInfo,
gossip_service::{discover_cluster, GossipService},
partition_cfg::PartitionCfg,
poh_recorder::PohRecorder,
poh_service::PohService,
rewards_recorder_service::RewardsRecorderService,
rpc::JsonRpcConfig,
rpc_pubsub_service::PubSubService,
rpc_service::JsonRpcService,
rpc_subscriptions::RpcSubscriptions,
serve_repair::ServeRepair,
serve_repair_service::ServeRepairService,
sigverify,
storage_stage::StorageState,
tpu::Tpu,
@ -20,7 +22,6 @@ use crate::{
tvu::{Sockets, Tvu},
};
use crossbeam_channel::unbounded;
use solana_ledger::shred::Shred;
use solana_ledger::{
bank_forks::{BankForks, SnapshotConfig},
bank_forks_utils,
@ -29,13 +30,14 @@ use solana_ledger::{
create_new_tmp_ledger,
leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache,
shred_version::compute_shred_version,
};
use solana_metrics::datapoint_info;
use solana_runtime::{bank::Bank, hard_forks::HardForks};
use solana_runtime::bank::Bank;
use solana_sdk::{
clock::{Slot, DEFAULT_SLOTS_PER_TURN},
genesis_config::GenesisConfig,
hash::{extend_and_hash, Hash},
hash::Hash,
poh_config::PohConfig,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
@ -57,16 +59,17 @@ pub struct ValidatorConfig {
pub dev_sigverify_disabled: bool,
pub dev_halt_at_slot: Option<Slot>,
pub expected_genesis_hash: Option<Hash>,
pub expected_shred_version: Option<u16>,
pub voting_disabled: bool,
pub transaction_status_service_disabled: bool,
pub blockstream_unix_socket: Option<PathBuf>,
pub storage_slots_per_turn: u64,
pub account_paths: Vec<PathBuf>,
pub rpc_config: JsonRpcConfig,
pub rpc_ports: Option<(u16, u16)>, // (API, PubSub)
pub snapshot_config: Option<SnapshotConfig>,
pub max_ledger_slots: Option<u64>,
pub broadcast_stage_type: BroadcastStageType,
pub partition_cfg: Option<PartitionCfg>,
pub enable_partition: Option<Arc<AtomicBool>>,
pub fixed_leader_schedule: Option<FixedSchedule>,
pub wait_for_supermajority: bool,
pub new_hard_forks: Option<Vec<Slot>>,
@ -78,16 +81,17 @@ impl Default for ValidatorConfig {
dev_sigverify_disabled: false,
dev_halt_at_slot: None,
expected_genesis_hash: None,
expected_shred_version: None,
voting_disabled: false,
transaction_status_service_disabled: false,
blockstream_unix_socket: None,
storage_slots_per_turn: DEFAULT_SLOTS_PER_TURN,
max_ledger_slots: None,
account_paths: Vec::new(),
rpc_config: JsonRpcConfig::default(),
rpc_ports: None,
snapshot_config: None,
broadcast_stage_type: BroadcastStageType::Standard,
partition_cfg: None,
enable_partition: None,
fixed_leader_schedule: None,
wait_for_supermajority: false,
new_hard_forks: None,
@ -115,10 +119,11 @@ impl ValidatorExit {
pub struct Validator {
pub id: Pubkey,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
rpc_service: Option<JsonRpcService>,
rpc_pubsub_service: Option<PubSubService>,
rpc_service: Option<(JsonRpcService, PubSubService)>,
transaction_status_service: Option<TransactionStatusService>,
rewards_recorder_service: Option<RewardsRecorderService>,
gossip_service: GossipService,
serve_repair_service: ServeRepairService,
poh_recorder: Arc<Mutex<PohRecorder>>,
poh_service: PohService,
tpu: Tpu,
@ -172,6 +177,15 @@ impl Validator {
let exit = Arc::new(AtomicBool::new(false));
let bank_info = &bank_forks_info[0];
let bank = bank_forks[bank_info.bank_slot].clone();
info!("Starting validator from slot {}", bank.slot());
{
let hard_forks: Vec<_> = bank.hard_forks().read().unwrap().iter().copied().collect();
if !hard_forks.is_empty() {
info!("Hard forks: {:?}", hard_forks);
}
}
let bank_forks = Arc::new(RwLock::new(bank_forks));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
@ -182,9 +196,19 @@ impl Validator {
node.info.wallclock = timestamp();
node.info.shred_version =
compute_shred_version(&genesis_hash, &bank.hard_forks().read().unwrap());
compute_shred_version(&genesis_hash, Some(&bank.hard_forks().read().unwrap()));
Self::print_node_info(&node);
if let Some(expected_shred_version) = config.expected_shred_version {
if expected_shred_version != node.info.shred_version {
error!(
"shred version mismatch: expected {}",
expected_shred_version
);
process::exit(1);
}
}
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(
node.info.clone(),
keypair.clone(),
@ -198,39 +222,39 @@ impl Validator {
let blockstore = Arc::new(blockstore);
let rpc_service = if node.info.rpc.port() == 0 {
None
} else {
Some(JsonRpcService::new(
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), node.info.rpc.port()),
config.rpc_config.clone(),
bank_forks.clone(),
block_commitment_cache.clone(),
blockstore.clone(),
cluster_info.clone(),
genesis_hash,
ledger_path,
storage_state.clone(),
validator_exit.clone(),
))
};
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
let rpc_pubsub_service = if node.info.rpc_pubsub.port() == 0 {
None
} else {
Some(PubSubService::new(
&subscriptions,
SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
node.info.rpc_pubsub.port(),
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
assert_eq!(rpc_port, node.info.rpc.port());
assert_eq!(rpc_pubsub_port, node.info.rpc_pubsub.port());
} else {
assert!(!ContactInfo::is_valid_address(&node.info.rpc_pubsub));
}
(
JsonRpcService::new(
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_port),
config.rpc_config.clone(),
bank_forks.clone(),
block_commitment_cache.clone(),
blockstore.clone(),
cluster_info.clone(),
genesis_hash,
ledger_path,
storage_state.clone(),
validator_exit.clone(),
),
&exit,
))
};
PubSubService::new(
&subscriptions,
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_pubsub_port),
&exit,
),
)
});
let (transaction_status_sender, transaction_status_service) =
if rpc_service.is_some() && !config.transaction_status_service_disabled {
if rpc_service.is_some() && config.rpc_config.enable_get_confirmed_block {
let (transaction_status_sender, transaction_status_receiver) = unbounded();
(
Some(transaction_status_sender),
@ -244,6 +268,21 @@ impl Validator {
(None, None)
};
let (rewards_recorder_sender, rewards_recorder_service) =
if rpc_service.is_some() && config.rpc_config.enable_get_confirmed_block {
let (rewards_recorder_sender, rewards_receiver) = unbounded();
(
Some(rewards_recorder_sender),
Some(RewardsRecorderService::new(
rewards_receiver,
blockstore.clone(),
&exit,
)),
)
} else {
(None, None)
};
info!(
"Starting PoH: epoch={} slot={} tick_height={} blockhash={} leader={:?}",
bank.epoch(),
@ -281,12 +320,19 @@ impl Validator {
let gossip_service = GossipService::new(
&cluster_info,
Some(blockstore.clone()),
Some(bank_forks.clone()),
node.sockets.gossip,
&exit,
);
let serve_repair = Arc::new(RwLock::new(ServeRepair::new(cluster_info.clone())));
let serve_repair_service = ServeRepairService::new(
&serve_repair,
Some(blockstore.clone()),
node.sockets.serve_repair,
&exit,
);
// Insert the entrypoint info; it should only be None if this node
// is the bootstrap validator
if let Some(entrypoint_info) = entrypoint_info_option {
@ -296,47 +342,7 @@ impl Validator {
.set_entrypoint(entrypoint_info.clone());
}
if config.wait_for_supermajority {
info!(
"Waiting for more than 75% of activated stake at slot {} to be in gossip...",
bank.slot()
);
loop {
let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info);
info!("{}% of activated stake in gossip", gossip_stake_percent,);
if gossip_stake_percent > 75 {
break;
}
sleep(Duration::new(1, 0));
}
}
let sockets = Sockets {
repair: node
.sockets
.repair
.try_clone()
.expect("Failed to clone repair socket"),
retransmit: node
.sockets
.retransmit_sockets
.iter()
.map(|s| s.try_clone().expect("Failed to clone retransmit socket"))
.collect(),
fetch: node
.sockets
.tvu
.iter()
.map(|s| s.try_clone().expect("Failed to clone TVU Sockets"))
.collect(),
forwards: node
.sockets
.tvu_forwards
.iter()
.map(|s| s.try_clone().expect("Failed to clone TVU forwards Sockets"))
.collect(),
};
wait_for_supermajority(config, &bank, &cluster_info);
let voting_keypair = if config.voting_disabled {
None
@ -357,7 +363,31 @@ impl Validator {
storage_keypair,
&bank_forks,
&cluster_info,
sockets,
Sockets {
repair: node
.sockets
.repair
.try_clone()
.expect("Failed to clone repair socket"),
retransmit: node
.sockets
.retransmit_sockets
.iter()
.map(|s| s.try_clone().expect("Failed to clone retransmit socket"))
.collect(),
fetch: node
.sockets
.tvu
.iter()
.map(|s| s.try_clone().expect("Failed to clone TVU Sockets"))
.collect(),
forwards: node
.sockets
.tvu_forwards
.iter()
.map(|s| s.try_clone().expect("Failed to clone TVU forwards Sockets"))
.collect(),
},
blockstore.clone(),
&storage_state,
config.blockstream_unix_socket.as_ref(),
@ -370,9 +400,10 @@ impl Validator {
completed_slots_receiver,
block_commitment_cache,
config.dev_sigverify_disabled,
config.partition_cfg.clone(),
config.enable_partition.clone(),
node.info.shred_version,
transaction_status_sender.clone(),
rewards_recorder_sender,
);
if config.dev_sigverify_disabled {
@ -398,9 +429,10 @@ impl Validator {
Self {
id,
gossip_service,
serve_repair_service,
rpc_service,
rpc_pubsub_service,
transaction_status_service,
rewards_recorder_service,
tpu,
tvu,
poh_service,
@ -450,17 +482,20 @@ impl Validator {
pub fn join(self) -> Result<()> {
self.poh_service.join()?;
drop(self.poh_recorder);
if let Some(rpc_service) = self.rpc_service {
if let Some((rpc_service, rpc_pubsub_service)) = self.rpc_service {
rpc_service.join()?;
}
if let Some(rpc_pubsub_service) = self.rpc_pubsub_service {
rpc_pubsub_service.join()?;
}
if let Some(transaction_status_service) = self.transaction_status_service {
transaction_status_service.join()?;
}
if let Some(rewards_recorder_service) = self.rewards_recorder_service {
rewards_recorder_service.join()?;
}
self.gossip_service.join()?;
self.serve_repair_service.join()?;
self.tpu.join()?;
self.tvu.join()?;
self.ip_echo_server.shutdown_now();
@ -469,20 +504,6 @@ impl Validator {
}
}
fn compute_shred_version(genesis_hash: &Hash, hard_forks: &HardForks) -> u16 {
use byteorder::{ByteOrder, LittleEndian};
let mut hash = *genesis_hash;
for (slot, count) in hard_forks.iter() {
let mut buf = [0u8; 16];
LittleEndian::write_u64(&mut buf[..7], *slot);
LittleEndian::write_u64(&mut buf[8..], *count as u64);
hash = extend_and_hash(&hash, &buf);
}
Shred::version_from_hash(&hash)
}
fn new_banks_from_blockstore(
expected_genesis_hash: Option<Hash>,
blockstore_path: &Path,
@ -554,8 +575,48 @@ fn new_banks_from_blockstore(
)
}
fn wait_for_supermajority(
config: &ValidatorConfig,
bank: &Arc<Bank>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
) {
if !config.wait_for_supermajority {
return;
}
info!(
"Waiting for more than 75% of activated stake at slot {} to be in gossip...",
bank.slot()
);
loop {
let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info);
info!("{}% of activated stake in gossip", gossip_stake_percent,);
if gossip_stake_percent > 75 {
break;
}
sleep(Duration::new(1, 0));
}
}
pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, PathBuf) {
use crate::genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo};
let (node, contact_info, mint_keypair, ledger_path, _vote_pubkey) =
new_validator_for_tests_with_vote_pubkey();
(node, contact_info, mint_keypair, ledger_path)
}
pub fn new_validator_for_tests_with_vote_pubkey(
) -> (Validator, ContactInfo, Keypair, PathBuf, Pubkey) {
use crate::genesis_utils::BOOTSTRAP_VALIDATOR_LAMPORTS;
new_validator_for_tests_ex(0, BOOTSTRAP_VALIDATOR_LAMPORTS)
}
pub fn new_validator_for_tests_ex(
fees: u64,
bootstrap_validator_lamports: u64,
) -> (Validator, ContactInfo, Keypair, PathBuf, Pubkey) {
use crate::genesis_utils::{create_genesis_config_with_leader_ex, GenesisConfigInfo};
use solana_sdk::fee_calculator::FeeCalculator;
let node_keypair = Arc::new(Keypair::new());
let node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
@ -565,20 +626,28 @@ pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, PathBuf) {
mut genesis_config,
mint_keypair,
voting_keypair,
} = create_genesis_config_with_leader(1_000_000, &contact_info.id, 42);
} = create_genesis_config_with_leader_ex(
1_000_000,
&contact_info.id,
42,
bootstrap_validator_lamports,
);
genesis_config
.native_instruction_processors
.push(solana_budget_program!());
genesis_config.rent.lamports_per_byte_year = 1;
genesis_config.rent.exemption_threshold = 1.0;
genesis_config.fee_calculator = FeeCalculator::new(fees, 0);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let leader_voting_keypair = Arc::new(voting_keypair);
let storage_keypair = Arc::new(Keypair::new());
let mut config = ValidatorConfig::default();
config.transaction_status_service_disabled = true;
let config = ValidatorConfig {
rpc_ports: Some((node.info.rpc.port(), node.info.rpc_pubsub.port())),
..ValidatorConfig::default()
};
let node = Validator::new(
node,
&node_keypair,
@ -591,7 +660,13 @@ pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, PathBuf) {
&config,
);
discover_cluster(&contact_info.gossip, 1).expect("Node startup failed");
(node, contact_info, mint_keypair, ledger_path)
(
node,
contact_info,
mint_keypair,
ledger_path,
leader_voting_keypair.pubkey(),
)
}
fn report_target_features() {
@ -669,8 +744,13 @@ mod tests {
let voting_keypair = Arc::new(Keypair::new());
let storage_keypair = Arc::new(Keypair::new());
let mut config = ValidatorConfig::default();
config.transaction_status_service_disabled = true;
let config = ValidatorConfig {
rpc_ports: Some((
validator_node.info.rpc.port(),
validator_node.info.rpc_pubsub.port(),
)),
..ValidatorConfig::default()
};
let validator = Validator::new(
validator_node,
&Arc::new(validator_keypair),
@ -703,8 +783,13 @@ mod tests {
ledger_paths.push(validator_ledger_path.clone());
let voting_keypair = Arc::new(Keypair::new());
let storage_keypair = Arc::new(Keypair::new());
let mut config = ValidatorConfig::default();
config.transaction_status_service_disabled = true;
let config = ValidatorConfig {
rpc_ports: Some((
validator_node.info.rpc.port(),
validator_node.info.rpc_pubsub.port(),
)),
..ValidatorConfig::default()
};
Validator::new(
validator_node,
&Arc::new(validator_keypair),


@ -5,6 +5,7 @@ use solana_core::cluster_info;
use solana_core::contact_info::ContactInfo;
use solana_core::crds_gossip::*;
use solana_core::crds_gossip_error::CrdsGossipError;
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS;
use solana_core::crds_value::CrdsValueLabel;
use solana_core::crds_value::{CrdsData, CrdsValue};
@ -396,6 +397,9 @@ fn network_run_pull(
let mut convergance = 0f64;
let num = network.len();
let network_values: Vec<Node> = network.values().cloned().collect();
let mut timeouts = HashMap::new();
timeouts.insert(Pubkey::default(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS);
for t in start..end {
let now = t as u64 * 100;
let requests: Vec<_> = {
@ -448,7 +452,10 @@ fn network_run_pull(
node.lock()
.unwrap()
.mark_pull_request_creation_time(&from, now);
overhead += node.lock().unwrap().process_pull_response(&from, rsp, now);
overhead += node
.lock()
.unwrap()
.process_pull_response(&from, &timeouts, rsp, now);
});
(bytes, msgs, overhead)
})


@ -21,8 +21,7 @@ fn test_node(exit: &Arc<AtomicBool>) -> (Arc<RwLock<ClusterInfo>>, GossipService
test_node.info.clone(),
keypair,
)));
let gossip_service =
GossipService::new(&cluster_info, None, None, test_node.sockets.gossip, exit);
let gossip_service = GossipService::new(&cluster_info, None, test_node.sockets.gossip, exit);
let _ = cluster_info.read().unwrap().my_data();
(
cluster_info,


@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "0.23.0"
version = "0.23.3"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"


@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "0.23.0"
version = "0.23.3"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -19,10 +19,10 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.104"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-metrics = { path = "../metrics", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-metrics = { path = "../metrics", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
tokio = "0.1"
tokio-codec = "0.1"


@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "0.23.0"
version = "0.23.3"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,16 +10,16 @@ edition = "2018"
[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "0.23.0" }
solana-budget-program = { path = "../programs/budget", version = "0.23.0" }
solana-config-program = { path = "../programs/config", version = "0.23.0" }
solana-exchange-program = { path = "../programs/exchange", version = "0.23.0" }
solana-runtime = { path = "../runtime", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-stake-program = { path = "../programs/stake", version = "0.23.0" }
solana-storage-program = { path = "../programs/storage", version = "0.23.0" }
solana-vest-program = { path = "../programs/vest", version = "0.23.0" }
solana-vote-program = { path = "../programs/vote", version = "0.23.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "0.23.3" }
solana-budget-program = { path = "../programs/budget", version = "0.23.3" }
solana-config-program = { path = "../programs/config", version = "0.23.3" }
solana-exchange-program = { path = "../programs/exchange", version = "0.23.3" }
solana-runtime = { path = "../runtime", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-stake-program = { path = "../programs/stake", version = "0.23.3" }
solana-storage-program = { path = "../programs/storage", version = "0.23.3" }
solana-vest-program = { path = "../programs/vest", version = "0.23.3" }
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
[lib]
crate-type = ["lib"]


@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "0.23.0"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -17,13 +17,13 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.23.0" }
solana-ledger = { path = "../ledger", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-stake-program = { path = "../programs/stake", version = "0.23.0" }
solana-storage-program = { path = "../programs/storage", version = "0.23.0" }
solana-vote-program = { path = "../programs/vote", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.23.3" }
solana-ledger = { path = "../ledger", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-stake-program = { path = "../programs/stake", version = "0.23.3" }
solana-storage-program = { path = "../programs/storage", version = "0.23.3" }
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
tempfile = "3.1.0"
[[bin]]


@ -55,7 +55,7 @@ pub const BATCH_FOUR_STAKER_INFOS: &[StakerInfo] = &[
},
StakerInfo {
name: "unbecoming silver",
staker: "42yapY7Vrs5jqht9TCKZsPoyb4vDFYcPfRkqAP85NSAQ",
staker: "4AcoZa1P8fF5XK21RJsiuMRZPEScbbWNc75oakRFHiBz",
lamports: 28_800 * LAMPORTS_PER_SOL,
},
StakerInfo {
@ -220,7 +220,6 @@ pub const VALIDATOR_PUBKEYS: &[&str] = &[
"7v5DXDvYzkgTdFYXYB12ZLKD6z8QfzR53N9hg6XgEQJE", // Cryptium Labs GmbH
"8LSwP5qYbmuUfKLGwi8XaKJnai9HyZAJTnBovyWebRfd", //
"8UPb8LMWyoJJC9Aeq9QmTzKZKV2ssov739bTJ14M4ws1", //
"8oRw7qpj6XgLGXYCDuNoTMCqoJnDd6A8LTpNyqApSfkA", //
"8wFK4fCAuDoAH1fsgou9yKZPqDMFtJUVoDdkZAAMuhyA", // LunaNova Technologies Ltd
"94eWgQm2k8BXKEWbJP2eScHZeKopXpqkuoVrCofQWBhW", // Node A-Team
"9J8WcnXxo3ArgEwktfk9tsrf4Rp8h5uPUgnQbQHLvtkd", // moonli.me


@ -7,7 +7,10 @@ use solana_clap_utils::{
input_validators::{is_rfc3339_datetime, is_valid_percentage},
};
use solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account};
use solana_ledger::{blockstore::create_new_ledger, poh::compute_hashes_per_tick};
use solana_ledger::{
blockstore::create_new_ledger, poh::compute_hashes_per_tick,
shred_version::compute_shred_version,
};
use solana_sdk::{
account::Account,
clock,
@ -521,10 +524,19 @@ fn main() -> Result<(), Box<dyn error::Error>> {
create_new_ledger(&ledger_path, &genesis_config)?;
println!(
"Genesis hash: {}\nCreation time: {}\nOperating mode: {:?}\nHashes per tick: {:?}\nSlots per epoch: {}\nCapitalization: {} SOL in {} accounts",
genesis_config.hash(),
"\
Creation time: {}\n\
Operating mode: {:?}\n\
Genesis hash: {}\n\
Shred version: {}\n\
Hashes per tick: {:?}\n\
Slots per epoch: {}\n\
Capitalization: {} SOL in {} accounts\
",
Utc.timestamp(genesis_config.creation_time, 0).to_rfc3339(),
operating_mode,
genesis_config.hash(),
compute_shred_version(&genesis_config.hash(), None),
genesis_config.poh_config.hashes_per_tick,
slots_per_epoch,
lamports_to_sol(
@ -537,7 +549,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
}
account.lamports
})
.sum::<u64>()),
.sum::<u64>()
),
genesis_config.accounts.len()
);


@ -3,19 +3,19 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "0.23.0"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-core = { path = "../core", version = "0.23.0" }
solana-client = { path = "../client", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-net-utils = { path = "../net-utils", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-core = { path = "../core", version = "0.23.3" }
solana-client = { path = "../client", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-net-utils = { path = "../net-utils", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }


@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-install"
description = "The solana cluster software installer"
version = "0.23.0"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -26,11 +26,11 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-client = { path = "../client", version = "0.23.0" }
solana-config-program = { path = "../programs/config", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-client = { path = "../client", version = "0.23.3" }
solana-config-program = { path = "../programs/config", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
tar = "0.4.26"
tempdir = "0.3.7"
url = "2.1.1"


@ -6,6 +6,7 @@ use crate::{
use chrono::{Local, TimeZone};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use serde_derive::Deserialize;
use solana_client::rpc_client::RpcClient;
use solana_config_program::{config_instruction, get_config_data, ConfigState};
use solana_sdk::{
@ -25,6 +26,13 @@ use std::{
use tempdir::TempDir;
use url::Url;
#[derive(Deserialize, Debug)]
pub struct ReleaseVersion {
pub target: String,
pub commit: String,
channel: String,
}
static TRUCK: Emoji = Emoji("🚚 ", "");
static LOOKING_GLASS: Emoji = Emoji("🔍 ", "");
static BULLET: Emoji = Emoji("", "* ");
@ -46,15 +54,15 @@ fn println_name_value(name: &str, value: &str) {
println!("{} {}", style(name).bold(), value);
}
/// Downloads the release archive at `url` to a temporary location. If `expected_sha256` is
/// Some(_), produce an error if the release SHA256 doesn't match.
/// Downloads a file at `url` to a temporary location. If `expected_sha256` is
/// Some(_), produce an error if the SHA256 of the file contents doesn't match.
///
/// Returns a tuple consisting of:
/// * TempDir - drop this value to clean up the temporary location
/// * PathBuf - path to the downloaded release (within `TempDir`)
/// * PathBuf - path to the downloaded file (within `TempDir`)
/// * Hash - SHA256 of the downloaded file
///
fn download_to_temp_archive(
fn download_to_temp(
url: &str,
expected_sha256: Option<&Hash>,
) -> Result<(TempDir, PathBuf, Hash), Box<dyn std::error::Error>> {
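A minimal usage sketch for the renamed download_to_temp helper (the URL below is a placeholder, not taken from this diff):
// Hedged sketch: the URL is illustrative; errors are surfaced the same way the callers further down do.
let url = "https://release.solana.com/edge/solana-release-x86_64-unknown-linux-gnu.tar.bz2";
let (temp_dir, temp_file, sha256) = download_to_temp(url, None)
    .map_err(|err| format!("Unable to download {}: {}", url, err))?;
println!("Downloaded {} (sha256: {})", temp_file.display(), sha256);
drop(temp_dir); // dropping the TempDir removes the downloaded file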
@ -77,7 +85,7 @@ fn download_to_temp_archive(
let url = Url::parse(url).map_err(|err| format!("Unable to parse {}: {}", url, err))?;
let temp_dir = TempDir::new(clap::crate_name!())?;
let temp_file = temp_dir.path().join("release.tar.bz2");
let temp_file = temp_dir.path().join("download");
let client = reqwest::blocking::Client::new();
@ -162,22 +170,21 @@ fn extract_release_archive(
Ok(())
}
/// Reads the supported TARGET triple for the given release
fn load_release_target(release_dir: &Path) -> Result<String, Box<dyn std::error::Error>> {
use serde_derive::Deserialize;
#[derive(Deserialize, Debug)]
pub struct ReleaseVersion {
pub target: String,
pub commit: String,
channel: String,
}
fn load_release_version(version_yml: &Path) -> Result<ReleaseVersion, String> {
let file = File::open(&version_yml)
.map_err(|err| format!("Unable to open {:?}: {:?}", version_yml, err))?;
let version: ReleaseVersion = serde_yaml::from_reader(file)
.map_err(|err| format!("Unable to parse {:?}: {:?}", version_yml, err))?;
Ok(version)
}
/// Reads the supported TARGET triple for the given release
fn load_release_target(release_dir: &Path) -> Result<String, String> {
let mut version_yml = PathBuf::from(release_dir);
version_yml.push("solana-release");
version_yml.push("version.yml");
let file = File::open(&version_yml)?;
let version: ReleaseVersion = serde_yaml::from_reader(file)?;
let version = load_release_version(&version_yml)?;
Ok(version.target)
}
@ -554,6 +561,14 @@ fn release_channel_download_url(release_channel: &str) -> String {
)
}
fn release_channel_version_url(release_channel: &str) -> String {
format!(
"http://release.solana.com/{}/solana-release-{}.yml",
release_channel,
crate::build_env::TARGET
)
}
pub fn info(config_file: &str, local_info_only: bool) -> Result<Option<UpdateManifest>, String> {
let config = Config::load(config_file)?;
@ -663,9 +678,8 @@ pub fn deploy(
}
// Download the release
let (temp_dir, temp_archive, temp_archive_sha256) =
download_to_temp_archive(download_url, None)
.map_err(|err| format!("Unable to download {}: {}", download_url, err))?;
let (temp_dir, temp_archive, temp_archive_sha256) = download_to_temp(download_url, None)
.map_err(|err| format!("Unable to download {}: {}", download_url, err))?;
if let Ok(update_manifest) = get_update_manifest(&rpc_client, &update_manifest_keypair.pubkey())
{
@ -743,31 +757,57 @@ pub fn update(config_file: &str) -> Result<bool, String> {
let update_manifest = info(config_file, false)?;
let release_dir = if let Some(explicit_release) = &config.explicit_release {
let (download, release_dir) = match explicit_release {
let (download_url, release_dir) = match explicit_release {
ExplicitRelease::Semver(release_semver) => {
let download_url = github_release_download_url(release_semver);
let release_dir = config.release_dir(&release_semver);
let download = if release_dir.join(".ok").exists() {
let download_url = if release_dir.join(".ok").exists() {
// If this release_semver has already been successfully downloaded, no update
// needed
println!("{} is present, no download required.", release_semver);
None
} else {
Some(download_url)
};
(download, release_dir)
(download_url, release_dir)
}
ExplicitRelease::Channel(release_channel) => {
let download_url = release_channel_download_url(release_channel);
let release_dir = config.release_dir(&release_channel);
// Note: There's currently no mechanism to check for an updated binary for a release
// channel so a download always occurs.
(Some(download_url), release_dir)
let current_release_version_yml =
release_dir.join("solana-release").join("version.yml");
let download_url = Some(release_channel_download_url(release_channel));
if !current_release_version_yml.exists() {
(download_url, release_dir)
} else {
let version_url = release_channel_version_url(release_channel);
let (_temp_dir, temp_file, _temp_archive_sha256) =
download_to_temp(&version_url, None).map_err(|err| {
format!("Unable to download {}: {}", version_url, err)
})?;
let update_release_version = load_release_version(&temp_file)?;
let current_release_version =
load_release_version(&current_release_version_yml)?;
if update_release_version.commit == current_release_version.commit {
// Same commit, no update required
println!(
"Latest {} build is already present, no download required.",
release_channel
);
(None, release_dir)
} else {
(download_url, release_dir)
}
}
}
};
if let Some(download_url) = download {
if let Some(download_url) = download_url {
let (_temp_dir, temp_archive, _temp_archive_sha256) =
download_to_temp_archive(&download_url, None)
download_to_temp(&download_url, None)
.map_err(|err| format!("Unable to download {}: {}", download_url, err))?;
extract_release_archive(&temp_archive, &release_dir).map_err(|err| {
format!(
@ -797,7 +837,7 @@ pub fn update(config_file: &str) -> Result<bool, String> {
}
}
let release_dir = config.release_dir(&update_manifest.download_sha256.to_string());
let (_temp_dir, temp_archive, _temp_archive_sha256) = download_to_temp_archive(
let (_temp_dir, temp_archive, _temp_archive_sha256) = download_to_temp(
&update_manifest.download_url,
Some(&update_manifest.download_sha256),
)
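To make the new release-channel update check concrete, here is a hedged sketch of the commit comparison; the channel name and directory layout are assumptions, not taken from this diff:
// Hedged sketch: "edge" and the paths are illustrative.
let release_dir = config.release_dir("edge");
let current_yml = release_dir.join("solana-release").join("version.yml");
let version_url = release_channel_version_url("edge");
let (_temp_dir, remote_yml, _sha256) = download_to_temp(&version_url, None)
    .map_err(|err| format!("Unable to download {}: {}", version_url, err))?;
if load_release_version(&remote_yml)?.commit == load_release_version(&current_yml)?.commit {
    println!("Latest edge build is already present, no download required.");
}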


@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "0.23.0"
version = "0.23.3"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,9 +13,10 @@ bs58 = "0.3.0"
clap = "2.33"
dirs = "2.0.2"
num_cpus = "1.12.0"
rpassword = "4.0"
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-cli-config = { path = "../cli-config", version = "0.23.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
tiny-bip39 = "0.7.0"
[[bin]]


@ -5,11 +5,20 @@ use clap::{
SubCommand,
};
use num_cpus;
use solana_clap_utils::keypair::{
keypair_from_seed_phrase, prompt_passphrase, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
use solana_clap_utils::{
input_parsers::derivation_of,
input_validators::is_derivation,
keypair::{
keypair_from_seed_phrase, prompt_passphrase, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
},
};
use solana_cli_config::config::{Config, CONFIG_FILE};
use solana_remote_wallet::{
ledger::get_ledger_from_info,
remote_wallet::{RemoteWallet, RemoteWalletInfo},
};
use solana_sdk::{
pubkey::write_pubkey_file,
pubkey::{write_pubkey_file, Pubkey},
signature::{
keypair_from_seed, read_keypair, read_keypair_file, write_keypair, write_keypair_file,
Keypair, KeypairUtil, Signature,
@ -21,7 +30,7 @@ use std::{
path::Path,
process::exit,
sync::{
atomic::{AtomicU64, Ordering},
atomic::{AtomicBool, AtomicU64, Ordering},
Arc,
},
thread,
@ -30,6 +39,12 @@ use std::{
const NO_PASSPHRASE: &str = "";
struct GrindMatch {
starts: String,
ends: String,
count: AtomicU64,
}
fn check_for_overwrite(outfile: &str, matches: &ArgMatches) {
let force = matches.is_present("force");
if !force && Path::new(outfile).exists() {
@ -38,23 +53,64 @@ fn check_for_overwrite(outfile: &str, matches: &ArgMatches) {
}
}
fn get_keypair_from_matches(matches: &ArgMatches) -> Result<Keypair, Box<dyn error::Error>> {
fn get_keypair_from_matches(
matches: &ArgMatches,
config: Config,
) -> Result<Keypair, Box<dyn error::Error>> {
let mut path = dirs::home_dir().expect("home directory");
let infile = if matches.is_present("infile") {
matches.value_of("infile").unwrap()
let keypair = if matches.is_present("keypair") {
matches.value_of("keypair").unwrap()
} else if config.keypair_path != "" {
&config.keypair_path
} else {
path.extend(&[".config", "solana", "id.json"]);
path.to_str().unwrap()
};
if infile == "-" {
if keypair == "-" {
let mut stdin = std::io::stdin();
read_keypair(&mut stdin)
} else if infile == ASK_KEYWORD {
} else if keypair == ASK_KEYWORD {
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
keypair_from_seed_phrase("pubkey recovery", skip_validation, false)
} else if keypair.starts_with("usb://") {
Err(String::from("Remote wallet signing not yet implemented").into())
} else {
read_keypair_file(infile)
read_keypair_file(keypair)
}
}
fn get_pubkey_from_matches(
matches: &ArgMatches,
config: Config,
) -> Result<Pubkey, Box<dyn error::Error>> {
let mut path = dirs::home_dir().expect("home directory");
let keypair = if matches.is_present("keypair") {
matches.value_of("keypair").unwrap()
} else if config.keypair_path != "" {
&config.keypair_path
} else {
path.extend(&[".config", "solana", "id.json"]);
path.to_str().unwrap()
};
if keypair == "-" {
let mut stdin = std::io::stdin();
read_keypair(&mut stdin).map(|keypair| keypair.pubkey())
} else if keypair == ASK_KEYWORD {
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
keypair_from_seed_phrase("pubkey recovery", skip_validation, false)
.map(|keypair| keypair.pubkey())
} else if keypair.starts_with("usb://") {
let (remote_wallet_info, mut derivation_path) =
RemoteWalletInfo::parse_path(keypair.to_string())?;
if let Some(derivation) = derivation_of(matches, "derivation_path") {
derivation_path = derivation;
}
let ledger = get_ledger_from_info(remote_wallet_info)?;
Ok(ledger.get_pubkey(&derivation_path)?)
} else {
read_keypair_file(keypair).map(|keypair| keypair.pubkey())
}
}
@ -73,28 +129,152 @@ fn output_keypair(
Ok(())
}
fn grind_validator_starts_with(v: String) -> Result<(), String> {
if v.matches(':').count() != 1 || (v.starts_with(':') || v.ends_with(':')) {
return Err(String::from("Expected : between PREFIX and COUNT"));
}
let args: Vec<&str> = v.split(':').collect();
bs58::decode(&args[0])
.into_vec()
.map_err(|err| format!("{}: {:?}", args[0], err))?;
let count = args[1].parse::<u64>();
if count.is_err() || count.unwrap() == 0 {
return Err(String::from("Expected COUNT to be of type u64"));
}
Ok(())
}
fn grind_validator_ends_with(v: String) -> Result<(), String> {
if v.matches(':').count() != 1 || (v.starts_with(':') || v.ends_with(':')) {
return Err(String::from("Expected : between SUFFIX and COUNT"));
}
let args: Vec<&str> = v.split(':').collect();
bs58::decode(&args[0])
.into_vec()
.map_err(|err| format!("{}: {:?}", args[0], err))?;
let count = args[1].parse::<u64>();
if count.is_err() || count.unwrap() == 0 {
return Err(String::from("Expected COUNT to be of type u64"));
}
Ok(())
}
fn grind_validator_starts_and_ends_with(v: String) -> Result<(), String> {
if v.matches(':').count() != 2 || (v.starts_with(':') || v.ends_with(':')) {
return Err(String::from(
"Expected : between PREFIX and SUFFIX and COUNT",
));
}
let args: Vec<&str> = v.split(':').collect();
bs58::decode(&args[0])
.into_vec()
.map_err(|err| format!("{}: {:?}", args[0], err))?;
bs58::decode(&args[1])
.into_vec()
.map_err(|err| format!("{}: {:?}", args[1], err))?;
let count = args[2].parse::<u64>();
if count.is_err() || count.unwrap() == 0 {
return Err(String::from("Expected COUNT to be a u64"));
}
Ok(())
}
fn grind_print_info(grind_matches: &[GrindMatch]) {
println!("Searching with {} threads for:", num_cpus::get());
for gm in grind_matches {
let mut msg = Vec::<String>::new();
if gm.count.load(Ordering::Relaxed) > 1 {
msg.push("pubkeys".to_string());
msg.push("start".to_string());
msg.push("end".to_string());
} else {
msg.push("pubkey".to_string());
msg.push("starts".to_string());
msg.push("ends".to_string());
}
println!(
"\t{} {} that {} with '{}' and {} with '{}'",
gm.count.load(Ordering::Relaxed),
msg[0],
msg[1],
gm.starts,
msg[2],
gm.ends
);
}
}
fn grind_parse_args(
starts_with_args: HashSet<String>,
ends_with_args: HashSet<String>,
starts_and_ends_with_args: HashSet<String>,
) -> Vec<GrindMatch> {
let mut grind_matches = Vec::<GrindMatch>::new();
for sw in starts_with_args {
let args: Vec<&str> = sw.split(':').collect();
grind_matches.push(GrindMatch {
starts: args[0].to_lowercase(),
ends: "".to_string(),
count: AtomicU64::new(args[1].parse::<u64>().unwrap()),
});
}
for ew in ends_with_args {
let args: Vec<&str> = ew.split(':').collect();
grind_matches.push(GrindMatch {
starts: "".to_string(),
ends: args[0].to_lowercase(),
count: AtomicU64::new(args[1].parse::<u64>().unwrap()),
});
}
for swew in starts_and_ends_with_args {
let args: Vec<&str> = swew.split(':').collect();
grind_matches.push(GrindMatch {
starts: args[0].to_lowercase(),
ends: args[1].to_lowercase(),
count: AtomicU64::new(args[2].parse::<u64>().unwrap()),
});
}
grind_print_info(&grind_matches);
grind_matches
}
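A hedged example of what grind_parse_args produces; the CLI values below are illustrative, not part of this diff:
// Hedged sketch: values as if the user passed `--starts-with sol:4 --ends-with ana:2`.
let starts_with_args: HashSet<String> = vec!["sol:4".to_string()].into_iter().collect();
let ends_with_args: HashSet<String> = vec!["ana:2".to_string()].into_iter().collect();
let grind_matches = grind_parse_args(starts_with_args, ends_with_args, HashSet::new());
// Yields GrindMatch { starts: "sol", ends: "", count: 4 } and
// GrindMatch { starts: "", ends: "ana", count: 2 }; the AtomicU64 counts are
// decremented by the worker threads as matching keypairs are written out.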
fn main() -> Result<(), Box<dyn error::Error>> {
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_clap_utils::version!())
.setting(AppSettings::SubcommandRequiredElseHelp)
.arg({
let arg = Arg::with_name("config_file")
.short("C")
.long("config")
.value_name("PATH")
.takes_value(true)
.global(true)
.help("Configuration file to use");
if let Some(ref config_file) = *CONFIG_FILE {
arg.default_value(&config_file)
} else {
arg
}
})
.subcommand(
SubCommand::with_name("verify")
.about("Verify a keypair can sign and verify a message.")
.arg(
Arg::with_name("infile")
Arg::with_name("pubkey")
.index(1)
.value_name("BASE58_PUBKEY")
.takes_value(true)
.required(true)
.help("Public key"),
)
.arg(
Arg::with_name("keypair")
.index(2)
.value_name("PATH")
.takes_value(true)
.help("Path to keypair file"),
)
.arg(
Arg::with_name("pubkey")
.index(2)
.value_name("BASE58_PUBKEY")
.takes_value(true)
.help("Public key"),
)
)
.subcommand(
SubCommand::with_name("new")
@ -148,33 +328,37 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.arg(
Arg::with_name("ignore_case")
.long("ignore-case")
.help("Perform case insensitive matches"),
)
.arg(
Arg::with_name("includes")
.long("includes")
.value_name("BASE58")
.takes_value(true)
.multiple(true)
.validator(|value| {
bs58::decode(&value).into_vec()
.map(|_| ())
.map_err(|err| format!("{}: {:?}", value, err))
})
.help("Save keypair if its public key includes this string\n(may be specified multiple times)"),
.help("Performs case insensitive matches"),
)
.arg(
Arg::with_name("starts_with")
.long("starts-with")
.value_name("BASE58 PREFIX")
.value_name("PREFIX:COUNT")
.number_of_values(1)
.takes_value(true)
.multiple(true)
.validator(|value| {
bs58::decode(&value).into_vec()
.map(|_| ())
.map_err(|err| format!("{}: {:?}", value, err))
})
.help("Save keypair if its public key starts with this prefix\n(may be specified multiple times)"),
.validator(grind_validator_starts_with)
.help("Saves specified number of keypairs whos public key starts with the indicated prefix\nExample: --starts-with sol:4\nPREFIX type is Base58\nCOUNT type is u64"),
)
.arg(
Arg::with_name("ends_with")
.long("ends-with")
.value_name("SUFFIX:COUNT")
.number_of_values(1)
.takes_value(true)
.multiple(true)
.validator(grind_validator_ends_with)
.help("Saves specified number of keypairs whos public key ends with the indicated suffix\nExample: --ends-with ana:4\nSUFFIX type is Base58\nCOUNT type is u64"),
)
.arg(
Arg::with_name("starts_and_ends_with")
.long("starts-and-ends-with")
.value_name("PREFIX:SUFFIX:COUNT")
.number_of_values(1)
.takes_value(true)
.multiple(true)
.validator(grind_validator_starts_and_ends_with)
.help("Saves specified number of keypairs whos public key starts and ends with the indicated perfix and suffix\nExample: --starts-and-ends-with sol:ana:4\nPREFIX and SUFFIX type is Base58\nCOUNT type is u64"),
),
)
.subcommand(
@ -182,11 +366,11 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.about("Display the pubkey from a keypair file")
.setting(AppSettings::DisableVersion)
.arg(
Arg::with_name("infile")
Arg::with_name("keypair")
.index(1)
.value_name("PATH")
.takes_value(true)
.help("Path to keypair file"),
.help("Path to keypair file or remote wallet"),
)
.arg(
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
@ -206,6 +390,14 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.short("f")
.long("force")
.help("Overwrite the output file if it exists"),
)
.arg(
Arg::with_name("derivation_path")
.long("derivation-path")
.value_name("ACCOUNT or ACCOUNT/CHANGE")
.takes_value(true)
.validator(is_derivation)
.help("Derivation path to use: m/44'/501'/ACCOUNT'/CHANGE'; default key is device base pubkey: m/44'/501'/0'")
),
)
.subcommand(
@ -234,17 +426,22 @@ fn main() -> Result<(), Box<dyn error::Error>> {
)
.get_matches();
let config = if let Some(config_file) = matches.value_of("config_file") {
Config::load(config_file).unwrap_or_default()
} else {
Config::default()
};
match matches.subcommand() {
("pubkey", Some(matches)) => {
let keypair = get_keypair_from_matches(matches)?;
let pubkey = get_pubkey_from_matches(matches, config)?;
if matches.is_present("outfile") {
let outfile = matches.value_of("outfile").unwrap();
check_for_overwrite(&outfile, &matches);
write_pubkey_file(outfile, keypair.pubkey())?;
write_pubkey_file(outfile, pubkey)?;
} else {
println!("{}", keypair.pubkey());
println!("{}", pubkey);
}
}
("new", Some(matches)) => {
@ -311,16 +508,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
}
("grind", Some(matches)) => {
let ignore_case = matches.is_present("ignore_case");
let includes = if matches.is_present("includes") {
values_t_or_exit!(matches, "includes", String)
.into_iter()
.map(|s| if ignore_case { s.to_lowercase() } else { s })
.collect()
} else {
HashSet::new()
};
let starts_with = if matches.is_present("starts_with") {
let starts_with_args = if matches.is_present("starts_with") {
values_t_or_exit!(matches, "starts_with", String)
.into_iter()
.map(|s| if ignore_case { s.to_lowercase() } else { s })
@ -328,68 +517,101 @@ fn main() -> Result<(), Box<dyn error::Error>> {
} else {
HashSet::new()
};
let ends_with_args = if matches.is_present("ends_with") {
values_t_or_exit!(matches, "ends_with", String)
.into_iter()
.map(|s| if ignore_case { s.to_lowercase() } else { s })
.collect()
} else {
HashSet::new()
};
let starts_and_ends_with_args = if matches.is_present("starts_and_ends_with") {
values_t_or_exit!(matches, "starts_and_ends_with", String)
.into_iter()
.map(|s| if ignore_case { s.to_lowercase() } else { s })
.collect()
} else {
HashSet::new()
};
if includes.is_empty() && starts_with.is_empty() {
if starts_with_args.is_empty()
&& ends_with_args.is_empty()
&& starts_and_ends_with_args.is_empty()
{
eprintln!(
"Error: No keypair search criteria provided (--includes or --starts-with)"
"Error: No keypair search criteria provided (--starts-with or --ends-with or --starts-and-ends-with)"
);
exit(1);
}
let grind_matches =
grind_parse_args(starts_with_args, ends_with_args, starts_and_ends_with_args);
let grind_matches_thread_safe = Arc::new(grind_matches);
let attempts = Arc::new(AtomicU64::new(1));
let found = Arc::new(AtomicU64::new(0));
let start = Instant::now();
let done = Arc::new(AtomicBool::new(false));
println!(
"Searching with {} threads for a pubkey containing {:?} or starting with {:?}",
num_cpus::get(),
includes,
starts_with
);
for _ in 0..num_cpus::get() {
let done = done.clone();
let attempts = attempts.clone();
let found = found.clone();
let grind_matches_thread_safe = grind_matches_thread_safe.clone();
let _threads = (0..num_cpus::get())
.map(|_| {
let attempts = attempts.clone();
let found = found.clone();
let includes = includes.clone();
let starts_with = starts_with.clone();
thread::spawn(move || loop {
let attempts = attempts.fetch_add(1, Ordering::Relaxed);
if attempts % 5_000_000 == 0 {
println!(
"Searched {} keypairs in {}s. {} matches found",
attempts,
start.elapsed().as_secs(),
found.load(Ordering::Relaxed),
);
let handle = thread::spawn(move || loop {
if done.load(Ordering::Relaxed) {
break;
}
let attempts = attempts.fetch_add(1, Ordering::Relaxed);
if attempts % 1_000_000 == 0 {
println!(
"Searched {} keypairs in {}s. {} matches found.",
attempts,
start.elapsed().as_secs(),
found.load(Ordering::Relaxed),
);
}
let keypair = Keypair::new();
let mut pubkey = bs58::encode(keypair.pubkey()).into_string();
if ignore_case {
pubkey = pubkey.to_lowercase();
}
let mut total_matches_found = 0;
for i in 0..grind_matches_thread_safe.len() {
if grind_matches_thread_safe[i].count.load(Ordering::Relaxed) == 0 {
total_matches_found += 1;
continue;
}
let keypair = Keypair::new();
let mut pubkey = bs58::encode(keypair.pubkey()).into_string();
if ignore_case {
pubkey = pubkey.to_lowercase();
}
if starts_with.iter().any(|s| pubkey.starts_with(s))
|| includes.iter().any(|s| pubkey.contains(s))
if (!grind_matches_thread_safe[i].starts.is_empty()
&& grind_matches_thread_safe[i].ends.is_empty()
&& pubkey.starts_with(&grind_matches_thread_safe[i].starts))
|| (grind_matches_thread_safe[i].starts.is_empty()
&& !grind_matches_thread_safe[i].ends.is_empty()
&& pubkey.ends_with(&grind_matches_thread_safe[i].ends))
|| (!grind_matches_thread_safe[i].starts.is_empty()
&& !grind_matches_thread_safe[i].ends.is_empty()
&& pubkey.starts_with(&grind_matches_thread_safe[i].starts)
&& pubkey.ends_with(&grind_matches_thread_safe[i].ends))
{
let found = found.fetch_add(1, Ordering::Relaxed);
output_keypair(
&keypair,
&format!("{}.json", keypair.pubkey()),
&format!("{}", found),
)
.unwrap();
let _found = found.fetch_add(1, Ordering::Relaxed);
grind_matches_thread_safe[i]
.count
.fetch_sub(1, Ordering::Relaxed);
println!("Wrote keypair to {}", &format!("{}.json", keypair.pubkey()));
write_keypair_file(&keypair, &format!("{}.json", keypair.pubkey()))
.unwrap();
}
});
})
.collect::<Vec<_>>();
thread::park();
}
if total_matches_found == grind_matches_thread_safe.len() {
done.store(true, Ordering::Relaxed);
}
});
handle.join().unwrap();
}
}
("verify", Some(matches)) => {
let keypair = get_keypair_from_matches(matches)?;
let keypair = get_keypair_from_matches(matches, config)?;
let test_data = b"test";
let signature = Signature::new(&keypair.sign(test_data).to_bytes());
let pubkey_bs58 = matches.value_of("pubkey").unwrap();


@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "0.23.0"
version = "0.23.3"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -16,12 +16,12 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.23.0" }
solana-ledger = { path = "../ledger", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-runtime = { path = "../runtime", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-vote-program = { path = "../programs/vote", version = "0.23.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.23.3" }
solana-ledger = { path = "../ledger", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-runtime = { path = "../runtime", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
tempfile = "3.1.0"
[dev-dependencies]


@ -11,6 +11,7 @@ use solana_ledger::{
blockstore_db::{self, Column, Database},
blockstore_processor::{BankForksInfo, BlockstoreProcessorResult, ProcessOptions},
rooted_slot_iterator::RootedSlotIterator,
shred_version::compute_shred_version,
snapshot_utils,
};
use solana_sdk::{
@ -526,6 +527,7 @@ fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
fn load_bank_forks(
arg_matches: &ArgMatches,
ledger_path: &PathBuf,
genesis_config: &GenesisConfig,
process_options: ProcessOptions,
) -> BlockstoreProcessorResult {
let snapshot_config = if arg_matches.is_present("no_snapshot") {
@ -544,7 +546,7 @@ fn load_bank_forks(
};
bank_forks_utils::load(
&open_genesis_config(&ledger_path),
&genesis_config,
&open_blockstore(&ledger_path),
account_paths,
snapshot_config.as_ref(),
@ -615,9 +617,14 @@ fn main() {
)
)
.subcommand(
SubCommand::with_name("print-genesis-hash")
SubCommand::with_name("genesis-hash")
.about("Prints the ledger's genesis hash")
)
.subcommand(
SubCommand::with_name("shred-version")
.about("Prints the ledger's shred hash")
.arg(&hard_forks_arg)
)
.subcommand(
SubCommand::with_name("bounds")
.about("Print lowest and highest non-empty slots. Note that there may be empty slots within the bounds")
@ -741,19 +748,46 @@ fn main() {
});
match matches.subcommand() {
("print", Some(args_matches)) => {
let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot);
("print", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
output_ledger(
open_blockstore(&ledger_path),
starting_slot,
LedgerOutputMethod::Print,
);
}
("print-genesis-hash", Some(_args_matches)) => {
("genesis-hash", Some(_arg_matches)) => {
println!("{}", open_genesis_config(&ledger_path).hash());
}
("print-slot", Some(args_matches)) => {
let slots = values_t_or_exit!(args_matches, "slots", Slot);
("shred-version", Some(arg_matches)) => {
let process_options = ProcessOptions {
dev_halt_at_slot: Some(0),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: false,
..ProcessOptions::default()
};
let genesis_config = open_genesis_config(&ledger_path);
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
Ok((bank_forks, bank_forks_info, _leader_schedule_cache)) => {
let bank_info = &bank_forks_info[0];
let bank = bank_forks[bank_info.bank_slot].clone();
println!(
"{}",
compute_shred_version(
&genesis_config.hash(),
Some(&bank.hard_forks().read().unwrap())
)
);
}
Err(err) => {
eprintln!("Failed to load ledger: {:?}", err);
exit(1);
}
}
}
("print-slot", Some(arg_matches)) => {
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
for slot in slots {
println!("Slot {}", slot);
output_slot(
@ -763,8 +797,8 @@ fn main() {
);
}
}
("json", Some(args_matches)) => {
let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot);
("json", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
output_ledger(
open_blockstore(&ledger_path),
starting_slot,
@ -778,8 +812,15 @@ fn main() {
poh_verify: !arg_matches.is_present("skip_poh_verify"),
..ProcessOptions::default()
};
println!("{}", open_genesis_config(&ledger_path).hash());
load_bank_forks(arg_matches, &ledger_path, process_options).unwrap_or_else(|err| {
load_bank_forks(
arg_matches,
&ledger_path,
&open_genesis_config(&ledger_path),
process_options,
)
.unwrap_or_else(|err| {
eprintln!("Ledger verification failed: {:?}", err);
exit(1);
});
@ -795,7 +836,12 @@ fn main() {
..ProcessOptions::default()
};
match load_bank_forks(arg_matches, &ledger_path, process_options) {
match load_bank_forks(
arg_matches,
&ledger_path,
&open_genesis_config(&ledger_path),
process_options,
) {
Ok((bank_forks, bank_forks_info, _leader_schedule_cache)) => {
let dot = graph_forks(
&bank_forks,
@ -834,7 +880,8 @@ fn main() {
poh_verify: false,
..ProcessOptions::default()
};
match load_bank_forks(arg_matches, &ledger_path, process_options) {
let genesis_config = open_genesis_config(&ledger_path);
match load_bank_forks(arg_matches, &ledger_path, &genesis_config, process_options) {
Ok((bank_forks, _bank_forks_info, _leader_schedule_cache)) => {
let bank = bank_forks.get(snapshot_slot).unwrap_or_else(|| {
eprintln!("Error: Slot {} is not available", snapshot_slot);
@ -865,6 +912,13 @@ fn main() {
"Successfully created snapshot for slot {}: {:?}",
snapshot_slot, package.tar_output_file
);
println!(
"Shred version: {}",
compute_shred_version(
&genesis_config.hash(),
Some(&bank.hard_forks().read().unwrap())
)
);
ok
})
})
@ -879,8 +933,8 @@ fn main() {
}
}
}
("prune", Some(args_matches)) => {
if let Some(prune_file_path) = args_matches.value_of("slot_list") {
("prune", Some(arg_matches)) => {
if let Some(prune_file_path) = arg_matches.value_of("slot_list") {
let blockstore = open_blockstore(&ledger_path);
let prune_file = File::open(prune_file_path.to_string()).unwrap();
let slot_hashes: BTreeMap<u64, String> =
@ -916,14 +970,14 @@ fn main() {
blockstore.prune(*target_slot);
}
}
("list-roots", Some(args_matches)) => {
("list-roots", Some(arg_matches)) => {
let blockstore = open_blockstore(&ledger_path);
let max_height = if let Some(height) = args_matches.value_of("max_height") {
let max_height = if let Some(height) = arg_matches.value_of("max_height") {
usize::from_str(height).expect("Maximum height must be a number")
} else {
panic!("Maximum height must be provided");
};
let num_roots = if let Some(roots) = args_matches.value_of("num_roots") {
let num_roots = if let Some(roots) = arg_matches.value_of("num_roots") {
usize::from_str(roots).expect("Number of roots must be a number")
} else {
usize::from_str(DEFAULT_ROOT_COUNT).unwrap()
@ -948,7 +1002,7 @@ fn main() {
.collect();
let mut output_file: Box<dyn Write> =
if let Some(path) = args_matches.value_of("slot_list") {
if let Some(path) = arg_matches.value_of("slot_list") {
match File::create(path) {
Ok(file) => Box::new(file),
_ => Box::new(stdout()),
@ -969,10 +1023,10 @@ fn main() {
}
});
}
("bounds", Some(args_matches)) => {
("bounds", Some(arg_matches)) => {
match open_blockstore(&ledger_path).slot_meta_iterator(0) {
Ok(metas) => {
let all = args_matches.is_present("all");
let all = arg_matches.is_present("all");
println!("Collecting Ledger information...");
let slots: Vec<_> = metas.map(|(slot, _)| slot).collect();

View File

@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "0.23.0"
version = "0.23.3"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -29,19 +29,19 @@ reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0
serde = "1.0.104"
serde_bytes = "0.11.3"
serde_derive = "1.0.103"
solana-client = { path = "../client", version = "0.23.0" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.23.0" }
solana-logger = { path = "../logger", version = "0.23.0" }
solana-measure = { path = "../measure", version = "0.23.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.23.0" }
solana-metrics = { path = "../metrics", version = "0.23.0" }
solana-perf = { path = "../perf", version = "0.23.0" }
solana-client = { path = "../client", version = "0.23.3" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.23.3" }
solana-logger = { path = "../logger", version = "0.23.3" }
solana-measure = { path = "../measure", version = "0.23.3" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.23.3" }
solana-metrics = { path = "../metrics", version = "0.23.3" }
solana-perf = { path = "../perf", version = "0.23.3" }
ed25519-dalek = "1.0.0-pre.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.23.0" }
solana-runtime = { path = "../runtime", version = "0.23.0" }
solana-sdk = { path = "../sdk", version = "0.23.0" }
solana-stake-program = { path = "../programs/stake", version = "0.23.0" }
solana-vote-program = { path = "../programs/vote", version = "0.23.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.23.3" }
solana-runtime = { path = "../runtime", version = "0.23.3" }
solana-sdk = { path = "../sdk", version = "0.23.3" }
solana-stake-program = { path = "../programs/stake", version = "0.23.3" }
solana-vote-program = { path = "../programs/vote", version = "0.23.3" }
sys-info = "0.5.8"
symlink = "0.1.0"
tar = "0.4.26"
@ -59,7 +59,7 @@ features = ["lz4"]
[dev-dependencies]
assert_matches = "1.3.0"
matches = "0.1.6"
solana-budget-program = { path = "../programs/budget", version = "0.23.0" }
solana-budget-program = { path = "../programs/budget", version = "0.23.3" }
[lib]
crate-type = ["lib"]


@ -22,8 +22,8 @@ use rayon::{
};
use rocksdb::DBRawIterator;
use solana_client::rpc_response::{
RpcConfirmedBlock, RpcEncodedTransaction, RpcTransactionEncoding, RpcTransactionStatus,
RpcTransactionWithStatusMeta,
RpcConfirmedBlock, RpcEncodedTransaction, RpcRewards, RpcTransactionEncoding,
RpcTransactionStatus, RpcTransactionWithStatusMeta,
};
use solana_measure::measure::Measure;
use solana_metrics::{datapoint_debug, datapoint_error};
@ -86,10 +86,12 @@ pub struct Blockstore {
data_shred_cf: LedgerColumn<cf::ShredData>,
code_shred_cf: LedgerColumn<cf::ShredCode>,
transaction_status_cf: LedgerColumn<cf::TransactionStatus>,
rewards_cf: LedgerColumn<cf::Rewards>,
last_root: Arc<RwLock<Slot>>,
insert_shreds_lock: Arc<Mutex<()>>,
pub new_shreds_signals: Vec<SyncSender<bool>>,
pub completed_slots_senders: Vec<SyncSender<Vec<Slot>>>,
pub lowest_cleanup_slot: Arc<RwLock<u64>>,
}
pub struct IndexMetaWorkingSetEntry {
@ -194,6 +196,7 @@ impl Blockstore {
let data_shred_cf = db.column();
let code_shred_cf = db.column();
let transaction_status_cf = db.column();
let rewards_cf = db.column();
let db = Arc::new(db);
@ -207,7 +210,7 @@ impl Blockstore {
measure.stop();
info!("{:?} {}", blockstore_path, measure);
Ok(Blockstore {
let blockstore = Blockstore {
db,
meta_cf,
dead_slots_cf,
@ -218,11 +221,14 @@ impl Blockstore {
data_shred_cf,
code_shred_cf,
transaction_status_cf,
rewards_cf,
new_shreds_signals: vec![],
completed_slots_senders: vec![],
insert_shreds_lock: Arc::new(Mutex::new(())),
last_root,
})
lowest_cleanup_slot: Arc::new(RwLock::new(0)),
};
Ok(blockstore)
}
pub fn open_with_signal(
@ -343,6 +349,10 @@ impl Blockstore {
& self
.db
.delete_range_cf::<cf::TransactionStatus>(&mut write_batch, from_slot, to_slot)
.unwrap_or(false)
& self
.db
.delete_range_cf::<cf::Rewards>(&mut write_batch, from_slot, to_slot)
.unwrap_or(false);
if let Err(e) = self.db.write(write_batch) {
error!(
@ -395,6 +405,10 @@ impl Blockstore {
&& self
.transaction_status_cf
.compact_range(from_slot, to_slot)
.unwrap_or(false)
&& self
.rewards_cf
.compact_range(from_slot, to_slot)
.unwrap_or(false);
Ok(result)
}
@ -1059,6 +1073,12 @@ impl Blockstore {
to_index: u64,
buffer: &mut [u8],
) -> Result<(u64, usize)> {
// lowest_cleanup_slot is the last slot that was not cleaned up by
// LedgerCleanupService
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
if *lowest_cleanup_slot > slot {
return Err(BlockstoreError::SlotCleanedUp);
}
let meta_cf = self.db.column::<cf::SlotMeta>();
let mut buffer_offset = 0;
let mut last_index = 0;
@ -1288,14 +1308,26 @@ impl Blockstore {
slot: Slot,
slot_duration: Duration,
stakes: &HashMap<Pubkey, (u64, Account)>,
) -> Option<UnixTimestamp> {
) -> Result<Option<UnixTimestamp>> {
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
// lowest_cleanup_slot is the last slot that was not cleaned up by
// LedgerCleanupService
if *lowest_cleanup_slot > slot {
return Err(BlockstoreError::SlotCleanedUp);
}
let unique_timestamps: HashMap<Pubkey, (Slot, UnixTimestamp)> = self
.get_timestamp_slots(slot, TIMESTAMP_SLOT_INTERVAL, TIMESTAMP_SLOT_RANGE)
.into_iter()
.flat_map(|query_slot| self.get_block_timestamps(query_slot).unwrap_or_default())
.collect();
calculate_stake_weighted_timestamp(unique_timestamps, stakes, slot, slot_duration)
Ok(calculate_stake_weighted_timestamp(
unique_timestamps,
stakes,
slot,
slot_duration,
))
}
fn get_timestamp_slots(
@ -1346,6 +1378,12 @@ impl Blockstore {
slot: Slot,
encoding: Option<RpcTransactionEncoding>,
) -> Result<RpcConfirmedBlock> {
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
// lowest_cleanup_slot is the last slot that was not cleaned up by
// LedgerCleanupService
if *lowest_cleanup_slot > slot {
return Err(BlockstoreError::SlotCleanedUp);
}
let encoding = encoding.unwrap_or(RpcTransactionEncoding::Json);
if self.is_root(slot) {
let slot_meta_cf = self.db.column::<cf::SlotMeta>();
@ -1369,6 +1407,12 @@ impl Blockstore {
let blockhash = get_last_hash(slot_entries.iter())
.unwrap_or_else(|| panic!("Rooted slot {:?} must have blockhash", slot));
let rewards = self
.rewards_cf
.get(slot)
.expect("Expect rewards get to succeed")
.unwrap_or_else(|| vec![]);
let block = RpcConfirmedBlock {
previous_blockhash: previous_blockhash.to_string(),
blockhash: blockhash.to_string(),
@ -1378,6 +1422,7 @@ impl Blockstore {
encoding,
slot_transaction_iterator,
),
rewards,
};
return Ok(block);
}
@ -1415,6 +1460,10 @@ impl Blockstore {
self.transaction_status_cf.put(index, status)
}
pub fn write_rewards(&self, index: Slot, rewards: RpcRewards) -> Result<()> {
self.rewards_cf.put(index, &rewards)
}
fn get_block_timestamps(&self, slot: Slot) -> Result<Vec<(Pubkey, (Slot, UnixTimestamp))>> {
let slot_entries = self.get_slot_entries(slot, 0, None)?;
Ok(slot_entries
@ -1466,6 +1515,14 @@ impl Blockstore {
if self.is_dead(slot) {
return Err(BlockstoreError::DeadSlot);
}
// lowest_cleanup_slot is the last slot that was not cleaned up by
// LedgerCleanupService
let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap();
if *lowest_cleanup_slot > slot {
return Err(BlockstoreError::SlotCleanedUp);
}
let slot_meta_cf = self.db.column::<cf::SlotMeta>();
let slot_meta = slot_meta_cf.get(slot)?;
if slot_meta.is_none() {
@ -2539,6 +2596,13 @@ pub mod tests {
.unwrap()
.next()
.map(|((slot, _), _)| slot >= min_slot)
.unwrap_or(true)
& blockstore
.db
.iter::<cf::Rewards>(IteratorMode::Start)
.unwrap()
.next()
.map(|(slot, _)| slot >= min_slot)
.unwrap_or(true);
assert!(condition_met);
}
@ -4791,6 +4855,7 @@ pub mod tests {
parent_slot: slot - 1,
blockhash: blockhash.to_string(),
previous_blockhash: Hash::default().to_string(),
rewards: vec![],
};
// The previous_blockhash of `expected_block` is default because its parent slot is a
// root, but empty of entries. This is special handling for snapshot root slots.
@ -4811,6 +4876,7 @@ pub mod tests {
parent_slot: slot,
blockhash: blockhash.to_string(),
previous_blockhash: blockhash.to_string(),
rewards: vec![],
};
assert_eq!(confirmed_block, expected_block);
@ -4886,10 +4952,11 @@ pub mod tests {
})
.sum();
expected_time /= total_stake;
assert_eq!(block_time_slot_3.unwrap() as u64, expected_time);
assert_eq!(block_time_slot_3.unwrap().unwrap() as u64, expected_time);
assert_eq!(
blockstore
.get_block_time(8, slot_duration.clone(), &stakes)
.unwrap()
.unwrap() as u64,
expected_time + 2 // At 400ms block duration, 5 slots == 2sec
);
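Tying the new Rewards column together, a minimal hedged sketch of writing rewards for a slot and reading them back; the slot and contents are placeholders:
// Hedged sketch: assumes `blockstore` and a rooted `slot` are in scope; an empty vec stands in for real rewards.
let rewards: solana_client::rpc_response::RpcRewards = vec![];
blockstore.write_rewards(slot, rewards).unwrap();
let block = blockstore.get_confirmed_block(slot, None).unwrap();
assert!(block.rewards.is_empty()); // getConfirmedBlock now surfaces whatever was stored for the slot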


@ -10,7 +10,7 @@ use rocksdb::{
};
use serde::de::DeserializeOwned;
use serde::Serialize;
use solana_client::rpc_response::RpcTransactionStatus;
use solana_client::rpc_response::{RpcRewards, RpcTransactionStatus};
use solana_sdk::{clock::Slot, signature::Signature};
use std::{collections::HashMap, fs, marker::PhantomData, path::Path, sync::Arc};
use thiserror::Error;
@ -38,6 +38,8 @@ const DATA_SHRED_CF: &str = "data_shred";
const CODE_SHRED_CF: &str = "code_shred";
/// Column family for Transaction Status
const TRANSACTION_STATUS_CF: &str = "transaction_status";
/// Column family for Rewards
const REWARDS_CF: &str = "rewards";
#[derive(Error, Debug)]
pub enum BlockstoreError {
@ -49,6 +51,7 @@ pub enum BlockstoreError {
IO(#[from] std::io::Error),
Serialize(#[from] Box<bincode::ErrorKind>),
FsExtraError(#[from] fs_extra::error::Error),
SlotCleanedUp,
}
pub(crate) type Result<T> = std::result::Result<T, BlockstoreError>;
@ -104,6 +107,10 @@ pub mod columns {
#[derive(Debug)]
/// The transaction status column
pub struct TransactionStatus;
#[derive(Debug)]
/// The rewards column
pub struct Rewards;
}
#[derive(Debug)]
@ -112,8 +119,8 @@ struct Rocks(rocksdb::DB);
impl Rocks {
fn open(path: &Path) -> Result<Rocks> {
use columns::{
DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, Root, ShredCode, ShredData,
SlotMeta, TransactionStatus,
DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, Rewards, Root, ShredCode,
ShredData, SlotMeta, TransactionStatus,
};
fs::create_dir_all(&path)?;
@ -138,6 +145,7 @@ impl Rocks {
ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options());
let transaction_status_cf_descriptor =
ColumnFamilyDescriptor::new(TransactionStatus::NAME, get_cf_options());
let rewards_cf_descriptor = ColumnFamilyDescriptor::new(Rewards::NAME, get_cf_options());
let cfs = vec![
meta_cf_descriptor,
@ -150,6 +158,7 @@ impl Rocks {
shred_data_cf_descriptor,
shred_code_cf_descriptor,
transaction_status_cf_descriptor,
rewards_cf_descriptor,
];
// Open the database
@ -160,8 +169,8 @@ impl Rocks {
fn columns(&self) -> Vec<&'static str> {
use columns::{
DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, Root, ShredCode, ShredData,
SlotMeta, TransactionStatus,
DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, Rewards, Root, ShredCode,
ShredData, SlotMeta, TransactionStatus,
};
vec![
@ -175,6 +184,7 @@ impl Rocks {
ShredData::NAME,
ShredCode::NAME,
TransactionStatus::NAME,
Rewards::NAME,
]
}
@ -315,6 +325,14 @@ impl ColumnName for columns::TransactionStatus {
const NAME: &'static str = TRANSACTION_STATUS_CF;
}
impl SlotColumn for columns::Rewards {}
impl ColumnName for columns::Rewards {
const NAME: &'static str = REWARDS_CF;
}
impl TypedColumn for columns::Rewards {
type Type = RpcRewards;
}
impl Column for columns::ShredCode {
type Index = (u64, u64);
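For context on the rewards column plumbing: with the rocksdb crate, a column family must be declared when the database is opened and looked up by name when it is accessed. A standalone sketch of that pattern, independent of Blockstore (the path, keys, and values are made up for illustration, and the calls assume a recent rocksdb crate release):

use rocksdb::{ColumnFamilyDescriptor, Options, DB};

fn main() -> Result<(), rocksdb::Error> {
    let mut db_opts = Options::default();
    db_opts.create_if_missing(true);
    db_opts.create_missing_column_families(true);

    // One descriptor per column family, mirroring the descriptor list built above.
    let cfs = vec![
        ColumnFamilyDescriptor::new("transaction_status", Options::default()),
        ColumnFamilyDescriptor::new("rewards", Options::default()),
    ];
    let db = DB::open_cf_descriptors(&db_opts, "/tmp/example-ledger-db", cfs)?;

    // Reads and writes go through the column family handle looked up by name.
    let rewards_cf = db.cf_handle("rewards").expect("rewards column family");
    let key = 42u64.to_le_bytes();
    db.put_cf(rewards_cf, &key, b"serialized rewards")?;
    assert!(db.get_cf(rewards_cf, &key)?.is_some());
    Ok(())
}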

@ -1,5 +1,6 @@
pub use solana_runtime::genesis_utils::{
create_genesis_config_with_leader, GenesisConfigInfo, BOOTSTRAP_VALIDATOR_LAMPORTS,
create_genesis_config_with_leader, create_genesis_config_with_leader_ex, GenesisConfigInfo,
BOOTSTRAP_VALIDATOR_LAMPORTS,
};
use solana_sdk::pubkey::Pubkey;

@ -396,11 +396,11 @@ mod tests {
);
assert_eq!(
cache.next_leader_slot(&pubkey, 0, &bank, None),
Some((1, 6047999))
Some((1, 863999))
);
assert_eq!(
cache.next_leader_slot(&pubkey, 1, &bank, None),
Some((2, 6047999))
Some((2, 863999))
);
assert_eq!(
cache.next_leader_slot(
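The new expectation of 863,999 is consistent with a shorter default epoch: assuming 432,000 slots per epoch (two days of 400 ms slots, an assumption not taken from this diff), the leader schedule appears to be known through the end of the following epoch, i.e. slot 2 * 432,000 - 1. A quick check of that arithmetic:

fn main() {
    // Assumed dev-default epoch length; illustrative, not taken from this diff.
    const ASSUMED_SLOTS_PER_EPOCH: u64 = 432_000;
    // Last slot covered by a schedule that spans epochs 0 and 1.
    assert_eq!(2 * ASSUMED_SLOTS_PER_EPOCH - 1, 863_999);
}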

@ -15,6 +15,7 @@ pub mod leader_schedule_utils;
pub mod poh;
pub mod rooted_slot_iterator;
pub mod shred;
pub mod shred_version;
pub mod sigverify_shreds;
pub mod snapshot_package;
pub mod snapshot_utils;

@ -0,0 +1,44 @@
use crate::shred::Shred;
use solana_runtime::hard_forks::HardForks;
use solana_sdk::hash::{extend_and_hash, Hash};
pub fn compute_shred_version(genesis_hash: &Hash, hard_forks: Option<&HardForks>) -> u16 {
use byteorder::{ByteOrder, LittleEndian};
let mut hash = *genesis_hash;
if let Some(hard_forks) = hard_forks {
for (slot, count) in hard_forks.iter() {
let mut buf = [0u8; 16];
LittleEndian::write_u64(&mut buf[..8], *slot);
LittleEndian::write_u64(&mut buf[8..], *count as u64);
hash = extend_and_hash(&hash, &buf);
}
}
Shred::version_from_hash(&hash)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_compute_shred_version() {
assert_eq!(compute_shred_version(&Hash::default(), None), 1);
let mut hard_forks = HardForks::default();
assert_eq!(
compute_shred_version(&Hash::default(), Some(&hard_forks)),
1
);
hard_forks.register(1);
assert_eq!(
compute_shred_version(&Hash::default(), Some(&hard_forks)),
55551
);
hard_forks.register(1);
assert_eq!(
compute_shred_version(&Hash::default(), Some(&hard_forks)),
46353
);
}
}
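A hedged sketch of how a node might use the new module, relying only on the items shown above (compute_shred_version, HardForks, Hash); the versions_match helper and the crate paths are assumptions for illustration:

use solana_ledger::shred_version::compute_shred_version;
use solana_runtime::hard_forks::HardForks;
use solana_sdk::hash::Hash;

// Derive the local shred version and compare it to one advertised by a peer.
// The same genesis hash and hard-fork history must yield the same u16 on
// every node, so a mismatch suggests a different cluster or fork.
fn versions_match(genesis_hash: &Hash, hard_forks: &HardForks, peer_version: u16) -> bool {
    compute_shred_version(genesis_hash, Some(hard_forks)) == peer_version
}

fn main() {
    let hard_forks = HardForks::default();
    // With a default hash and no registered forks, the tests above pin the version to 1.
    assert!(versions_match(&Hash::default(), &hard_forks, 1));
}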

@ -137,7 +137,7 @@ fn slot_key_data_for_gpu<
let keyvec_size = keys_to_slots.len() * size_of::<T>();
keyvec.resize(keyvec_size, 0);
for (i, (k, slots)) in keys_to_slots.iter_mut().enumerate() {
for (i, (k, slots)) in keys_to_slots.iter().enumerate() {
let start = i * size_of::<T>();
let end = start + size_of::<T>();
keyvec[start..end].copy_from_slice(k.as_ref());
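The body of the loop only reads each key while packing it into the flat buffer, so switching from iter_mut() to iter() is safe. A simplified, self-contained version of the packing step, with fixed 32-byte keys standing in for the generic key type:

use std::mem::size_of;

// Pack fixed-size keys into one contiguous byte buffer for upload; the input
// is never mutated, which is why iter() (not iter_mut()) is enough.
fn pack_keys(keys: &[[u8; 32]]) -> Vec<u8> {
    let mut keyvec = vec![0u8; keys.len() * size_of::<[u8; 32]>()];
    for (i, k) in keys.iter().enumerate() {
        let start = i * size_of::<[u8; 32]>();
        let end = start + size_of::<[u8; 32]>();
        keyvec[start..end].copy_from_slice(k.as_ref());
    }
    keyvec
}

fn main() {
    let keys = [[1u8; 32], [2u8; 32]];
    let packed = pack_keys(&keys);
    assert_eq!(packed.len(), 64);
    assert_eq!(&packed[32..], &[2u8; 32][..]);
}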

Some files were not shown because too many files have changed in this diff.