Compare commits


27 Commits

Author SHA1 Message Date
Michael Vines
a93915f1bd Wait for one slot to be produced 2020-05-26 15:06:34 -07:00
Michael Vines
ee05266a09 Adjust v1.0 CRDS version to be compatible with v1.1 2020-05-26 15:06:34 -07:00
mergify[bot]
8cde8a54ac LedgerCleanupService no longer causes an OOM and actually purges (bp #10199) (#10220)
* LedgerCleanupService no longer causes an OOM and actually purges (#10199)

* cleanup_ledger() now services new_root_receiver while purging
* purge_slots() now fully deletes before compacting
* Add ledger pruning grafana graph

(cherry picked from commit 156387aba4)

* fixup

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-25 00:00:59 -07:00
mergify[bot]
65b5bddf4e v1.0: Test ledger-tool commands in run-sanity.sh (bp #10211) (#10213)
automerge
2020-05-24 09:00:21 -07:00
sakridge
88199e77ab Enable disk metrics (#10009) 2020-05-22 22:55:39 -07:00
Michael Vines
b2b10f4989 Update another non-circulating account 2020-05-22 15:10:56 -07:00
mergify[bot]
41d3fbcb60 Add another non-circulating account (#10186) (#10189)
automerge

(cherry picked from commit e2b5cd6d47)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-22 14:35:36 -07:00
Tyera Eulberg
33322d9501 Add circ/non-circ filter to getLargestAccounts (#10188) 2020-05-22 15:14:18 -06:00
mergify[bot]
ee1f218e76 Fixup deserialize_bs58_transaction, and make a few error types more targeted (#10171) (#10176)
automerge

(cherry picked from commit 12a3b1ba6a)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-05-21 17:50:54 -07:00
mergify[bot]
b3b32befd7 REST API now returns supply in SOL rather than lamports (#10170) (#10173)
automerge

(cherry picked from commit 18be7a7966)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-21 16:53:58 -07:00
Michael Vines
95675f8f42 Bump version to 1.0.24 2020-05-21 13:29:16 -07:00
mergify[bot]
825c0e2b6e Revert "Add AVX2 runtime checks (#10033)" (#10167) (#10168)
This reverts commit cf8eb7700b.

(cherry picked from commit 486168b796)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-21 13:19:42 -07:00
mergify[bot]
4334fef955 Add v0 REST APIs for circulating and total supply (#10102) (#10159)
automerge
2020-05-20 22:14:59 -07:00
mergify[bot]
2333468350 Rename getCirculatingSuppy to getSupply in JSON API doc (#10121) (#10122)
automerge
2020-05-19 15:41:30 -07:00
mergify[bot]
86968cb311 Add SimulateTransaction RPC endpoint (bp #10106) (#10115)
automerge
2020-05-19 13:55:07 -07:00
carllin
3ce9a16e7f v1.0: Add nonce to shreds repairs, add shred data size to header (#10110)
* Add nonce to shreds/repairs

* Add data shred size to header

* Align with future epoch

Co-authored-by: Carl <carl@solana.com>
2020-05-19 12:34:26 -07:00
mergify[bot]
3746c0c6ac Update accounts whitelist (#10100) (#10103)
automerge
2020-05-18 14:54:50 -07:00
sakridge
2b71bf37f9 Make repair metrics less chatty (#9094) (#10080) 2020-05-16 10:46:50 -07:00
Michael Vines
3b526cc2de Increase the number of JSON RPC service threads (#10075)
automerge
2020-05-15 15:00:41 -07:00
mergify[bot]
198f87ffea Abort if the open fd limit cannot be increased (bp #10064) (#10073)
automerge
2020-05-15 14:47:41 -07:00
mergify[bot]
859d4db87e validator: Forge a confirmed root before halting for RPC inspection (bp #10061) (#10066)
* Forge a confirmed root before halting for RPC inspection (#10061)

(cherry picked from commit 1da1667920)

# Conflicts:
#	core/src/commitment.rs

* Update commitment.rs

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-15 14:36:54 -07:00
mergify[bot]
1163144914 solana-gossip spy can now specify a shred version (#10040) (#10041)
automerge
2020-05-13 21:45:16 -07:00
mergify[bot]
49f212247a Add AVX2 runtime checks (#10033) (#10034)
automerge
2020-05-13 13:34:27 -07:00
mergify[bot]
e26840cb09 Introduce type alias Ancestors (bp #9699) (#10019)
automerge
2020-05-13 04:10:18 -07:00
mergify[bot]
14bbcef722 v1.0: Advertise node version in gossip (bp #9986) (#9995)
automerge
2020-05-12 19:38:52 -07:00
mergify[bot]
5326f3ec73 Check slot cleaned up for RPC blockstore/slot queries (#9982) (#9988)
automerge
2020-05-11 16:29:03 -07:00
Michael Vines
fca4554d3f Bump version to v1.0.23 2020-05-10 21:49:52 -07:00
129 changed files with 4779 additions and 3521 deletions

Cargo.lock (generated, 4770 changed lines): diff suppressed because it is too large.

Cargo.toml

@@ -53,6 +53,7 @@ members = [
"transaction-status",
"upload-perf",
"net-utils",
"version",
"vote-signer",
"cli",
"rayon-threadlimit",

archiver-lib/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "1.0.22"
version = "1.0.24"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,22 +15,22 @@ ed25519-dalek = "=1.0.0-pre.3"
log = "0.4.8"
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-client = { path = "../client", version = "1.0.22" }
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.24" }
solana-storage-program = { path = "../programs/storage", version = "1.0.24" }
thiserror = "1.0"
serde = "1.0.104"
serde_json = "1.0.46"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-chacha = { path = "../chacha", version = "1.0.22" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-perf = { path = "../perf", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-chacha = { path = "../chacha", version = "1.0.24" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
[dev-dependencies]
hex = "0.4.0"

archiver-lib/src/archiver.rs

@@ -13,8 +13,7 @@ use solana_core::{
contact_info::ContactInfo,
gossip_service::GossipService,
packet::{limited_deserialize, PACKET_DATA_SIZE},
repair_service,
repair_service::{RepairService, RepairSlotRange, RepairStrategy},
repair_service::{self, RepairService, RepairSlotRange, RepairStats, RepairStrategy},
serve_repair::ServeRepair,
shred_fetch_stage::ShredFetchStage,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
@@ -839,13 +838,14 @@ impl Archiver {
repair_service::MAX_REPAIR_LENGTH,
&repair_slot_range,
);
let mut repair_stats = RepairStats::default();
//iter over the repairs and send them
if let Ok(repairs) = repairs {
let reqs: Vec<_> = repairs
.into_iter()
.filter_map(|repair_request| {
serve_repair
.map_repair_request(&repair_request)
.map_repair_request(&repair_request, &mut repair_stats, Some(0))
.map(|result| ((archiver_info.gossip, result), repair_request))
.ok()
})

archiver-utils/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "1.0.22"
version = "1.0.24"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,12 +11,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.7.0"
solana-chacha = { path = "../chacha", version = "1.0.22" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-perf = { path = "../perf", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-chacha = { path = "../chacha", version = "1.0.24" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[dev-dependencies]
hex = "0.4.0"

archiver/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,13 +10,13 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.2"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[package.metadata.docs.rs]

banking-bench/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,12 +10,12 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-measure = { path = "../measure", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-measure = { path = "../measure", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
rand = "0.7.0"
crossbeam-channel = "0.3"

bench-exchange/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,20 +18,20 @@ rand = "0.7.0"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-genesis = { path = "../genesis", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.22" }
solana-faucet = { path = "../faucet", version = "1.0.22" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-genesis = { path = "../genesis", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-faucet = { path = "../faucet", version = "1.0.24" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.0.22" }
solana-local-cluster = { path = "../local-cluster", version = "1.0.24" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

bench-streamer/Cargo.toml

@@ -2,17 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

bench-tps/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,24 +14,24 @@ log = "0.4.8"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-genesis = { path = "../genesis", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.22" }
solana-faucet = { path = "../faucet", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-genesis = { path = "../genesis", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-faucet = { path = "../faucet", version = "1.0.24" }
#solana-librapay = { path = "../programs/librapay", version = "1.0.20", optional = true }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-measure = { path = "../measure", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
solana-measure = { path = "../measure", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
#solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.20", optional = true }
[dev-dependencies]
serial_test = "0.3.2"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.0.22" }
solana-local-cluster = { path = "../local-cluster", version = "1.0.24" }
#[features]
#move = ["solana-librapay", "solana-move-loader-program"]

chacha-cuda/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "1.0.22"
version = "1.0.24"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,12 +10,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.22" }
solana-chacha = { path = "../chacha", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-perf = { path = "../perf", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.24" }
solana-chacha = { path = "../chacha", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[dev-dependencies]
hex-literal = "0.2.1"

chacha-sys/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "1.0.22"
version = "1.0.24"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

chacha/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "1.0.22"
version = "1.0.24"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2018"
log = "0.4.8"
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-perf = { path = "../perf", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[dev-dependencies]
hex-literal = "0.2.1"

run-sanity.sh

@@ -2,8 +2,10 @@
set -e
cd "$(dirname "$0")/.."
# shellcheck source=multinode-demo/common.sh
source multinode-demo/common.sh
rm -f config/run/init-completed
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
timeout 15 ./run.sh &
pid=$!
@@ -17,6 +19,14 @@ while [[ ! -f config/run/init-completed ]]; do
fi
done
while [[ $($solana_cli slot --commitment recent) -eq 0 ]]; do
sleep 1
done
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
wait $pid
$solana_ledger_tool create-snapshot --ledger config/ledger 1 config/snapshot-ledger
cp config/ledger/genesis.tar.bz2 config/snapshot-ledger
tar -C config/snapshot-ledger -xvf config/snapshot-ledger/genesis.tar.bz2
$solana_ledger_tool verify --ledger config/snapshot-ledger

clap-utils/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.0.22"
version = "1.0.24"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"

cli-config/Cargo.toml

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

cli/Cargo.toml

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,29 +26,29 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.46"
solana-budget-program = { path = "../programs/budget", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-cli-config = { path = "../cli-config", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.22" }
solana-config-program = { path = "../programs/config", version = "1.0.22" }
solana-faucet = { path = "../faucet", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.22" }
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.22" }
solana-budget-program = { path = "../programs/budget", version = "1.0.24" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-cli-config = { path = "../cli-config", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-config-program = { path = "../programs/config", version = "1.0.24" }
solana-faucet = { path = "../faucet", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-stake-program = { path = "../programs/stake", version = "1.0.24" }
solana-storage-program = { path = "../programs/storage", version = "1.0.24" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.24" }
titlecase = "1.1.0"
thiserror = "1.0.11"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.0.22" }
solana-budget-program = { path = "../programs/budget", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.24" }
solana-budget-program = { path = "../programs/budget", version = "1.0.24" }
tempfile = "3.1.0"
[[bin]]

client/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.0.22"
version = "1.0.24"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.46"
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.22" }
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -31,7 +31,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.0.5"
jsonrpc-http-server = "14.0.6"
solana-logger = { path = "../logger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.24" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

client/src/lib.rs

@@ -8,6 +8,7 @@ pub mod perf_utils;
pub mod pubsub_client;
pub mod rpc_client;
pub mod rpc_client_request;
pub mod rpc_config;
pub mod rpc_request;
pub mod rpc_response;
pub mod thin_client;

client/src/rpc_client.rs

@@ -114,6 +114,21 @@ impl RpcClient {
}
}
pub fn simulate_transaction(
&self,
transaction: &Transaction,
sig_verify: bool,
) -> RpcResult<TransactionStatus> {
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
let response = self.send(
&RpcRequest::SimulateTransaction,
json!([serialized_encoded, { "sigVerify": sig_verify }]),
0,
)?;
Ok(serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "SimulateTransaction"))?)
}
pub fn get_signature_status(
&self,
signature: &Signature,
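
A minimal usage sketch of the new simulate_transaction helper (not part of the diff; the endpoint URL, keypair, and transfer transaction are illustrative):

use solana_client::rpc_client::RpcClient;
use solana_sdk::{
    signature::{Keypair, Signer},
    system_transaction,
};

fn main() {
    // Placeholder endpoint; any node that serves the JSON RPC API works.
    let client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let payer = Keypair::new();
    let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
    let tx = system_transaction::transfer(&payer, &payer.pubkey(), 1, blockhash);
    // sig_verify = true becomes the "sigVerify" field in the request JSON above.
    let result = client.simulate_transaction(&tx, true).unwrap();
    println!("simulation result: {:?}", result);
}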

client/src/rpc_config.rs (new file, 31 lines)

@@ -0,0 +1,31 @@
use solana_sdk::commitment_config::CommitmentConfig;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureStatusConfig {
pub search_transaction_history: Option<bool>,
// DEPRECATED
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionConfig {
pub sig_verify: bool,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcLargestAccountsFilter {
Circulating,
NonCirculating,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcLargestAccountsConfig {
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub filter: Option<RpcLargestAccountsFilter>,
}
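
For reference, a sketch of how the serde attributes on these configs shape the wire format (assumes serde_json is available; values are illustrative):

use solana_client::rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter};
use solana_sdk::commitment_config::CommitmentConfig;

fn main() {
    // #[serde(flatten)] folds the commitment field into the same JSON object,
    // and rename_all = "camelCase" lowercases the filter variant on the wire.
    let config = RpcLargestAccountsConfig {
        commitment: Some(CommitmentConfig::default()),
        filter: Some(RpcLargestAccountsFilter::Circulating),
    };
    // Prints something like: {"commitment":"max","filter":"circulating"}
    println!("{}", serde_json::to_string(&config).unwrap());
}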

client/src/rpc_request.rs

@@ -18,6 +18,7 @@ pub enum RpcRequest {
GetGenesisHash,
GetIdentity,
GetInflation,
GetLargestAccounts,
GetLeaderSchedule,
GetProgramAccounts,
GetRecentBlockhash,
@@ -36,6 +37,7 @@ pub enum RpcRequest {
RegisterNode,
RequestAirdrop,
SendTransaction,
SimulateTransaction,
SignVote,
GetMinimumBalanceForRentExemption,
MinimumLedgerSlot,
@@ -60,6 +62,7 @@ impl RpcRequest {
RpcRequest::GetGenesisHash => "getGenesisHash",
RpcRequest::GetIdentity => "getIdentity",
RpcRequest::GetInflation => "getInflation",
RpcRequest::GetLargestAccounts => "getLargestAccounts",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
@@ -78,6 +81,7 @@ impl RpcRequest {
RpcRequest::RegisterNode => "registerNode",
RpcRequest::RequestAirdrop => "requestAirdrop",
RpcRequest::SendTransaction => "sendTransaction",
RpcRequest::SimulateTransaction => "simulateTransaction",
RpcRequest::SignVote => "signVote",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",

client/src/rpc_response.rs

@@ -108,6 +108,8 @@ pub struct RpcContactInfo {
pub tpu: Option<SocketAddr>,
/// JSON RPC port
pub rpc: Option<SocketAddr>,
/// Software version
pub version: Option<String>,
}
/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
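
A sketch of what the new optional field means for getClusterNodes consumers: older nodes simply omit it, so it deserializes as None (the JSON below is hand-written for illustration):

use solana_client::rpc_response::RpcContactInfo;

fn main() {
    let raw = r#"{"pubkey":"11111111111111111111111111111111",
        "gossip":"127.0.0.1:8001","tpu":null,"rpc":null,"version":"1.0.24"}"#;
    let info: RpcContactInfo = serde_json::from_str(raw).unwrap();
    // A node that does not gossip its version would yield None here.
    assert_eq!(info.version.as_deref(), Some("1.0.24"));
}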

core/Cargo.toml

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.22"
version = "1.0.24"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -43,34 +43,35 @@ regex = "1.3.4"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.46"
solana-budget-program = { path = "../programs/budget", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.22" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.22" }
solana-faucet = { path = "../faucet", version = "1.0.22" }
solana-budget-program = { path = "../programs/budget", version = "1.0.24" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.24" }
solana-faucet = { path = "../faucet", version = "1.0.24" }
ed25519-dalek = "=1.0.0-pre.3"
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-measure = { path = "../measure", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.0.22" }
solana-perf = { path = "../perf", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.22" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
solana-measure = { path = "../measure", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-stake-program = { path = "../programs/stake", version = "1.0.24" }
solana-storage-program = { path = "../programs/storage", version = "1.0.24" }
solana-version = { path = "../version", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.24" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.0.24" }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.22" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.24" }
trees = "0.2.1"
[dev-dependencies]

core/benches/cluster_info.rs

@@ -5,6 +5,7 @@ extern crate test;
use rand::{thread_rng, Rng};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
use solana_ledger::shred::{Shred, NONCE_SHRED_PAYLOAD_SIZE};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::collections::HashMap;
@@ -20,9 +21,8 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
const SHRED_SIZE: usize = 1024;
const NUM_SHREDS: usize = 32;
let shreds = vec![vec![0; SHRED_SIZE]; NUM_SHREDS];
let shreds = vec![vec![0; NONCE_SHRED_PAYLOAD_SIZE]; NUM_SHREDS];
let seeds = vec![[0u8; 32]; NUM_SHREDS];
let mut stakes = HashMap::new();
const NUM_PEERS: usize = 200;

core/benches/shredder.rs

@@ -5,11 +5,11 @@ extern crate test;
use solana_ledger::entry::{create_ticks, Entry};
use solana_ledger::shred::{
max_entries_per_n_shred, max_ticks_per_n_shreds, Shred, Shredder, RECOMMENDED_FEC_RATE,
SIZE_OF_DATA_SHRED_PAYLOAD,
SIZE_OF_NONCE_DATA_SHRED_PAYLOAD,
};
use solana_perf::test_tx;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::signature::Keypair;
use std::sync::Arc;
use test::Bencher;
@@ -29,10 +29,11 @@ fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Ent
#[bench]
fn bench_shredder_ticks(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
// ~1Mb
let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
let num_ticks =
max_ticks_per_n_shreds(1, Some(SIZE_OF_NONCE_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
bencher.iter(|| {
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
@@ -43,10 +44,14 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
#[bench]
fn bench_shredder_large_entries(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
let txs_per_entry = 128;
let num_entries = max_entries_per_n_shred(&make_test_entry(txs_per_entry), num_shreds as u64);
let num_entries = max_entries_per_n_shred(
&make_test_entry(txs_per_entry),
num_shreds as u64,
Some(shred_size),
);
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
// 1Mb
bencher.iter(|| {
@@ -58,10 +63,10 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
#[bench]
fn bench_deshredder(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
// ~10Mb
let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp, 0, 0).unwrap();
let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
@@ -73,7 +78,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
#[bench]
fn bench_deserialize_hdr(bencher: &mut Bencher) {
let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
let data = vec![0; SIZE_OF_NONCE_DATA_SHRED_PAYLOAD];
let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);
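
The shred-count arithmetic used throughout these benches is a plain ceiling division; a standalone restatement (sizes are illustrative, not the real constants):

fn main() {
    let shred_size: usize = 1_000; // stand-in for SIZE_OF_NONCE_DATA_SHRED_PAYLOAD
    let target_bytes: usize = 1_000_000; // the ~1Mb of entries mentioned above
    // Round up so a partly filled final shred is still counted.
    let num_shreds = (target_bytes + shred_size - 1) / shred_size;
    assert_eq!(num_shreds, 1_000);
}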

core/src/broadcast_stage/standard_broadcast_run.rs

@@ -390,7 +390,7 @@ mod test {
)));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut genesis_config = create_genesis_config(10_000).genesis_config;
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot, None) + 1;
let bank0 = Arc::new(Bank::new(&genesis_config));
(
blockstore,
@@ -484,7 +484,11 @@ mod test {
// Interrupting the slot should cause the unfinished_slot and stats to reset
let num_shreds = 1;
assert!(num_shreds < num_shreds_per_slot);
let ticks1 = create_ticks(max_ticks_per_n_shreds(num_shreds), 0, genesis_config.hash());
let ticks1 = create_ticks(
max_ticks_per_n_shreds(num_shreds, None),
0,
genesis_config.hash(),
);
let receive_results = ReceiveResults {
entries: ticks1.clone(),
time_elapsed: Duration::new(2, 0),

core/src/cluster_info.rs

@@ -20,7 +20,8 @@ use crate::{
crds_gossip_error::CrdsGossipError,
crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
crds_value::{
self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, SnapshotHash, Vote, MAX_WALLCLOCK,
self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, SnapshotHash, Version, Vote,
MAX_WALLCLOCK,
},
packet::{Packet, PACKET_DATA_SIZE},
result::{Error, Result},
@@ -336,6 +337,7 @@ impl ClusterInfo {
archivers += 1;
}
let node_version = self.get_node_version(&node.id);
if my_shred_version != 0 && (node.shred_version != 0 && node.shred_version != my_shred_version) {
different_shred_nodes += 1;
None
@@ -351,10 +353,9 @@ impl ClusterInfo {
"none".to_string()
}
}
let ip_addr = node.gossip.ip();
Some(format!(
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
"{:15} {:2}| {:5} | {:44} |{:^15}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
ip_addr.to_string()
} else {
@@ -363,6 +364,11 @@ impl ClusterInfo {
if node.id == my_pubkey { "me" } else { "" }.to_string(),
now.saturating_sub(last_updated),
node.id.to_string(),
if let Some(node_version) = node_version {
node_version.to_string()
} else {
"-".to_string()
},
addr_to_string(&ip_addr, &node.gossip),
addr_to_string(&ip_addr, &node.tpu),
addr_to_string(&ip_addr, &node.tpu_forwards),
@@ -370,7 +376,6 @@ impl ClusterInfo {
addr_to_string(&ip_addr, &node.tvu_forwards),
addr_to_string(&ip_addr, &node.repair),
addr_to_string(&ip_addr, &node.serve_repair),
addr_to_string(&ip_addr, &node.storage_addr),
addr_to_string(&ip_addr, &node.rpc),
addr_to_string(&ip_addr, &node.rpc_pubsub),
node.shred_version,
@@ -381,9 +386,9 @@ impl ClusterInfo {
format!(
"IP Address |Age(ms)| Node identifier \
|Gossip| TPU |TPUfwd| TVU |TVUfwd|Repair|ServeR|Storag| RPC |PubSub|ShredVer\n\
------------------+-------+----------------------------------------------+\
------+------+------+------+------+------+------+------+------+------+--------\n\
| Version |Gossip| TPU |TPUfwd| TVU |TVUfwd|Repair|ServeR| RPC |PubSub|ShredVer\n\
------------------+-------+----------------------------------------------+---------------+\
------+------+------+------+------+------+------+------+------+--------\n\
{}\
Nodes: {}{}{}{}",
nodes.join(""),
@@ -398,7 +403,7 @@ impl ClusterInfo {
} else {
"".to_string()
},
if spy_nodes > 0 {
if different_shred_nodes > 0 {
format!(
"\nNodes with different shred version: {}",
different_shred_nodes
@@ -556,6 +561,16 @@ impl ClusterInfo {
.map(|x| x.value.contact_info().unwrap())
}
pub fn get_node_version(&self, pubkey: &Pubkey) -> Option<solana_version::Version> {
self.gossip
.crds
.table
.get(&CrdsValueLabel::Version(*pubkey))
.map(|x| x.value.version())
.flatten()
.map(|version| version.version.clone())
}
/// all validators that have a valid rpc port regardless of `shred_version`.
pub fn all_rpc_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
@@ -1201,6 +1216,14 @@ impl ClusterInfo {
let mut last_contact_info_trace = timestamp();
let mut adopt_shred_version = obj.read().unwrap().my_data().shred_version == 0;
let recycler = PacketsRecycler::default();
{
let mut obj = obj.write().unwrap();
let message = CrdsValue::new_signed(
CrdsData::Version(Version::new(obj.id())),
&obj.keypair,
);
obj.push_message(message);
}
loop {
let start = timestamp();
thread_mem_usage::datapoint("solana-gossip");
@@ -1722,39 +1745,39 @@ impl ClusterInfo {
.unwrap()
}
pub fn gossip_contact_info(id: &Pubkey, gossip: SocketAddr) -> ContactInfo {
pub fn gossip_contact_info(id: &Pubkey, gossip: SocketAddr, shred_version: u16) -> ContactInfo {
ContactInfo {
id: *id,
gossip,
wallclock: timestamp(),
shred_version,
..ContactInfo::default()
}
}
pub fn spy_contact_info(id: &Pubkey) -> ContactInfo {
let dummy_addr = socketaddr_any!();
Self::gossip_contact_info(id, dummy_addr)
}
/// An alternative to Spy Node that has a valid gossip address and fully participate in Gossip.
pub fn gossip_node(
id: &Pubkey,
gossip_addr: &SocketAddr,
shred_version: u16,
) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let (port, (gossip_socket, ip_echo)) =
Node::get_gossip_port(gossip_addr, VALIDATOR_PORT_RANGE, bind_ip_addr);
let contact_info = Self::gossip_contact_info(id, SocketAddr::new(gossip_addr.ip(), port));
let contact_info =
Self::gossip_contact_info(id, SocketAddr::new(gossip_addr.ip(), port), shred_version);
(contact_info, gossip_socket, Some(ip_echo))
}
/// A Node with dummy ports to spy on gossip via pull requests
pub fn spy_node(id: &Pubkey) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
pub fn spy_node(
id: &Pubkey,
shred_version: u16,
) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let (_, gossip_socket) = bind_in_range(bind_ip_addr, VALIDATOR_PORT_RANGE).unwrap();
let contact_info = Self::spy_contact_info(id);
let contact_info = Self::gossip_contact_info(id, socketaddr_any!(), shred_version);
(contact_info, gossip_socket, None)
}
@@ -2046,10 +2069,10 @@ mod tests {
#[test]
fn test_gossip_node() {
//check that a gossip nodes always show up as spies
let (node, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand());
let (node, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand(), 0);
assert!(ClusterInfo::is_spy_node(&node));
let (node, _, _) =
ClusterInfo::gossip_node(&Pubkey::new_rand(), &"1.1.1.1:1111".parse().unwrap());
ClusterInfo::gossip_node(&Pubkey::new_rand(), &"1.1.1.1:1111".parse().unwrap(), 0);
assert!(ClusterInfo::is_spy_node(&node));
}
@@ -2057,7 +2080,7 @@ mod tests {
fn test_cluster_spy_gossip() {
//check that gossip doesn't try to push to invalid addresses
let node = Node::new_localhost();
let (spy, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand());
let (spy, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand(), 0);
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
node.info,
)));
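
A sketch of the updated spy-node construction, mirroring the test above; shred_version 0 preserves the old "match any cluster" behavior:

use solana_core::cluster_info::ClusterInfo;
use solana_sdk::pubkey::Pubkey;

fn main() {
    // A nonzero shred_version would restrict gossip to a single cluster.
    let (contact_info, _gossip_socket, _ip_echo) =
        ClusterInfo::spy_node(&Pubkey::new_rand(), 0);
    assert!(ClusterInfo::is_spy_node(&contact_info));
}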

core/src/commitment.rs

@@ -162,7 +162,6 @@ impl BlockCommitmentCache {
}
}
#[cfg(test)]
pub(crate) fn set_get_largest_confirmed_root(&mut self, root: Slot) {
self.largest_confirmed_root = root;
}

core/src/crds_value.rs

@@ -71,6 +71,8 @@ pub enum CrdsData {
EpochSlots(EpochSlotIndex, EpochSlots),
SnapshotHashes(SnapshotHash),
AccountsHashes(SnapshotHash),
NewEpochSlotsPlaceholder, // Reserve this enum entry for the v1.1 version of EpochSlots
Version(Version),
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@@ -122,6 +124,8 @@ impl Sanitize for CrdsData {
}
val.sanitize()
}
CrdsData::NewEpochSlotsPlaceholder => Err(SanitizeError::InvalidValue), // Not supported on v1.0
CrdsData::Version(version) => version.sanitize(),
}
}
}
@@ -228,6 +232,33 @@ impl Vote {
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Version {
pub from: Pubkey,
pub wallclock: u64,
pub version: solana_version::Version,
}
impl Sanitize for Version {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
self.from.sanitize()?;
self.version.sanitize()
}
}
impl Version {
pub fn new(from: Pubkey) -> Self {
Self {
from,
wallclock: timestamp(),
version: solana_version::Version::default(),
}
}
}
/// Type of the replicated value
/// These are labels for values in a record that is associated with `Pubkey`
#[derive(PartialEq, Hash, Eq, Clone, Debug)]
@@ -237,6 +268,8 @@ pub enum CrdsValueLabel {
EpochSlots(Pubkey),
SnapshotHashes(Pubkey),
AccountsHashes(Pubkey),
NewEpochSlotsPlaceholder,
Version(Pubkey),
}
impl fmt::Display for CrdsValueLabel {
@@ -247,6 +280,8 @@ impl fmt::Display for CrdsValueLabel {
CrdsValueLabel::EpochSlots(_) => write!(f, "EpochSlots({})", self.pubkey()),
CrdsValueLabel::SnapshotHashes(_) => write!(f, "SnapshotHashes({})", self.pubkey()),
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
CrdsValueLabel::NewEpochSlotsPlaceholder => write!(f, "NewEpochSlotsPlaceholder"),
CrdsValueLabel::Version(_) => write!(f, "Version({})", self.pubkey()),
}
}
}
@@ -259,6 +294,8 @@ impl CrdsValueLabel {
CrdsValueLabel::EpochSlots(p) => *p,
CrdsValueLabel::SnapshotHashes(p) => *p,
CrdsValueLabel::AccountsHashes(p) => *p,
CrdsValueLabel::NewEpochSlotsPlaceholder => Pubkey::default(),
CrdsValueLabel::Version(p) => *p,
}
}
}
@@ -276,7 +313,7 @@ impl CrdsValue {
value.sign(keypair);
value
}
/// Totally unsecure unverfiable wallclock of the node that generated this message
/// Totally unsecure unverifiable wallclock of the node that generated this message
/// Latest wallclock is always picked.
/// This is used to time out push messages.
pub fn wallclock(&self) -> u64 {
@@ -286,6 +323,8 @@ impl CrdsValue {
CrdsData::EpochSlots(_, vote) => vote.wallclock,
CrdsData::SnapshotHashes(hash) => hash.wallclock,
CrdsData::AccountsHashes(hash) => hash.wallclock,
CrdsData::NewEpochSlotsPlaceholder => 0,
CrdsData::Version(version) => version.wallclock,
}
}
pub fn pubkey(&self) -> Pubkey {
@@ -295,6 +334,8 @@ impl CrdsValue {
CrdsData::EpochSlots(_, slots) => slots.from,
CrdsData::SnapshotHashes(hash) => hash.from,
CrdsData::AccountsHashes(hash) => hash.from,
CrdsData::NewEpochSlotsPlaceholder => Pubkey::default(),
CrdsData::Version(version) => version.from,
}
}
pub fn label(&self) -> CrdsValueLabel {
@@ -304,6 +345,8 @@ impl CrdsValue {
CrdsData::EpochSlots(_, _) => CrdsValueLabel::EpochSlots(self.pubkey()),
CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()),
CrdsData::AccountsHashes(_) => CrdsValueLabel::AccountsHashes(self.pubkey()),
CrdsData::NewEpochSlotsPlaceholder => CrdsValueLabel::NewEpochSlotsPlaceholder,
CrdsData::Version(_) => CrdsValueLabel::Version(self.pubkey()),
}
}
pub fn contact_info(&self) -> Option<&ContactInfo> {
@@ -347,6 +390,13 @@ impl CrdsValue {
}
}
pub fn version(&self) -> Option<&Version> {
match &self.data {
CrdsData::Version(version) => Some(version),
_ => None,
}
}
/// Return all the possible labels for a record identified by Pubkey.
pub fn record_labels(key: &Pubkey) -> Vec<CrdsValueLabel> {
let mut labels = vec![
@@ -354,6 +404,7 @@ impl CrdsValue {
CrdsValueLabel::EpochSlots(*key),
CrdsValueLabel::SnapshotHashes(*key),
CrdsValueLabel::AccountsHashes(*key),
CrdsValueLabel::Version(*key),
];
labels.extend((0..MAX_VOTES).map(|ix| CrdsValueLabel::Vote(ix, *key)));
labels
@@ -403,7 +454,7 @@ mod test {
#[test]
fn test_labels() {
let mut hits = [false; 4 + MAX_VOTES as usize];
let mut hits = [false; 5 + MAX_VOTES as usize];
// this method should cover all the possible labels
for v in &CrdsValue::record_labels(&Pubkey::default()) {
match v {
@@ -411,7 +462,9 @@ mod test {
CrdsValueLabel::EpochSlots(_) => hits[1] = true,
CrdsValueLabel::SnapshotHashes(_) => hits[2] = true,
CrdsValueLabel::AccountsHashes(_) => hits[3] = true,
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 4] = true,
CrdsValueLabel::Version(_) => hits[4] = true,
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 5] = true,
CrdsValueLabel::NewEpochSlotsPlaceholder => unreachable!(),
}
}
assert!(hits.iter().all(|x| *x));

core/src/gossip_service.rs

@@ -75,6 +75,7 @@ pub fn discover_cluster(
None,
None,
None,
0,
)
}
@@ -85,9 +86,11 @@ pub fn discover(
find_node_by_pubkey: Option<Pubkey>,
find_node_by_gossip_addr: Option<&SocketAddr>,
my_gossip_addr: Option<&SocketAddr>,
my_shred_version: u16,
) -> std::io::Result<(Vec<ContactInfo>, Vec<ContactInfo>)> {
let exit = Arc::new(AtomicBool::new(false));
let (gossip_service, ip_echo, spy_ref) = make_gossip_node(entrypoint, &exit, my_gossip_addr);
let (gossip_service, ip_echo, spy_ref) =
make_gossip_node(entrypoint, &exit, my_gossip_addr, my_shred_version);
let id = spy_ref.read().unwrap().keypair.pubkey();
info!("Entrypoint: {:?}", entrypoint);
@@ -256,12 +259,13 @@ fn make_gossip_node(
entrypoint: Option<&SocketAddr>,
exit: &Arc<AtomicBool>,
gossip_addr: Option<&SocketAddr>,
shred_version: u16,
) -> (GossipService, Option<TcpListener>, Arc<RwLock<ClusterInfo>>) {
let keypair = Arc::new(Keypair::new());
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr)
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr, shred_version)
} else {
ClusterInfo::spy_node(&keypair.pubkey())
ClusterInfo::spy_node(&keypair.pubkey(), shred_version)
};
let mut cluster_info = ClusterInfo::new(node, keypair);
if let Some(entrypoint) = entrypoint {
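
A sketch of calling the widened discover() (the leading entrypoint/node-count/timeout parameters are not shown in this hunk and are assumed; the address is a placeholder):

use solana_core::gossip_service::discover;
use std::net::SocketAddr;

fn main() -> std::io::Result<()> {
    let entrypoint: SocketAddr = "127.0.0.1:8001".parse().unwrap();
    let (validators, archivers) = discover(
        Some(&entrypoint),
        Some(1),  // assumed: number of nodes to wait for
        Some(30), // assumed: timeout in seconds
        None,     // find_node_by_pubkey
        None,     // find_node_by_gossip_addr
        None,     // my_gossip_addr
        0,        // my_shred_version: 0 matches any cluster
    )?;
    println!("{} validators, {} archivers", validators.len(), archivers.len());
    Ok(())
}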

core/src/ledger_cleanup_service.rs

@@ -3,7 +3,6 @@
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
@@ -30,9 +29,8 @@ pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
// Delay between purges to cooperate with other blockstore users
pub const DEFAULT_DELAY_BETWEEN_PURGES: Duration = Duration::from_millis(500);
pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
@@ -63,6 +61,7 @@ impl LedgerCleanupService {
max_ledger_slots,
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
Some(DEFAULT_DELAY_BETWEEN_PURGES),
) {
match e {
RecvTimeoutError::Disconnected => break,
@@ -78,8 +77,8 @@ impl LedgerCleanupService {
blockstore: &Arc<Blockstore>,
root: Slot,
max_ledger_shreds: u64,
) -> (u64, Slot, Slot) {
let mut shreds = Vec::new();
) -> (bool, Slot, Slot, u64) {
let mut total_slots = Vec::new();
let mut iterate_time = Measure::start("iterate_time");
let mut total_shreds = 0;
let mut first_slot = 0;
@@ -90,33 +89,43 @@ impl LedgerCleanupService {
}
// Not exact since non-full slots will have holes
total_shreds += meta.received;
shreds.push((slot, meta.received));
total_slots.push((slot, meta.received));
if slot > root {
break;
}
}
iterate_time.stop();
info!(
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
max_ledger_shreds,
shreds.len(),
"first_slot={} total_slots={} total_shreds={} max_ledger_shreds={}, {}",
first_slot,
total_slots.len(),
total_shreds,
max_ledger_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
return (0, 0, 0);
return (false, 0, 0, total_shreds);
}
let mut cur_shreds = 0;
let mut lowest_slot_to_clean = shreds[0].0;
for (slot, num_shreds) in shreds.iter().rev() {
cur_shreds += *num_shreds as u64;
if cur_shreds > max_ledger_shreds {
lowest_slot_to_clean = *slot;
let mut num_shreds_to_clean = 0;
let mut lowest_cleanup_slot = total_slots[0].0;
for (slot, num_shreds) in total_slots.iter().rev() {
num_shreds_to_clean += *num_shreds as u64;
if num_shreds_to_clean > max_ledger_shreds {
lowest_cleanup_slot = *slot;
break;
}
}
(cur_shreds, lowest_slot_to_clean, first_slot)
(true, lowest_cleanup_slot, first_slot, total_shreds)
}
fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
Ok(root)
}
fn cleanup_ledger(
@@ -125,68 +134,78 @@ impl LedgerCleanupService {
max_ledger_shreds: u64,
last_purge_slot: &mut u64,
purge_interval: u64,
delay_between_purges: Option<Duration>,
) -> Result<(), RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
let root = Self::receive_new_roots(new_root_receiver)?;
if root - *last_purge_slot <= purge_interval {
return Ok(());
}
if root - *last_purge_slot > purge_interval {
let disk_utilization_pre = blockstore.storage_size();
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: last_root={}, last_purge_slot={}, purge_interval={}, disk_utilization={:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
);
*last_purge_slot = root;
let (slots_to_clean, lowest_cleanup_slot, first_slot, total_shreds) =
Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);
if slots_to_clean {
info!(
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
"purging data from slots {} to {}",
first_slot, lowest_cleanup_slot
);
*last_purge_slot = root;
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
if num_shreds_to_clean > 0 {
debug!(
"cleaning up to: {} shreds: {} first: {}",
lowest_slot_to_clean, num_shreds_to_clean, first_slot
);
loop {
let current_lowest =
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
let purge_complete = Arc::new(AtomicBool::new(false));
let blockstore = blockstore.clone();
let purge_complete1 = purge_complete.clone();
let _t_purge = Builder::new()
.name("solana-ledger-purge".to_string())
.spawn(move || {
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
slot_update_time.stop();
let mut clean_time = Measure::start("ledger_clean");
blockstore.purge_slots(first_slot, Some(current_lowest));
clean_time.stop();
debug!(
"ledger purge {} -> {}: {} {}",
first_slot, current_lowest, slot_update_time, clean_time
let mut purge_time = Measure::start("purge_slots_with_delay");
blockstore.purge_slots_with_delay(
first_slot,
lowest_cleanup_slot,
delay_between_purges,
);
first_slot += DEFAULT_PURGE_BATCH_SIZE;
if current_lowest == lowest_slot_to_clean {
break;
}
thread::sleep(Duration::from_millis(500));
purge_time.stop();
info!("{}", purge_time);
purge_complete1.store(true, Ordering::Relaxed);
})
.unwrap();
// Keep pulling roots off `new_root_receiver` while purging to avoid channel buildup
while !purge_complete.load(Ordering::Relaxed) {
if let Err(err) = Self::receive_new_roots(new_root_receiver) {
debug!("receive_new_roots: {}", err);
}
thread::sleep(Duration::from_secs(1));
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post, total_shreds);
Ok(())
}
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
fn report_disk_metrics(
pre: BlockstoreResult<u64>,
post: BlockstoreResult<u64>,
total_shreds: u64,
) {
if let (Ok(pre), Ok(post)) = (pre, post) {
datapoint_debug!(
datapoint_info!(
"ledger_disk_utilization",
("disk_utilization_pre", pre as i64, i64),
("disk_utilization_post", post as i64, i64),
("disk_utilization_delta", (pre as i64 - post as i64), i64)
("disk_utilization_delta", (pre as i64 - post as i64), i64),
("total_shreds", total_shreds, i64),
);
}
}
@@ -215,8 +234,15 @@ mod tests {
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
.unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
5,
&mut last_purge_slot,
10,
None,
)
.unwrap();
//check that 0-40 don't exist
blockstore
@@ -269,6 +295,7 @@ mod tests {
initial_slots,
&mut last_purge_slot,
10,
None,
)
.unwrap();
time.stop();
@@ -311,6 +338,7 @@ mod tests {
max_ledger_shreds,
&mut next_purge_batch,
10,
None,
)
.unwrap();

core/src/lib.rs

@@ -34,12 +34,14 @@ pub mod packet;
pub mod poh_recorder;
pub mod poh_service;
pub mod recvmmsg;
pub mod repair_response;
pub mod repair_service;
pub mod replay_stage;
mod result;
pub mod retransmit_stage;
pub mod rewards_recorder_service;
pub mod rpc;
pub mod rpc_error;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;

core/src/non_circulating_supply.rs

@@ -8,13 +8,14 @@ pub struct NonCirculatingSupply {
pub accounts: Vec<Pubkey>,
}
pub fn calculate_non_circulating_supply(bank: Arc<Bank>) -> NonCirculatingSupply {
pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSupply {
debug!("Updating Bank supply, epoch: {}", bank.epoch());
let mut non_circulating_accounts_set: HashSet<Pubkey> = HashSet::new();
for key in non_circulating_accounts() {
non_circulating_accounts_set.insert(key);
}
let withdraw_authority_list = withdraw_authority();
let clock = bank.clock();
let stake_accounts = bank.get_program_accounts(Some(&solana_stake_program::id()));
@@ -23,14 +24,14 @@ pub fn calculate_non_circulating_supply(bank: Arc<Bank>) -> NonCirculatingSupply
match stake_account {
StakeState::Initialized(meta) => {
if meta.lockup.is_in_force(&clock, &HashSet::default())
|| meta.authorized.withdrawer == withdraw_authority()
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
{
non_circulating_accounts_set.insert(*pubkey);
}
}
StakeState::Stake(meta, _stake) => {
if meta.lockup.is_in_force(&clock, &HashSet::default())
|| meta.authorized.withdrawer == withdraw_authority()
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
{
non_circulating_accounts_set.insert(*pubkey);
}
@@ -60,11 +61,6 @@ solana_sdk::pubkeys!(
"14FUT96s9swbmH7ZjpDvfEDywnAYy9zaNhv4xvezySGu",
"HbZ5FfmKWNHC7uwk6TF1hVi6TCs7dtYfdjEcuPGgzFAg",
"C7C8odR8oashR5Feyrq2tJKaXL18id1dSj2zbkDGL2C2",
"APnSR52EC1eH676m7qTBHUJ1nrGpHYpV7XKPxgRDD8gX",
"9ibqedFVnu5k4wo1mJRbH6KJ5HLBCyjpA9omPYkDeeT5",
"FopBKzQkG9pkyQqjdMFBLMQ995pSkjy83ziR4aism4c6",
"AiUHvJhTbMCcgFE2K26Ea9qCe74y3sFwqUt38iD5sfoR",
"3DndE3W53QdHSfBJiSJgzDKGvKJBoQLVmRHvy5LtqYfG",
"Eyr9P5XsjK2NUKNCnfu39eqpGoiLFgVAv1LSQgMZCwiQ",
"DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ",
"CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S",
@@ -78,13 +74,17 @@ solana_sdk::pubkeys!(
"CHmdL15akDcJgBkY6BP3hzs98Dqr6wbdDC5p8odvtSbq",
"FR84wZQy3Y3j2gWz6pgETUiUoJtreMEuWfbg6573UCj9",
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
"3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
]
);
// Withdraw authority for autostaked accounts on mainnet-beta
solana_sdk::pubkeys!(
withdraw_authority,
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK"
[
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
"3FFaheyqtyAXZSYxDzsr5CVKvJuvZD1WE1VEsBtDbRqB",
]
);
#[cfg(test)]
@@ -150,7 +150,7 @@ mod tests {
(num_genesis_accounts + num_non_circulating_accounts + num_stake_accounts) * balance
);
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
(num_non_circulating_accounts + num_stake_accounts) * balance
@@ -165,7 +165,7 @@ mod tests {
for key in non_circulating_accounts {
bank.store_account(&key, &Account::new(new_balance, 0, &Pubkey::default()));
}
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
(num_non_circulating_accounts * new_balance) + (num_stake_accounts * balance)
@@ -180,7 +180,7 @@ mod tests {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(bank.epoch(), 1);
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
num_non_circulating_accounts * new_balance

129
core/src/repair_response.rs Normal file
View File

@@ -0,0 +1,129 @@
use solana_ledger::{
blockstore::Blockstore,
shred::{Nonce, Shred, SIZE_OF_NONCE},
};
use solana_perf::packet::limited_deserialize;
use solana_sdk::{clock::Slot, packet::Packet};
use std::{io, net::SocketAddr};
pub fn repair_response_packet(
blockstore: &Blockstore,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
nonce: Option<Nonce>,
) -> Option<Packet> {
if Shred::is_nonce_unlocked(slot) && nonce.is_none()
|| !Shred::is_nonce_unlocked(slot) && nonce.is_some()
{
return None;
}
let shred = blockstore
.get_data_shred(slot, shred_index)
.expect("Blockstore could not get data shred");
shred.map(|shred| repair_response_packet_from_shred(slot, shred, dest, nonce))
}
pub fn repair_response_packet_from_shred(
slot: Slot,
shred: Vec<u8>,
dest: &SocketAddr,
nonce: Option<Nonce>,
) -> Packet {
let size_of_nonce = {
if Shred::is_nonce_unlocked(slot) {
assert!(nonce.is_some());
SIZE_OF_NONCE
} else {
assert!(nonce.is_none());
0
}
};
let mut packet = Packet::default();
packet.meta.size = shred.len() + size_of_nonce;
packet.meta.set_addr(dest);
packet.data[..shred.len()].copy_from_slice(&shred);
let mut wr = io::Cursor::new(&mut packet.data[shred.len()..]);
if let Some(nonce) = nonce {
bincode::serialize_into(&mut wr, &nonce).expect("Buffer not large enough to fit nonce");
}
packet
}
pub fn nonce(buf: &[u8]) -> Option<Nonce> {
if buf.len() < SIZE_OF_NONCE {
None
} else {
limited_deserialize(&buf[buf.len() - SIZE_OF_NONCE..]).ok()
}
}
#[cfg(test)]
mod test {
use super::*;
use solana_ledger::{
shred::{Shred, Shredder, UNLOCK_NONCE_SLOT},
sigverify_shreds::verify_shred_cpu,
};
use solana_sdk::signature::{Keypair, Signer};
use std::{
collections::HashMap,
net::{IpAddr, Ipv4Addr},
};
fn run_test_sigverify_shred_cpu_repair(slot: Slot) {
solana_logger::setup();
let mut shred = Shred::new_from_data(
slot,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
assert_eq!(shred.slot(), slot);
let keypair = Keypair::new();
Shredder::sign_shred(&keypair, &mut shred);
trace!("signature {}", shred.common_header.signature);
let nonce = if Shred::is_nonce_unlocked(slot) {
Some(9)
} else {
None
};
let mut packet = repair_response_packet_from_shred(
slot,
shred.payload,
&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080),
nonce,
);
packet.meta.repair = true;
let leader_slots = [(slot, keypair.pubkey().to_bytes())]
.iter()
.cloned()
.collect();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, Some(1));
let wrong_keypair = Keypair::new();
let leader_slots = [(slot, wrong_keypair.pubkey().to_bytes())]
.iter()
.cloned()
.collect();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, Some(0));
let leader_slots = HashMap::new();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, None);
}
#[test]
fn test_sigverify_shred_cpu_repair() {
run_test_sigverify_shred_cpu_repair(UNLOCK_NONCE_SLOT);
run_test_sigverify_shred_cpu_repair(UNLOCK_NONCE_SLOT + 1);
}
}

View File

@@ -20,9 +20,31 @@ use std::{
sync::{Arc, RwLock},
thread::sleep,
thread::{self, Builder, JoinHandle},
time::Duration,
time::{Duration, Instant},
};
#[derive(Default)]
pub struct RepairStatsGroup {
pub count: u64,
pub min: u64,
pub max: u64,
}
impl RepairStatsGroup {
pub fn update(&mut self, slot: u64) {
self.count += 1;
self.min = std::cmp::min(self.min, slot);
self.max = std::cmp::max(self.max, slot);
}
}
#[derive(Default)]
pub struct RepairStats {
pub shred: RepairStatsGroup,
pub highest_shred: RepairStatsGroup,
pub orphan: RepairStatsGroup,
}
pub const MAX_REPAIR_LENGTH: usize = 512;
pub const REPAIR_MS: u64 = 100;
pub const MAX_ORPHANS: usize = 5;
@@ -107,6 +129,8 @@ impl RepairService {
cluster_info,
);
}
let mut repair_stats = RepairStats::default();
let mut last_stats = Instant::now();
loop {
if exit.load(Ordering::Relaxed) {
break;
@@ -144,22 +168,34 @@ impl RepairService {
};
if let Ok(repairs) = repairs {
let reqs: Vec<_> = repairs
.into_iter()
.filter_map(|repair_request| {
serve_repair
.repair_request(&repair_request)
.map(|result| (result, repair_request))
.ok()
})
.collect();
for ((to, req), _) in reqs {
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
info!("{} repair req send_to({}) error {:?}", id, to, e);
0
});
repairs.into_iter().for_each(|repair_request| {
if let Ok((to, req)) =
serve_repair.repair_request(&repair_request, &mut repair_stats)
{
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
info!("{} repair req send_to({}) error {:?}", id, to, e);
0
});
}
});
}
if last_stats.elapsed().as_secs() > 1 {
let repair_total = repair_stats.shred.count
+ repair_stats.highest_shred.count
+ repair_stats.orphan.count;
if repair_total > 0 {
datapoint_info!(
"serve_repair-repair",
("repair-total", repair_total, i64),
("shred-count", repair_stats.shred.count, i64),
("highest-shred-count", repair_stats.highest_shred.count, i64),
("orphan-count", repair_stats.orphan.count, i64),
("repair-highest-slot", repair_stats.highest_shred.max, i64),
("repair-orphan", repair_stats.orphan.max, i64),
);
}
repair_stats = RepairStats::default();
last_stats = Instant::now();
}
sleep(Duration::from_millis(REPAIR_MS));
}
@@ -565,7 +601,7 @@ mod test {
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let slots: Vec<u64> = vec![1, 3, 5, 7, 8];
let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
let num_entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
for (mut slot_shreds, _) in shreds.into_iter() {

View File

@@ -1738,6 +1738,7 @@ pub(crate) mod tests {
ShredCommonHeader::default(),
data_header,
CodingShredHeader::default(),
PACKET_DATA_SIZE,
);
bincode::serialize_into(
&mut shred.payload[SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER..],

View File

@@ -6,15 +6,18 @@ use crate::{
contact_info::ContactInfo,
non_circulating_supply::calculate_non_circulating_supply,
packet::PACKET_DATA_SIZE,
rpc_error::RpcCustomError,
storage_stage::StorageState,
validator::ValidatorExit,
};
use bincode::serialize;
use jsonrpc_core::{Error, ErrorCode, Metadata, Result};
use jsonrpc_core::{Error, Metadata, Result};
use jsonrpc_derive::rpc;
use solana_client::rpc_response::*;
use solana_client::{rpc_config::*, rpc_response::*};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_ledger::{
bank_forks::BankForks, blockstore::Blockstore, blockstore_db::BlockstoreError,
};
use solana_runtime::{accounts::AccountAddressFilter, bank::Bank};
use solana_sdk::{
clock::{Slot, UnixTimestamp},
@@ -41,7 +44,6 @@ use std::{
time::{Duration, Instant},
};
const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
const MAX_QUERY_ITEMS: usize = 256;
const MAX_SLOT_RANGE: u64 = 10_000;
const NUM_LARGEST_ACCOUNTS: usize = 20;
@@ -62,15 +64,6 @@ pub struct JsonRpcConfig {
pub faucet_addr: Option<SocketAddr>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureStatusConfig {
pub search_transaction_history: Option<bool>,
// DEPRECATED
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
#[derive(Clone)]
pub struct JsonRpcRequestProcessor {
bank_forks: Arc<RwLock<BankForks>>,
@@ -100,18 +93,13 @@ impl JsonRpcRequestProcessor {
.unwrap()
.largest_confirmed_root();
debug!("RPC using block: {:?}", cluster_root);
r_bank_forks
.get(cluster_root)
.cloned()
.ok_or_else(|| Error {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_0),
message: format!(
"Cluster largest_confirmed_root {} does not exist on node. Node root: {}",
cluster_root,
r_bank_forks.root(),
),
data: None,
})
r_bank_forks.get(cluster_root).cloned().ok_or_else(|| {
RpcCustomError::NonexistentClusterRoot {
cluster_root,
node_root: r_bank_forks.root(),
}
.into()
})
}
}
@@ -284,28 +272,36 @@ impl JsonRpcRequestProcessor {
fn get_largest_accounts(
&self,
commitment: Option<CommitmentConfig>,
config: Option<RpcLargestAccountsConfig>,
) -> RpcResponse<Vec<RpcAccountBalance>> {
let bank = self.bank(commitment)?;
let config = config.unwrap_or_default();
let bank = self.bank(config.commitment)?;
let (addresses, address_filter) = if let Some(filter) = config.filter {
let non_circulating_supply = calculate_non_circulating_supply(&bank);
let addresses = non_circulating_supply.accounts.into_iter().collect();
let address_filter = match filter {
RpcLargestAccountsFilter::Circulating => AccountAddressFilter::Exclude,
RpcLargestAccountsFilter::NonCirculating => AccountAddressFilter::Include,
};
(addresses, address_filter)
} else {
(HashSet::new(), AccountAddressFilter::Exclude)
};
new_response(
&bank,
bank.get_largest_accounts(
NUM_LARGEST_ACCOUNTS,
&HashSet::new(),
AccountAddressFilter::Exclude,
)
.into_iter()
.map(|(address, lamports)| RpcAccountBalance {
address: address.to_string(),
lamports,
})
.collect(),
bank.get_largest_accounts(NUM_LARGEST_ACCOUNTS, &addresses, address_filter)
.into_iter()
.map(|(address, lamports)| RpcAccountBalance {
address: address.to_string(),
lamports,
})
.collect(),
)
}
fn get_supply(&self, commitment: Option<CommitmentConfig>) -> RpcResponse<RpcSupply> {
let bank = self.bank(commitment)?;
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
let total_supply = bank.capitalization();
new_response(
&bank,
@@ -421,6 +417,29 @@ impl JsonRpcRequestProcessor {
}
}
fn check_slot_cleaned_up<T>(
&self,
result: &std::result::Result<T, BlockstoreError>,
slot: Slot,
) -> Result<()>
where
T: std::fmt::Debug,
{
if result.is_err() {
if let BlockstoreError::SlotCleanedUp = result.as_ref().unwrap_err() {
return Err(RpcCustomError::BlockCleanedUp {
slot,
first_available_block: self
.blockstore
.get_first_available_block()
.unwrap_or_default(),
}
.into());
}
}
Ok(())
}
pub fn get_confirmed_block(
&self,
slot: Slot,
@@ -434,7 +453,9 @@ impl JsonRpcRequestProcessor {
.unwrap()
.largest_confirmed_root()
{
Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
let result = self.blockstore.get_confirmed_block(slot, encoding);
self.check_slot_cleaned_up(&result, slot)?;
Ok(result.ok())
} else {
Ok(None)
}
@@ -482,11 +503,9 @@ impl JsonRpcRequestProcessor {
let stakes = HashMap::new();
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
Ok(self
.blockstore
.get_block_time(slot, slot_duration, stakes)
.ok()
.unwrap_or(None))
let result = self.blockstore.get_block_time(slot, slot_duration, stakes);
self.check_slot_cleaned_up(&result, slot)?;
Ok(result.ok().unwrap_or(None))
} else {
Ok(None)
}
@@ -658,11 +677,15 @@ fn get_tpu_addr(cluster_info: &Arc<RwLock<ClusterInfo>>) -> Result<SocketAddr> {
}
fn verify_pubkey(input: String) -> Result<Pubkey> {
input.parse().map_err(|_e| Error::invalid_request())
input
.parse()
.map_err(|e| Error::invalid_params(format!("{:?}", e)))
}
fn verify_signature(input: &str) -> Result<Signature> {
input.parse().map_err(|_e| Error::invalid_request())
input
.parse()
.map_err(|e| Error::invalid_params(format!("{:?}", e)))
}
#[derive(Clone)]
@@ -821,7 +844,7 @@ pub trait RpcSol {
fn get_largest_accounts(
&self,
meta: Self::Metadata,
commitment: Option<CommitmentConfig>,
config: Option<RpcLargestAccountsConfig>,
) -> RpcResponse<Vec<RpcAccountBalance>>;
#[rpc(meta, name = "getSupply")]
@@ -843,6 +866,14 @@ pub trait RpcSol {
#[rpc(meta, name = "sendTransaction")]
fn send_transaction(&self, meta: Self::Metadata, data: String) -> Result<String>;
#[rpc(meta, name = "simulateTransaction")]
fn simulate_transaction(
&self,
meta: Self::Metadata,
data: String,
config: Option<RpcSimulateTransactionConfig>,
) -> RpcResponse<TransactionStatus>;
#[rpc(meta, name = "getSlotLeader")]
fn get_slot_leader(
&self,
@@ -1046,6 +1077,9 @@ impl RpcSol for RpcSolImpl {
gossip: Some(contact_info.gossip),
tpu: valid_address_or_none(&contact_info.tpu),
rpc: valid_address_or_none(&contact_info.rpc),
version: cluster_info
.get_node_version(&contact_info.id)
.map(|v| v.to_string()),
})
} else {
None // Exclude spy nodes
@@ -1235,13 +1269,13 @@ impl RpcSol for RpcSolImpl {
fn get_largest_accounts(
&self,
meta: Self::Metadata,
commitment: Option<CommitmentConfig>,
config: Option<RpcLargestAccountsConfig>,
) -> RpcResponse<Vec<RpcAccountBalance>> {
debug!("get_largest_accounts rpc request received");
meta.request_processor
.read()
.unwrap()
.get_largest_accounts(commitment)
.get_largest_accounts(config)
}
fn get_supply(
@@ -1336,41 +1370,67 @@ impl RpcSol for RpcSolImpl {
}
fn send_transaction(&self, meta: Self::Metadata, data: String) -> Result<String> {
let data = bs58::decode(data).into_vec().unwrap();
if data.len() >= PACKET_DATA_SIZE {
info!(
"send_transaction: transaction too large: {} bytes (max: {} bytes)",
data.len(),
PACKET_DATA_SIZE
);
return Err(Error::invalid_request());
}
let tx: Transaction = bincode::config()
.limit(PACKET_DATA_SIZE as u64)
.deserialize(&data)
.map_err(|err| {
info!("send_transaction: deserialize error: {:?}", err);
Error::invalid_request()
})?;
let (wire_transaction, transaction) = deserialize_bs58_transaction(data)?;
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let tpu_addr = get_tpu_addr(&meta.cluster_info)?;
trace!("send_transaction: leader is {:?}", &tpu_addr);
transactions_socket
.send_to(&data, tpu_addr)
.send_to(&wire_transaction, tpu_addr)
.map_err(|err| {
info!("send_transaction: send_to error: {:?}", err);
Error::internal_error()
})?;
let signature = tx.signatures[0].to_string();
let signature = transaction.signatures[0].to_string();
trace!(
"send_transaction: sent {} bytes, signature={}",
data.len(),
wire_transaction.len(),
signature
);
Ok(signature)
}
fn simulate_transaction(
&self,
meta: Self::Metadata,
data: String,
config: Option<RpcSimulateTransactionConfig>,
) -> RpcResponse<TransactionStatus> {
let (_, transaction) = deserialize_bs58_transaction(data)?;
let config = config.unwrap_or(RpcSimulateTransactionConfig { sig_verify: false });
let bank = &*meta.request_processor.read().unwrap().bank(None)?;
assert!(bank.is_frozen());
let mut result = if config.sig_verify {
transaction.verify()
} else {
Ok(())
};
if result.is_ok() {
let transactions = [transaction];
let batch = bank.prepare_batch(&transactions, None);
let (
_loaded_accounts,
executed,
_retryable_transactions,
_transaction_count,
_signature_count,
) = bank.load_and_execute_transactions(&batch, solana_sdk::clock::MAX_PROCESSING_AGE);
result = executed[0].0.clone();
}
new_response(
&bank,
TransactionStatus {
slot: bank.slot(),
confirmations: Some(0),
status: result.clone(),
err: result.err(),
},
)
}
fn get_slot_leader(
&self,
meta: Self::Metadata,
@@ -1448,7 +1508,7 @@ impl RpcSol for RpcSolImpl {
fn get_version(&self, _: Self::Metadata) -> Result<RpcVersionInfo> {
Ok(RpcVersionInfo {
solana_core: solana_clap_utils::version!().to_string(),
solana_core: solana_version::Version::default().to_string(),
})
}
@@ -1540,6 +1600,29 @@ impl RpcSol for RpcSolImpl {
}
}
fn deserialize_bs58_transaction(bs58_transaction: String) -> Result<(Vec<u8>, Transaction)> {
let wire_transaction = bs58::decode(bs58_transaction)
.into_vec()
.map_err(|e| Error::invalid_params(format!("{:?}", e)))?;
if wire_transaction.len() >= PACKET_DATA_SIZE {
let err = format!(
"transaction too large: {} bytes (max: {} bytes)",
wire_transaction.len(),
PACKET_DATA_SIZE
);
info!("{}", err);
return Err(Error::invalid_params(&err));
}
bincode::config()
.limit(PACKET_DATA_SIZE as u64)
.deserialize(&wire_transaction)
.map_err(|err| {
info!("transaction deserialize error: {:?}", err);
Error::invalid_params(&err.to_string())
})
.map(|transaction| (wire_transaction, transaction))
}
#[cfg(test)]
pub mod tests {
use super::*;
@@ -1547,10 +1630,11 @@ pub mod tests {
commitment::BlockCommitment,
contact_info::ContactInfo,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
non_circulating_supply::non_circulating_accounts,
replay_stage::tests::create_test_transactions_and_populate_blockstore,
};
use bincode::deserialize;
use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
use jsonrpc_core::{ErrorCode, MetaIoHandler, Output, Response, Value};
use solana_ledger::{
blockstore::entries_to_test_shreds, blockstore_processor::fill_blockstore_slot_with_ticks,
entry::next_entry_mut, get_tmp_ledger_path,
@@ -1696,6 +1780,9 @@ pub mod tests {
let blockhash = bank.confirmed_last_blockhash().0;
let tx = system_transaction::transfer(&alice, pubkey, 20, blockhash);
bank.process_transaction(&tx).expect("process transaction");
let tx =
system_transaction::transfer(&alice, &non_circulating_accounts()[0], 20, blockhash);
bank.process_transaction(&tx).expect("process transaction");
let tx = system_transaction::transfer(&alice, pubkey, std::u64::MAX, blockhash);
let _ = bank.process_transaction(&tx);
@@ -1815,7 +1902,7 @@ pub mod tests {
.expect("actual response deserialization");
let expected = format!(
r#"{{"jsonrpc":"2.0","result":[{{"pubkey": "{}", "gossip": "127.0.0.1:1235", "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:{}"}}],"id":1}}"#,
r#"{{"jsonrpc":"2.0","result":[{{"pubkey": "{}", "gossip": "127.0.0.1:1235", "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:{}", "version": null}}],"id":1}}"#,
leader_pubkey,
rpc_port::DEFAULT_RPC_PORT
);
@@ -1852,7 +1939,7 @@ pub mod tests {
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}}"#);
let res = io.handle_request_sync(&req, meta);
let expected = format!(r#"{{"jsonrpc":"2.0","result":3,"id":1}}"#);
let expected = format!(r#"{{"jsonrpc":"2.0","result":4,"id":1}}"#);
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
@@ -1900,6 +1987,31 @@ pub mod tests {
assert!(supply >= TEST_MINT_LAMPORTS);
}
#[test]
fn test_get_supply() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getSupply"}}"#);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let supply: RpcSupply = serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
assert_eq!(supply.non_circulating, 20);
assert!(supply.circulating >= TEST_MINT_LAMPORTS);
assert!(supply.total >= TEST_MINT_LAMPORTS + 20);
let expected_accounts: Vec<String> = non_circulating_accounts()
.iter()
.map(|pubkey| pubkey.to_string())
.collect();
assert_eq!(
supply.non_circulating_accounts.len(),
expected_accounts.len()
);
for address in supply.non_circulating_accounts {
assert!(expected_accounts.contains(&address));
}
}
#[test]
fn test_get_largest_accounts() {
let bob_pubkey = Pubkey::new_rand();
@@ -1912,7 +2024,7 @@ pub mod tests {
let largest_accounts: Vec<RpcAccountBalance> =
serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
assert_eq!(largest_accounts.len(), 18);
assert_eq!(largest_accounts.len(), 19);
// Get Alice balance
let req = format!(
@@ -1933,7 +2045,7 @@ pub mod tests {
r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#,
bob_pubkey
);
let res = io.handle_request_sync(&req, meta);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let bob_balance: u64 = serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
@@ -1941,6 +2053,26 @@ pub mod tests {
address: bob_pubkey.to_string(),
lamports: bob_balance,
}));
// Test Circulating/NonCirculating Filter
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{{"filter":"circulating"}}]}}"#
);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let largest_accounts: Vec<RpcAccountBalance> =
serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
assert_eq!(largest_accounts.len(), 18);
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{{"filter":"nonCirculating"}}]}}"#
);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let largest_accounts: Vec<RpcAccountBalance> =
serde_json::from_value(json["result"]["value"].clone())
.expect("actual response deserialization");
assert_eq!(largest_accounts.len(), 1);
}
#[test]
@@ -2151,6 +2283,133 @@ pub mod tests {
assert_eq!(expected, result);
}
#[test]
fn test_rpc_simulate_transaction() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler {
io,
meta,
blockhash,
alice,
bank,
..
} = start_rpc_handler_with_tx(&bob_pubkey);
let mut tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash);
let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string();
tx.signatures[0] = Signature::default();
let tx_badsig_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string();
bank.freeze(); // Ensure the root bank is frozen, `start_rpc_handler_with_tx()` doesn't do this
// Good signature with sigVerify=true
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#,
tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot": 0,"status":{"Ok":null},"err":null}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
// Bad signature with sigVerify=true
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#,
tx_badsig_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot":0,"status":{"Err":"SignatureFailure"},"err":"SignatureFailure"}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
// Bad signature with sigVerify=false
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": false}}]}}"#,
tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot": 0,"status":{"Ok":null},"err":null}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
// Bad signature with default sigVerify setting (false)
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}"]}}"#,
tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot": 0,"status":{"Ok":null},"err":null}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
}
#[test]
#[should_panic]
fn test_rpc_simulate_transaction_panic_on_unfrozen_bank() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler {
io,
meta,
blockhash,
alice,
bank,
..
} = start_rpc_handler_with_tx(&bob_pubkey);
let tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash);
let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string();
assert!(!bank.is_frozen());
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#,
tx_serialized_encoded,
);
// should panic because `bank` is not frozen
let _ = io.handle_request_sync(&req, meta.clone());
}
#[test]
fn test_rpc_confirm_tx() {
let bob_pubkey = Pubkey::new_rand();
@@ -2469,14 +2728,10 @@ pub mod tests {
};
let req = r#"{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["37u9WtQpcm6ULa3Vmu7ySnANv"]}"#;
let res = io.handle_request_sync(req, meta.clone());
let expected =
r#"{"jsonrpc":"2.0","error":{"code":-32600,"message":"Invalid request"},"id":1}"#;
let expected: Response =
serde_json::from_str(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
let res = io.handle_request_sync(req, meta);
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let error = &json["error"];
assert_eq!(error["code"], ErrorCode::InvalidParams.code());
}
#[test]
@@ -2497,7 +2752,7 @@ pub mod tests {
let bad_pubkey = "a1b2c3d4";
assert_eq!(
verify_pubkey(bad_pubkey.to_string()),
Err(Error::invalid_request())
Err(Error::invalid_params("WrongSize"))
);
}
@@ -2511,7 +2766,7 @@ pub mod tests {
let bad_signature = "a1b2c3d4";
assert_eq!(
verify_signature(&bad_signature.to_string()),
Err(Error::invalid_request())
Err(Error::invalid_params("WrongSize"))
);
}
@@ -2617,7 +2872,7 @@ pub mod tests {
let expected = json!({
"jsonrpc": "2.0",
"result": {
"solana-core": solana_clap_utils::version!().to_string()
"solana-core": solana_version::version!().to_string()
},
"id": 1
});

45
core/src/rpc_error.rs Normal file
View File

@@ -0,0 +1,45 @@
use jsonrpc_core::{Error, ErrorCode};
use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
pub enum RpcCustomError {
NonexistentClusterRoot {
cluster_root: Slot,
node_root: Slot,
},
BlockCleanedUp {
slot: Slot,
first_available_block: Slot,
},
}
impl From<RpcCustomError> for Error {
fn from(e: RpcCustomError) -> Self {
match e {
RpcCustomError::NonexistentClusterRoot {
cluster_root,
node_root,
} => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_0),
message: format!(
"Cluster largest_confirmed_root {} does not exist on node. Node root: {}",
cluster_root, node_root,
),
data: None,
},
RpcCustomError::BlockCleanedUp {
slot,
first_available_block,
} => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_1),
message: format!(
"Block {} cleaned up, does not exist on node. First available block: {}",
slot, first_available_block,
),
data: None,
},
}
}
}

View File

@@ -15,7 +15,7 @@ use solana_ledger::{
blockstore::Blockstore,
snapshot_utils,
};
use solana_sdk::{hash::Hash, pubkey::Pubkey};
use solana_sdk::{hash::Hash, native_token::lamports_to_sol, pubkey::Pubkey};
use std::{
collections::HashSet,
net::SocketAddr,
@@ -44,6 +44,7 @@ struct RpcRequestMiddleware {
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<RwLock<ClusterInfo>>,
trusted_validators: Option<HashSet<Pubkey>>,
bank_forks: Arc<RwLock<BankForks>>,
}
impl RpcRequestMiddleware {
@@ -52,6 +53,7 @@ impl RpcRequestMiddleware {
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<RwLock<ClusterInfo>>,
trusted_validators: Option<HashSet<Pubkey>>,
bank_forks: Arc<RwLock<BankForks>>,
) -> Self {
Self {
ledger_path,
@@ -60,6 +62,7 @@ impl RpcRequestMiddleware {
snapshot_config,
cluster_info,
trusted_validators,
bank_forks,
}
}
@@ -85,7 +88,7 @@ impl RpcRequestMiddleware {
.unwrap()
}
fn is_get_path(&self, path: &str) -> bool {
fn is_file_get_path(&self, path: &str) -> bool {
match path {
"/genesis.tar.bz2" => true,
_ => {
@@ -98,7 +101,7 @@ impl RpcRequestMiddleware {
}
}
fn get(&self, path: &str) -> RequestMiddlewareAction {
fn process_file_get(&self, path: &str) -> RequestMiddlewareAction {
let filename = self.ledger_path.join(
path.split_at(1).1, // Drop leading '/' from path
);
@@ -202,8 +205,19 @@ impl RequestMiddleware for RpcRequestMiddleware {
};
}
}
if self.is_get_path(request.uri().path()) {
self.get(request.uri().path())
if let Some(result) = process_rest(&self.bank_forks, request.uri().path()) {
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
response: Box::new(jsonrpc_core::futures::future::ok(
hyper::Response::builder()
.status(hyper::StatusCode::OK)
.body(hyper::Body::from(result))
.unwrap(),
)),
}
} else if self.is_file_get_path(request.uri().path()) {
self.process_file_get(request.uri().path())
} else if request.uri().path() == "/health" {
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
@@ -223,6 +237,29 @@ impl RequestMiddleware for RpcRequestMiddleware {
}
}
fn process_rest(bank_forks: &Arc<RwLock<BankForks>>, path: &str) -> Option<String> {
match path {
"/v0/circulating-supply" => {
let r_bank_forks = bank_forks.read().unwrap();
let bank = r_bank_forks.root_bank();
let total_supply = bank.capitalization();
let non_circulating_supply =
crate::non_circulating_supply::calculate_non_circulating_supply(&bank).lamports;
Some(format!(
"{}",
lamports_to_sol(total_supply - non_circulating_supply)
))
}
"/v0/total-supply" => {
let r_bank_forks = bank_forks.read().unwrap();
let bank = r_bank_forks.root_bank();
let total_supply = bank.capitalization();
Some(format!("{}", lamports_to_sol(total_supply)))
}
_ => None,
}
}
impl JsonRpcService {
#[allow(clippy::too_many_arguments)]
pub fn new(
@@ -243,7 +280,7 @@ impl JsonRpcService {
info!("rpc configuration: {:?}", config);
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
config,
bank_forks,
bank_forks.clone(),
block_commitment_cache,
blockstore,
storage_state,
@@ -268,6 +305,7 @@ impl JsonRpcService {
snapshot_config,
cluster_info.clone(),
trusted_validators,
bank_forks.clone(),
);
let server = ServerBuilder::with_meta_extractor(
io,
@@ -277,7 +315,7 @@ impl JsonRpcService {
genesis_hash,
},
)
.threads(4)
.threads(num_cpus::get())
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
@@ -397,13 +435,41 @@ mod tests {
rpc_service.join().unwrap();
}
fn create_bank_forks() -> Arc<RwLock<BankForks>> {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)))
}
#[test]
fn test_is_get_path() {
fn test_process_rest_api() {
let bank_forks = create_bank_forks();
assert_eq!(None, process_rest(&bank_forks, "not-a-supported-rest-api"));
assert_eq!(
Some("0.000010127".to_string()),
process_rest(&bank_forks, "/v0/circulating-supply")
);
assert_eq!(
Some("0.000010127".to_string()),
process_rest(&bank_forks, "/v0/total-supply")
);
}
#[test]
fn test_is_file_get_path() {
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::default(),
)));
let bank_forks = create_bank_forks();
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
let rrm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
cluster_info.clone(),
None,
bank_forks.clone(),
);
let rrm_with_snapshot_config = RpcRequestMiddleware::new(
PathBuf::from("/"),
Some(SnapshotConfig {
@@ -413,26 +479,28 @@ mod tests {
}),
cluster_info,
None,
bank_forks,
);
assert!(rrm.is_get_path("/genesis.tar.bz2"));
assert!(!rrm.is_get_path("genesis.tar.bz2"));
assert!(rrm.is_file_get_path("/genesis.tar.bz2"));
assert!(!rrm.is_file_get_path("genesis.tar.bz2"));
assert!(!rrm.is_get_path("/snapshot.tar.bz2")); // This is a redirect
assert!(!rrm.is_file_get_path("/snapshot.tar.bz2")); // This is a redirect
assert!(
!rrm.is_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2")
);
assert!(rrm_with_snapshot_config
.is_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"));
assert!(!rrm.is_file_get_path(
"/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
));
assert!(rrm_with_snapshot_config.is_file_get_path(
"/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
));
assert!(!rrm.is_get_path(
assert!(!rrm.is_file_get_path(
"/snapshot-notaslotnumber-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
));
assert!(!rrm.is_get_path("/"));
assert!(!rrm.is_get_path(".."));
assert!(!rrm.is_get_path("🎣"));
assert!(!rrm.is_file_get_path("/"));
assert!(!rrm.is_file_get_path(".."));
assert!(!rrm.is_file_get_path("🎣"));
}
#[test]
@@ -441,7 +509,13 @@ mod tests {
ContactInfo::default(),
)));
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
let rm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
cluster_info.clone(),
None,
create_bank_forks(),
);
assert_eq!(rm.health_check(), "ok");
}
@@ -457,6 +531,7 @@ mod tests {
None,
cluster_info.clone(),
Some(trusted_validators.clone().into_iter().collect()),
create_bank_forks(),
);
// No account hashes for this node or any trusted validators == "behind"

View File

@@ -1,18 +1,21 @@
use crate::packet::limited_deserialize;
use crate::streamer::{PacketReceiver, PacketSender};
use crate::{
cluster_info::{ClusterInfo, ClusterInfoError},
contact_info::ContactInfo,
packet::Packet,
repair_response,
repair_service::RepairStats,
result::{Error, Result},
};
use bincode::serialize;
use rand::{thread_rng, Rng};
use solana_ledger::blockstore::Blockstore;
use solana_ledger::{
blockstore::Blockstore,
shred::{Nonce, Shred},
};
use solana_measure::measure::Measure;
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug};
use solana_perf::packet::{Packets, PacketsRecycler};
use solana_perf::packet::{limited_deserialize, Packets, PacketsRecycler};
use solana_sdk::{
clock::Slot,
signature::{Keypair, Signer},
@@ -28,6 +31,7 @@ use std::{
/// the number of slots to respond with when responding to `Orphan` requests
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
pub const DEFAULT_NONCE: u32 = 42;
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
pub enum RepairType {
@@ -46,12 +50,25 @@ impl RepairType {
}
}
#[derive(Default)]
pub struct ServeRepairStats {
pub total_packets: usize,
pub processed: usize,
pub self_repair: usize,
pub window_index: usize,
pub highest_window_index: usize,
pub orphan: usize,
}
/// Window protocol messages
#[derive(Serialize, Deserialize, Debug)]
enum RepairProtocol {
WindowIndex(ContactInfo, u64, u64),
HighestWindowIndex(ContactInfo, u64, u64),
Orphan(ContactInfo, u64),
WindowIndexWithNonce(ContactInfo, u64, u64, Nonce),
HighestWindowIndexWithNonce(ContactInfo, u64, u64, Nonce),
OrphanWithNonce(ContactInfo, u64, Nonce),
}
#[derive(Clone)]
@@ -95,6 +112,9 @@ impl ServeRepair {
RepairProtocol::WindowIndex(ref from, _, _) => from,
RepairProtocol::HighestWindowIndex(ref from, _, _) => from,
RepairProtocol::Orphan(ref from, _) => from,
RepairProtocol::WindowIndexWithNonce(ref from, _, _, _) => from,
RepairProtocol::HighestWindowIndexWithNonce(ref from, _, _, _) => from,
RepairProtocol::OrphanWithNonce(ref from, _, _) => from,
}
}
@@ -104,6 +124,7 @@ impl ServeRepair {
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
request: RepairProtocol,
stats: &mut ServeRepairStats,
) -> Option<Packets> {
let now = Instant::now();
@@ -111,18 +132,14 @@ impl ServeRepair {
let my_id = me.read().unwrap().keypair.pubkey();
let from = Self::get_repair_sender(&request);
if from.id == my_id {
warn!(
"{}: Ignored received repair request from ME {}",
my_id, from.id,
);
inc_new_counter_debug!("serve_repair-handle-repair--eq", 1);
stats.self_repair += 1;
return None;
}
let (res, label) = {
match &request {
RepairProtocol::WindowIndex(from, slot, shred_index) => {
inc_new_counter_debug!("serve_repair-request-window-index", 1);
stats.window_index += 1;
(
Self::run_window_request(
recycler,
@@ -132,13 +149,14 @@ impl ServeRepair {
&me.read().unwrap().my_info,
*slot,
*shred_index,
None,
),
"WindowIndex",
)
}
RepairProtocol::HighestWindowIndex(_, slot, highest_index) => {
inc_new_counter_debug!("serve_repair-request-highest-window-index", 1);
stats.highest_window_index += 1;
(
Self::run_highest_window_request(
recycler,
@@ -146,12 +164,13 @@ impl ServeRepair {
blockstore,
*slot,
*highest_index,
None,
),
"HighestWindowIndex",
)
}
RepairProtocol::Orphan(_, slot) => {
inc_new_counter_debug!("serve_repair-request-orphan", 1);
stats.orphan += 1;
(
Self::run_orphan(
recycler,
@@ -159,10 +178,55 @@ impl ServeRepair {
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
None,
),
"Orphan",
)
}
RepairProtocol::WindowIndexWithNonce(_, slot, shred_index, nonce) => {
stats.window_index += 1;
(
Self::run_window_request(
recycler,
from,
&from_addr,
blockstore,
&me.read().unwrap().my_info,
*slot,
*shred_index,
Some(*nonce),
),
"WindowIndexWithNonce",
)
}
RepairProtocol::HighestWindowIndexWithNonce(_, slot, highest_index, nonce) => {
stats.highest_window_index += 1;
(
Self::run_highest_window_request(
recycler,
&from_addr,
blockstore,
*slot,
*highest_index,
Some(*nonce),
),
"HighestWindowIndexWithNonce",
)
}
RepairProtocol::OrphanWithNonce(_, slot, nonce) => {
stats.orphan += 1;
(
Self::run_orphan(
recycler,
&from_addr,
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
Some(*nonce),
),
"OrphanWithNonce",
)
}
}
};
@@ -186,6 +250,7 @@ impl ServeRepair {
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
max_packets: &mut usize,
stats: &mut ServeRepairStats,
) -> Result<()> {
//TODO cache connections
let timeout = Duration::new(1, 0);
@@ -202,7 +267,7 @@ impl ServeRepair {
let mut time = Measure::start("repair::handle_packets");
for reqs in reqs_v {
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats);
}
time.stop();
if total_packets >= *max_packets {
@@ -215,6 +280,31 @@ impl ServeRepair {
Ok(())
}
fn report_reset_stats(me: &Arc<RwLock<Self>>, stats: &mut ServeRepairStats) {
if stats.self_repair > 0 {
let my_id = me.read().unwrap().keypair.pubkey();
warn!(
"{}: Ignored received repair requests from ME: {}",
my_id, stats.self_repair,
);
inc_new_counter_debug!("serve_repair-handle-repair--eq", stats.self_repair);
}
debug!(
"repair_listener: total_packets: {} passed: {}",
stats.total_packets, stats.processed
);
inc_new_counter_debug!("serve_repair-request-window-index", stats.window_index);
inc_new_counter_debug!(
"serve_repair-request-highest-window-index",
stats.highest_window_index
);
inc_new_counter_debug!("serve_repair-request-orphan", stats.orphan);
*stats = ServeRepairStats::default();
}
pub fn listen(
me: Arc<RwLock<Self>>,
blockstore: Option<Arc<Blockstore>>,
@@ -228,6 +318,8 @@ impl ServeRepair {
.name("solana-repair-listen".to_string())
.spawn(move || {
let mut max_packets = 1024;
let mut last_print = Instant::now();
let mut stats = ServeRepairStats::default();
loop {
let result = Self::run_listen(
&me,
@@ -236,6 +328,7 @@ impl ServeRepair {
&requests_receiver,
&response_sender,
&mut max_packets,
&mut stats,
);
match result {
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
@@ -244,6 +337,10 @@ impl ServeRepair {
if exit.load(Ordering::Relaxed) {
return;
}
if last_print.elapsed().as_secs() > 2 {
Self::report_reset_stats(&me, &mut stats);
last_print = Instant::now();
}
thread_mem_usage::datapoint("solana-repair-listen");
}
})
@@ -256,6 +353,7 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
packets: Packets,
response_sender: &PacketSender,
stats: &mut ServeRepairStats,
) {
// iter over the packets, collect pulls separately and process everything else
let allocated = thread_mem_usage::Allocatedp::default();
@@ -265,7 +363,9 @@ impl ServeRepair {
limited_deserialize(&packet.data[..packet.meta.size])
.into_iter()
.for_each(|request| {
let rsp = Self::handle_repair(me, recycler, &from_addr, blockstore, request);
stats.processed += 1;
let rsp =
Self::handle_repair(me, recycler, &from_addr, blockstore, request, stats);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
}
@@ -277,27 +377,59 @@ impl ServeRepair {
});
}
fn window_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
let req = RepairProtocol::WindowIndex(self.my_info.clone(), slot, shred_index);
fn window_index_request_bytes(
&self,
slot: Slot,
shred_index: u64,
nonce: Option<Nonce>,
) -> Result<Vec<u8>> {
let req = if let Some(nonce) = nonce {
RepairProtocol::WindowIndexWithNonce(self.my_info.clone(), slot, shred_index, nonce)
} else {
RepairProtocol::WindowIndex(self.my_info.clone(), slot, shred_index)
};
let out = serialize(&req)?;
Ok(out)
}
fn window_highest_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
let req = RepairProtocol::HighestWindowIndex(self.my_info.clone(), slot, shred_index);
fn window_highest_index_request_bytes(
&self,
slot: Slot,
shred_index: u64,
nonce: Option<Nonce>,
) -> Result<Vec<u8>> {
let req = if let Some(nonce) = nonce {
RepairProtocol::HighestWindowIndexWithNonce(
self.my_info.clone(),
slot,
shred_index,
nonce,
)
} else {
RepairProtocol::HighestWindowIndex(self.my_info.clone(), slot, shred_index)
};
let out = serialize(&req)?;
Ok(out)
}
fn orphan_bytes(&self, slot: Slot) -> Result<Vec<u8>> {
let req = RepairProtocol::Orphan(self.my_info.clone(), slot);
fn orphan_bytes(&self, slot: Slot, nonce: Option<Nonce>) -> Result<Vec<u8>> {
let req = if let Some(nonce) = nonce {
RepairProtocol::OrphanWithNonce(self.my_info.clone(), slot, nonce)
} else {
RepairProtocol::Orphan(self.my_info.clone(), slot)
};
let out = serialize(&req)?;
Ok(out)
}
pub fn repair_request(&self, repair_request: &RepairType) -> Result<(SocketAddr, Vec<u8>)> {
pub fn repair_request(
&self,
repair_request: &RepairType,
repair_stats: &mut RepairStats,
) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication and has the desired slot, as indicated
// by a valid tvu port location
let slot = repair_request.slot();
let valid: Vec<_> = self
.cluster_info
.read()
@@ -308,32 +440,38 @@ impl ServeRepair {
}
let n = thread_rng().gen::<usize>() % valid.len();
let addr = valid[n].serve_repair; // send the request to the peer's serve_repair port
let out = self.map_repair_request(repair_request)?;
let nonce = if Shred::is_nonce_unlocked(slot) {
Some(DEFAULT_NONCE)
} else {
None
};
let out = self.map_repair_request(&repair_request, repair_stats, nonce)?;
Ok((addr, out))
}
pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
pub fn map_repair_request(
&self,
repair_request: &RepairType,
repair_stats: &mut RepairStats,
nonce: Option<Nonce>,
) -> Result<Vec<u8>> {
let slot = repair_request.slot();
if Shred::is_nonce_unlocked(slot) {
assert!(nonce.is_some());
}
match repair_request {
RepairType::Shred(slot, shred_index) => {
datapoint_debug!(
"serve_repair-repair",
("repair-slot", *slot, i64),
("repair-ix", *shred_index, i64)
);
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
repair_stats.shred.update(*slot);
Ok(self.window_index_request_bytes(*slot, *shred_index, nonce)?)
}
RepairType::HighestShred(slot, shred_index) => {
datapoint_info!(
"serve_repair-repair_highest",
("repair-highest-slot", *slot, i64),
("repair-highest-ix", *shred_index, i64)
);
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
repair_stats.highest_shred.update(*slot);
Ok(self.window_highest_index_request_bytes(*slot, *shred_index, nonce)?)
}
RepairType::Orphan(slot) => {
datapoint_info!("serve_repair-repair_orphan", ("repair-orphan", *slot, i64));
Ok(self.orphan_bytes(*slot)?)
repair_stats.orphan.update(*slot);
Ok(self.orphan_bytes(*slot, nonce)?)
}
}
}
@@ -346,12 +484,19 @@ impl ServeRepair {
me: &ContactInfo,
slot: Slot,
shred_index: u64,
nonce: Option<Nonce>,
) -> Option<Packets> {
if let Some(blockstore) = blockstore {
// Try to find the requested index in one of the slots
let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
let packet = repair_response::repair_response_packet(
blockstore,
slot,
shred_index,
from_addr,
nonce,
);
if let Ok(Some(packet)) = packet {
if let Some(packet) = packet {
inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
return Some(Packets::new_with_recycler_data(
recycler,
@@ -379,15 +524,20 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
highest_index: u64,
nonce: Option<Nonce>,
) -> Option<Packets> {
let blockstore = blockstore?;
// Try to find the requested index in one of the slots
let meta = blockstore.meta(slot).ok()??;
if meta.received > highest_index {
// meta.received must be at least 1 by this point
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
.ok()??;
let packet = repair_response::repair_response_packet(
blockstore,
slot,
meta.received - 1,
from_addr,
nonce,
)?;
return Some(Packets::new_with_recycler_data(
recycler,
"run_highest_window_request",
@@ -403,6 +553,7 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
mut slot: Slot,
max_responses: usize,
nonce: Option<Nonce>,
) -> Option<Packets> {
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blockstore) = blockstore {
@@ -411,9 +562,19 @@ impl ServeRepair {
if meta.received == 0 {
break;
}
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
if let Ok(Some(packet)) = packet {
let nonce = if Shred::is_nonce_unlocked(slot) {
nonce
} else {
None
};
let packet = repair_response::repair_response_packet(
blockstore,
slot,
meta.received - 1,
from_addr,
nonce,
);
if let Some(packet) = packet {
res.packets.push(packet);
}
if meta.is_parent_set() && res.packets.len() <= max_responses {
@@ -428,41 +589,31 @@ impl ServeRepair {
}
Some(res)
}
fn get_data_shred_as_packet(
blockstore: &Arc<Blockstore>,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
) -> Result<Option<Packet>> {
let data = blockstore.get_data_shred(slot, shred_index)?;
Ok(data.map(|data| {
let mut packet = Packet::default();
packet.meta.size = data.len();
packet.meta.set_addr(dest);
packet.data.copy_from_slice(&data);
packet
}))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::result::Error;
use crate::{repair_response, result::Error};
use solana_ledger::get_tmp_ledger_path;
use solana_ledger::{
blockstore::make_many_slot_entries,
blockstore_processor::fill_blockstore_slot_with_ticks,
shred::{
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
NONCE_SHRED_PAYLOAD_SIZE, UNLOCK_NONCE_SLOT,
},
};
use solana_sdk::{hash::Hash, pubkey::Pubkey, timing::timestamp};
/// test run_window_requestwindow requests respond with the right shred, and do not overrun
#[test]
fn run_highest_window_request() {
fn test_run_highest_window_request() {
run_highest_window_request(UNLOCK_NONCE_SLOT + 3, 3, Some(9));
run_highest_window_request(UNLOCK_NONCE_SLOT, 3, None);
}
/// test run_window_request responds with the right shred, and do not overrun
fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Option<Nonce>) {
let recycler = PacketsRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
@@ -474,41 +625,51 @@ mod tests {
Some(&blockstore),
0,
0,
nonce,
);
assert!(rv.is_none());
let _ = fill_blockstore_slot_with_ticks(
&blockstore,
max_ticks_per_n_shreds(1) + 1,
2,
1,
max_ticks_per_n_shreds(1, None) + 1,
slot,
slot - num_slots + 1,
Hash::default(),
);
let index = 1;
let rv = ServeRepair::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
2,
1,
);
slot,
index,
nonce,
)
.expect("packets");
let rv: Vec<Shred> = rv
.expect("packets")
.packets
.into_iter()
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.filter_map(|b| {
if nonce.is_some() {
assert_eq!(repair_response::nonce(&b.data[..]), nonce);
}
Shred::new_from_serialized_shred(b.data.to_vec()).ok()
})
.collect();
assert!(!rv.is_empty());
let index = blockstore.meta(2).unwrap().unwrap().received - 1;
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
assert_eq!(rv[0].index(), index as u32);
assert_eq!(rv[0].slot(), 2);
assert_eq!(rv[0].slot(), slot);
let rv = ServeRepair::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
2,
slot,
index + 1,
nonce,
);
assert!(rv.is_none());
}
@@ -516,9 +677,14 @@ mod tests {
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
/// test window requests respond with the right shred, and do not overrun
#[test]
fn run_window_request() {
fn test_run_window_request() {
run_window_request(UNLOCK_NONCE_SLOT + 1, Some(9));
run_window_request(UNLOCK_NONCE_SLOT - 3, None);
}
/// test window requests respond with the right shred, and do not overrun
fn run_window_request(slot: Slot, nonce: Option<Nonce>) {
let recycler = PacketsRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
@@ -545,12 +711,13 @@ mod tests {
&socketaddr_any!(),
Some(&blockstore),
&me,
slot,
0,
0,
nonce,
);
assert!(rv.is_none());
let mut common_header = ShredCommonHeader::default();
common_header.slot = 2;
common_header.slot = slot;
common_header.index = 1;
let mut data_header = DataShredHeader::default();
data_header.parent_offset = 1;
@@ -558,30 +725,37 @@ mod tests {
common_header,
data_header,
CodingShredHeader::default(),
NONCE_SHRED_PAYLOAD_SIZE,
);
blockstore
.insert_shreds(vec![shred_info], None, false)
.expect("Expect successful ledger write");
let index = 1;
let rv = ServeRepair::run_window_request(
&recycler,
&me,
&socketaddr_any!(),
Some(&blockstore),
&me,
2,
1,
);
assert!(!rv.is_none());
slot,
index,
nonce,
)
.expect("packets");
let rv: Vec<Shred> = rv
.expect("packets")
.packets
.into_iter()
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.filter_map(|b| {
if nonce.is_some() {
assert_eq!(repair_response::nonce(&b.data[..]), nonce);
}
Shred::new_from_serialized_shred(b.data.to_vec()).ok()
})
.collect();
assert_eq!(rv[0].index(), 1);
assert_eq!(rv[0].slot(), 2);
assert_eq!(rv[0].slot(), slot);
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
@@ -592,7 +766,7 @@ mod tests {
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(me)));
let serve_repair = ServeRepair::new(cluster_info.clone());
let rv = serve_repair.repair_request(&RepairType::Shred(0, 0));
let rv = serve_repair.repair_request(&RepairType::Shred(0, 0), &mut RepairStats::default());
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
let serve_repair_addr = socketaddr!([127, 0, 0, 1], 1243);
@@ -613,7 +787,7 @@ mod tests {
};
cluster_info.write().unwrap().insert_info(nxt.clone());
let rv = serve_repair
.repair_request(&RepairType::Shred(0, 0))
.repair_request(&RepairType::Shred(0, 0), &mut RepairStats::default())
.unwrap();
assert_eq!(nxt.serve_repair, serve_repair_addr);
assert_eq!(rv.0, nxt.serve_repair);
@@ -640,7 +814,7 @@ mod tests {
while !one || !two {
//this randomly picks an option, so eventually it should pick both
let rv = serve_repair
.repair_request(&RepairType::Shred(0, 0))
.repair_request(&RepairType::Shred(0, 0), &mut RepairStats::default())
.unwrap();
if rv.0 == serve_repair_addr {
one = true;
@@ -653,52 +827,85 @@ mod tests {
}
#[test]
fn run_orphan() {
fn test_run_orphan() {
run_orphan(UNLOCK_NONCE_SLOT + 1, 3, Some(9));
// Test where the response will be for some slots <= UNLOCK_NONCE_SLOT,
// and some of the response will be for some slots > UNLOCK_NONCE_SLOT.
// Should not panic.
run_orphan(UNLOCK_NONCE_SLOT, 3, None);
run_orphan(UNLOCK_NONCE_SLOT, 3, Some(9));
}
fn run_orphan(slot: Slot, num_slots: u64, nonce: Option<Nonce>) {
solana_logger::setup();
let recycler = PacketsRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
let rv = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot,
0,
nonce,
);
assert!(rv.is_none());
// Create slots 1, 2, 3 with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(1, 3, 5);
// Create slots [slot, slot + num_slots) with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(slot, num_slots, 5);
blockstore
.insert_shreds(shreds, None, false)
.expect("Expect successful ledger write");
// We don't have slot 4, so we don't know how to service this requeset
let rv =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
// We don't have slot `slot + num_slots`, so we don't know how to service this request
let rv = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot + num_slots,
5,
nonce,
);
assert!(rv.is_none());
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
// for this request
let rv: Vec<_> =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
let expected: Vec<_> = (1..=3)
// For a orphan request for `slot + num_slots - 1`, we should return the highest shreds
// from slots in the range [slot, slot + num_slots - 1]
let rv: Vec<_> = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot + num_slots - 1,
5,
nonce,
)
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
// Verify responses
let expected: Vec<_> = (slot..slot + num_slots)
.rev()
.map(|slot| {
.filter_map(|slot| {
let nonce = if Shred::is_nonce_unlocked(slot) {
nonce
} else {
None
};
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
ServeRepair::get_data_shred_as_packet(
repair_response::repair_response_packet(
&blockstore,
slot,
index,
&socketaddr_any!(),
nonce,
)
.unwrap()
.unwrap()
})
.collect();
assert_eq!(rv, expected)
assert_eq!(rv, expected);
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");

View File

@@ -314,6 +314,13 @@ impl Validator {
);
if config.dev_halt_at_slot.is_some() {
// Simulate a confirmed root to avoid RPC errors with CommitmentmentConfig::max() and
// to ensure RPC endpoints like getConfirmedBlock, which require a confirmed root, work
block_commitment_cache
.write()
.unwrap()
.set_get_largest_confirmed_root(bank_forks.read().unwrap().root());
// Park with the RPC service running, ready for inspection!
warn!("Validator halted");
std::thread::park();

View File

@@ -1,31 +1,37 @@
//! `window_service` handles the data plane incoming shreds, storing them in
//! blockstore and retransmitting where required
//!
use crate::cluster_info::ClusterInfo;
use crate::packet::Packets;
use crate::repair_service::{RepairService, RepairStrategy};
use crate::result::{Error, Result};
use crate::streamer::PacketSender;
use crate::{
cluster_info::ClusterInfo,
packet::Packets,
repair_response,
repair_service::{RepairService, RepairStrategy},
result::{Error, Result},
serve_repair::DEFAULT_NONCE,
streamer::PacketSender,
};
use crossbeam_channel::{
unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Sender as CrossbeamSender,
};
use rayon::iter::IntoParallelRefMutIterator;
use rayon::iter::ParallelIterator;
use rayon::ThreadPool;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blockstore::{self, Blockstore, MAX_DATA_SHREDS_PER_SLOT};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred;
use solana_ledger::{
bank_forks::BankForks,
blockstore::{self, Blockstore, MAX_DATA_SHREDS_PER_SLOT},
leader_schedule_cache::LeaderScheduleCache,
shred::{Nonce, Shred},
};
use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
use solana_rayon_threadlimit::get_thread_count;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::duration_as_ms;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::{Duration, Instant};
use solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms};
use std::{
net::{SocketAddr, UdpSocket},
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
if shred.is_data() {
@@ -102,8 +108,15 @@ fn run_check_duplicate(
Ok(())
}
fn verify_repair(_shred: &Shred, repair_info: &Option<RepairMeta>) -> bool {
repair_info
.as_ref()
.map(|repair_info| repair_info.nonce == DEFAULT_NONCE)
.unwrap_or(true)
}
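For readers of `verify_repair`: turbine (non-repair) shreds carry no `RepairMeta` and always pass, while repaired shreds are accepted only when stamped with `DEFAULT_NONCE`. A few illustrative cases, assuming a `shred: &Shred` in scope inside this module (the address and nonce values are hypothetical):

```rust
fn demo(shred: &solana_ledger::shred::Shred) {
    use crate::serve_repair::DEFAULT_NONCE;
    let addr: std::net::SocketAddr = "127.0.0.1:0".parse().unwrap();
    // Turbine path: no RepairMeta, always accepted.
    assert!(verify_repair(shred, &None));
    // Repair path: accepted only with the expected nonce.
    assert!(verify_repair(
        shred,
        &Some(RepairMeta { _from_addr: addr, nonce: DEFAULT_NONCE })
    ));
    assert!(!verify_repair(
        shred,
        &Some(RepairMeta { _from_addr: addr, nonce: DEFAULT_NONCE + 1 })
    ));
}
```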
fn run_insert<F>(
shred_receiver: &CrossbeamReceiver<Vec<Shred>>,
shred_receiver: &CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
handle_duplicate: F,
@@ -112,12 +125,16 @@ where
F: Fn(Shred) -> (),
{
let timer = Duration::from_millis(200);
let mut shreds = shred_receiver.recv_timeout(timer)?;
while let Ok(mut more_shreds) = shred_receiver.try_recv() {
shreds.append(&mut more_shreds)
let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?;
while let Ok((more_shreds, more_repair_infos)) = shred_receiver.try_recv() {
shreds.extend(more_shreds);
repair_infos.extend(more_repair_infos);
}
assert_eq!(shreds.len(), repair_infos.len());
let mut i = 0;
shreds.retain(|shred| (verify_repair(&shred, &repair_infos[i]), i += 1).0);
let blockstore_insert_metrics = blockstore.insert_shreds_handle_duplicate(
shreds,
Some(leader_schedule_cache),
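The `retain` above filters `shreds` against the parallel `repair_infos` vector by tucking an index increment into a tuple expression. The same idiom in isolation:

```rust
fn main() {
    // Filter one Vec by a same-length companion Vec without zipping or
    // reallocating: the tuple evaluates the predicate, bumps the cursor,
    // and `.0` hands retain the boolean.
    let mut values = vec![10, 20, 30, 40];
    let keep = [true, false, true, false];
    let mut i = 0;
    values.retain(|_v| (keep[i], i += 1).0);
    assert_eq!(values, vec![10, 30]);
}
```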
@@ -131,7 +148,7 @@ where
fn recv_window<F>(
blockstore: &Arc<Blockstore>,
insert_shred_sender: &CrossbeamSender<Vec<Shred>>,
insert_shred_sender: &CrossbeamSender<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
my_pubkey: &Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
retransmit: &PacketSender,
@@ -155,7 +172,7 @@ where
inc_new_counter_debug!("streamer-recv_window-recv", total_packets);
let last_root = blockstore.last_root();
let shreds: Vec<_> = thread_pool.install(|| {
let (shreds, repair_infos): (Vec<_>, Vec<_>) = thread_pool.install(|| {
packets
.par_iter_mut()
.flat_map(|packets| {
@@ -164,34 +181,59 @@ where
.iter_mut()
.filter_map(|packet| {
if packet.meta.discard {
inc_new_counter_debug!("streamer-recv_window-invalid_signature", 1);
inc_new_counter_debug!(
"streamer-recv_window-invalid_or_unnecessary_packet",
1
);
None
} else if let Ok(shred) =
Shred::new_from_serialized_shred(packet.data.to_vec())
{
if shred_filter(&shred, last_root) {
// Mark slot as dead if the current shred is on the boundary
// of max shreds per slot. However, let the current shred
// get retransmitted. It'll allow peer nodes to see this shred
// and trigger them to mark the slot as dead.
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
let _ = blockstore.set_dead_slot(shred.slot());
} else {
// The shred fetch stage should be sending packets with sufficiently
// large buffers; this is needed to ensure the call to
// `new_from_serialized_shred` below is safe.
assert_eq!(packet.data.len(), PACKET_DATA_SIZE);
let serialized_shred = packet.data.to_vec();
if let Ok(shred) = Shred::new_from_serialized_shred(serialized_shred) {
let repair_info = {
if packet.meta.repair && Shred::is_nonce_unlocked(shred.slot())
{
if let Some(nonce) = repair_response::nonce(&packet.data) {
let repair_info = RepairMeta {
_from_addr: packet.meta.addr(),
nonce,
};
Some(repair_info)
} else {
// If the nonce can't be parsed, drop the packet
return None;
}
} else {
None
}
};
if shred_filter(&shred, last_root) {
// Mark slot as dead if the current shred is on the boundary
// of max shreds per slot. However, let the current shred
// get retransmitted. It'll allow peer nodes to see this shred
// and trigger them to mark the slot as dead.
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
let _ = blockstore.set_dead_slot(shred.slot());
}
packet.meta.slot = shred.slot();
packet.meta.seed = shred.seed();
Some((shred, repair_info))
} else {
packet.meta.discard = true;
None
}
packet.meta.slot = shred.slot();
packet.meta.seed = shred.seed();
Some(shred)
} else {
packet.meta.discard = true;
None
}
} else {
packet.meta.discard = true;
None
}
})
.collect::<Vec<_>>()
})
.collect()
.unzip()
});
trace!("{:?} shreds from packets", shreds.len());
@@ -205,7 +247,7 @@ where
}
}
insert_shred_sender.send(shreds)?;
insert_shred_sender.send((shreds, repair_infos))?;
trace!(
"Elapsed processing time in recv_window(): {}",
@@ -215,6 +257,11 @@ where
Ok(())
}
struct RepairMeta {
_from_addr: SocketAddr,
nonce: Nonce,
}
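`repair_response::nonce` itself is not shown in this compare view; a plausible reconstruction, assuming the repair nonce occupies the trailing `SIZE_OF_NONCE` (4) bytes of the packet buffer — treat the helper below as hypothetical:

```rust
use std::convert::TryInto;

type Nonce = u32;
const SIZE_OF_NONCE: usize = 4;

// Hypothetical stand-in for repair_response::nonce(): read a trailing
// little-endian u32, or None if the buffer is too short to hold one.
fn trailing_nonce(buf: &[u8]) -> Option<Nonce> {
    let tail = buf.len().checked_sub(SIZE_OF_NONCE)?;
    buf[tail..].try_into().ok().map(Nonce::from_le_bytes)
}

fn main() {
    let mut packet = vec![0u8; 8];
    packet[4..].copy_from_slice(&9u32.to_le_bytes());
    assert_eq!(trailing_nonce(&packet), Some(9));
    assert_eq!(trailing_nonce(&[0u8; 2]), None);
}
```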
// Implement a destructor for the window_service thread to signal it exited
// even on panics
struct Finalizer {
@@ -336,7 +383,7 @@ impl WindowService {
exit: &Arc<AtomicBool>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
insert_receiver: CrossbeamReceiver<Vec<Shred>>,
insert_receiver: CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
duplicate_sender: CrossbeamSender<Shred>,
) -> JoinHandle<()> {
let exit = exit.clone();
@@ -377,7 +424,7 @@ impl WindowService {
id: Pubkey,
exit: &Arc<AtomicBool>,
blockstore: &Arc<Blockstore>,
insert_sender: CrossbeamSender<Vec<Shred>>,
insert_sender: CrossbeamSender<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
shred_filter: F,
bank_forks: Option<Arc<RwLock<BankForks>>>,
@@ -483,12 +530,11 @@ mod test {
repair_service::RepairSlotRange,
};
use rand::thread_rng;
use solana_ledger::shred::DataShredHeader;
use solana_ledger::{
blockstore::{make_many_slot_entries, Blockstore},
entry::{create_ticks, Entry},
get_tmp_ledger_path,
shred::Shredder,
shred::{DataShredHeader, Shredder, NONCE_SHRED_PAYLOAD_SIZE},
};
use solana_sdk::{
clock::Slot,
@@ -562,8 +608,12 @@ mod test {
// If it's a coding shred, test that slot >= root
let (common, coding) = Shredder::new_coding_shred_header(5, 5, 5, 6, 6, 0, 0);
let mut coding_shred =
Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
let mut coding_shred = Shred::new_empty_from_header(
common,
DataShredHeader::default(),
coding,
NONCE_SHRED_PAYLOAD_SIZE,
);
Shredder::sign_shred(&leader_keypair, &mut coding_shred);
assert_eq!(
should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 0, 0),

View File

@@ -40,7 +40,7 @@ fn test_rpc_client() {
assert_eq!(
client.get_version().unwrap().solana_core,
solana_clap_utils::version!()
solana_version::version!()
);
assert!(client.get_account(&bob_pubkey).is_err());

View File

@@ -148,7 +148,7 @@ fn test_rpc_invalid_requests() {
.unwrap();
let json: Value = serde_json::from_str(&response.text().unwrap()).unwrap();
let the_error = json["error"]["message"].as_str().unwrap();
assert_eq!(the_error, "Invalid request");
assert_eq!(the_error, "Invalid");
// test invalid get_account_info request
let client = reqwest::blocking::Client::new();
@@ -168,7 +168,7 @@ fn test_rpc_invalid_requests() {
.unwrap();
let json: Value = serde_json::from_str(&response.text().unwrap()).unwrap();
let the_error = json["error"]["message"].as_str().unwrap();
assert_eq!(the_error, "Invalid request");
assert_eq!(the_error, "Invalid");
// test invalid get_account_info request
let client = reqwest::blocking::Client::new();

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.0.22"
version = "1.0.24"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -50,6 +50,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot)
* [requestAirdrop](jsonrpc-api.md#requestairdrop)
* [sendTransaction](jsonrpc-api.md#sendtransaction)
* [simulateTransaction](jsonrpc-api.md#simulatetransaction)
* [setLogFilter](jsonrpc-api.md#setlogfilter)
* [validatorExit](jsonrpc-api.md#validatorexit)
* [Subscription Websocket](jsonrpc-api.md#subscription-websocket)
@@ -258,7 +259,8 @@ The result field will be an array of JSON objects, each with the following sub f
* `pubkey: <string>` - Node public key, as base-58 encoded string
* `gossip: <string>` - Gossip network address for the node
* `tpu: <string>` - TPU network address for the node
* `rpc: <string>` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
* `rpc: <string>|null` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
* `version: <string>|null` - The software version of the node, or `null` if the version information is not available
#### Example:
@@ -267,7 +269,7 @@ The result field will be an array of JSON objects, each with the following sub f
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getClusterNodes"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"},"version":"1.0.0 c375ce1f"],"id":1}
```
### getConfirmedBlock
@@ -960,7 +962,7 @@ The result will be an RpcResponse JSON object with `value` equal to a JSON objec
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getCirculatingSupply"}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSupply"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":{"circulating":16000,"nonCirculating":1000000,"nonCirculatingAccounts":["FEy8pTbP5fEoqMV1GdTz83byuA8EKByqYat1PKDgVAq5","9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA","3mi1GmwEE3zo2jmfDuzvjSX9ovRXsDUKHvsntpkhuLJ9","BYxEJTDerkaRWBem3XgnVcdhppktBXa2HbkHPKj2Ui4Z],total:1016000}},"id":1}
```
@@ -1007,7 +1009,7 @@ The result field will be a JSON object with the following fields:
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"solana-core": "1.0.22"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "1.0.24"},"id":1}
```
### getVoteAccounts
@@ -1103,10 +1105,34 @@ Creates new transaction
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["3gKEMTuxvm3DKEJc4UyiyoNz1sxwdVRW2pyDDXqaCvUjGApnsazGh2y4W92zuaSSdJhBbWLYAkZokBt4N5oW27R7zCVaLLpLxvATL2GgheEh9DmmDR1P9r1ZqirVXM2fF3z5cafmc4EtwWc1UErFdCWj1qYvy4bDGMLXRYLURxaKytEEqrxz6JXj8rUHhDpjTZeFxmC6iAW3hZr6cmaAzewQCQfiEv2HfydriwHDtN95u3Y1EF6SuXxcRqox2aTjGye2Ln9zFj4XbnAtjCmkZhR"]}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b","id":1}
{"jsonrpc":"2.0","result":"2id3YC2jK9G5Wo2phDx4gJVAew8DcY5NAojnVuao8rkxwPYPe8cSwE5GzhEgJA2y8fVjDEo6iR6ykBvDxrTQrtpb","id":1}
```
### simulateTransaction
Simulate sending a transaction
#### Parameters:
* `<string>` - Transaction, as base-58 encoded string. The transaction must have a valid blockhash, but is not required to be signed.
* `<object>` - (optional) Configuration object containing the following field:
* `sigVerify: <bool>` - if true the transaction signatures will be verified (default: false)
#### Results:
An RpcResponse containing a TransactionStatus object
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"simulateTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":218},"value":{"confirmations":0,"err":null,"slot":218,"status":{"Ok":null}}},"id":1}
```
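The doc's examples use curl; the same request can be made from Rust with `reqwest::blocking` (the client the RPC tests in this compare view already use). A sketch assuming reqwest's `blocking` and `json` features; `<BASE58_TX>` is a placeholder for a real serialized transaction:

```rust
use serde_json::{json, Value};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::blocking::Client::new();
    let body = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "simulateTransaction",
        "params": ["<BASE58_TX>", { "sigVerify": false }]
    });
    // POST the JSON-RPC request and read back the simulation result.
    let resp: Value = client
        .post("http://localhost:8899")
        .json(&body)
        .send()?
        .json()?;
    println!("simulation err field: {}", resp["result"]["value"]["err"]);
    Ok(())
}
```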
### setLogFilter

View File

@@ -171,7 +171,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
## Usage
### solana-cli
```text
solana-cli 1.0.22 [channel=unknown commit=unknown]
solana-cli 1.0.24 [channel=unknown commit=unknown]
Blockchain, Rebuilt for Scale
USAGE:

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.0.22"
version = "1.0.24"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.104"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
tokio = "0.1"
tokio-codec = "0.1"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "1.0.22"
version = "1.0.24"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,16 +10,16 @@ edition = "2018"
[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.0.22" }
solana-budget-program = { path = "../programs/budget", version = "1.0.22" }
solana-config-program = { path = "../programs/config", version = "1.0.22" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
solana-vest-program = { path = "../programs/vest", version = "1.0.22" }
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.0.24" }
solana-budget-program = { path = "../programs/budget", version = "1.0.24" }
solana-config-program = { path = "../programs/config", version = "1.0.24" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-stake-program = { path = "../programs/stake", version = "1.0.24" }
solana-storage-program = { path = "../programs/storage", version = "1.0.24" }
solana-vest-program = { path = "../programs/vest", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
[lib]
crate-type = ["lib"]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,13 +15,13 @@ chrono = "0.4"
serde = "1.0.104"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-stake-program = { path = "../programs/stake", version = "1.0.24" }
solana-storage-program = { path = "../programs/storage", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
tempfile = "3.1.0"
[[bin]]

View File

@@ -3,19 +3,19 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-net-utils = { path = "../net-utils", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }

View File

@@ -15,6 +15,13 @@ use std::process::exit;
fn main() -> Result<(), Box<dyn error::Error>> {
solana_logger::setup_with_default("solana=info");
let shred_version_arg = Arg::with_name("shred_version")
.long("shred-version")
.value_name("VERSION")
.takes_value(true)
.default_value("0")
.help("Filter gossip nodes by this shred version");
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_clap_utils::version!())
@@ -53,6 +60,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.default_value("5")
.help("Timeout in seconds"),
)
.arg(&shred_version_arg)
.setting(AppSettings::DisableVersion),
)
.subcommand(
@@ -110,6 +118,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.validator(is_pubkey)
.help("Public key of a specific node to wait for"),
)
.arg(&shred_version_arg)
.arg(
Arg::with_name("timeout")
.long("timeout")
@@ -167,6 +176,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let pubkey = matches
.value_of("node_pubkey")
.map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
let shred_version = value_t_or_exit!(matches, "shred_version", u16);
let entrypoint_addr = parse_entrypoint(&matches);
@@ -212,6 +222,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
pubkey,
None,
Some(&gossip_addr),
shred_version,
)?;
if timeout.is_some() {
@@ -251,6 +262,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let all = matches.is_present("all");
let entrypoint_addr = parse_entrypoint(&matches);
let timeout = value_t_or_exit!(matches, "timeout", u64);
let shred_version = value_t_or_exit!(matches, "shred_version", u16);
let (nodes, _archivers) = discover(
entrypoint_addr.as_ref(),
Some(1),
@@ -258,6 +270,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
None,
entrypoint_addr.as_ref(),
None,
shred_version,
)?;
let rpc_addrs: Vec<_> = nodes
@@ -298,6 +311,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
Some(pubkey),
None,
None,
0,
)?;
let node = nodes.iter().find(|x| x.id == pubkey).unwrap();

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-install"
description = "The solana cluster software installer"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -24,11 +24,11 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.22" }
solana-config-program = { path = "../programs/config", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-config-program = { path = "../programs/config", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
semver = "0.9.0"
tar = "0.4.26"
tempdir = "0.3.7"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "1.0.22"
version = "1.0.24"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,10 +13,10 @@ bs58 = "0.3.0"
clap = "2.33"
dirs = "2.0.2"
num_cpus = "1.12.0"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-cli-config = { path = "../cli-config", version = "1.0.22" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-cli-config = { path = "../cli-config", version = "1.0.24" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
tiny-bip39 = "0.7.0"
[[bin]]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,15 +14,15 @@ clap = "2.33.0"
histogram = "*"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-cli = { path = "../cli", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-cli = { path = "../cli", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
solana-stake-program = { path = "../programs/stake", version = "1.0.24" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.24" }
tempfile = "3.1.0"
[dev-dependencies]

View File

@@ -731,17 +731,6 @@ fn main() {
.arg(&account_paths_arg)
.arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
).subcommand(
SubCommand::with_name("prune")
.about("Prune the ledger at the block height")
.arg(
Arg::with_name("slot_list")
.long("slot-list")
.value_name("FILENAME")
.takes_value(true)
.required(true)
.help("The location of the YAML file with a list of rollback slot heights and hashes"),
)
).subcommand(
SubCommand::with_name("purge")
.about("Purge the ledger at the block height")
@@ -751,14 +740,14 @@ fn main() {
.value_name("SLOT")
.takes_value(true)
.required(true)
.help("Start slot to purge from."),
.help("Start slot to purge from (inclusive)"),
)
.arg(
Arg::with_name("end_slot")
.index(2)
.value_name("SLOT")
.takes_value(true)
.help("Optional ending slot to stop purging."),
.required(true)
.help("Ending slot to stop purging (inclusive)"),
)
)
.subcommand(
@@ -1133,48 +1122,10 @@ fn main() {
}
("purge", Some(arg_matches)) => {
let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot);
let end_slot = value_t!(arg_matches, "end_slot", Slot);
let end_slot = end_slot.map_or(None, Some);
let end_slot = value_t_or_exit!(arg_matches, "end_slot", Slot);
let blockstore = open_blockstore(&ledger_path);
blockstore.purge_slots(start_slot, end_slot);
}
("prune", Some(arg_matches)) => {
if let Some(prune_file_path) = arg_matches.value_of("slot_list") {
let blockstore = open_blockstore(&ledger_path);
let prune_file = File::open(prune_file_path.to_string()).unwrap();
let slot_hashes: BTreeMap<u64, String> =
serde_yaml::from_reader(prune_file).unwrap();
let iter =
RootedSlotIterator::new(0, &blockstore).expect("Failed to get rooted slot");
let potential_hashes: Vec<_> = iter
.filter_map(|(slot, _meta)| {
let blockhash = blockstore
.get_slot_entries(slot, 0, None)
.unwrap()
.last()
.unwrap()
.hash
.to_string();
slot_hashes.get(&slot).and_then(|hash| {
if *hash == blockhash {
Some((slot, blockhash))
} else {
None
}
})
})
.collect();
let (target_slot, target_hash) = potential_hashes
.last()
.expect("Failed to find a valid slot");
println!("Prune at slot {:?} hash {:?}", target_slot, target_hash);
blockstore.prune(*target_slot);
}
}
("list-roots", Some(arg_matches)) => {
let blockstore = open_blockstore(&ledger_path);
let max_height = if let Some(height) = arg_matches.value_of("max_height") {

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "1.0.22"
version = "1.0.24"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -28,19 +28,19 @@ reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0
regex = "1.3.4"
serde = "1.0.104"
serde_bytes = "0.11.3"
solana-transaction-status = { path = "../transaction-status", version = "1.0.22" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-measure = { path = "../measure", version = "1.0.22" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-perf = { path = "../perf", version = "1.0.22" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.24" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-measure = { path = "../measure", version = "1.0.24" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
ed25519-dalek = "1.0.0-pre.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-stake-program = { path = "../programs/stake", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
symlink = "0.1.0"
tar = "0.4.26"
thiserror = "1.0"
@@ -57,7 +57,7 @@ features = ["lz4"]
[dev-dependencies]
assert_matches = "1.3.0"
matches = "0.1.6"
solana-budget-program = { path = "../programs/budget", version = "1.0.22" }
solana-budget-program = { path = "../programs/budget", version = "1.0.24" }
[lib]
crate-type = ["lib"]

View File

@@ -222,6 +222,10 @@ impl BankForks {
self.root
}
pub fn root_bank(&self) -> &Arc<Bank> {
self.banks.get(&self.root()).expect("Root bank must exist")
}
pub fn purge_old_snapshots(&self) {
// Remove outdated snapshots
let config = self.snapshot_config.as_ref().unwrap();

View File

@@ -177,7 +177,7 @@ impl Blockstore {
fs::create_dir_all(&ledger_path)?;
let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY);
adjust_ulimit_nofile();
adjust_ulimit_nofile()?;
// Open the database
let mut measure = Measure::start("open");
@@ -298,47 +298,56 @@ impl Blockstore {
false
}
/// Silently deletes all blockstore column families starting at the given slot until the `to` slot
/// Silently deletes all blockstore column families in the range [from_slot, to_slot]
/// Dangerous; Use with care:
/// Does not check for integrity and does not update slot metas that refer to deleted slots
/// Modifies multiple column families simultaneously
pub fn purge_slots(&self, mut from_slot: Slot, to_slot: Option<Slot>) {
pub fn purge_slots_with_delay(
&self,
from_slot: Slot,
to_slot: Slot,
delay_between_purges: Option<Duration>,
) {
// Split the purge request into batches of 1000 slots to keep each write batch bounded
const PURGE_BATCH_SIZE: u64 = 1000;
let mut batch_end = to_slot.unwrap_or(from_slot + PURGE_BATCH_SIZE);
while from_slot < batch_end {
match self.run_purge(from_slot, batch_end) {
Ok(end) => {
if !self.no_compaction {
if let Err(e) = self.compact_storage(from_slot, batch_end) {
// This error is not fatal and indicates an internal error
error!(
"Error: {:?}; Couldn't compact storage from {:?} to {:?}",
e, from_slot, batch_end
);
}
}
let mut batch_start = from_slot;
while batch_start < to_slot {
let batch_end = (batch_start + PURGE_BATCH_SIZE).min(to_slot);
match self.run_purge(batch_start, batch_end) {
Ok(_all_columns_purged) => {
batch_start = batch_end;
if end {
break;
} else {
// update the next batch bounds
from_slot = batch_end;
batch_end = to_slot.unwrap_or(batch_end + PURGE_BATCH_SIZE);
if let Some(ref duration) = delay_between_purges {
// Cooperate with other blockstore users
std::thread::sleep(*duration);
}
}
Err(e) => {
error!(
"Error: {:?}; Purge failed in range {:?} to {:?}",
e, from_slot, batch_end
e, batch_start, batch_end
);
break;
}
}
}
if !self.no_compaction {
if let Err(e) = self.compact_storage(from_slot, to_slot) {
// This error is not fatal and indicates an internal error
error!(
"Error: {:?}; Couldn't compact storage from {:?} to {:?}",
e, from_slot, to_slot
);
}
}
}
// Returns whether or not all columns have been purged until their end
pub fn purge_slots(&self, from_slot: Slot, to_slot: Slot) {
self.purge_slots_with_delay(from_slot, to_slot, None)
}
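A hypothetical caller of the new API, showing why the optional delay exists: LedgerCleanupService purges in the background while other threads read the same RocksDB, so each 1000-slot batch can yield before the next one starts. Sketch, assuming `solana_ledger::blockstore::Blockstore`:

```rust
use solana_ledger::blockstore::Blockstore;
use std::time::Duration;

fn cleanup(blockstore: &Blockstore) {
    // Purge slots [0, 5000] in 1000-slot batches, sleeping between
    // batches so concurrent blockstore readers are not starved.
    blockstore.purge_slots_with_delay(0, 5000, Some(Duration::from_millis(500)));
    // When no pause is needed, the thin wrapper behaves identically:
    blockstore.purge_slots(0, 5000);
}
```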
// Returns whether or not all columns successfully purged the slot range
fn run_purge(&self, from_slot: Slot, to_slot: Slot) -> Result<bool> {
let mut write_batch = self
.db
@@ -346,6 +355,8 @@ impl Blockstore {
.expect("Database Error: Failed to get write batch");
// delete range cf is not inclusive
let to_slot = to_slot.checked_add(1).unwrap_or_else(|| std::u64::MAX);
let mut delete_range_timer = Measure::start("delete_range");
let mut columns_empty = self
.db
.delete_range_cf::<cf::SlotMeta>(&mut write_batch, from_slot, to_slot)
@@ -402,6 +413,7 @@ impl Blockstore {
.delete_range_cf::<cf::AddressSignatures>(&mut write_batch, index, index + 1)
.unwrap_or(false);
}
delete_range_timer.stop();
let mut write_timer = Measure::start("write_batch");
if let Err(e) = self.db.write(write_batch) {
error!(
@@ -413,12 +425,17 @@ impl Blockstore {
write_timer.stop();
datapoint_info!(
"blockstore-purge",
("from_slot", from_slot as i64, i64),
("to_slot", to_slot as i64, i64),
("delete_range_us", delete_range_timer.as_us() as i64, i64),
("write_batch_us", write_timer.as_us() as i64, i64)
);
Ok(columns_empty)
}
pub fn compact_storage(&self, from_slot: Slot, to_slot: Slot) -> Result<bool> {
info!("compact_storage: from {} to {}", from_slot, to_slot);
let mut compact_timer = Measure::start("compact_range");
let result = self
.meta_cf
.compact_range(from_slot, to_slot)
@@ -472,6 +489,14 @@ impl Blockstore {
.rewards_cf
.compact_range(from_slot, to_slot)
.unwrap_or(false);
compact_timer.stop();
if !result {
info!("compact_storage incomplete");
}
datapoint_info!(
"blockstore-compact",
("compact_range_us", compact_timer.as_us() as i64, i64),
);
Ok(result)
}
@@ -1982,10 +2007,11 @@ impl Blockstore {
let data_shreds = data_shreds?;
assert!(data_shreds.last().unwrap().data_complete());
let deshred_payload = Shredder::deshred(&data_shreds).map_err(|_| {
BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
"Could not reconstruct data block from constituent shreds".to_string(),
)))
let deshred_payload = Shredder::deshred(&data_shreds).map_err(|e| {
BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!(
"Could not reconstruct data block from constituent shreds, error: {:?}",
e
))))
})?;
debug!("{:?} shreds in last FEC set", data_shreds.len(),);
@@ -2107,39 +2133,6 @@ impl Blockstore {
Ok(orphans_iter.map(|(slot, _)| slot))
}
/// Prune blockstore such that slots higher than `target_slot` are deleted and all references to
/// higher slots are removed
pub fn prune(&self, target_slot: Slot) {
let mut meta = self
.meta(target_slot)
.expect("couldn't read slot meta")
.expect("no meta for target slot");
meta.next_slots.clear();
self.put_meta_bytes(
target_slot,
&bincode::serialize(&meta).expect("couldn't get meta bytes"),
)
.expect("unable to update meta for target slot");
self.purge_slots(target_slot + 1, None);
// fixup anything that refers to non-root slots and delete the rest
for (slot, mut meta) in self
.slot_meta_iterator(0)
.expect("unable to iterate over meta")
{
if slot > target_slot {
break;
}
meta.next_slots.retain(|slot| *slot <= target_slot);
self.put_meta_bytes(
slot,
&bincode::serialize(&meta).expect("couldn't update meta"),
)
.expect("couldn't update meta");
}
}
pub fn last_root(&self) -> Slot {
*self.last_root.read().unwrap()
}
@@ -2780,10 +2773,12 @@ pub fn make_chaining_slot_entries(
}
#[cfg(not(unix))]
fn adjust_ulimit_nofile() {}
fn adjust_ulimit_nofile() -> Result<()> {
Ok(())
}
#[cfg(unix)]
fn adjust_ulimit_nofile() {
fn adjust_ulimit_nofile() -> Result<()> {
// Rocks DB likes to have many open files. The default open file descriptor limit is
// usually not enough
let desired_nofile = 65000;
@@ -2811,11 +2806,13 @@ fn adjust_ulimit_nofile() {
if cfg!(target_os = "macos") {
error!("On mac OS you may need to run |sudo launchctl limit maxfiles 65536 200000| first");
}
return Err(BlockstoreError::UnableToSetOpenFileDescriptorLimit);
}
nofile = get_nofile();
}
info!("Maximum open file descriptors: {}", nofile.rlim_cur);
Ok(())
}
#[cfg(test)]
@@ -2826,7 +2823,7 @@ pub mod tests {
entry::{next_entry, next_entry_mut},
genesis_utils::{create_genesis_config, GenesisConfigInfo},
leader_schedule::{FixedSchedule, LeaderSchedule},
shred::{max_ticks_per_n_shreds, DataShredHeader},
shred::{max_ticks_per_n_shreds, DataShredHeader, NONCE_SHRED_PAYLOAD_SIZE},
};
use assert_matches::assert_matches;
use bincode::serialize;
@@ -2976,7 +2973,7 @@ pub mod tests {
#[test]
fn test_insert_get_bytes() {
// Create enough entries to ensure there are at least two shreds created
let num_entries = max_ticks_per_n_shreds(1) + 1;
let num_entries = max_ticks_per_n_shreds(1, None) + 1;
assert!(num_entries > 1);
let (mut shreds, _) = make_slot_entries(0, 0, num_entries);
@@ -3216,7 +3213,7 @@ pub mod tests {
#[test]
fn test_insert_data_shreds_basic() {
// Create enough entries to ensure there are at least two shreds created
let num_entries = max_ticks_per_n_shreds(1) + 1;
let num_entries = max_ticks_per_n_shreds(1, None) + 1;
assert!(num_entries > 1);
let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
@@ -3263,7 +3260,7 @@ pub mod tests {
#[test]
fn test_insert_data_shreds_reverse() {
let num_shreds = 10;
let num_entries = max_ticks_per_n_shreds(num_shreds);
let num_entries = max_ticks_per_n_shreds(num_shreds, None);
let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
let num_shreds = shreds.len() as u64;
@@ -3440,7 +3437,7 @@ pub mod tests {
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create enough entries to ensure there are at least two shreds created
let min_entries = max_ticks_per_n_shreds(1) + 1;
let min_entries = max_ticks_per_n_shreds(1, None) + 1;
for i in 0..4 {
let slot = i;
let parent_slot = if i == 0 { 0 } else { i - 1 };
@@ -3867,7 +3864,7 @@ pub mod tests {
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_slots = 15;
// Create enough entries to ensure there are at least two shreds created
let entries_per_slot = max_ticks_per_n_shreds(1) + 1;
let entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
assert!(entries_per_slot > 1);
let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
@@ -4237,7 +4234,7 @@ pub mod tests {
let gap: u64 = 10;
assert!(gap > 3);
// Create enough entries to ensure there are at least two shreds created
let num_entries = max_ticks_per_n_shreds(1) + 1;
let num_entries = max_ticks_per_n_shreds(1, None) + 1;
let entries = create_ticks(num_entries, 0, Hash::default());
let mut shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
let num_shreds = shreds.len();
@@ -4549,6 +4546,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
NONCE_SHRED_PAYLOAD_SIZE,
);
// Insert a good coding shred
@@ -4581,6 +4579,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
NONCE_SHRED_PAYLOAD_SIZE,
);
let index = index_cf.get(shred.slot).unwrap().unwrap();
assert!(Blockstore::should_insert_coding_shred(
@@ -4596,6 +4595,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
NONCE_SHRED_PAYLOAD_SIZE,
);
let index = coding_shred.coding_header.position - 1;
coding_shred.set_index(index as u32);
@@ -4614,6 +4614,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
NONCE_SHRED_PAYLOAD_SIZE,
);
coding_shred.coding_header.num_coding_shreds = 0;
let index = index_cf.get(coding_shred.slot()).unwrap().unwrap();
@@ -4630,6 +4631,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
NONCE_SHRED_PAYLOAD_SIZE,
);
coding_shred.coding_header.num_coding_shreds = coding_shred.coding_header.position;
let index = index_cf.get(coding_shred.slot()).unwrap().unwrap();
@@ -4647,6 +4649,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
NONCE_SHRED_PAYLOAD_SIZE,
);
coding_shred.common_header.fec_set_index = std::u32::MAX - 1;
coding_shred.coding_header.num_coding_shreds = 3;
@@ -4679,6 +4682,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
NONCE_SHRED_PAYLOAD_SIZE,
);
let index = index_cf.get(coding_shred.slot()).unwrap().unwrap();
coding_shred.set_slot(*last_root.read().unwrap());
@@ -4770,42 +4774,6 @@ pub mod tests {
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_prune() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 6);
let shreds_per_slot = shreds.len() as u64 / 50;
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore
.slot_meta_iterator(0)
.unwrap()
.for_each(|(_, meta)| assert_eq!(meta.last_index, shreds_per_slot - 1));
blockstore.prune(5);
blockstore
.slot_meta_iterator(0)
.unwrap()
.for_each(|(slot, meta)| {
assert!(slot <= 5);
assert_eq!(meta.last_index, shreds_per_slot - 1)
});
let data_iter = blockstore
.data_shred_cf
.iter(IteratorMode::From((0, 0), IteratorDirection::Forward))
.unwrap();
for ((slot, _), _) in data_iter {
if slot > 5 {
assert!(false);
}
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_purge_slots() {
let blockstore_path = get_tmp_ledger_path!();
@@ -4813,11 +4781,11 @@ pub mod tests {
let (shreds, _) = make_many_slot_entries(0, 50, 5);
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.purge_slots(0, Some(5));
blockstore.purge_slots(0, 5);
test_all_empty_or_min(&blockstore, 6);
blockstore.purge_slots(0, None);
blockstore.purge_slots(0, 50);
// min slot shouldn't matter, blockstore should be empty
test_all_empty_or_min(&blockstore, 100);
@@ -4841,7 +4809,7 @@ pub mod tests {
let (shreds, _) = make_many_slot_entries(0, 5000, 10);
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.purge_slots(0, Some(4999));
blockstore.purge_slots(0, 4999);
test_all_empty_or_min(&blockstore, 5000);
@@ -4849,19 +4817,6 @@ pub mod tests {
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[should_panic]
#[test]
fn test_prune_out_of_bounds() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// slot 5 does not exist, prune should panic
blockstore.prune(5);
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_iter_bounds() {
let blockstore_path = get_tmp_ledger_path!();
@@ -6362,14 +6317,14 @@ pub mod tests {
.insert_shreds(all_shreds, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_slots(0, Some(slot));
blockstore.purge_slots(0, slot);
// Test inserting just the codes, enough for recovery
blockstore
.insert_shreds(coding_shreds.clone(), Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_slots(0, Some(slot));
blockstore.purge_slots(0, slot);
// Test inserting some codes, but not enough for recovery
blockstore
@@ -6380,7 +6335,7 @@ pub mod tests {
)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_slots(0, Some(slot));
blockstore.purge_slots(0, slot);
// Test inserting just the codes, and some data, enough for recovery
let shreds: Vec<_> = data_shreds[..data_shreds.len() - 1]
@@ -6392,7 +6347,7 @@ pub mod tests {
.insert_shreds(shreds, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_slots(0, Some(slot));
blockstore.purge_slots(0, slot);
// Test inserting some codes, and some data, but enough for recovery
let shreds: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
@@ -6404,7 +6359,7 @@ pub mod tests {
.insert_shreds(shreds, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_slots(0, Some(slot));
blockstore.purge_slots(0, slot);
// Test inserting all shreds in 2 rounds, make sure nothing is lost
let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
@@ -6424,7 +6379,7 @@ pub mod tests {
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_slots(0, Some(slot));
blockstore.purge_slots(0, slot);
// Test not all, but enough data and coding shreds in 2 rounds to trigger recovery,
// make sure nothing is lost
@@ -6449,7 +6404,7 @@ pub mod tests {
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_slots(0, Some(slot));
blockstore.purge_slots(0, slot);
// Test insert shreds in 2 rounds, but not enough to trigger
// recovery, make sure nothing is lost
@@ -6474,7 +6429,7 @@ pub mod tests {
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blockstore, slot);
blockstore.purge_slots(0, Some(slot));
blockstore.purge_slots(0, slot);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}

View File

@@ -55,6 +55,7 @@ pub enum BlockstoreError {
Serialize(#[from] Box<bincode::ErrorKind>),
FsExtraError(#[from] fs_extra::error::Error),
SlotCleanedUp,
UnableToSetOpenFileDescriptorLimit,
}
pub type Result<T> = std::result::Result<T, BlockstoreError>;

View File

@@ -9,7 +9,7 @@ use rayon::{
slice::ParallelSlice,
ThreadPool,
};
use serde::{Deserialize, Serialize};
use serde::{Deserialize, Serialize, Serializer};
use solana_metrics::datapoint_debug;
use solana_perf::packet::Packet;
use solana_rayon_threadlimit::get_thread_count;
@@ -24,25 +24,33 @@ use std::mem::size_of;
use std::{sync::Arc, time::Instant};
use thiserror::Error;
pub type Nonce = u32;
/// The following constants are computed by hand, and hardcoded.
/// `test_shred_constants` ensures that the values are correct.
/// Constants are used over lazy_static for performance reasons.
pub const SIZE_OF_COMMON_SHRED_HEADER: usize = 83;
pub const SIZE_OF_DATA_SHRED_HEADER: usize = 3;
pub const SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD: usize = 2;
pub const SIZE_OF_CODING_SHRED_HEADER: usize = 6;
pub const SIZE_OF_SIGNATURE: usize = 64;
pub const SIZE_OF_SHRED_TYPE: usize = 1;
pub const SIZE_OF_SHRED_SLOT: usize = 8;
pub const SIZE_OF_SHRED_INDEX: usize = 4;
pub const SIZE_OF_NONCE: usize = 4;
pub const SIZE_OF_DATA_SHRED_IGNORED_TAIL: usize =
SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_CODING_SHRED_HEADER;
pub const SIZE_OF_DATA_SHRED_PAYLOAD: usize = PACKET_DATA_SIZE
- SIZE_OF_COMMON_SHRED_HEADER
- SIZE_OF_DATA_SHRED_HEADER
- SIZE_OF_DATA_SHRED_IGNORED_TAIL;
pub const SIZE_OF_NONCE_DATA_SHRED_PAYLOAD: usize =
SIZE_OF_DATA_SHRED_PAYLOAD - SIZE_OF_NONCE - SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD;
pub const OFFSET_OF_SHRED_SLOT: usize = SIZE_OF_SIGNATURE + SIZE_OF_SHRED_TYPE;
pub const OFFSET_OF_SHRED_INDEX: usize = OFFSET_OF_SHRED_SLOT + SIZE_OF_SHRED_SLOT;
pub const NONCE_SHRED_PAYLOAD_SIZE: usize = PACKET_DATA_SIZE - SIZE_OF_NONCE;
pub const UNLOCK_NONCE_SLOT: Slot = 13_115_515;
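To make the size arithmetic concrete: assuming `PACKET_DATA_SIZE` is 1232 bytes (its value in solana-sdk at the time of this diff), the constants above imply a 1057-byte legacy data payload, 1051 bytes once the nonce and size field are carved out, and a 1228-byte whole-shred budget. A self-checking sketch:

```rust
const PACKET_DATA_SIZE: usize = 1232; // assumed; defined in solana_sdk
const SIZE_OF_COMMON_SHRED_HEADER: usize = 83;
const SIZE_OF_DATA_SHRED_HEADER: usize = 3;
const SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD: usize = 2;
const SIZE_OF_CODING_SHRED_HEADER: usize = 6;
const SIZE_OF_NONCE: usize = 4;
const SIZE_OF_DATA_SHRED_IGNORED_TAIL: usize =
    SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_CODING_SHRED_HEADER; // 89

fn main() {
    // Legacy data payload: 1232 - 83 - 3 - 89 = 1057 bytes
    let legacy = PACKET_DATA_SIZE
        - SIZE_OF_COMMON_SHRED_HEADER
        - SIZE_OF_DATA_SHRED_HEADER
        - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
    assert_eq!(legacy, 1057);
    // Nonce-era payload loses 4 bytes for the nonce and 2 for the
    // "size" header field: 1057 - 4 - 2 = 1051 bytes
    assert_eq!(legacy - SIZE_OF_NONCE - SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD, 1051);
    // Whole-shred budget with a trailing nonce: 1232 - 4 = 1228 bytes
    assert_eq!(PACKET_DATA_SIZE - SIZE_OF_NONCE, 1228);
}
```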
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
@@ -107,6 +115,20 @@ pub struct ShredCommonHeader {
pub struct DataShredHeader {
pub parent_offset: u16,
pub flags: u8,
#[serde(skip_deserializing)]
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(serialize_with = "option_as_u16_serialize")]
pub size: Option<u16>,
}
#[allow(clippy::trivially_copy_pass_by_ref)]
fn option_as_u16_serialize<S>(x: &Option<u16>, s: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
assert!(x.is_some());
let num = x.unwrap();
s.serialize_u16(num)
}
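The serializer above, combined with `skip_deserializing` and `skip_serializing_if`, makes the header's wire size depend on whether `size` is set, which is what lets the old and new header layouts coexist. A toy reconstruction (not the real shred header; assumes serde with derive and bincode 1.x) showing the 2-byte difference:

```rust
use serde::{Serialize, Serializer};

fn option_as_u16<S: Serializer>(x: &Option<u16>, s: S) -> Result<S::Ok, S::Error> {
    // Never reached with None: the field is skipped entirely when unset.
    s.serialize_u16(x.unwrap())
}

#[derive(Default, Serialize)]
struct ToyHeader {
    flags: u8,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(serialize_with = "option_as_u16")]
    size: Option<u16>,
}

fn main() {
    // Legacy form: the size field is absent on the wire -> 1 byte.
    assert_eq!(bincode::serialized_size(&ToyHeader::default()).unwrap(), 1);
    // New form: a bare u16 is appended -> 3 bytes, matching the
    // SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD = 2 delta asserted in the tests.
    let with_size = ToyHeader { flags: 0, size: Some(1000) };
    assert_eq!(bincode::serialized_size(&with_size).unwrap(), 3);
}
```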
/// The coding shred header has FEC information
@@ -168,7 +190,8 @@ impl Shred {
version: u16,
fec_set_index: u32,
) -> Self {
let mut payload = vec![0; PACKET_DATA_SIZE];
let payload_size = Self::get_expected_payload_size_from_slot(slot);
let mut payload = vec![0; payload_size];
let common_header = ShredCommonHeader {
slot,
index,
@@ -177,9 +200,20 @@ impl Shred {
..ShredCommonHeader::default()
};
let size = if Self::is_nonce_unlocked(slot) {
Some(
(data.map(|d| d.len()).unwrap_or(0)
+ SIZE_OF_DATA_SHRED_HEADER
+ SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD
+ SIZE_OF_COMMON_SHRED_HEADER) as u16,
)
} else {
None
};
let mut data_header = DataShredHeader {
parent_offset,
flags: reference_tick.min(SHRED_TICK_REFERENCE_MASK),
size,
};
if is_last_data {
@@ -198,9 +232,10 @@ impl Shred {
&common_header,
)
.expect("Failed to write header into shred buffer");
let size_of_data_shred_header = Shredder::get_expected_data_header_size_from_slot(slot);
Self::serialize_obj_into(
&mut start,
SIZE_OF_DATA_SHRED_HEADER,
size_of_data_shred_header,
&mut payload,
&data_header,
)
@@ -218,11 +253,21 @@ impl Shred {
}
}
pub fn new_from_serialized_shred(payload: Vec<u8>) -> Result<Self> {
pub fn new_from_serialized_shred(mut payload: Vec<u8>) -> Result<Self> {
let mut start = 0;
let common_header: ShredCommonHeader =
Self::deserialize_obj(&mut start, SIZE_OF_COMMON_SHRED_HEADER, &payload)?;
let slot = common_header.slot;
let expected_data_size = Self::get_expected_payload_size_from_slot(slot);
// Safe because any payload from the network must have passed through
// window service, which implies payload will be of size
// PACKET_DATA_SIZE, and `expected_data_size` <= PACKET_DATA_SIZE.
//
// On the other hand, if this function is called locally, the payload size should match
// the `expected_data_size`.
assert!(payload.len() >= expected_data_size);
payload.truncate(expected_data_size);
let shred = if common_header.shred_type == ShredType(CODING_SHRED) {
let coding_header: CodingShredHeader =
Self::deserialize_obj(&mut start, SIZE_OF_CODING_SHRED_HEADER, &payload)?;
@@ -233,11 +278,14 @@ impl Shred {
payload,
}
} else if common_header.shred_type == ShredType(DATA_SHRED) {
// This doesn't need to change since we skip deserialization of the
// "size" field in the header for now
let size_of_data_shred_header = SIZE_OF_DATA_SHRED_HEADER;
let data_header: DataShredHeader =
Self::deserialize_obj(&mut start, SIZE_OF_DATA_SHRED_HEADER, &payload)?;
Self::deserialize_obj(&mut start, size_of_data_shred_header, &payload)?;
if u64::from(data_header.parent_offset) > common_header.slot {
return Err(ShredError::InvalidParentOffset {
slot: common_header.slot,
slot,
parent_offset: data_header.parent_offset,
});
}
@@ -258,8 +306,10 @@ impl Shred {
common_header: ShredCommonHeader,
data_header: DataShredHeader,
coding_header: CodingShredHeader,
payload_size: usize,
) -> Self {
let mut payload = vec![0; PACKET_DATA_SIZE];
assert!(payload_size == NONCE_SHRED_PAYLOAD_SIZE || payload_size == PACKET_DATA_SIZE);
let mut payload = vec![0; payload_size];
let mut start = 0;
Self::serialize_obj_into(
&mut start,
@@ -268,10 +318,15 @@ impl Shred {
&common_header,
)
.expect("Failed to write header into shred buffer");
let expected_data_header_size = if payload_size == NONCE_SHRED_PAYLOAD_SIZE {
SIZE_OF_DATA_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD
} else {
SIZE_OF_DATA_SHRED_HEADER
};
if common_header.shred_type == ShredType(DATA_SHRED) {
Self::serialize_obj_into(
&mut start,
SIZE_OF_DATA_SHRED_HEADER,
expected_data_header_size,
&mut payload,
&data_header,
)
@@ -293,11 +348,13 @@ impl Shred {
}
}
pub fn new_empty_data_shred() -> Self {
pub fn new_empty_data_shred(payload_size: usize) -> Self {
assert!(payload_size == NONCE_SHRED_PAYLOAD_SIZE || payload_size == PACKET_DATA_SIZE);
Self::new_empty_from_header(
ShredCommonHeader::default(),
DataShredHeader::default(),
CodingShredHeader::default(),
payload_size,
)
}
@@ -403,6 +460,18 @@ impl Shred {
self.signature()
.verify(pubkey.as_ref(), &self.payload[SIZE_OF_SIGNATURE..])
}
pub fn is_nonce_unlocked(slot: Slot) -> bool {
slot > UNLOCK_NONCE_SLOT
}
fn get_expected_payload_size_from_slot(slot: Slot) -> usize {
if Self::is_nonce_unlocked(slot) {
NONCE_SHRED_PAYLOAD_SIZE
} else {
PACKET_DATA_SIZE
}
}
}
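Note that the cutover is exclusive, and matching buffers shrink by the nonce width. A quick boundary check using the public constants defined above (assumes `PACKET_DATA_SIZE` from solana-sdk):

```rust
use solana_ledger::shred::{Shred, NONCE_SHRED_PAYLOAD_SIZE, SIZE_OF_NONCE, UNLOCK_NONCE_SLOT};
use solana_sdk::packet::PACKET_DATA_SIZE;

fn main() {
    // The boundary slot itself is still locked; strictly greater unlocks.
    assert!(!Shred::is_nonce_unlocked(UNLOCK_NONCE_SLOT));
    assert!(Shred::is_nonce_unlocked(UNLOCK_NONCE_SLOT + 1));
    // The whole-shred budget shrinks by exactly the nonce width.
    assert_eq!(NONCE_SHRED_PAYLOAD_SIZE, PACKET_DATA_SIZE - SIZE_OF_NONCE);
}
```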
#[derive(Debug)]
@@ -467,7 +536,7 @@ impl Shredder {
let now = Instant::now();
let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let no_header_size = Self::get_expected_data_shred_payload_size_from_slot(self.slot);
let num_shreds = (serialized_shreds.len() + no_header_size - 1) / no_header_size;
let last_shred_index = next_shred_index + num_shreds as u32 - 1;
@@ -628,7 +697,8 @@ impl Shredder {
let start_index = data_shred_batch[0].common_header.index;
// All information after coding shred field in a data shred is encoded
let valid_data_len = PACKET_DATA_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let expected_payload_size = Shred::get_expected_payload_size_from_slot(slot);
let valid_data_len = expected_payload_size - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let data_ptrs: Vec<_> = data_shred_batch
.iter()
.map(|data| &data.payload[..valid_data_len])
@@ -646,8 +716,12 @@ impl Shredder {
i,
version,
);
let shred =
Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header);
let shred = Shred::new_empty_from_header(
header,
DataShredHeader::default(),
coding_header,
expected_payload_size,
);
coding_shreds.push(shred.payload);
});
@@ -701,7 +775,10 @@ impl Shredder {
expected_index: usize,
index_found: usize,
present: &mut [bool],
payload_size: usize,
) -> Vec<Vec<u8>> {
// Safe to assert because `new_from_serialized_shred` guarantees the size
assert!(payload_size == NONCE_SHRED_PAYLOAD_SIZE || payload_size == PACKET_DATA_SIZE);
let end_index = index_found.saturating_sub(1);
// The index of current shred must be within the range of shreds that are being
// recovered
@@ -715,9 +792,9 @@ impl Shredder {
.map(|missing| {
present[missing.saturating_sub(first_index_in_fec_set)] = false;
if missing < first_index_in_fec_set + num_data {
Shred::new_empty_data_shred().payload
Shred::new_empty_data_shred(payload_size).payload
} else {
vec![0; PACKET_DATA_SIZE]
vec![0; payload_size]
}
})
.collect();
@@ -732,6 +809,8 @@ impl Shredder {
first_code_index: usize,
slot: Slot,
) -> std::result::Result<Vec<Shred>, reed_solomon_erasure::Error> {
let expected_payload_size =
Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?;
let mut recovered_data = vec![];
let fec_set_size = num_data + num_coding;
@@ -751,6 +830,7 @@ impl Shredder {
next_expected_index,
index,
&mut present,
expected_payload_size,
);
blocks.push(shred.payload);
next_expected_index = index + 1;
@@ -767,6 +847,7 @@ impl Shredder {
next_expected_index,
first_index + fec_set_size,
&mut present,
expected_payload_size,
);
shred_bufs.append(&mut pending_shreds);
@@ -777,7 +858,7 @@ impl Shredder {
let session = Session::new(num_data, num_coding)?;
let valid_data_len = PACKET_DATA_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let valid_data_len = expected_payload_size - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let coding_block_offset = SIZE_OF_CODING_SHRED_HEADER + SIZE_OF_COMMON_SHRED_HEADER;
let mut blocks: Vec<(&mut [u8], bool)> = shred_bufs
.iter_mut()
@@ -822,8 +903,11 @@ impl Shredder {
/// Combines all shreds to recreate the original buffer
pub fn deshred(shreds: &[Shred]) -> std::result::Result<Vec<u8>, reed_solomon_erasure::Error> {
let num_data = shreds.len();
let data_shred_bufs = {
let expected_payload_size =
Self::verify_consistent_shred_payload_sizes(&"deshred()", shreds)?;
let (data_shred_bufs, slot) = {
let first_index = shreds.first().unwrap().index() as usize;
let slot = shreds.first().unwrap().slot();
let last_shred = shreds.last().unwrap();
let last_index = if last_shred.data_complete() || last_shred.last_in_slot() {
last_shred.index() as usize
@@ -835,10 +919,32 @@ impl Shredder {
return Err(reed_solomon_erasure::Error::TooFewDataShards);
}
shreds.iter().map(|shred| &shred.payload).collect()
(shreds.iter().map(|shred| &shred.payload).collect(), slot)
};
Ok(Self::reassemble_payload(num_data, data_shred_bufs))
let expected_data_header_size = Self::get_expected_data_header_size_from_slot(slot);
Ok(Self::reassemble_payload(
num_data,
data_shred_bufs,
expected_payload_size,
expected_data_header_size,
))
}
pub fn get_expected_data_shred_payload_size_from_slot(slot: Slot) -> usize {
if Shred::is_nonce_unlocked(slot) {
SIZE_OF_NONCE_DATA_SHRED_PAYLOAD
} else {
SIZE_OF_DATA_SHRED_PAYLOAD
}
}
pub fn get_expected_data_header_size_from_slot(slot: Slot) -> usize {
if Shred::is_nonce_unlocked(slot) {
SIZE_OF_DATA_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD
} else {
SIZE_OF_DATA_SHRED_HEADER
}
}
fn get_shred_index(
@@ -854,26 +960,60 @@ impl Shredder {
}
}
fn reassemble_payload(num_data: usize, data_shred_bufs: Vec<&Vec<u8>>) -> Vec<u8> {
let valid_data_len = PACKET_DATA_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
fn reassemble_payload(
num_data: usize,
data_shred_bufs: Vec<&Vec<u8>>,
expected_payload_size: usize,
expected_data_header_size: usize,
) -> Vec<u8> {
let valid_data_len = expected_payload_size - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
data_shred_bufs[..num_data]
.iter()
.flat_map(|data| {
let offset = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER;
let offset = SIZE_OF_COMMON_SHRED_HEADER + expected_data_header_size;
data[offset..valid_data_len].iter()
})
.cloned()
.collect()
}
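The slicing in reassemble_payload can be seen in miniature with toy buffers: each shred contributes payload[header..valid_data_len], and the pieces are concatenated in index order. Sizes here are illustrative, not the real header or ignored-tail sizes:

fn reassemble(bufs: &[Vec<u8>], header: usize, valid_data_len: usize) -> Vec<u8> {
    bufs.iter()
        .flat_map(|data| data[header..valid_data_len].iter())
        .cloned()
        .collect()
}

fn main() {
    // two shreds with a 2-byte header and a 1-byte ignored tail each
    let shreds = vec![vec![0, 0, 1, 2, 9], vec![0, 0, 3, 4, 9]];
    assert_eq!(reassemble(&shreds, 2, 4), vec![1, 2, 3, 4]);
}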
fn verify_consistent_shred_payload_sizes(
caller: &str,
shreds: &[Shred],
) -> std::result::Result<usize, reed_solomon_erasure::Error> {
if shreds.is_empty() {
return Err(reed_solomon_erasure::Error::TooFewShardsPresent);
}
let slot = shreds[0].slot();
let expected_payload_size = Shred::get_expected_payload_size_from_slot(slot);
for shred in shreds {
if shred.payload.len() != expected_payload_size {
error!(
"{} Shreds for slot: {} are inconsistent sizes. One shred: {} Another shred: {}",
caller,
slot,
expected_payload_size,
shred.payload.len()
);
return Err(reed_solomon_erasure::Error::IncorrectShardSize);
}
}
Ok(expected_payload_size)
}
}
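verify_consistent_shred_payload_sizes boils down to a batch-wide size check before any buffers reach erasure recovery: derive the expected size from the first shred's slot, then reject the whole batch if any payload disagrees. A self-contained sketch of that shape (plain Strings stand in for the reed_solomon_erasure error variants):

fn check_sizes(payload_lens: &[usize], expected: usize) -> Result<usize, String> {
    if payload_lens.is_empty() {
        return Err("too few shards present".to_string());
    }
    for (i, len) in payload_lens.iter().enumerate() {
        if *len != expected {
            return Err(format!("shred {} has size {}, expected {}", i, len, expected));
        }
    }
    Ok(expected)
}

fn main() {
    assert!(check_sizes(&[1228, 1228], 1228).is_ok());
    assert!(check_sizes(&[1228, 1224], 1228).is_err());
}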
pub fn max_ticks_per_n_shreds(num_shreds: u64) -> u64 {
pub fn max_ticks_per_n_shreds(num_shreds: u64, shred_data_size: Option<usize>) -> u64 {
let ticks = create_ticks(1, 0, Hash::default());
max_entries_per_n_shred(&ticks[0], num_shreds)
max_entries_per_n_shred(&ticks[0], num_shreds, shred_data_size)
}
pub fn max_entries_per_n_shred(entry: &Entry, num_shreds: u64) -> u64 {
let shred_data_size = SIZE_OF_DATA_SHRED_PAYLOAD as u64;
pub fn max_entries_per_n_shred(
entry: &Entry,
num_shreds: u64,
shred_data_size: Option<usize>,
) -> u64 {
let shred_data_size = shred_data_size.unwrap_or(SIZE_OF_NONCE_DATA_SHRED_PAYLOAD) as u64;
let vec_size = bincode::serialized_size(&vec![entry]).unwrap();
let entry_size = bincode::serialized_size(entry).unwrap();
let count_size = vec_size - entry_size;
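The vec_size - entry_size subtraction isolates the serialized Vec length prefix, so the per-entry budget excludes the container overhead. A quick check of that trick, assuming bincode 1.x with its default configuration and using a u32 as a stand-in for Entry:

fn main() {
    let entry: u32 = 7; // stand-in for a real Entry
    let vec_size = bincode::serialized_size(&vec![entry]).unwrap();
    let entry_size = bincode::serialized_size(&entry).unwrap();
    let count_size = vec_size - entry_size;
    // bincode's default config encodes the Vec length as a u64 prefix
    assert_eq!(count_size, 8);
    println!("count_size = {}", count_size);
}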
@@ -891,7 +1031,8 @@ pub fn verify_test_data_shred(
is_last_in_slot: bool,
is_last_in_fec_set: bool,
) {
assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
let expected_payload_size = Shred::get_expected_payload_size_from_slot(slot);
assert_eq!(shred.payload.len(), expected_payload_size);
assert!(shred.is_data());
assert_eq!(shred.index(), index);
assert_eq!(shred.slot(), slot);
@@ -932,6 +1073,14 @@ pub mod tests {
SIZE_OF_DATA_SHRED_HEADER,
serialized_size(&DataShredHeader::default()).unwrap() as usize
);
let data_shred_header_with_size = DataShredHeader {
size: Some(1000),
..DataShredHeader::default()
};
assert_eq!(
SIZE_OF_DATA_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD,
serialized_size(&data_shred_header_with_size).unwrap() as usize
);
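The extra SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD bytes in the assertion above come from bincode's encoding of an Option field: a one-byte tag, plus the value when Some. A sketch with a hypothetical header (the real field's type and neighbors may differ):

use serde::Serialize;

#[derive(Serialize)]
struct Header {
    flags: u8,
    size: Option<u16>, // hypothetical stand-in for DataShredHeader's new size field
}

fn main() {
    let without = Header { flags: 0, size: None };
    let with = Header { flags: 0, size: Some(1000) };
    let a = bincode::serialized_size(&without).unwrap();
    let b = bincode::serialized_size(&with).unwrap();
    // both variants pay the one-byte tag; Some additionally pays the u16 payload
    assert_eq!(b - a, 2);
    println!("none={} some={}", a, b);
}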
assert_eq!(
SIZE_OF_SIGNATURE,
bincode::serialized_size(&Signature::default()).unwrap() as usize
@@ -951,17 +1100,16 @@ pub mod tests {
}
fn verify_test_code_shred(shred: &Shred, index: u32, slot: Slot, pk: &Pubkey, verify: bool) {
assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
let expected_payload_size = Shred::get_expected_payload_size_from_slot(slot);
assert_eq!(shred.payload.len(), expected_payload_size);
assert!(!shred.is_data());
assert_eq!(shred.index(), index);
assert_eq!(shred.slot(), slot);
assert_eq!(verify, shred.verify(pk));
}
#[test]
fn test_data_shredder() {
fn run_test_data_shredder(slot: Slot) {
let keypair = Arc::new(Keypair::new());
let slot = 0x123456789abcdef0;
// Test that parent cannot be > current slot
assert_matches!(
@@ -996,7 +1144,7 @@ pub mod tests {
.collect();
let size = serialized_size(&entries).unwrap();
let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD as u64;
let no_header_size = Shredder::get_expected_data_shred_payload_size_from_slot(slot) as u64;
let num_expected_data_shreds = (size + no_header_size - 1) / no_header_size;
let num_expected_coding_shreds =
Shredder::calculate_num_coding_shreds(num_expected_data_shreds as f32, fec_rate);
@@ -1051,6 +1199,12 @@ pub mod tests {
assert_eq!(entries, deshred_entries);
}
#[test]
fn test_data_shredder() {
run_test_data_shredder(UNLOCK_NONCE_SLOT);
run_test_data_shredder(UNLOCK_NONCE_SLOT + 1);
}
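This is the refactor repeated throughout the tests in this diff: the old #[test] body becomes a run_test_*(slot) helper, and a thin wrapper drives it on both sides of the nonce boundary. Schematically (placeholder constant, trivial body):

const UNLOCK_NONCE_SLOT: u64 = 1_000; // placeholder value

fn run_test_case(slot: u64) {
    // a real body would shred/deshred at `slot`; this just shows the shape
    assert!(slot >= UNLOCK_NONCE_SLOT);
}

#[test]
fn test_case_at_boundary() {
    run_test_case(UNLOCK_NONCE_SLOT);
    run_test_case(UNLOCK_NONCE_SLOT + 1);
}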
#[test]
fn test_deserialize_shred_payload() {
let keypair = Arc::new(Keypair::new());
@@ -1077,12 +1231,10 @@ pub mod tests {
assert_eq!(deserialized_shred, *data_shreds.last().unwrap());
}
#[test]
fn test_shred_reference_tick() {
fn run_test_shred_reference_tick(slot: Slot) {
let keypair = Arc::new(Keypair::new());
let slot = 1;
let parent_slot = 0;
let parent_slot = slot - 1;
let shredder = Shredder::new(slot, parent_slot, 0.0, keypair.clone(), 5, 0)
.expect("Failed in creating shredder");
@@ -1107,6 +1259,12 @@ pub mod tests {
assert_eq!(deserialized_shred.reference_tick(), 5);
}
#[test]
fn test_shred_reference_tick() {
run_test_shred_reference_tick(UNLOCK_NONCE_SLOT);
run_test_shred_reference_tick(UNLOCK_NONCE_SLOT + 1);
}
#[test]
fn test_shred_reference_tick_overflow() {
let keypair = Arc::new(Keypair::new());
@@ -1143,22 +1301,21 @@ pub mod tests {
);
}
#[test]
fn test_data_and_code_shredder() {
fn run_test_data_and_code_shredder(slot: Slot) {
let keypair = Arc::new(Keypair::new());
let slot = 0x123456789abcdef0;
// Test that FEC rate cannot be > 1.0
assert_matches!(
Shredder::new(slot, slot - 5, 1.001, keypair.clone(), 0, 0),
Err(ShredError::InvalidFecRate(_))
);
let shredder = Shredder::new(0x123456789abcdef0, slot - 5, 1.0, keypair.clone(), 0, 0)
let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
.expect("Failed in creating shredder");
// Create enough entries to make > 1 shred
let num_entries = max_ticks_per_n_shreds(1) + 1;
let no_header_size = Shredder::get_expected_data_shred_payload_size_from_slot(slot);
let num_entries = max_ticks_per_n_shreds(1, Some(no_header_size)) + 1;
let entries: Vec<_> = (0..num_entries)
.map(|_| {
let keypair0 = Keypair::new();
@@ -1190,9 +1347,13 @@ pub mod tests {
}
#[test]
fn test_recovery_and_reassembly() {
fn test_data_and_code_shredder() {
run_test_data_and_code_shredder(UNLOCK_NONCE_SLOT);
run_test_data_and_code_shredder(UNLOCK_NONCE_SLOT + 1);
}
fn run_test_recovery_and_reassembly(slot: Slot) {
let keypair = Arc::new(Keypair::new());
let slot = 0x123456789abcdef0;
let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
.expect("Failed in creating shredder");
@@ -1202,7 +1363,9 @@ pub mod tests {
let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
let num_data_shreds: usize = 5;
let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
let no_header_size = Shredder::get_expected_data_shred_payload_size_from_slot(slot);
let num_entries =
max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(no_header_size));
let entries: Vec<_> = (0..num_entries)
.map(|_| {
let keypair0 = Keypair::new();
@@ -1441,6 +1604,12 @@ pub mod tests {
);
}
#[test]
fn test_recovery_and_reassembly() {
run_test_recovery_and_reassembly(UNLOCK_NONCE_SLOT);
run_test_recovery_and_reassembly(UNLOCK_NONCE_SLOT + 1);
}
#[test]
fn test_shred_version() {
let keypair = Arc::new(Keypair::new());

View File

@@ -1,5 +1,5 @@
#![allow(clippy::implicit_hasher)]
use crate::shred::ShredType;
use crate::shred::{Shred, ShredType, SIZE_OF_NONCE};
use rayon::{
iter::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator,
@@ -16,9 +16,12 @@ use solana_perf::{
sigverify::{self, batch_size, TxOffset},
};
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Signature;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::{
clock::Slot,
pubkey::Pubkey,
signature::Signature,
signature::{Keypair, Signer},
};
use std::sync::Arc;
use std::{collections::HashMap, mem::size_of};
@@ -40,13 +43,12 @@ lazy_static! {
/// ...
/// }
/// Signature is the first thing in the packet, and slot is the first thing in the signed message.
fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>) -> Option<u8> {
pub fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>) -> Option<u8> {
let sig_start = 0;
let sig_end = size_of::<Signature>();
let slot_start = sig_end + size_of::<ShredType>();
let slot_end = slot_start + size_of::<u64>();
let msg_start = sig_end;
let msg_end = packet.meta.size;
if packet.meta.discard {
return Some(0);
}
@@ -55,6 +57,11 @@ fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>) -> O
return Some(0);
}
let slot: u64 = limited_deserialize(&packet.data[slot_start..slot_end]).ok()?;
let msg_end = if packet.meta.repair && Shred::is_nonce_unlocked(slot) {
packet.meta.size.saturating_sub(SIZE_OF_NONCE)
} else {
packet.meta.size
};
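For repair packets in the nonce era, the trailing nonce is not part of the signed message, so it is excluded before signature verification. A standalone sketch of that trim, assuming a 4-byte (u32) nonce:

const SIZE_OF_NONCE: usize = 4; // assumption: the repair nonce is a u32

fn signed_msg_end(packet_size: usize, is_repair: bool, nonce_unlocked: bool) -> usize {
    if is_repair && nonce_unlocked {
        // saturating_sub guards against malformed packets shorter than the nonce
        packet_size.saturating_sub(SIZE_OF_NONCE)
    } else {
        packet_size
    }
}

fn main() {
    assert_eq!(signed_msg_end(1228, true, true), 1224);
    assert_eq!(signed_msg_end(1228, false, true), 1228);
}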
trace!("slot {}", slot);
let pubkey = slot_leaders.get(&slot)?;
if packet.meta.size < sig_end {
@@ -94,10 +101,10 @@ fn slot_key_data_for_gpu<
batches: &[Packets],
slot_keys: &HashMap<u64, T>,
recycler_cache: &RecyclerCache,
) -> (PinnedVec<u8>, TxOffset, usize) {
) -> (PinnedVec<u8>, TxOffset, usize, Vec<Vec<Slot>>) {
//TODO: mark Pubkey::default shreds as failed after the GPU returns
assert_eq!(slot_keys.get(&std::u64::MAX), Some(&T::default()));
let slots: Vec<Vec<u64>> = SIGVERIFY_THREAD_POOL.install(|| {
let slots: Vec<Vec<Slot>> = SIGVERIFY_THREAD_POOL.install(|| {
batches
.into_par_iter()
.map(|p| {
@@ -157,7 +164,7 @@ fn slot_key_data_for_gpu<
trace!("keyvec.len: {}", keyvec.len());
trace!("keyvec: {:?}", keyvec);
trace!("offsets: {:?}", offsets);
(keyvec, offsets, num_in_packets)
(keyvec, offsets, num_in_packets, slots)
}
fn vec_size_in_packets(keyvec: &PinnedVec<u8>) -> usize {
@@ -177,6 +184,7 @@ fn shred_gpu_offsets(
mut pubkeys_end: usize,
batches: &[Packets],
recycler_cache: &RecyclerCache,
slots: Option<Vec<Vec<Slot>>>,
) -> (TxOffset, TxOffset, TxOffset, Vec<Vec<u32>>) {
let mut signature_offsets = recycler_cache.offsets().allocate("shred_signatures");
signature_offsets.set_pinnable();
@@ -185,13 +193,30 @@ fn shred_gpu_offsets(
let mut msg_sizes = recycler_cache.offsets().allocate("shred_msg_sizes");
msg_sizes.set_pinnable();
let mut v_sig_lens = vec![];
for batch in batches {
let mut slots_iter;
let mut slots_iter_ref: &mut dyn Iterator<Item = Vec<Slot>> = &mut std::iter::repeat(vec![]);
if let Some(slots) = slots {
slots_iter = slots.into_iter();
slots_iter_ref = &mut slots_iter;
}
for (batch, slots) in batches.iter().zip(slots_iter_ref) {
let mut sig_lens = Vec::new();
for packet in &batch.packets {
let mut inner_slot_iter;
let mut inner_slot_iter_ref: &mut dyn Iterator<Item = Slot> = &mut std::iter::repeat(0);
if !slots.is_empty() {
inner_slot_iter = slots.into_iter();
inner_slot_iter_ref = &mut inner_slot_iter;
};
for (packet, slot) in batch.packets.iter().zip(inner_slot_iter_ref) {
let sig_start = pubkeys_end;
let sig_end = sig_start + size_of::<Signature>();
let msg_start = sig_end;
let msg_end = sig_start + packet.meta.size;
let msg_end = if packet.meta.repair && Shred::is_nonce_unlocked(slot) {
sig_start + packet.meta.size.saturating_sub(SIZE_OF_NONCE)
} else {
sig_start + packet.meta.size
};
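Note the iterator juggling earlier in this hunk: sign_shreds_gpu passes None for slots, so shred_gpu_offsets falls back to an endless stream of empty Vecs via a trait-object iterator, and the zip still pairs every batch with something. The idiom in isolation:

fn main() {
    let batches = vec!["batch0", "batch1"];
    let maybe_slots: Option<Vec<Vec<u64>>> = None; // the signing path supplies no slot data

    let mut slots_iter;
    let mut slots_iter_ref: &mut dyn Iterator<Item = Vec<u64>> = &mut std::iter::repeat(vec![]);
    if let Some(slots) = maybe_slots {
        slots_iter = slots.into_iter();
        slots_iter_ref = &mut slots_iter;
    }
    for (batch, slots) in batches.iter().zip(slots_iter_ref) {
        // `slots` is an empty Vec whenever no slot data was provided
        println!("{}: {:?}", batch, slots);
    }
}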
signature_offsets.push(sig_start as u32);
msg_start_offsets.push(msg_start as u32);
let msg_size = if msg_end < msg_start {
@@ -222,7 +247,7 @@ pub fn verify_shreds_gpu(
let mut elems = Vec::new();
let mut rvs = Vec::new();
let count = batch_size(batches);
let (pubkeys, pubkey_offsets, mut num_packets) =
let (pubkeys, pubkey_offsets, mut num_packets, slots) =
slot_key_data_for_gpu(0, batches, slot_leaders, recycler_cache);
//HACK: Pubkeys vector is passed along as a `Packets` buffer to the GPU
//TODO: GPU needs a more opaque interface, which can handle variable sized structures for data
@@ -230,7 +255,7 @@ pub fn verify_shreds_gpu(
trace!("num_packets: {}", num_packets);
trace!("pubkeys_len: {}", pubkeys_len);
let (signature_offsets, msg_start_offsets, msg_sizes, v_sig_lens) =
shred_gpu_offsets(pubkeys_len, batches, recycler_cache);
shred_gpu_offsets(pubkeys_len, batches, recycler_cache, Some(slots));
let mut out = recycler_cache.buffer().allocate("out_buffer");
out.set_pinnable();
elems.push(
@@ -367,7 +392,7 @@ pub fn sign_shreds_gpu(
trace!("offset: {}", offset);
let (signature_offsets, msg_start_offsets, msg_sizes, _v_sig_lens) =
shred_gpu_offsets(offset, batches, recycler_cache);
shred_gpu_offsets(offset, batches, recycler_cache, None);
let total_sigs = signature_offsets.len();
let mut signatures_out = recycler_cache.buffer().allocate("ed25519 signatures");
signatures_out.set_pinnable();
@@ -445,14 +470,12 @@ pub fn sign_shreds_gpu(
#[cfg(test)]
pub mod tests {
use super::*;
use crate::shred::SIZE_OF_DATA_SHRED_PAYLOAD;
use crate::shred::{Shred, Shredder};
use crate::shred::{Shred, Shredder, SIZE_OF_DATA_SHRED_PAYLOAD, UNLOCK_NONCE_SLOT};
use solana_sdk::signature::{Keypair, Signer};
#[test]
fn test_sigverify_shred_cpu() {
fn run_test_sigverify_shred_cpu(slot: Slot) {
solana_logger::setup();
let mut packet = Packet::default();
let slot = 0xdeadc0de;
let mut shred = Shred::new_from_data(
slot,
0xc0de,
@@ -492,10 +515,14 @@ pub mod tests {
}
#[test]
fn test_sigverify_shreds_cpu() {
fn test_sigverify_shred_cpu() {
run_test_sigverify_shred_cpu(UNLOCK_NONCE_SLOT);
run_test_sigverify_shred_cpu(UNLOCK_NONCE_SLOT + 1);
}
fn run_test_sigverify_shreds_cpu(slot: Slot) {
solana_logger::setup();
let mut batch = [Packets::default()];
let slot = 0xdeadc0de;
let mut shred = Shred::new_from_data(
slot,
0xc0de,
@@ -542,12 +569,16 @@ pub mod tests {
}
#[test]
fn test_sigverify_shreds_gpu() {
fn test_sigverify_shreds_cpu() {
run_test_sigverify_shreds_cpu(UNLOCK_NONCE_SLOT);
run_test_sigverify_shreds_cpu(UNLOCK_NONCE_SLOT + 1);
}
fn run_test_sigverify_shreds_gpu(slot: Slot) {
solana_logger::setup();
let recycler_cache = RecyclerCache::default();
let mut batch = [Packets::default()];
let slot = 0xdeadc0de;
let mut shred = Shred::new_from_data(
slot,
0xc0de,
@@ -603,14 +634,18 @@ pub mod tests {
}
#[test]
fn test_sigverify_shreds_sign_gpu() {
fn test_sigverify_shreds_gpu() {
run_test_sigverify_shreds_gpu(UNLOCK_NONCE_SLOT);
run_test_sigverify_shreds_gpu(UNLOCK_NONCE_SLOT + 1);
}
fn run_test_sigverify_shreds_sign_gpu(slot: Slot) {
solana_logger::setup();
let recycler_cache = RecyclerCache::default();
let mut packets = Packets::default();
let num_packets = 32;
let num_batches = 100;
let slot = 0xdeadc0de;
packets.packets.resize(num_packets, Packet::default());
for (i, p) in packets.packets.iter_mut().enumerate() {
let shred = Shred::new_from_data(
@@ -650,11 +685,15 @@ pub mod tests {
}
#[test]
fn test_sigverify_shreds_sign_cpu() {
fn test_sigverify_shreds_sign_gpu() {
run_test_sigverify_shreds_sign_gpu(UNLOCK_NONCE_SLOT);
run_test_sigverify_shreds_sign_gpu(UNLOCK_NONCE_SLOT + 1);
}
fn run_test_sigverify_shreds_sign_cpu(slot: Slot) {
solana_logger::setup();
let mut batch = [Packets::default()];
let slot = 0xdeadc0de;
let keypair = Keypair::new();
let shred = Shred::new_from_data(
slot,
@@ -685,4 +724,10 @@ pub mod tests {
let rv = verify_shreds_cpu(&batch, &pubkeys);
assert_eq!(rv, vec![vec![1]]);
}
#[test]
fn test_sigverify_shreds_sign_cpu() {
run_test_sigverify_shreds_sign_cpu(UNLOCK_NONCE_SLOT);
run_test_sigverify_shreds_sign_cpu(UNLOCK_NONCE_SLOT + 1);
}
}

View File

@@ -15,7 +15,7 @@ fn test_multiple_threads_insert_shred() {
for _ in 0..100 {
let num_threads = 10;
// Create `num_threads` different ticks in slots 1..num_therads + 1, all
// Create `num_threads` different ticks in slots 1..num_threads + 1, all
// with parent = slot 0
let threads: Vec<_> = (0..num_threads)
.map(|i| {
@@ -42,7 +42,7 @@ fn test_multiple_threads_insert_shred() {
assert_eq!(meta0.next_slots, expected_next_slots);
// Delete slots for next iteration
blockstore.purge_slots(0, None);
blockstore.purge_slots(0, num_threads + 1);
}
// Cleanup

View File

@@ -1,16 +1,15 @@
use solana_ledger::entry::Entry;
use solana_ledger::shred::{
max_entries_per_n_shred, verify_test_data_shred, Shred, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK,
max_entries_per_n_shred, verify_test_data_shred, Shred, Shredder,
MAX_DATA_SHREDS_PER_FEC_BLOCK, UNLOCK_NONCE_SLOT,
};
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::{hash::Hash, system_transaction};
use solana_sdk::{clock::Slot, hash::Hash, system_transaction};
use std::convert::TryInto;
use std::sync::Arc;
#[test]
fn test_multi_fec_block_coding() {
fn run_test_multi_fec_block_coding(slot: Slot) {
let keypair = Arc::new(Keypair::new());
let slot = 0x123456789abcdef0;
let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
.expect("Failed in creating shredder");
@@ -20,7 +19,8 @@ fn test_multi_fec_block_coding() {
let keypair1 = Keypair::new();
let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
let no_header_size = Shredder::get_expected_data_shred_payload_size_from_slot(slot);
let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(no_header_size));
let entries: Vec<_> = (0..num_entries)
.map(|_| {
@@ -94,3 +94,9 @@ fn test_multi_fec_block_coding() {
let result = Shredder::deshred(&all_shreds[..]).unwrap();
assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
}
#[test]
fn test_multi_fec_block_coding() {
run_test_multi_fec_block_coding(UNLOCK_NONCE_SLOT);
run_test_multi_fec_block_coding(UNLOCK_NONCE_SLOT + 1);
}

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,23 +12,23 @@ homepage = "https://solana.com/"
itertools = "0.8.1"
log = "0.4.8"
rand = "0.7.0"
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.22" }
solana-config-program = { path = "../programs/config", version = "1.0.22" }
solana-core = { path = "../core", version = "1.0.22" }
solana-client = { path = "../client", version = "1.0.22" }
solana-faucet = { path = "../faucet", version = "1.0.22" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.22" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.0.22" }
solana-ledger = { path = "../ledger", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-runtime = { path = "../runtime", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-stake-program = { path = "../programs/stake", version = "1.0.22" }
solana-storage-program = { path = "../programs/storage", version = "1.0.22" }
solana-vest-program = { path = "../programs/vest", version = "1.0.22" }
solana-vote-program = { path = "../programs/vote", version = "1.0.22" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.24" }
solana-config-program = { path = "../programs/config", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-faucet = { path = "../faucet", version = "1.0.24" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.24" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-stake-program = { path = "../programs/stake", version = "1.0.24" }
solana-storage-program = { path = "../programs/storage", version = "1.0.24" }
solana-vest-program = { path = "../programs/vest", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
tempfile = "3.1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.22" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.24" }
[dev-dependencies]
assert_matches = "1.3.0"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-log-analyzer"
description = "The solana cluster network analysis tool"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,8 +14,8 @@ byte-unit = "3.0.3"
clap = "2.33.0"
serde = "1.0.104"
serde_json = "1.0.46"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
[[bin]]
name = "solana-log-analyzer"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-logger"
version = "1.0.22"
version = "1.0.24"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-measure"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.22"
version = "1.0.24"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -12,8 +12,8 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
[target."cfg(unix)".dependencies]
jemallocator = "0.3.2"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-merkle-tree"
version = "1.0.22"
version = "1.0.24"
description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,7 +9,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[dev-dependencies]
hex = "0.4.0"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-metrics"
version = "1.0.22"
version = "1.0.24"
description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,7 +14,7 @@ gethostname = "0.2.1"
lazy_static = "1.4.0"
log = "0.4.8"
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls"] }
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[dev-dependencies]
rand = "0.7.0"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-net-shaper"
description = "The solana cluster network shaping tool"
version = "1.0.22"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,8 +13,8 @@ publish = false
clap = "2.33.0"
serde = "1.0.104"
serde_json = "1.0.46"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
rand = "0.7.0"
[[bin]]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-net-utils"
version = "1.0.22"
version = "1.0.24"
description = "Solana Network Utilities"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -18,8 +18,8 @@ rand = "0.7.0"
serde = "1.0.104"
serde_derive = "1.0.103"
socket2 = "0.3.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
tokio = "0.1"
tokio-codec = "0.1"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-perf"
version = "1.0.22"
version = "1.0.24"
description = "Solana Performance APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -17,11 +17,11 @@ serde = "1.0.104"
dlopen_derive = "0.1.4"
lazy_static = "1.4.0"
log = "0.4.8"
solana-sdk = { path = "../sdk", version = "1.0.22" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.22" }
solana-budget-program = { path = "../programs/budget", version = "1.0.22" }
solana-logger = { path = "../logger", version = "1.0.22" }
solana-metrics = { path = "../metrics", version = "1.0.22" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.24" }
solana-budget-program = { path = "../programs/budget", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
[lib]
name = "solana_perf"

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-bpf-programs"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.22"
version = "1.0.24"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
@@ -22,10 +22,10 @@ walkdir = "2"
bincode = "1.1.4"
byteorder = "1.3.2"
elf = "0.0.10"
solana-bpf-loader-program = { path = "../bpf_loader", version = "1.0.22" }
solana-logger = { path = "../../logger", version = "1.0.22" }
solana-runtime = { path = "../../runtime", version = "1.0.22" }
solana-sdk = { path = "../../sdk", version = "1.0.22" }
solana-bpf-loader-program = { path = "../bpf_loader", version = "1.0.24" }
solana-logger = { path = "../../logger", version = "1.0.24" }
solana-runtime = { path = "../../runtime", version = "1.0.24" }
solana-sdk = { path = "../../sdk", version = "1.0.24" }
solana_rbpf = "=0.1.21"
[[bench]]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-128bit"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "1.0.22" }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "1.0.24" }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-128bit-dep"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-alloc"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-dep-crate"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,10 +13,10 @@ edition = "2018"
[dependencies]
byteorder = { version = "1", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-dup-accounts"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-error-handling"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,11 +14,11 @@ edition = "2018"
[dependencies]
num-derive = "0.2"
num-traits = "0.2"
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
thiserror = "1.0"
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-external-spend"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-iter"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-many-args"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "1.0.22" }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "1.0.24" }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-many-args-dep"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-noop"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-panic"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-param-passing"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "1.0.22" }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "1.0.24" }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-param-passing-dep"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-sysval"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "1.0.22", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "1.0.24", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.22" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "1.0.24" }
[features]
program = ["solana-sdk/program"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-bpf-loader-program"
version = "1.0.22"
version = "1.0.24"
description = "Solana BPF loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,8 +15,8 @@ libc = "0.2.66"
log = "0.4.8"
num-derive = { version = "0.3" }
num-traits = { version = "0.2" }
solana-logger = { path = "../../logger", version = "1.0.22" }
solana-sdk = { path = "../../sdk", version = "1.0.22" }
solana-logger = { path = "../../logger", version = "1.0.24" }
solana-sdk = { path = "../../sdk", version = "1.0.24" }
solana_rbpf = "=0.1.21"
thiserror = "1.0"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-btc-spv-program"
version = "1.0.22"
version = "1.0.24"
description = "Solana Bitcoin spv parsing program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,7 +15,7 @@ num-derive = "0.3"
num-traits = "0.2"
serde = "1.0.104"
serde_derive = "1.0.103"
solana-sdk = { path = "../../sdk", version = "1.0.22"}
solana-sdk = { path = "../../sdk", version = "1.0.24"}
hex = "0.3.2"
[lib]

View File

@@ -1,6 +1,6 @@
[package]
name = "btc_spv_bin"
version = "1.0.22"
version = "1.0.24"
description = "Solana Bitcoin spv parsing program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-budget-program"
version = "1.0.22"
version = "1.0.24"
description = "Solana Budget program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,11 +16,11 @@ num-derive = "0.3"
num-traits = "0.2"
serde = "1.0.104"
serde_derive = "1.0.103"
solana-sdk = { path = "../../sdk", version = "1.0.22" }
solana-sdk = { path = "../../sdk", version = "1.0.24" }
thiserror = "1.0"
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "1.0.22" }
solana-runtime = { path = "../../runtime", version = "1.0.24" }
[lib]
crate-type = ["lib", "cdylib"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-config-program"
version = "1.0.22"
version = "1.0.24"
description = "Solana Config program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ chrono = { version = "0.4.10", features = ["serde"] }
log = "0.4.8"
serde = "1.0.104"
serde_derive = "1.0.103"
solana-logger = { path = "../../logger", version = "1.0.22" }
solana-sdk = { path = "../../sdk", version = "1.0.22" }
solana-logger = { path = "../../logger", version = "1.0.24" }
solana-sdk = { path = "../../sdk", version = "1.0.24" }
[lib]
crate-type = ["lib", "cdylib"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-exchange-program"
version = "1.0.22"
version = "1.0.24"
description = "Solana Exchange program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,13 +15,13 @@ num-derive = { version = "0.3" }
num-traits = { version = "0.2" }
serde = "1.0.104"
serde_derive = "1.0.103"
solana-logger = { path = "../../logger", version = "1.0.22" }
solana-metrics = { path = "../../metrics", version = "1.0.22" }
solana-sdk = { path = "../../sdk", version = "1.0.22" }
solana-logger = { path = "../../logger", version = "1.0.24" }
solana-metrics = { path = "../../metrics", version = "1.0.24" }
solana-sdk = { path = "../../sdk", version = "1.0.24" }
thiserror = "1.0"
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "1.0.22" }
solana-runtime = { path = "../../runtime", version = "1.0.24" }
[lib]
crate-type = ["lib", "cdylib"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-failure-program"
version = "1.0.22"
version = "1.0.24"
description = "Solana failure program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,10 +9,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../sdk", version = "1.0.22" }
solana-sdk = { path = "../../sdk", version = "1.0.24" }
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "1.0.22" }
solana-runtime = { path = "../../runtime", version = "1.0.24" }
[lib]
crate-type = ["lib", "cdylib"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-librapay"
version = "1.0.22"
version = "1.0.24"
description = "Solana Libra Payment"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,10 +11,10 @@ edition = "2018"
[dependencies]
bincode = "1.2.0"
log = "0.4.8"
solana-logger = { path = "../../logger", version = "1.0.22" }
solana-move-loader-program = { path = "../move_loader", version = "1.0.22" }
solana-runtime = { path = "../../runtime", version = "1.0.22" }
solana-sdk = { path = "../../sdk", version = "1.0.22" }
solana-logger = { path = "../../logger", version = "1.0.24" }
solana-move-loader-program = { path = "../move_loader", version = "1.0.24" }
solana-runtime = { path = "../../runtime", version = "1.0.24" }
solana-sdk = { path = "../../sdk", version = "1.0.24" }
types = { version = "0.0.1-sol4", package = "solana_libra_types" }
[lib]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-move-loader-program"
version = "1.0.22"
version = "1.0.24"
description = "Solana Move loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,8 +16,8 @@ serde = "1.0.104"
serde_bytes = "0.11"
serde_derive = "1.0.103"
serde_json = "1.0.46"
solana-logger = { path = "../../logger", version = "1.0.22" }
solana-sdk = { path = "../../sdk", version = "1.0.22" }
solana-logger = { path = "../../logger", version = "1.0.24" }
solana-sdk = { path = "../../sdk", version = "1.0.24" }
bytecode_verifier = { version = "0.0.1-sol4", package = "solana_libra_bytecode_verifier" }
canonical_serialization = { version = "0.0.1-sol4", package = "solana_libra_canonical_serialization" }

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-noop-program"
version = "1.0.22"
version = "1.0.24"
description = "Solana Noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,8 +10,8 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-logger = { path = "../../logger", version = "1.0.22" }
solana-sdk = { path = "../../sdk", version = "1.0.22" }
solana-logger = { path = "../../logger", version = "1.0.24" }
solana-sdk = { path = "../../sdk", version = "1.0.24" }
[lib]
crate-type = ["lib", "cdylib"]

Some files were not shown because too many files have changed in this diff