Compare commits

...

108 Commits

Author SHA1 Message Date
dependabot[bot]
1e3652243f chore: bump nanoid from 3.1.23 to 3.3.1 in /docs
Bumps [nanoid](https://github.com/ai/nanoid) from 3.1.23 to 3.3.1.
- [Release notes](https://github.com/ai/nanoid/releases)
- [Changelog](https://github.com/ai/nanoid/blob/main/CHANGELOG.md)
- [Commits](https://github.com/ai/nanoid/compare/3.1.23...3.3.1)

---
updated-dependencies:
- dependency-name: nanoid
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-03-23 20:00:41 +00:00
dependabot[bot]
154b828287 chore:(deps): bump nanoid from 3.1.23 to 3.3.1 in /explorer (#23884)
Bumps [nanoid](https://github.com/ai/nanoid) from 3.1.23 to 3.3.1.
- [Release notes](https://github.com/ai/nanoid/releases)
- [Changelog](https://github.com/ai/nanoid/blob/main/CHANGELOG.md)
- [Commits](https://github.com/ai/nanoid/compare/3.1.23...3.3.1)

---
updated-dependencies:
- dependency-name: nanoid
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-23 19:59:16 +00:00
Andrey Frolov
59290c08aa fix: add type-check script to web3.js package (#23109) 2022-03-23 12:58:42 -07:00
microwavedcola1
1b7b261460 feat(explorer): render program name, ix name, and account names from on chain idl for specific anchor programs (#23499)
* show titles of ix, from idl

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* remove unused

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* remaining accounts

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* fallback

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* fix from code review: remove default for the non fallback case

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* keep camelcase

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* formatting

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>
2022-03-23 12:14:26 -07:00
Jeff Washington (jwash)
dc3863ef14 flush_slot_cache_with_clean (#23868) 2022-03-23 14:09:56 -05:00
Jeff Washington (jwash)
260f899eda write cache: hashmap to set (#23866) 2022-03-23 14:05:45 -05:00
Jeff Washington (jwash)
9e61fe7583 add AccountsHashConfig to manage parameters (#23850) 2022-03-23 13:44:23 -05:00
HaoranYi
db49b826f0 seperate blockstore metrics from window service metrics (#23871) 2022-03-23 13:38:17 -05:00
HaoranYi
7ff8ed869c typos (#23870) 2022-03-23 13:36:55 -05:00
Sammy
26da64184a feat(web3.js): expose rpcEndpoint in client for web3.js (#23719)
Adds a getter to the commitment class to expose the rpcEndpoint property.
2022-03-23 11:05:37 -07:00
Will Hickey
a573cfa39d Revert "Remove unneeded unit expression"
This reverts commit e8e0097046.
2022-03-23 10:22:18 -07:00
Jeff Washington (jwash)
b1280b670a calculate_accounts_hash_without_index takes &self (#23846)
* calculate_accounts_hash_without_index takes &self

* Update runtime/src/snapshot_package.rs

Co-authored-by: Brooks Prumo <brooks@prumo.org>

Co-authored-by: Brooks Prumo <brooks@prumo.org>
2022-03-23 11:57:32 -05:00
Jeff Washington (jwash)
7b89222fde don't start extra threads for shrink/clean/hash (#23858) 2022-03-23 11:53:37 -05:00
Josh
911aa5bad3 fix(explorer): can't convert too large of stake to number (#23876) 2022-03-23 09:34:43 -07:00
Josh
5541a5873b fix(explorer): serum init open orders has optional openOrdersMarketAuthority (#23875) 2022-03-23 09:32:24 -07:00
Josh
6b76391ed2 fix(explorer): add sync native to token program decode (#23874) 2022-03-23 09:31:58 -07:00
Jack May
6962a667e5 add-u8-align-check (#23860) 2022-03-23 09:16:29 -07:00
Jack May
27b66db88d Use sat math for ptr calcs (#23861) 2022-03-23 09:16:03 -07:00
Jeff Washington (jwash)
493a8e2348 remove random flushing of write cache (#23845) 2022-03-23 08:45:44 -05:00
klykov
9859eb83b5 upd Cargo.lock for bpf 2022-03-23 09:25:36 +01:00
klykov
36807d5fa3 update clap to v3: poh-bench 2022-03-23 09:25:36 +01:00
klykov
22404ca1fc update clap to v3: bench-streamer 2022-03-23 09:25:36 +01:00
klykov
01317395e9 update Cargo.lock 2022-03-23 09:25:36 +01:00
klykov
3f2971692d update clap to v3: net-utils 2022-03-23 09:25:36 +01:00
klykov
300c50798f update clap to v3: log-analyzer 2022-03-23 09:25:36 +01:00
klykov
12e24a90a0 update clap to v3: net-sharper 2022-03-23 09:25:36 +01:00
Edgar Xi
d8be0d9430 make get_protobuf_or_bincode_cells accept IntoIter on row_keys, make get_confirmed_blocks_with_data return an Iterator 2022-03-22 22:47:25 -06:00
Edgar Xi
f717fda9a3 modify get_protobuf_or_bincode_cells to accept and return an iterator 2022-03-22 22:47:25 -06:00
Edgar Xi
fbcf6a0802 use &[T] instead of Vec<T> where appropriate
clippy
2022-03-22 22:47:25 -06:00
Edgar Xi
5533e9393c appease clippy 2022-03-22 22:47:25 -06:00
Edgar Xi
f3219fb695 add get_confirmed_blocks_with_data and get_protobuf_or_bincode_cells 2022-03-22 22:47:25 -06:00
Jeff Washington (jwash)
bc35e1c5f5 snapshot code needs all storages for hash calc (#23840) 2022-03-22 21:27:54 -05:00
Justin Starry
92462ae031 Manually serialize and use send_wire_transaction for votes (#23826)
* Revert "core: partial versioned transaction support for voting service"

This reverts commit eb3df4c20e.

* Manually serialize vote tx before sending to TPU
2022-03-23 09:47:55 +08:00
Alexander Meißner
9f0ca6d88a Refactor: Remove trait from nonce keyed account (#23811)
* Removes the trait `NonceKeyedAccount`.
2022-03-23 02:09:30 +01:00
Jack May
3d7c8442c7 add size check for from_raw_parts (#23781) 2022-03-22 15:20:39 -07:00
Jon Cinque
7af48465fa transaction-status: Add return data to meta (#23688)
* transaction-status: Add return data to meta

* Add return data to simulation results

* Use pretty-hex for printing return data

* Update arg name, make TransactionRecord struct

* Rename TransactionRecord -> ExecutionRecord
2022-03-22 23:17:05 +01:00
Kirill Lykov
359e2de090 ignore heavy tests in dos 2022-03-22 20:19:28 +01:00
Jeff Washington (jwash)
1089a38aaf AcctIdx: rework scan and write to disk (#23794) 2022-03-22 11:54:12 -05:00
Jeff Washington (jwash)
89ba3ff139 log fail to evict (#23815) 2022-03-22 09:19:38 -05:00
axleiro
16b73a998b Increasing timeout in local-cluster-slow by 10 min 2022-03-22 17:52:06 +05:30
axleiro
9347d57973 increasing timeout of local-cluster-slow test by 10 min 2022-03-22 17:51:13 +05:30
Yueh-Hsuan Chiang
ae75b1a25f (LedgerStore) Add compression type (#23578)
This PR adds `--rocksdb-ledger-compression` as a hidden argument to the validator
for specifying the compression algorithm for TransactionStatus.  Available compression
algorithms include `lz4`, `snappy`, `zlib`. The default value is `none`.

Experimental results show that with lz4 compression, we can achieve ~37% size-reduction
on the TransactionStatus column family, or ~8% size-reduction of the ledger store size.
2022-03-22 02:27:09 -07:00
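For illustration only, a hedged clap 2.x sketch of how a hidden argument with that fixed value set might be declared; the binary name and wiring below are invented for the sketch and are not the validator code from #23578:

```rust
// Sketch only: a hidden clap argument restricted to the compression values
// named in the commit message above, defaulting to "none".
use clap::{App, Arg};

fn main() {
    let matches = App::new("validator-sketch") // illustrative name, not the real binary
        .arg(
            Arg::with_name("rocksdb_ledger_compression")
                .long("rocksdb-ledger-compression")
                .hidden(true) // omitted from --help output
                .takes_value(true)
                .possible_values(&["none", "lz4", "snappy", "zlib"])
                .default_value("none"),
        )
        .get_matches();

    println!(
        "compression: {}",
        matches.value_of("rocksdb_ledger_compression").unwrap()
    );
}
```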
Lijun Wang
49228573f4 Use connection cache in send transaction (#23712)
Use connection cache in send transaction (#23712)
2022-03-21 23:24:21 -07:00
Trent Nelson
eb3df4c20e core: partial versioned transaction support for voting service 2022-03-21 22:59:05 -06:00
Justin Starry
016d3c450a Update TpuConnection interface to be compatible with versioned txs (#23760)
* Update TpuConnection interface to be compatible with versioned txs

* Add convenience method for sending txs

* use parallel iterator to serialize transactions
2022-03-22 09:45:22 +08:00
HaoranYi
45a7c6edfb Fix typos and a small refactor (#23805)
* fix typo

* remove packet_has_more_unprocessed_transactions function
2022-03-21 18:35:31 -05:00
Will Hickey
c4ecfa5716 Bump version to v1.11 (#23807)
* Revert crossbeam_epoch to stable. 0.9.8 only works with nightly
* Remove unneeded unit expression
2022-03-21 17:40:50 -05:00
Jeff Washington (jwash)
24f6855f86 AcctIdx: only remove a fixed number of items per write lock (#23795) 2022-03-21 16:55:04 -05:00
samkim-crypto
10eeafd3d6 zk-token-sdk: handle edge cases for transfer with fee (#23804)
* zk-token-sdk: handle edge cases for transfer with fee

* zk-token-sdk: clippy

* zk-token-sdk: clippy

* zk-token-sdk: cargo fmt
2022-03-21 16:10:33 -04:00
Brooks Prumo
cb06126388 Set accounts_data_len on feature activation (#23730) 2022-03-21 12:28:26 -05:00
Tyera Eulberg
9c60991cd3 Add ability to query bigtable via solana-test-validator, with hidden params 2022-03-21 11:26:49 -06:00
Trent Nelson
9b32b72990 bigtable: allow custom instance names 2022-03-21 11:26:49 -06:00
Trent Nelson
f513195468 bigtable: add a config ctor for LedgerStorage 2022-03-21 11:26:49 -06:00
Tyera Eulberg
63ee00e647 Refactor validator bigtable config 2022-03-21 11:26:49 -06:00
Michael Vines
99f1a43262 Add v1.10 backport label, remove v1.8 backport label 2022-03-21 09:50:55 -07:00
DimAn
739e43ba58 Add ability to get the latest incremental snapshot via RPC (#23788) 2022-03-21 11:48:49 -05:00
Lijun Wang
ae76fe2bd7 Made connection cache configurable. (#23783)
Added command-line argument tpu-use-quic argument.
Changed connection cache to return different connections based on the config.
2022-03-21 09:31:37 -07:00
Pankaj Garg
5d03b188c8 Use QUIC client in voting service (#23713)
* Use QUIC client in voting service

* guard quic-client usage with a flag

* add measure to time the quic client

* move time measure outside if block

* remove quic vs UDP flag from voting service
2022-03-21 09:10:16 -07:00
Jeff Washington (jwash)
965ab9186d AcctIdx: fix infinite loop (#23806) 2022-03-21 10:58:36 -05:00
Justin Starry
15357480ec Refactor instruction compilation and update message account key ordering (#23729)
* Refactor: Make instruction compilation usable for other message versions

* apply trents feedback

* Fix tests

* Fix bpf compatiblity
2022-03-21 20:53:32 +08:00
axleiro
a1a29b0b86 Increased timeout limit of coverage and stable-perf by 10 mins each (#23797)
* Increased timeout limit of coverage and stable-perf by 10 mins each

* Increasing timeout for in disk CI by 10 min
2022-03-21 15:08:23 +05:30
Jeff Washington (jwash)
258db77100 AcctIdx: factor 'scan' out of flush_internal (#23777) 2022-03-20 22:00:38 -05:00
carllin
f34434f96b Drop lock (#23765) 2022-03-20 21:27:24 -04:00
Jeff Washington (jwash)
dd69f3baf5 throttle index adding to allow disk flushing to keep up and reduce startup ram usage (#23773) 2022-03-20 19:56:20 -05:00
Brooks Prumo
335c4b668b Fix bug in bank/sysvar_cache tests (#23780) 2022-03-19 21:38:18 -05:00
Ikko Ashimine
848093b9fd Fix typo in processor.rs (#23786)
relavant -> relevant
2022-03-19 15:24:40 -05:00
Jeff Washington (jwash)
df29276eb0 AcctIdx: remove -> evict (#23775) 2022-03-18 17:13:21 -05:00
Tao Zhu
71ea05c176 replace nested for_each with flat_map 2022-03-18 16:37:41 -05:00
Tao Zhu
1c369fb55f Scan entire UnprocessedPacketBatches buffer to produce stake and locator of each packet 2022-03-18 16:37:41 -05:00
Jack May
1f052c6234 disable deprecated BPF loader deploys (#23757) 2022-03-18 14:29:49 -07:00
Jack May
7e358c654f add test to assert type assumption (#23769) 2022-03-18 14:15:59 -07:00
g1stavo
c556811c0f docs: fix stake state typo (#23776) 2022-03-18 13:45:07 -06:00
Jeff Washington (jwash)
a419374fa4 factor out function (#23742) 2022-03-18 14:10:52 -05:00
Jack May
0e64fb1fab don't rely on align_offset to check alignment (#23770) 2022-03-18 11:30:52 -07:00
Brian Anderson
fcea92ec6c Improve correctness of Rust-side type definitions for C invoke syscall (#23624)
* Make Rust definitions of C types repr(C)

* Make SolInstruction field types agree with C definitions

* Use correct SolSignerSeedsC type in SyscallInvokeSignedC

* rustfmt

* Change asserts to debug asserts in syscall.rs
2022-03-18 11:30:30 -07:00
Yueh-Hsuan Chiang
f999eef452 (LedgerStore) Rename BlockstoreAdvancedOptions to LedgerColumnOptions (#23764)
This PR renames BlockstoreAdvancedOptions to LedgerColumnOptions, as we will
pass-down this struct to LedgerColumn to allow it to perform metric reporting.
2022-03-18 11:13:35 -07:00
Tao Zhu
56428be629 Not exposing inner cost_table to encapsulating implementation details,
making future change easier.
2022-03-18 12:58:43 -05:00
dependabot[bot]
00ddf6576c chore: bump crossbeam-channel from 0.5.2 to 0.5.3 (#23698)
* chore: bump crossbeam-channel from 0.5.2 to 0.5.3

Bumps [crossbeam-channel](https://github.com/crossbeam-rs/crossbeam) from 0.5.2 to 0.5.3.
- [Release notes](https://github.com/crossbeam-rs/crossbeam/releases)
- [Changelog](https://github.com/crossbeam-rs/crossbeam/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crossbeam-rs/crossbeam/compare/crossbeam-channel-0.5.2...crossbeam-channel-0.5.3)

---
updated-dependencies:
- dependency-name: crossbeam-channel
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <you@example.com>
2022-03-18 11:44:33 -06:00
Jeff Washington (jwash)
998e7d18f9 AcctIdx: never retry a bucket flush (#23732) 2022-03-18 12:20:42 -05:00
Brian Anderson
c9b8977226 Add crate docs for solana-program (#23363)
* Add crate docs for solana-program

* Rework solana-program docs for pr feedback

* Clarify log module docs

* Remove address lookup table program from solana-program docs
2022-03-18 08:27:51 -07:00
HaoranYi
f54e746fc5 Support u8 slice digester in frozen abi struct. (#23726)
* support u8 slice in frozen abi digester

* use slice in account struct

* add bpf cargo lock file

* no need to pass account.data to serializer

* fix comments
2022-03-18 09:31:07 -05:00
Kirill Lykov
c694703e14 address PR comments 2022-03-18 14:55:33 +01:00
Kirill Lykov
2da896fa40 add documentation 2022-03-18 14:55:33 +01:00
Kirill Lykov
7074ebf45a address PR comments 2022-03-18 14:55:33 +01:00
klykov
957bc0db6b add tests to dos tool 2022-03-18 14:55:33 +01:00
klykov
d9dbfc83d5 upd Cargo.lock 2022-03-18 14:55:33 +01:00
klykov
f5339882cb refactor cmdline interface 2022-03-18 14:55:33 +01:00
klykov
a63dee87ec add transaction parameters dump 2022-03-18 14:55:33 +01:00
klykov
1b0c9ad4c0 add option payer to dos tool 2022-03-18 14:55:33 +01:00
klykov
cf73f6dc74 fix typo in dos 2022-03-18 14:55:33 +01:00
klykov
dce5d1c1fa avoid signatures if unnecessary in dos 2022-03-18 14:55:33 +01:00
klykov
1641d1d329 fix: cache blockhash in dos tool 2022-03-18 14:55:33 +01:00
klykov
cb537e80d7 add transaction options to dos 2022-03-18 14:55:33 +01:00
klykov
d4d95f1811 add valid blockhash option to dos 2022-03-18 14:55:33 +01:00
klykov
797c3324f0 add number of signatures to dos 2022-03-18 14:55:33 +01:00
Tao Zhu
0ed23899e7 directly use compute_budget MAX_UNITS and DEFAULT_UNITS 2022-03-18 08:53:11 -05:00
Tao Zhu
a4cacf3389 add deterministic default cost 2022-03-18 08:53:11 -05:00
Trent Nelson
ce2e82cfb6 validator: --only-known-rpc requires a --known-validator ... 2022-03-18 07:02:16 +00:00
Jeff Washington (jwash)
857576d76f AcctIdx: move write to disk outside in mem write lock (#23731) 2022-03-17 23:09:41 -05:00
Brooks Prumo
7ff8c80e25 Add accounts_data_len to bank snapshot (#23714) 2022-03-17 20:14:54 -05:00
Tao Zhu
c478fe2047 add timing metrics, some renaming 2022-03-17 19:31:28 -05:00
Tao Zhu
fd515097d8 leader qos part 2: add stage to find sender stake, set to packet meta 2022-03-17 19:31:28 -05:00
Stephen Akridge
976b138e76 Add tx weighting stage 2022-03-17 19:31:28 -05:00
Jeff Washington (jwash)
664deb2157 AcctIdx: get rid of unused is_dirty (#23733) 2022-03-17 16:29:36 -05:00
Lijun Wang
8b230b86cc Use borrow instead of move in interfaces defined by TpuConnection (#23734)
* Use borrow instead of move in interfaces defined by TpuConnection to avoid data copy

* Removed a few more unnecessary whole array slicing.
2022-03-17 13:31:11 -07:00
behzad nouri
6b0d34d70d removes redundant Arcs from Blockstore (#23735) 2022-03-17 19:43:57 +00:00
Jeff Washington (jwash)
342f1ab1cb clean up/add comments (#23727) 2022-03-17 14:23:08 -05:00
Will Hickey
2f58c9e501 Bump version to 1.10.4 (#23743) 2022-03-17 14:02:13 -05:00
266 changed files with 8628 additions and 5436 deletions


@@ -97,14 +97,6 @@ pull_request_rules:
label:
add:
- automerge
- name: v1.8 backport
conditions:
- label=v1.8
actions:
backport:
ignore_conflicts: true
branches:
- v1.8
- name: v1.9 backport
conditions:
- label=v1.9
@@ -113,6 +105,14 @@ pull_request_rules:
ignore_conflicts: true
branches:
- v1.9
- name: v1.10 backport
conditions:
- label=v1.10
actions:
backport:
ignore_conflicts: true
branches:
- v1.10
commands_restrictions:
# The author of copied PRs is the Mergify user.

Cargo.lock (generated, 1556 lines changed): file diff suppressed because it is too large.


@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.10.3"
version = "1.11.0"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,9 @@ lazy_static = "1.4.0"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_json = "1.0.79"
solana-config-program = { path = "../programs/config", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-config-program = { path = "../programs/config", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.11.1"


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-bench"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,11 +12,11 @@ publish = false
clap = "2.33.1"
log = "0.4.14"
rayon = "1.5.1"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-cluster-bench"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,25 +13,25 @@ clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-faucet = { path = "../faucet", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-faucet = { path = "../faucet", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
[dev-dependencies]
solana-core = { path = "../core", version = "=1.10.3" }
solana-local-cluster = { path = "../local-cluster", version = "=1.10.3" }
solana-test-validator = { path = "../test-validator", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.11.0" }
solana-test-validator = { path = "../test-validator", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-banking-bench"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-core = { path = "../core", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-poh = { path = "../poh", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-poh = { path = "../poh", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.10.3"
version = "1.11.0"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,17 +12,17 @@ edition = "2021"
[dependencies]
borsh = "0.9.3"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.10.3" }
solana-program = { path = "../sdk/program", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-banks-interface = { path = "../banks-interface", version = "=1.11.0" }
solana-program = { path = "../sdk/program", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
tarpc = { version = "0.27.2", features = ["full"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
[dev-dependencies]
solana-banks-server = { path = "../banks-server", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-banks-server = { path = "../banks-server", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
[lib]
crate-type = ["lib"]


@@ -1,5 +1,8 @@
use {
solana_sdk::{transaction::TransactionError, transport::TransportError},
solana_sdk::{
transaction::TransactionError, transaction_context::TransactionReturnData,
transport::TransportError,
},
std::io,
tarpc::client::RpcError,
thiserror::Error,
@@ -25,6 +28,7 @@ pub enum BanksClientError {
err: TransactionError,
logs: Vec<String>,
units_consumed: u64,
return_data: Option<TransactionReturnData>,
},
}


@@ -247,6 +247,7 @@ impl BanksClient {
err,
logs: simulation_details.logs,
units_consumed: simulation_details.units_consumed,
return_data: simulation_details.return_data,
}),
BanksTransactionResultWithSimulation {
result: Some(result),


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.10.3"
version = "1.11.0"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,7 +11,7 @@ edition = "2021"
[dependencies]
serde = { version = "1.0.136", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
tarpc = { version = "0.27.2", features = ["full"] }
[lib]


@@ -12,6 +12,7 @@ use {
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction, TransactionError},
transaction_context::TransactionReturnData,
},
};
@@ -35,6 +36,7 @@ pub struct TransactionStatus {
pub struct TransactionSimulationDetails {
pub logs: Vec<String>,
pub units_consumed: u64,
pub return_data: Option<TransactionReturnData>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.10.3"
version = "1.11.0"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,10 +13,10 @@ edition = "2021"
bincode = "1.3.3"
crossbeam-channel = "0.5"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.3" }
solana-banks-interface = { path = "../banks-interface", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.0" }
tarpc = { version = "0.27.2", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }


@@ -24,7 +24,7 @@ use {
transaction::{self, SanitizedTransaction, Transaction},
},
solana_send_transaction_service::{
send_transaction_service::{SendTransactionService, TransactionInfo},
send_transaction_service::{SendTransactionService, TransactionInfo, DEFAULT_TPU_USE_QUIC},
tpu_info::NullTpuInfo,
},
std::{
@@ -266,6 +266,7 @@ impl Banks for BanksServer {
logs,
post_simulation_accounts: _,
units_consumed,
return_data,
} = self
.bank(commitment)
.simulate_transaction_unchecked(sanitized_transaction)
@@ -275,6 +276,7 @@ impl Banks for BanksServer {
simulation_details: Some(TransactionSimulationDetails {
logs,
units_consumed,
return_data,
}),
};
}
@@ -399,6 +401,7 @@ pub async fn start_tcp_server(
receiver,
5_000,
0,
DEFAULT_TPU_USE_QUIC,
);
let server = BanksServer::new(


@@ -2,18 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-streamer"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
clap = "2.33.1"
crossbeam-channel = "0.5"
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
clap = { version = "3.1.5", features = ["cargo"] }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, value_t, App, Arg},
clap::{crate_description, crate_name, Arg, Command},
crossbeam_channel::unbounded,
solana_streamer::{
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
@@ -57,18 +57,18 @@ fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) ->
fn main() -> Result<()> {
let mut num_sockets = 1usize;
let matches = App::new(crate_name!())
let matches = Command::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())
.arg(
Arg::with_name("num-recv-sockets")
Arg::new("num-recv-sockets")
.long("num-recv-sockets")
.value_name("NUM")
.takes_value(true)
.help("Use NUM receive sockets"),
)
.arg(
Arg::with_name("num-producers")
Arg::new("num-producers")
.long("num-producers")
.value_name("NUM")
.takes_value(true)
@@ -80,7 +80,7 @@ fn main() -> Result<()> {
num_sockets = max(num_sockets, n.to_string().parse().expect("integer"));
}
let num_producers = value_t!(matches, "num_producers", u64).unwrap_or(4);
let num_producers: u64 = matches.value_of_t("num_producers").unwrap_or(4);
let port = 0;
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
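For context, here is a minimal self-contained sketch of the clap v3 pattern this hunk adopts (App becomes Command, Arg::with_name becomes Arg::new, and the value_t! macro is replaced by ArgMatches::value_of_t); the binary name below is illustrative:

```rust
use clap::{Arg, Command};

fn main() {
    // Command replaces App; Arg::new replaces Arg::with_name.
    let matches = Command::new("bench-streamer-sketch")
        .about("illustrative clap v3 usage")
        .arg(
            Arg::new("num-recv-sockets")
                .long("num-recv-sockets")
                .value_name("NUM")
                .takes_value(true)
                .help("Use NUM receive sockets"),
        )
        .get_matches();

    // value_of_t parses the argument into the requested type, replacing value_t!.
    let num_sockets: usize = matches.value_of_t("num-recv-sockets").unwrap_or(1);
    println!("num sockets: {}", num_sockets);
}
```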


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-tps"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,23 +15,23 @@ log = "0.4.14"
rayon = "1.5.1"
serde_json = "1.0.79"
serde_yaml = "0.8.23"
solana-client = { path = "../client", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.10.3" }
solana-faucet = { path = "../faucet", version = "=1.10.3" }
solana-genesis = { path = "../genesis", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-faucet = { path = "../faucet", version = "=1.11.0" }
solana-genesis = { path = "../genesis", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[dev-dependencies]
serial_test = "0.6.0"
solana-local-cluster = { path = "../local-cluster", version = "=1.10.3" }
solana-local-cluster = { path = "../local-cluster", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
[package]
name = "solana-bloom"
version = "1.10.3"
version = "1.11.0"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -17,9 +17,9 @@ rand = "0.7.0"
rayon = "1.5.1"
serde = { version = "1.0.136", features = ["rc"] }
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.3" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
[lib]
crate-type = ["lib"]


@@ -1,6 +1,6 @@
[package]
name = "solana-bucket-map"
version = "1.10.3"
version = "1.11.0"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
@@ -15,14 +15,14 @@ log = { version = "0.4.11" }
memmap2 = "0.5.3"
modular-bitfield = "0.11.2"
rand = "0.7.0"
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
tempfile = "3.3.0"
[dev-dependencies]
fs_extra = "1.2.0"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[lib]
crate-type = ["lib"]


@@ -137,7 +137,7 @@ all_test_steps() {
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
@@ -152,14 +152,14 @@ all_test_steps() {
^ci/test-coverage.sh \
^scripts/coverage-in-disk.sh \
; then
command_step coverage-in-disk ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
command_step coverage-in-disk ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
"Coverage skipped as no .rs files were modified"
fi
# Full test suite
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70
wait_step
# BPF test suite
@@ -303,7 +303,7 @@ EOF
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
40
}
pull_or_push_steps() {


@@ -139,7 +139,7 @@ all_test_steps() {
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
@@ -147,7 +147,7 @@ all_test_steps() {
fi
# Full test suite
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70
wait_step
# BPF test suite
@@ -295,7 +295,7 @@ EOF
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
40
}
pull_or_push_steps() {


@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.10.3"
version = "1.11.0"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,9 +13,9 @@ edition = "2021"
chrono = "0.4"
clap = "2.33.0"
rpassword = "6.0"
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.3", default-features = false }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.0", default-features = false }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
uriparse = "0.6.3"


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,14 +17,15 @@ clap = "2.33.0"
console = "0.15.0"
humantime = "2.0.1"
indicatif = "0.16.2"
pretty-hex = "0.2.1"
serde = "1.0.136"
serde_json = "1.0.79"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
[dev-dependencies]


@@ -2776,10 +2776,10 @@ mod tests {
let expected_msg = "AwECBwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDgTl3Dqh9\
F19Wo1Rmw0x+zMuNipG07jeiXfYPW4/Js5QEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE\
BAQEBAYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBQUFBQUFBQUFBQUFBQUFBQUF\
BQUFBQUFBQUFBQUFBQUGp9UXGSxWjuCKhF9z0peIzwNcMUWyGrNE2AYuqUAAAAAAAAAAAAAA\
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcH\
BwcCBgMDBQIEBAAAAAYCAQQMAgAAACoAAAAAAAAA"
BAQEBAUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBgYGBgYGBgYGBgYGBgYGBgYG\
BgYGBgYGBgYGBgYGBgYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAan1RcZLFaO\
4IqEX3PSl4jPA1wxRbIas0TYBi6pQAAABwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcH\
BwcCBQMEBgIEBAAAAAUCAQMMAgAAACoAAAAAAAAA"
.to_string();
let config = ReturnSignersConfig {
dump_transaction_message: true,


@@ -14,6 +14,7 @@ use {
signature::Signature,
stake,
transaction::{TransactionError, TransactionVersion, VersionedTransaction},
transaction_context::TransactionReturnData,
},
solana_transaction_status::{Rewards, UiTransactionStatusMeta},
spl_memo::{id as spl_memo_id, v1::id as spl_memo_v1_id},
@@ -246,6 +247,7 @@ fn write_transaction<W: io::Write>(
write_fees(w, transaction_status.fee, prefix)?;
write_balances(w, transaction_status, prefix)?;
write_log_messages(w, transaction_status.log_messages.as_ref(), prefix)?;
write_return_data(w, transaction_status.return_data.as_ref(), prefix)?;
write_rewards(w, transaction_status.rewards.as_ref(), prefix)?;
} else {
writeln!(w, "{}Status: Unavailable", prefix)?;
@@ -576,6 +578,25 @@ fn write_balances<W: io::Write>(
Ok(())
}
fn write_return_data<W: io::Write>(
w: &mut W,
return_data: Option<&TransactionReturnData>,
prefix: &str,
) -> io::Result<()> {
if let Some(return_data) = return_data {
if !return_data.data.is_empty() {
use pretty_hex::*;
writeln!(
w,
"{}Return Data from Program {}:",
prefix, return_data.program_id
)?;
writeln!(w, "{} {:?}", prefix, return_data.data.hex_dump())?;
}
}
Ok(())
}
fn write_log_messages<W: io::Write>(
w: &mut W,
log_messages: Option<&Vec<String>>,
@@ -750,6 +771,10 @@ mod test {
commission: None,
}]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData {
program_id: Pubkey::new_from_array([2u8; 32]),
data: vec![1, 2, 3],
}),
};
let output = {
@@ -786,6 +811,9 @@ Status: Ok
Account 1 balance: ◎0.00001 -> ◎0.0000099
Log Messages:
Test message
Return Data from Program 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR:
Length: 3 (0x3) bytes
0000: 01 02 03 ...
Rewards:
Address Type Amount New Balance \0
4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi rent -◎0.000000100 ◎0.000009900 \0
@@ -820,6 +848,10 @@ Rewards:
commission: None,
}]),
loaded_addresses,
return_data: Some(TransactionReturnData {
program_id: Pubkey::new_from_array([2u8; 32]),
data: vec![1, 2, 3],
}),
};
let output = {
@@ -865,6 +897,9 @@ Status: Ok
Account 3 balance: ◎0.00002
Log Messages:
Test message
Return Data from Program 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR:
Length: 3 (0x3) bytes
0000: 01 02 03 ...
Rewards:
Address Type Amount New Balance \0
CktRuQ2mttgRGkXJtyksdKHjUdc2C4TgDzyB98oEzy8 rent -◎0.000000100 ◎0.000014900 \0
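For reference, the `Length:` and `0000:` lines asserted above come from pretty-hex's `hex_dump()`, which `write_return_data` formats with `{:?}`; a minimal standalone sketch (exact spacing may vary between pretty-hex versions):

```rust
// Sketch of the hex-dump formatting used by write_return_data above.
use pretty_hex::*;

fn main() {
    let data: Vec<u8> = vec![1, 2, 3];
    // Prints roughly:
    //   Length: 3 (0x3) bytes
    //   0000:   01 02 03   ...
    println!("{:?}", data.hex_dump());
}
```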


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,29 +27,29 @@ semver = "1.0.6"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_json = "1.0.79"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.3" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-cli-config = { path = "../cli-config", version = "=1.10.3" }
solana-cli-output = { path = "../cli-output", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-config-program = { path = "../programs/config", version = "=1.10.3" }
solana-faucet = { path = "../faucet", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-cli-config = { path = "../cli-config", version = "=1.11.0" }
solana-cli-output = { path = "../cli-output", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-config-program = { path = "../programs/config", version = "=1.11.0" }
solana-faucet = { path = "../faucet", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.11.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
solana_rbpf = "=0.2.24"
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
[dev-dependencies]
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-test-validator = { path = "../test-validator", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-test-validator = { path = "../test-validator", version = "=1.11.0" }
tempfile = "3.3.0"
[[bin]]


@@ -33,7 +33,7 @@ use {
rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE,
rpc_response::SlotInfo,
},
solana_program_runtime::compute_budget::ComputeBudget,
solana_program_runtime::compute_budget,
solana_remote_wallet::remote_wallet::RemoteWalletManager,
solana_sdk::{
account::from_account,
@@ -1409,7 +1409,7 @@ pub fn process_ping(
)];
if let Some(additional_fee) = additional_fee {
ixs.push(ComputeBudgetInstruction::request_units(
ComputeBudget::new(false).max_units as u32,
compute_budget::DEFAULT_UNITS,
*additional_fee,
));
}


@@ -1,6 +1,6 @@
[package]
name = "solana-client-test"
version = "1.10.3"
version = "1.11.0"
description = "Solana RPC Test"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,25 +14,25 @@ publish = false
futures-util = "0.3.21"
serde_json = "1.0.79"
serial_test = "0.6.0"
solana-client = { path = "../client", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.3" }
solana-rpc = { path = "../rpc", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-test-validator = { path = "../test-validator", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.0" }
solana-rpc = { path = "../rpc", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-test-validator = { path = "../test-validator", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
systemstat = "0.1.10"
tokio = { version = "1", features = ["full"] }
[dev-dependencies]
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.10.3"
version = "1.11.0"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -35,15 +35,15 @@ semver = "1.0.6"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_json = "1.0.79"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-faucet = { path = "../faucet", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-faucet = { path = "../faucet", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-stream = "0.1.8"
@@ -54,7 +54,7 @@ url = "2.2.2"
[dev-dependencies]
assert_matches = "1.5.0"
jsonrpc-http-server = "18.0.0"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,5 +1,7 @@
use {
crate::{tpu_connection::TpuConnection, udp_client::UdpTpuConnection},
crate::{
quic_client::QuicTpuConnection, tpu_connection::TpuConnection, udp_client::UdpTpuConnection,
},
lazy_static::lazy_static,
std::{
collections::{hash_map::Entry, BTreeMap, HashMap},
@@ -23,6 +25,7 @@ struct ConnMap {
// that seems non-"Rust-y" and low bang/buck. This is still pretty terrible though...
last_used_times: BTreeMap<u64, SocketAddr>,
ticks: u64,
use_quic: bool,
}
impl ConnMap {
@@ -31,21 +34,31 @@ impl ConnMap {
map: HashMap::new(),
last_used_times: BTreeMap::new(),
ticks: 0,
use_quic: false,
}
}
pub fn set_use_quic(&mut self, use_quic: bool) {
self.use_quic = use_quic;
}
}
lazy_static! {
static ref CONNECTION_MAP: Mutex<ConnMap> = Mutex::new(ConnMap::new());
}
pub fn set_use_quic(use_quic: bool) {
let mut map = (*CONNECTION_MAP).lock().unwrap();
map.set_use_quic(use_quic);
}
#[allow(dead_code)]
// TODO: see https://github.com/solana-labs/solana/issues/23661
// remove lazy_static and optimize and refactor this
pub fn get_connection(addr: &SocketAddr) -> Arc<dyn TpuConnection + 'static + Sync + Send> {
let mut map = (*CONNECTION_MAP).lock().unwrap();
let ticks = map.ticks;
let use_quic = map.use_quic;
let (conn, target_ticks) = match map.map.entry(*addr) {
Entry::Occupied(mut entry) => {
let mut pair = entry.get_mut();
@@ -57,12 +70,15 @@ pub fn get_connection(addr: &SocketAddr) -> Arc<dyn TpuConnection + 'static + Sy
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
// TODO: see https://github.com/solana-labs/solana/issues/23659
// make it configurable (e.g. via the command line) whether to use UDP or Quic
let conn = Arc::new(UdpTpuConnection::new(send_socket, *addr));
let conn: Arc<dyn TpuConnection + 'static + Sync + Send> = if use_quic {
Arc::new(QuicTpuConnection::new(send_socket, *addr))
} else {
Arc::new(UdpTpuConnection::new(send_socket, *addr))
};
entry.insert((conn.clone(), ticks));
(
conn as Arc<dyn TpuConnection + 'static + Sync + Send>,
ticks,
)
(conn, ticks)
}
};
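A hedged usage sketch of the new flag (assuming these functions live at `solana_client::connection_cache` and the trait at `solana_client::tpu_connection`, which this excerpt does not confirm): toggle QUIC once for the process, then fetch connections from the cache as before.

```rust
use {
    solana_client::{connection_cache, tpu_connection::TpuConnection},
    std::net::SocketAddr,
};

// Sketch only: connections created after set_use_quic(true) are QuicTpuConnection,
// otherwise UdpTpuConnection; entries already in the cache keep their original type.
fn send_bytes(tpu_addr: SocketAddr, wire_transaction: &[u8], use_quic: bool) {
    connection_cache::set_use_quic(use_quic);
    let conn = connection_cache::get_connection(&tpu_addr);
    conn.send_wire_transaction(wire_transaction)
        .expect("send failed");
}
```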


@@ -229,6 +229,7 @@ impl RpcSender for MockSender {
post_token_balances: None,
rewards: None,
loaded_addresses: None,
return_data: None,
}),
},
block_time: Some(1628633791),
@@ -340,6 +341,7 @@ impl RpcSender for MockSender {
logs: None,
accounts: None,
units_consumed: None,
return_data: None,
},
})?,
"getMinimumBalanceForRentExemption" => json![20],


@@ -7,10 +7,8 @@ use {
futures::future::join_all,
itertools::Itertools,
quinn::{ClientConfig, Endpoint, EndpointConfig, NewConnection, WriteError},
rayon::iter::{IntoParallelIterator, ParallelIterator},
solana_sdk::{
quic::{QUIC_MAX_CONCURRENT_STREAMS, QUIC_PORT_OFFSET},
transaction::Transaction,
transport::Result as TransportResult,
},
std::{
@@ -65,21 +63,19 @@ impl TpuConnection for QuicTpuConnection {
&self.client.addr
}
fn send_wire_transaction(&self, data: Vec<u8>) -> TransportResult<()> {
fn send_wire_transaction(&self, wire_transaction: &[u8]) -> TransportResult<()> {
let _guard = self.client.runtime.enter();
let send_buffer = self.client.send_buffer(&data[..]);
let send_buffer = self.client.send_buffer(wire_transaction);
self.client.runtime.block_on(send_buffer)?;
Ok(())
}
fn send_batch(&self, transactions: Vec<Transaction>) -> TransportResult<()> {
let buffers = transactions
.into_par_iter()
.map(|tx| bincode::serialize(&tx).expect("serialize Transaction in send_batch"))
.collect::<Vec<_>>();
fn send_wire_transaction_batch(
&self,
wire_transaction_batch: &[Vec<u8>],
) -> TransportResult<()> {
let _guard = self.client.runtime.enter();
let send_batch = self.client.send_batch(&buffers[..]);
let send_batch = self.client.send_batch(wire_transaction_batch);
self.client.runtime.block_on(send_batch)?;
Ok(())
}
@@ -182,7 +178,7 @@ impl QuicClient {
if buffers.is_empty() {
return Ok(());
}
let connection = self._send_buffer(&buffers[0][..]).await?;
let connection = self._send_buffer(&buffers[0]).await?;
// Used to avoid dereferencing the Arc multiple times below
// by just getting a reference to the NewConnection once
@@ -196,7 +192,7 @@ impl QuicClient {
join_all(
buffs
.into_iter()
.map(|buf| Self::_send_buffer_using_conn(&buf[..], connection_ref)),
.map(|buf| Self::_send_buffer_using_conn(buf, connection_ref)),
)
});


@@ -7,6 +7,7 @@ use {
hash::Hash,
inflation::Inflation,
transaction::{Result, TransactionError},
transaction_context::TransactionReturnData,
},
solana_transaction_status::{
ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock,
@@ -347,6 +348,7 @@ pub struct RpcSimulateTransactionResult {
pub logs: Option<Vec<String>>,
pub accounts: Option<Vec<Option<UiAccount>>>,
pub units_consumed: Option<u64>,
pub return_data: Option<TransactionReturnData>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]


@@ -24,7 +24,7 @@ use {
signers::Signers,
system_instruction,
timing::duration_as_ms,
transaction::{self, Transaction},
transaction::{self, Transaction, VersionedTransaction},
transport::Result as TransportResult,
},
std::{
@@ -215,10 +215,13 @@ impl<C: 'static + TpuConnection> ThinClient<C> {
let mut num_confirmed = 0;
let mut wait_time = MAX_PROCESSING_AGE;
// resend the same transaction until the transaction has no chance of succeeding
let wire_transaction =
bincode::serialize(&transaction).expect("transaction serialization failed");
while now.elapsed().as_secs() < wait_time as u64 {
if num_confirmed == 0 {
// Send the transaction if there has been no confirmation (e.g. the first time)
self.tpu_connection().send_transaction(transaction)?;
self.tpu_connection()
.send_wire_transaction(&wire_transaction)?;
}
if let Ok(confirmed_blocks) = self.poll_for_signature_confirmation(
@@ -601,12 +604,17 @@ impl<C: 'static + TpuConnection> SyncClient for ThinClient<C> {
impl<C: 'static + TpuConnection> AsyncClient for ThinClient<C> {
fn async_send_transaction(&self, transaction: Transaction) -> TransportResult<Signature> {
self.tpu_connection().send_transaction(&transaction)?;
let transaction = VersionedTransaction::from(transaction);
self.tpu_connection()
.serialize_and_send_transaction(&transaction)?;
Ok(transaction.signatures[0])
}
fn async_send_batch(&self, transactions: Vec<Transaction>) -> TransportResult<()> {
self.tpu_connection().send_batch(transactions)
let batch: Vec<VersionedTransaction> = transactions.into_iter().map(Into::into).collect();
self.tpu_connection()
.par_serialize_and_send_transaction_batch(&batch)?;
Ok(())
}
fn async_send_message<T: Signers>(


@@ -1,5 +1,6 @@
use {
solana_sdk::{transaction::Transaction, transport::Result as TransportResult},
rayon::iter::{IntoParallelRefIterator, ParallelIterator},
solana_sdk::{transaction::VersionedTransaction, transport::Result as TransportResult},
std::net::{SocketAddr, UdpSocket},
};
@@ -10,12 +11,35 @@ pub trait TpuConnection {
fn tpu_addr(&self) -> &SocketAddr;
fn send_transaction(&self, tx: &Transaction) -> TransportResult<()> {
let data = bincode::serialize(tx).expect("serialize Transaction in send_transaction");
self.send_wire_transaction(data)
fn serialize_and_send_transaction(
&self,
transaction: &VersionedTransaction,
) -> TransportResult<()> {
let wire_transaction =
bincode::serialize(transaction).expect("serialize Transaction in send_batch");
self.send_wire_transaction(&wire_transaction)
}
fn send_wire_transaction(&self, data: Vec<u8>) -> TransportResult<()>;
fn send_wire_transaction(&self, wire_transaction: &[u8]) -> TransportResult<()>;
fn send_batch(&self, transactions: Vec<Transaction>) -> TransportResult<()>;
fn par_serialize_and_send_transaction_batch(
&self,
transaction_batch: &[VersionedTransaction],
) -> TransportResult<()> {
let wire_transaction_batch: Vec<_> = transaction_batch
.par_iter()
.map(|tx| bincode::serialize(&tx).expect("serialize Transaction in send_batch"))
.collect();
self.send_wire_transaction_batch(&wire_transaction_batch)
}
fn send_wire_transaction_batch(
&self,
wire_transaction_batch: &[Vec<u8>],
) -> TransportResult<()> {
for wire_transaction in wire_transaction_batch {
self.send_wire_transaction(wire_transaction)?;
}
Ok(())
}
}
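The refactored trait above requires implementors to provide only the raw `send_wire_transaction`, while the serializing variants are default methods layered on top. Here is a self-contained sketch of that shape; the local trait copy, `Tx`, `to_wire`, and `MockConnection` are simplified stand-ins (serialization is stubbed instead of using bincode, and the batch path is sequential rather than rayon-parallel).

```rust
type TransportResult<T> = Result<T, String>;

// Simplified stand-in for a transaction; real code serializes a VersionedTransaction.
#[derive(Debug)]
struct Tx(String);

fn to_wire(tx: &Tx) -> Vec<u8> {
    tx.0.as_bytes().to_vec()
}

trait TpuConnectionLike {
    // The only method an implementor must provide.
    fn send_wire_transaction(&self, wire_transaction: &[u8]) -> TransportResult<()>;

    // Default: serialize, then delegate to the wire-level primitive.
    fn serialize_and_send_transaction(&self, tx: &Tx) -> TransportResult<()> {
        let wire = to_wire(tx);
        self.send_wire_transaction(&wire)
    }

    // Default: serialize a batch, then send each payload.
    fn serialize_and_send_batch(&self, batch: &[Tx]) -> TransportResult<()> {
        let wire_batch: Vec<Vec<u8>> = batch.iter().map(to_wire).collect();
        for wire in &wire_batch {
            self.send_wire_transaction(wire)?;
        }
        Ok(())
    }
}

struct MockConnection;

impl TpuConnectionLike for MockConnection {
    fn send_wire_transaction(&self, wire_transaction: &[u8]) -> TransportResult<()> {
        println!("wire bytes: {}", wire_transaction.len());
        Ok(())
    }
}

fn main() -> TransportResult<()> {
    let conn = MockConnection;
    conn.serialize_and_send_transaction(&Tx("one".into()))?;
    conn.serialize_and_send_batch(&[Tx("a".into()), Tx("b".into())])
}
```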

View File

@@ -3,7 +3,7 @@
use {
crate::tpu_connection::TpuConnection,
solana_sdk::{transaction::Transaction, transport::Result as TransportResult},
solana_sdk::transport::Result as TransportResult,
std::net::{SocketAddr, UdpSocket},
};
@@ -24,19 +24,8 @@ impl TpuConnection for UdpTpuConnection {
&self.addr
}
fn send_wire_transaction(&self, data: Vec<u8>) -> TransportResult<()> {
self.socket.send_to(&data[..], self.addr)?;
Ok(())
}
fn send_batch(&self, transactions: Vec<Transaction>) -> TransportResult<()> {
transactions
.into_iter()
.map(|tx| bincode::serialize(&tx).expect("serialize Transaction in send_batch"))
.try_for_each(|buff| -> TransportResult<()> {
self.socket.send_to(&buff[..], self.addr)?;
Ok(())
})?;
fn send_wire_transaction(&self, wire_transaction: &[u8]) -> TransportResult<()> {
self.socket.send_to(wire_transaction, self.addr)?;
Ok(())
}
}
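The UDP implementation above now just forwards a borrowed byte slice to `send_to`. A std-only usage sketch of that path, with loopback addresses standing in for a validator's TPU port:

```rust
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // A receiver standing in for the TPU port (loopback, OS-assigned port).
    let receiver = UdpSocket::bind("127.0.0.1:0")?;
    let target = receiver.local_addr()?;

    // The connection only needs the raw wire bytes; no Transaction type is involved.
    let sender = UdpSocket::bind("127.0.0.1:0")?;
    let wire_transaction: &[u8] = b"serialized-transaction-bytes";
    sender.send_to(wire_transaction, target)?;

    let mut buf = [0u8; 1232]; // roughly packet-sized buffer
    let (len, from) = receiver.recv_from(&mut buf)?;
    println!("received {} bytes from {}", len, from);
    Ok(())
}
```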

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"
@@ -33,30 +33,30 @@ rayon = "1.5.1"
retain_mut = "0.1.7"
serde = "1.0.136"
serde_derive = "1.0.103"
solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.10.3" }
solana-bloom = { path = "../bloom", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.3" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.3" }
solana-geyser-plugin-manager = { path = "../geyser-plugin-manager", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-poh = { path = "../poh", version = "=1.10.3" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.3" }
solana-replica-lib = { path = "../replica-lib", version = "=1.10.3" }
solana-rpc = { path = "../rpc", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.0" }
solana-bloom = { path = "../bloom", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.0" }
solana-geyser-plugin-manager = { path = "../geyser-plugin-manager", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-poh = { path = "../poh", version = "=1.11.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.11.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.0" }
solana-replica-lib = { path = "../replica-lib", version = "=1.11.0" }
solana-rpc = { path = "../rpc", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
sys-info = "0.9.1"
tempfile = "3.3.0"
thiserror = "1.0"
@@ -69,10 +69,10 @@ raptorq = "1.6.5"
reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde_json = "1.0.79"
serial_test = "0.6.0"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.3" }
solana-stake-program = { path = "../programs/stake", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.11.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
static_assertions = "1.1.0"
systemstat = "0.1.10"

View File

@@ -159,7 +159,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
for _ in 0..batches.len() {
if let Some(batch) = batches.pop() {
sent_len += batch.packets.len();
packet_s.send(batch).unwrap();
packet_s.send(vec![batch]).unwrap();
}
}
let mut received = 0;

View File

@@ -10,8 +10,8 @@ use {
solana_gossip::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES},
solana_measure::measure::Measure,
solana_runtime::{
accounts_db::{self, AccountsDb},
accounts_hash::HashStats,
accounts_db,
accounts_hash::{CalcAccountsHashConfig, HashStats},
snapshot_config::SnapshotConfig,
snapshot_package::{
AccountsPackage, AccountsPackageReceiver, PendingSnapshotPackage, SnapshotPackage,
@@ -129,17 +129,20 @@ impl AccountsHashVerifier {
let mut measure_hash = Measure::start("hash");
if let Some(expected_hash) = accounts_package.hash_for_testing {
let sorted_storages = SortedStorages::new(&accounts_package.snapshot_storages);
let (hash, lamports) = AccountsDb::calculate_accounts_hash_without_index(
ledger_path,
&sorted_storages,
thread_pool,
HashStats::default(),
false,
None,
None, // this will fail with filler accounts
None, // this code path is only for testing, so use default # passes here
)
.unwrap();
let (hash, lamports) = accounts_package
.accounts
.accounts_db
.calculate_accounts_hash_without_index(&mut CalcAccountsHashConfig {
accounts_hash_cache_path: ledger_path,
storages: &sorted_storages,
thread_pool,
stats: HashStats::default(),
check_hash: false,
accounts_cache_and_ancestors: None,
filler_account_suffix: None, // this will fail with filler accounts
num_hash_scan_passes: None, // this code path is only for testing, so use default # passes here
})
.unwrap();
assert_eq!(accounts_package.expected_capitalization, lamports);
assert_eq!(expected_hash, hash);
@@ -353,6 +356,7 @@ mod tests {
incremental_snapshot_archive_interval_slots: Slot::MAX,
..SnapshotConfig::default()
};
let accounts = Arc::new(solana_runtime::accounts::Accounts::default_for_tests());
for i in 0..MAX_SNAPSHOT_HASHES + 1 {
let accounts_package = AccountsPackage {
slot: full_snapshot_archive_interval_slots + i as u64,
@@ -368,6 +372,7 @@ mod tests {
hash_for_testing: None,
cluster_type: ClusterType::MainnetBeta,
snapshot_type: None,
accounts: Arc::clone(&accounts),
};
let ledger_path = TempDir::new().unwrap();
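The hunk above replaces a long positional argument list with a single `CalcAccountsHashConfig` value passed to a `&self` method. A hedged sketch of that parameter-object pattern follows; `HashConfig`, `Store`, and the toy hash math are illustrative, not the runtime's real types.

```rust
#[derive(Debug)]
struct HashConfig<'a> {
    check_hash: bool,
    filler_account_suffix: Option<&'a str>,
    num_scan_passes: Option<usize>,
}

impl<'a> Default for HashConfig<'a> {
    fn default() -> Self {
        Self {
            check_hash: false,
            filler_account_suffix: None,
            num_scan_passes: None, // None means "use the default pass count"
        }
    }
}

struct Store {
    lamports: Vec<u64>,
}

impl Store {
    // Takes &self plus one config value instead of many positional parameters.
    fn calculate_hash(&self, config: &HashConfig) -> (u64, u64) {
        let passes = config.num_scan_passes.unwrap_or(2);
        let total: u64 = self.lamports.iter().sum();
        // Toy "hash": fold the balances; the real code hashes full account state.
        let hash = self
            .lamports
            .iter()
            .fold(passes as u64, |acc, l| acc.wrapping_mul(31).wrapping_add(*l));
        (hash, total)
    }
}

fn main() {
    let store = Store { lamports: vec![5, 7, 11] };
    let (hash, lamports) = store.calculate_hash(&HashConfig::default());
    println!("hash={} lamports={}", hash, lamports);
}
```

Call sites can name only the fields they care about and take the rest from `Default`, which is what makes this preferable to a growing positional list.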

View File

@@ -1,5 +1,5 @@
//! The `banking_stage` processes Transaction messages. It is intended to be used
//! to contruct a software pipeline. The stage uses all available CPU cores and
//! to construct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
use {
crate::{
@@ -194,7 +194,7 @@ impl BankingStageStats {
}
fn report(&mut self, report_interval_ms: u64) {
// skip repoting metrics if stats is empty
// skip reporting metrics if stats is empty
if self.is_empty() {
return;
}
@@ -700,14 +700,11 @@ impl BankingStage {
// `original_unprocessed_indexes` must have remaining packets to process
// if not yet processed.
assert!(Self::packet_has_more_unprocessed_transactions(
&original_unprocessed_indexes
));
assert!(!original_unprocessed_indexes.is_empty());
true
}
}
});
proc_start.stop();
debug!(
@@ -1183,6 +1180,7 @@ impl BankingStage {
MAX_PROCESSING_AGE,
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
&mut execute_and_commit_timings.execute_timings,
)
},
@@ -2015,7 +2013,7 @@ impl BankingStage {
banking_stage_stats: &mut BankingStageStats,
slot_metrics_tracker: &mut LeaderSlotMetricsTracker,
) {
if Self::packet_has_more_unprocessed_transactions(&packet_indexes) {
if !packet_indexes.is_empty() {
if unprocessed_packet_batches.len() >= batch_limit {
*dropped_packet_batches_count += 1;
if let Some(dropped_batch) = unprocessed_packet_batches.pop_front() {
@@ -2041,10 +2039,6 @@ impl BankingStage {
}
}
fn packet_has_more_unprocessed_transactions(packet_indexes: &[usize]) -> bool {
!packet_indexes.is_empty()
}
pub fn join(self) -> thread::Result<()> {
for bank_thread_hdl in self.bank_thread_hdls {
bank_thread_hdl.join()?;
@@ -2156,6 +2150,7 @@ mod tests {
log_messages: None,
inner_instructions: None,
durable_nonce_fee: None,
return_data: None,
})
}

View File

@@ -134,10 +134,6 @@ impl CostUpdateService {
.upsert_instruction_cost(program_id, units);
update_count += 1;
}
debug!(
"after replayed into bank, updated cost model instruction cost table, current values: {:?}",
cost_model.read().unwrap().get_instruction_cost_table()
);
update_count
}
}
@@ -150,15 +146,10 @@ mod tests {
fn test_update_cost_model_with_empty_execute_timings() {
let cost_model = Arc::new(RwLock::new(CostModel::default()));
let mut empty_execute_timings = ExecuteTimings::default();
CostUpdateService::update_cost_model(&cost_model, &mut empty_execute_timings);
assert_eq!(
0,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
CostUpdateService::update_cost_model(&cost_model, &mut empty_execute_timings),
);
}
@@ -188,22 +179,16 @@ mod tests {
total_errored_units,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
);
assert_eq!(
Some(&expected_cost),
expected_cost,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
.find_instruction_cost(&program_key_1)
);
}
@@ -225,22 +210,16 @@ mod tests {
total_errored_units: 0,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
);
assert_eq!(
Some(&expected_cost),
expected_cost,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
.find_instruction_cost(&program_key_1)
);
}
}
@@ -264,20 +243,46 @@ mod tests {
total_errored_units: 0,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
// If both the `errored_txs_compute_consumed` is empty and `count == 0`, then
// nothing should be inserted into the cost model
assert!(cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.is_empty());
assert_eq!(
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
0
);
}
// set up current instruction cost to 100
let current_program_cost = 100;
{
execute_timings.details.per_program_timings.insert(
program_key_1,
ProgramTiming {
accumulated_us: 1000,
accumulated_units: current_program_cost,
count: 1,
errored_txs_compute_consumed: vec![],
total_errored_units: 0,
},
);
assert_eq!(
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
1
);
assert_eq!(
current_program_cost,
cost_model
.read()
.unwrap()
.find_instruction_cost(&program_key_1)
);
}
// Test updating cost model with only erroring compute costs where the `cost_per_error` is
// greater than the current instruction cost for the program. Should update with the
// new erroring compute costs
let cost_per_error = 1000;
// the expected cost is (previous_cost + new_cost)/2 = (100 + 1000)/2 = 550
let expected_units = 550;
{
let errored_txs_compute_consumed = vec![cost_per_error; 3];
let total_errored_units = errored_txs_compute_consumed.iter().sum();
@@ -291,29 +296,23 @@ mod tests {
total_errored_units,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
1
);
assert_eq!(
Some(&cost_per_error),
expected_units,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
.find_instruction_cost(&program_key_1)
);
}
// Test updating cost model with only erroring compute costs where the error cost is
// `smaller_cost_per_error`, less than the current instruction cost for the program.
// The cost should not decrease for these new lesser errors
let smaller_cost_per_error = cost_per_error - 10;
let smaller_cost_per_error = expected_units - 10;
{
let errored_txs_compute_consumed = vec![smaller_cost_per_error; 3];
let total_errored_units = errored_txs_compute_consumed.iter().sum();
@@ -327,22 +326,16 @@ mod tests {
total_errored_units,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
1
);
assert_eq!(
Some(&cost_per_error),
expected_units,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
.find_instruction_cost(&program_key_1)
);
}
}
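The reworked tests above exercise an update rule where a newly observed unit cost is averaged with the current table entry, and a smaller subsequent observation does not pull the stored cost down. The toy model below reconstructs that arithmetic ((100 + 1000) / 2 = 550) under those assumptions; it is not the actual `CostModel` implementation.

```rust
use std::collections::HashMap;

#[derive(Default)]
struct ToyCostModel {
    instruction_costs: HashMap<&'static str, u64>,
}

impl ToyCostModel {
    // Average the observed cost with the current entry, but never lower the entry
    // (mirrors the expectations in the tests above; the real rule may differ).
    fn upsert_instruction_cost(&mut self, program: &'static str, observed_units: u64) -> u64 {
        let entry = self.instruction_costs.entry(program).or_insert(observed_units);
        let averaged = (*entry + observed_units) / 2;
        if averaged > *entry {
            *entry = averaged;
        }
        *entry
    }
}

fn main() {
    let mut model = ToyCostModel::default();
    assert_eq!(model.upsert_instruction_cost("program_1", 100), 100);
    // Errored transactions reported 1000 units: (100 + 1000) / 2 = 550.
    assert_eq!(model.upsert_instruction_cost("program_1", 1000), 550);
    // A smaller erroring cost does not decrease the stored value.
    assert_eq!(model.upsert_instruction_cost("program_1", 540), 550);
    println!("final cost: {}", model.instruction_costs["program_1"]);
}
```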

View File

@@ -0,0 +1,185 @@
use {
crossbeam_channel::{Receiver, RecvTimeoutError, Sender},
rayon::{prelude::*, ThreadPool},
solana_gossip::cluster_info::ClusterInfo,
solana_measure::measure::Measure,
solana_perf::packet::PacketBatch,
solana_rayon_threadlimit::get_thread_count,
solana_runtime::bank_forks::BankForks,
solana_sdk::timing::timestamp,
solana_streamer::streamer::{self, StreamerError},
std::{
cell::RefCell,
collections::HashMap,
net::IpAddr,
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
},
};
const IP_TO_STAKE_REFRESH_DURATION: Duration = Duration::from_secs(5);
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("transaction_sender_stake_stage_{}", ix))
.build()
.unwrap()));
pub type FindPacketSenderStakeSender = Sender<Vec<PacketBatch>>;
pub type FindPacketSenderStakeReceiver = Receiver<Vec<PacketBatch>>;
#[derive(Debug, Default)]
struct FindPacketSenderStakeStats {
last_print: u64,
refresh_ip_to_stake_time: u64,
apply_sender_stakes_time: u64,
send_batches_time: u64,
receive_batches_time: u64,
total_batches: u64,
total_packets: u64,
}
impl FindPacketSenderStakeStats {
fn report(&mut self) {
let now = timestamp();
let elapsed_ms = now - self.last_print;
if elapsed_ms > 2000 {
datapoint_info!(
"find_packet_sender_stake-services_stats",
(
"refresh_ip_to_stake_time",
self.refresh_ip_to_stake_time as i64,
i64
),
(
"apply_sender_stakes_time",
self.apply_sender_stakes_time as i64,
i64
),
("send_batches_time", self.send_batches_time as i64, i64),
(
"receive_batches_time",
self.receive_batches_time as i64,
i64
),
("total_batches", self.total_batches as i64, i64),
("total_packets", self.total_packets as i64, i64),
);
*self = FindPacketSenderStakeStats::default();
self.last_print = now;
}
}
}
pub struct FindPacketSenderStakeStage {
thread_hdl: JoinHandle<()>,
}
impl FindPacketSenderStakeStage {
pub fn new(
packet_receiver: streamer::PacketBatchReceiver,
sender: FindPacketSenderStakeSender,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<ClusterInfo>,
) -> Self {
let mut stats = FindPacketSenderStakeStats::default();
let thread_hdl = Builder::new()
.name("find-packet-sender-stake".to_string())
.spawn(move || {
let mut last_stakes = Instant::now();
let mut ip_to_stake: HashMap<IpAddr, u64> = HashMap::new();
loop {
let mut refresh_ip_to_stake_time = Measure::start("refresh_ip_to_stake_time");
Self::try_refresh_ip_to_stake(
&mut last_stakes,
&mut ip_to_stake,
bank_forks.clone(),
cluster_info.clone(),
);
refresh_ip_to_stake_time.stop();
stats.refresh_ip_to_stake_time = stats
.refresh_ip_to_stake_time
.saturating_add(refresh_ip_to_stake_time.as_us());
match streamer::recv_packet_batches(&packet_receiver) {
Ok((mut batches, num_packets, recv_duration)) => {
let num_batches = batches.len();
let mut apply_sender_stakes_time =
Measure::start("apply_sender_stakes_time");
Self::apply_sender_stakes(&mut batches, &ip_to_stake);
apply_sender_stakes_time.stop();
let mut send_batches_time = Measure::start("send_batches_time");
if let Err(e) = sender.send(batches) {
info!("Sender error: {:?}", e);
}
send_batches_time.stop();
stats.apply_sender_stakes_time = stats
.apply_sender_stakes_time
.saturating_add(apply_sender_stakes_time.as_us());
stats.send_batches_time = stats
.send_batches_time
.saturating_add(send_batches_time.as_us());
stats.receive_batches_time = stats
.receive_batches_time
.saturating_add(recv_duration.as_nanos() as u64);
stats.total_batches =
stats.total_batches.saturating_add(num_batches as u64);
stats.total_packets =
stats.total_packets.saturating_add(num_packets as u64);
}
Err(e) => match e {
StreamerError::RecvTimeout(RecvTimeoutError::Disconnected) => break,
StreamerError::RecvTimeout(RecvTimeoutError::Timeout) => (),
_ => error!("error: {:?}", e),
},
}
stats.report();
}
})
.unwrap();
Self { thread_hdl }
}
fn try_refresh_ip_to_stake(
last_stakes: &mut Instant,
ip_to_stake: &mut HashMap<IpAddr, u64>,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<ClusterInfo>,
) {
if last_stakes.elapsed() > IP_TO_STAKE_REFRESH_DURATION {
let root_bank = bank_forks.read().unwrap().root_bank();
let staked_nodes = root_bank.staked_nodes();
*ip_to_stake = cluster_info
.tvu_peers()
.into_iter()
.filter_map(|node| {
let stake = staked_nodes.get(&node.id)?;
Some((node.tvu.ip(), *stake))
})
.collect();
*last_stakes = Instant::now();
}
}
fn apply_sender_stakes(batches: &mut [PacketBatch], ip_to_stake: &HashMap<IpAddr, u64>) {
PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
batches
.into_par_iter()
.flat_map(|batch| batch.packets.par_iter_mut())
.for_each(|packet| {
packet.meta.sender_stake =
*ip_to_stake.get(&packet.meta.addr().ip()).unwrap_or(&0);
});
})
});
}
pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}
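The new stage above does two things: periodically rebuild an IP-to-stake map from the staked gossip peers, and stamp each packet's metadata with its sender's stake (unknown senders get 0). A dependency-free sketch of those two steps, with `Packet` and `peer_stakes` as simplified stand-ins for the gossip and bank plumbing, and without the rayon thread pool:

```rust
use std::{
    collections::HashMap,
    net::IpAddr,
    time::{Duration, Instant},
};

const REFRESH_INTERVAL: Duration = Duration::from_secs(5);

struct Packet {
    sender_ip: IpAddr,
    sender_stake: u64,
}

// Rebuild the map only when the previous snapshot is stale.
fn maybe_refresh(
    last_refresh: &mut Instant,
    ip_to_stake: &mut HashMap<IpAddr, u64>,
    peer_stakes: &[(IpAddr, u64)], // stand-in for staked gossip peers
) {
    if last_refresh.elapsed() > REFRESH_INTERVAL {
        *ip_to_stake = peer_stakes.iter().cloned().collect();
        *last_refresh = Instant::now();
    }
}

// Unknown senders get stake 0 (the real stage does this in a rayon pool).
fn apply_sender_stakes(packets: &mut [Packet], ip_to_stake: &HashMap<IpAddr, u64>) {
    for packet in packets.iter_mut() {
        packet.sender_stake = *ip_to_stake.get(&packet.sender_ip).unwrap_or(&0);
    }
}

fn main() {
    let staked: IpAddr = "127.0.0.1".parse().unwrap();
    let unknown: IpAddr = "127.0.0.2".parse().unwrap();

    let mut ip_to_stake = HashMap::new();
    // Start "stale" so the first call refreshes immediately.
    let mut last_refresh = Instant::now() - REFRESH_INTERVAL * 2;
    maybe_refresh(&mut last_refresh, &mut ip_to_stake, &[(staked, 42)]);

    let mut packets = vec![
        Packet { sender_ip: staked, sender_stake: 0 },
        Packet { sender_ip: unknown, sender_stake: 0 },
    ];
    apply_sender_stakes(&mut packets, &ip_to_stake);
    assert_eq!(packets[0].sender_stake, 42);
    assert_eq!(packets[1].sender_stake, 0);
}
```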

View File

@@ -24,6 +24,7 @@ pub mod cost_update_service;
pub mod drop_bank_service;
pub mod duplicate_repair_status;
pub mod fetch_stage;
pub mod find_packet_sender_stake_stage;
pub mod fork_choice;
pub mod gen_keys;
pub mod heaviest_subtree_fork_choice;

View File

@@ -2301,7 +2301,7 @@ impl ReplayStage {
}
}
// send accumulated excute-timings to cost_update_service
// send accumulated execute-timings to cost_update_service
if !execute_timings.details.per_program_timings.is_empty() {
cost_update_sender
.send(CostUpdate::ExecuteTiming {
@@ -2589,7 +2589,7 @@ impl ReplayStage {
*/
// Imagine 90% of validators voted on slot 4, but only 9% landed. If everybody that fails
// the switch theshold abandons slot 4 to build on slot 8 (because it's *currently* heavier),
// the switch threshold abandons slot 4 to build on slot 8 (because it's *currently* heavier),
// then there will be no blocks to include the votes for slot 4, and the network halts
// because 90% of validators can't vote
info!(

View File

@@ -2,17 +2,17 @@
use {
crate::packet_hasher::PacketHasher,
crossbeam_channel::unbounded,
crossbeam_channel::{unbounded, Sender},
lru::LruCache,
solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats},
solana_perf::{
cuda_runtime::PinnedVec,
packet::{Packet, PacketBatchRecycler, PacketFlags},
packet::{Packet, PacketBatch, PacketBatchRecycler, PacketFlags},
recycler::Recycler,
},
solana_runtime::bank_forks::BankForks,
solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT},
solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender},
solana_streamer::streamer::{self, PacketBatchReceiver},
std::{
net::UdpSocket,
sync::{atomic::AtomicBool, Arc, RwLock},
@@ -65,7 +65,7 @@ impl ShredFetchStage {
// updates packets received on a channel and sends them on another channel
fn modify_packets<F>(
recvr: PacketBatchReceiver,
sendr: PacketBatchSender,
sendr: Sender<Vec<PacketBatch>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
modify: F,
@@ -125,7 +125,7 @@ impl ShredFetchStage {
stats = ShredFetchStats::default();
last_stats = Instant::now();
}
if sendr.send(packet_batch).is_err() {
if sendr.send(vec![packet_batch]).is_err() {
break;
}
}
@@ -134,7 +134,7 @@ impl ShredFetchStage {
fn packet_modifier<F>(
sockets: Vec<Arc<UdpSocket>>,
exit: &Arc<AtomicBool>,
sender: PacketBatchSender,
sender: Sender<Vec<PacketBatch>>,
recycler: Recycler<PinnedVec<Packet>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
@@ -170,7 +170,7 @@ impl ShredFetchStage {
sockets: Vec<Arc<UdpSocket>>,
forward_sockets: Vec<Arc<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
sender: &PacketBatchSender,
sender: &Sender<Vec<PacketBatch>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
exit: &Arc<AtomicBool>,
) -> Self {

View File

@@ -6,9 +6,9 @@
//! if perf-libs are available
use {
crate::sigverify,
crate::{find_packet_sender_stake_stage, sigverify},
core::time::Duration,
crossbeam_channel::{Receiver, RecvTimeoutError, SendError, Sender},
crossbeam_channel::{RecvTimeoutError, SendError, Sender},
itertools::Itertools,
solana_measure::measure::Measure,
solana_perf::{
@@ -16,7 +16,7 @@ use {
sigverify::{count_valid_packets, shrink_batches, Deduper},
},
solana_sdk::timing,
solana_streamer::streamer::{self, PacketBatchReceiver, StreamerError},
solana_streamer::streamer::{self, StreamerError},
std::{
thread::{self, Builder, JoinHandle},
time::Instant,
@@ -192,7 +192,7 @@ impl SigVerifier for DisabledSigVerifier {
impl SigVerifyStage {
#[allow(clippy::new_ret_no_self)]
pub fn new<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: Receiver<PacketBatch>,
packet_receiver: find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
verified_sender: Sender<Vec<PacketBatch>>,
verifier: T,
) -> Self {
@@ -227,12 +227,12 @@ impl SigVerifyStage {
fn verifier<T: SigVerifier>(
deduper: &Deduper,
recvr: &PacketBatchReceiver,
recvr: &find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
sendr: &Sender<Vec<PacketBatch>>,
verifier: &T,
stats: &mut SigVerifierStats,
) -> Result<()> {
let (mut batches, num_packets, recv_duration) = streamer::recv_packet_batches(recvr)?;
let (mut batches, num_packets, recv_duration) = streamer::recv_vec_packet_batches(recvr)?;
let batches_len = batches.len();
debug!(
@@ -312,7 +312,7 @@ impl SigVerifyStage {
}
fn verifier_service<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: PacketBatchReceiver,
packet_receiver: find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
verified_sender: Sender<Vec<PacketBatch>>,
verifier: &T,
) -> JoinHandle<()> {
@@ -358,7 +358,7 @@ impl SigVerifyStage {
}
fn verifier_services<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: PacketBatchReceiver,
packet_receiver: find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
verified_sender: Sender<Vec<PacketBatch>>,
verifier: T,
) -> JoinHandle<()> {
@@ -445,7 +445,7 @@ mod tests {
for _ in 0..batches.len() {
if let Some(batch) = batches.pop() {
sent_len += batch.packets.len();
packet_s.send(batch).unwrap();
packet_s.send(vec![batch]).unwrap();
}
}
let mut received = 0;

View File

@@ -10,6 +10,7 @@ use {
GossipVerifiedVoteHashSender, VerifiedVoteSender, VoteTracker,
},
fetch_stage::FetchStage,
find_packet_sender_stake_stage::FindPacketSenderStakeStage,
sigverify::TransactionSigVerifier,
sigverify_stage::SigVerifyStage,
},
@@ -55,6 +56,8 @@ pub struct Tpu {
cluster_info_vote_listener: ClusterInfoVoteListener,
broadcast_stage: BroadcastStage,
tpu_quic_t: thread::JoinHandle<()>,
find_packet_sender_stake_stage: FindPacketSenderStakeStage,
vote_find_packet_sender_stake_stage: FindPacketSenderStakeStage,
}
impl Tpu {
@@ -103,6 +106,26 @@ impl Tpu {
poh_recorder,
tpu_coalesce_ms,
);
let (find_packet_sender_stake_sender, find_packet_sender_stake_receiver) = unbounded();
let find_packet_sender_stake_stage = FindPacketSenderStakeStage::new(
packet_receiver,
find_packet_sender_stake_sender,
bank_forks.clone(),
cluster_info.clone(),
);
let (vote_find_packet_sender_stake_sender, vote_find_packet_sender_stake_receiver) =
unbounded();
let vote_find_packet_sender_stake_stage = FindPacketSenderStakeStage::new(
vote_packet_receiver,
vote_find_packet_sender_stake_sender,
bank_forks.clone(),
cluster_info.clone(),
);
let (verified_sender, verified_receiver) = unbounded();
let tpu_quic_t = solana_streamer::quic::spawn_server(
@@ -117,7 +140,7 @@ impl Tpu {
let sigverify_stage = {
let verifier = TransactionSigVerifier::default();
SigVerifyStage::new(packet_receiver, verified_sender, verifier)
SigVerifyStage::new(find_packet_sender_stake_receiver, verified_sender, verifier)
};
let (verified_tpu_vote_packets_sender, verified_tpu_vote_packets_receiver) = unbounded();
@@ -125,7 +148,7 @@ impl Tpu {
let vote_sigverify_stage = {
let verifier = TransactionSigVerifier::new_reject_non_vote();
SigVerifyStage::new(
vote_packet_receiver,
vote_find_packet_sender_stake_receiver,
verified_tpu_vote_packets_sender,
verifier,
)
@@ -179,6 +202,8 @@ impl Tpu {
cluster_info_vote_listener,
broadcast_stage,
tpu_quic_t,
find_packet_sender_stake_stage,
vote_find_packet_sender_stake_stage,
}
}
@@ -189,6 +214,8 @@ impl Tpu {
self.vote_sigverify_stage.join(),
self.cluster_info_vote_listener.join(),
self.banking_stage.join(),
self.find_packet_sender_stake_stage.join(),
self.vote_find_packet_sender_stake_stage.join(),
];
self.tpu_quic_t.join()?;
let broadcast_result = self.broadcast_stage.join();
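The TPU wiring above splices the new stage between fetch and sigverify by adding one more channel pair per path. A minimal sketch of that channel-splicing pattern, using std mpsc channels and string "packets" as stand-ins for crossbeam channels and `PacketBatch`es:

```rust
use std::{sync::mpsc, thread};

fn main() {
    // fetch -> stake stage -> sigverify, each hop over its own channel.
    let (fetch_sender, fetch_receiver) = mpsc::channel::<String>();
    let (stake_sender, stake_receiver) = mpsc::channel::<String>();

    // The spliced-in stage: annotate and forward everything it receives.
    let stake_stage = thread::spawn(move || {
        for packet in fetch_receiver {
            let annotated = format!("{} (stake=42)", packet); // stand-in for setting sender_stake
            if stake_sender.send(annotated).is_err() {
                break;
            }
        }
    });

    // Downstream "sigverify" consumer.
    let sigverify = thread::spawn(move || {
        for packet in stake_receiver {
            println!("verifying {}", packet);
        }
    });

    for i in 0..3 {
        fetch_sender.send(format!("packet-{}", i)).unwrap();
    }
    drop(fetch_sender); // close the pipeline so both threads exit

    stake_stage.join().unwrap();
    sigverify.join().unwrap();
}
```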

View File

@@ -37,6 +37,17 @@ pub struct DeserializedPacketBatch {
pub unprocessed_packets: HashMap<usize, DeserializedPacket>,
}
/// A reference to a packet in `UnprocessedPacketBatches`, where
/// - `batch_index` refers to the `DeserializedPacketBatch`,
/// - `packet_index` refers to the `packet` within `DeserializedPacketBatch.packet_batch`
#[derive(Debug, Default)]
pub struct PacketLocator {
#[allow(dead_code)]
batch_index: usize,
#[allow(dead_code)]
packet_index: usize,
}
/// Currently each banking_stage thread has an `UnprocessedPacketBatches` buffer to store
/// `PacketBatch`es received from sigverify. The banking thread continuously scans the buffer
/// to pick proper packets to add to the block.
@@ -79,6 +90,50 @@ impl UnprocessedPacketBatches {
pub fn with_capacity(capacity: usize) -> Self {
UnprocessedPacketBatches(VecDeque::with_capacity(capacity))
}
/// Returns total number of all packets (including unprocessed and processed) in buffer
#[allow(dead_code)]
fn get_packets_count(&self) -> usize {
self.iter()
.map(|deserialized_packet_batch| deserialized_packet_batch.packet_batch.packets.len())
.sum()
}
/// Returns total number of unprocessed packets in buffer
#[allow(dead_code)]
fn get_unprocessed_packets_count(&self) -> usize {
self.iter()
.map(|deserialized_packet_batch| deserialized_packet_batch.unprocessed_packets.len())
.sum()
}
/// Iterates the inner `Vec<DeserializedPacketBatch>`.
/// Returns the flattened result of mapping each
/// `DeserializedPacketBatch` to a list of the batch's inner
/// packets' sender stakes and their `PacketLocator`s within the
/// `Vec<DeserializedPacketBatch>`.
#[allow(dead_code)]
fn get_stakes_and_locators(&self) -> (Vec<u64>, Vec<PacketLocator>) {
self.iter()
.enumerate()
.flat_map(|(batch_index, deserialized_packet_batch)| {
let packet_batch = &deserialized_packet_batch.packet_batch;
deserialized_packet_batch
.unprocessed_packets
.keys()
.map(move |packet_index| {
let p = &packet_batch.packets[*packet_index];
(
p.meta.sender_stake,
PacketLocator {
batch_index,
packet_index: *packet_index,
},
)
})
})
.unzip()
}
}
impl DeserializedPacketBatch {
@@ -135,8 +190,8 @@ impl DeserializedPacketBatch {
Some(&packet.data[msg_start..msg_end])
}
// Returns whether the given `PacketBatch` has any more remaining unprocessed
// transactions
/// Returns whether the given `PacketBatch` has any more remaining unprocessed
/// transactions
pub fn update_buffered_packets_with_new_unprocessed(
&mut self,
_original_unprocessed_indexes: &[usize],
@@ -159,8 +214,24 @@ mod tests {
use {
super::*,
solana_sdk::{signature::Keypair, system_transaction},
std::net::IpAddr,
};
fn packet_with_sender_stake(sender_stake: u64, ip: Option<IpAddr>) -> Packet {
let tx = system_transaction::transfer(
&Keypair::new(),
&solana_sdk::pubkey::new_rand(),
1,
Hash::new_unique(),
);
let mut packet = Packet::from_data(None, &tx).unwrap();
packet.meta.sender_stake = sender_stake;
if let Some(ip) = ip {
packet.meta.addr = ip;
}
packet
}
#[test]
fn test_packet_message() {
let keypair = Keypair::new();
@@ -175,4 +246,92 @@ mod tests {
transaction.message_data()
);
}
#[test]
fn test_get_packets_count() {
// create a buffer with 3 batches, each with 2 packets but only the first one valid
let batch_size = 2usize;
let batch_count = 3usize;
let unprocessed_packet_batches: UnprocessedPacketBatches = (0..batch_count)
.map(|_batch_index| {
DeserializedPacketBatch::new(
PacketBatch::new(
(0..batch_size)
.map(|packet_index| packet_with_sender_stake(packet_index as u64, None))
.collect(),
),
vec![0],
false,
)
})
.collect();
// Assert total packets count, and unprocessed packets count
assert_eq!(
batch_size * batch_count,
unprocessed_packet_batches.get_packets_count()
);
assert_eq!(
batch_count,
unprocessed_packet_batches.get_unprocessed_packets_count()
);
}
#[test]
fn test_get_stakes_and_locators_from_empty_buffer() {
let unprocessed_packet_batches = UnprocessedPacketBatches::default();
let (stakes, locators) = unprocessed_packet_batches.get_stakes_and_locators();
assert!(stakes.is_empty());
assert!(locators.is_empty());
}
#[test]
fn test_get_stakes_and_locators() {
solana_logger::setup();
// setup senders' address and stake
let senders: Vec<(IpAddr, u64)> = vec![
(IpAddr::from([127, 0, 0, 1]), 1),
(IpAddr::from([127, 0, 0, 2]), 2),
(IpAddr::from([127, 0, 0, 3]), 3),
];
// create a buffer with 3 batches, each with 2 packets from the senders above.
// buffer looks like:
// [127.0.0.1, 127.0.0.2]
// [127.0.0.3, 127.0.0.1]
// [127.0.0.2, 127.0.0.3]
let batch_size = 2usize;
let batch_count = 3usize;
let unprocessed_packet_batches: UnprocessedPacketBatches = (0..batch_count)
.map(|batch_index| {
DeserializedPacketBatch::new(
PacketBatch::new(
(0..batch_size)
.map(|packet_index| {
let n = (batch_index * batch_size + packet_index) % senders.len();
packet_with_sender_stake(senders[n].1, Some(senders[n].0))
})
.collect(),
),
(0..batch_size).collect(),
false,
)
})
.collect();
let (stakes, locators) = unprocessed_packet_batches.get_stakes_and_locators();
// Produced stakes and locators should both have "batch_size * batch_count" entries;
assert_eq!(batch_size * batch_count, stakes.len());
assert_eq!(batch_size * batch_count, locators.len());
// Assert stakes and locators are in good order
locators.iter().enumerate().for_each(|(index, locator)| {
assert_eq!(
stakes[index],
senders[(locator.batch_index * batch_size + locator.packet_index) % senders.len()]
.1
);
});
}
}
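What `get_stakes_and_locators` computes above is a flattening of every unprocessed packet across all batches into two parallel vectors: sender stakes and locators. A std-only sketch of that shape, with a simplified `Batch` standing in for `DeserializedPacketBatch`:

```rust
struct Batch {
    // sender stake for each packet in the batch
    packet_stakes: Vec<u64>,
    // indexes of packets not yet processed
    unprocessed: Vec<usize>,
}

#[derive(Debug, PartialEq)]
struct Locator {
    batch_index: usize,
    packet_index: usize,
}

fn stakes_and_locators(batches: &[Batch]) -> (Vec<u64>, Vec<Locator>) {
    batches
        .iter()
        .enumerate()
        .flat_map(|(batch_index, batch)| {
            batch.unprocessed.iter().map(move |&packet_index| {
                (
                    batch.packet_stakes[packet_index],
                    Locator { batch_index, packet_index },
                )
            })
        })
        .unzip()
}

fn main() {
    let batches = vec![
        Batch { packet_stakes: vec![1, 2], unprocessed: vec![0, 1] },
        Batch { packet_stakes: vec![3, 1], unprocessed: vec![1] },
    ];
    let (stakes, locators) = stakes_and_locators(&batches);
    assert_eq!(stakes, vec![1, 2, 1]);
    assert_eq!(locators[2], Locator { batch_index: 1, packet_index: 1 });
    println!("{} unprocessed packets", stakes.len());
}
```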

View File

@@ -38,7 +38,7 @@ use {
blockstore::{
Blockstore, BlockstoreError, BlockstoreSignals, CompletedSlotsReceiver, PurgeType,
},
blockstore_db::{BlockstoreAdvancedOptions, BlockstoreOptions, BlockstoreRecoveryMode},
blockstore_db::{BlockstoreOptions, BlockstoreRecoveryMode, LedgerColumnOptions},
blockstore_processor::{self, TransactionStatusSender},
leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache,
@@ -168,7 +168,7 @@ pub struct ValidatorConfig {
pub no_wait_for_vote_to_start_leader: bool,
pub accounts_shrink_ratio: AccountShrinkThreshold,
pub wait_to_vote_slot: Option<Slot>,
pub blockstore_advanced_options: BlockstoreAdvancedOptions,
pub ledger_column_options: LedgerColumnOptions,
}
impl Default for ValidatorConfig {
@@ -230,7 +230,7 @@ impl Default for ValidatorConfig {
accounts_shrink_ratio: AccountShrinkThreshold::default(),
accounts_db_config: None,
wait_to_vote_slot: None,
blockstore_advanced_options: BlockstoreAdvancedOptions::default(),
ledger_column_options: LedgerColumnOptions::default(),
}
}
}
@@ -1297,7 +1297,7 @@ fn load_blockstore(
ledger_path,
BlockstoreOptions {
recovery_mode: config.wal_recovery_mode.clone(),
advanced_options: config.blockstore_advanced_options.clone(),
column_options: config.ledger_column_options.clone(),
..BlockstoreOptions::default()
},
)
@@ -1331,7 +1331,7 @@ fn load_blockstore(
blockstore.clone(),
exit,
enable_rpc_transaction_history,
config.rpc_config.enable_cpi_and_log_storage,
config.rpc_config.enable_extended_tx_metadata_storage,
transaction_notifier,
)
} else {
@@ -1538,7 +1538,7 @@ fn initialize_rpc_transaction_history_services(
blockstore: Arc<Blockstore>,
exit: &Arc<AtomicBool>,
enable_rpc_transaction_history: bool,
enable_cpi_and_log_storage: bool,
enable_extended_tx_metadata_storage: bool,
transaction_notifier: Option<TransactionNotifierLock>,
) -> TransactionHistoryServices {
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
@@ -1552,7 +1552,7 @@ fn initialize_rpc_transaction_history_services(
enable_rpc_transaction_history,
transaction_notifier.clone(),
blockstore.clone(),
enable_cpi_and_log_storage,
enable_extended_tx_metadata_storage,
exit,
));

View File

@@ -1,6 +1,7 @@
use {
crate::tower_storage::{SavedTowerVersions, TowerStorage},
crossbeam_channel::Receiver,
solana_client::connection_cache::get_connection,
solana_gossip::cluster_info::ClusterInfo,
solana_measure::measure::Measure,
solana_poh::poh_recorder::PohRecorder,
@@ -86,7 +87,13 @@ impl VotingService {
} else {
crate::banking_stage::next_leader_tpu(cluster_info, poh_recorder)
};
let _ = cluster_info.send_transaction(vote_op.tx(), target_address);
let mut measure = Measure::start("vote_tx_send-ms");
let target_address = target_address.unwrap_or_else(|| cluster_info.my_contact_info().tpu);
let wire_vote_tx = bincode::serialize(vote_op.tx()).expect("vote serialization failure");
let _ = get_connection(&target_address).send_wire_transaction(&wire_vote_tx);
measure.stop();
inc_new_counter_info!("vote_tx_send-ms", measure.as_ms() as usize);
match vote_op {
VoteOp::PushVote {

View File

@@ -604,7 +604,7 @@ impl WindowService {
}
if last_print.elapsed().as_secs() > 2 {
metrics.report_metrics("recv-window-insert-shreds");
metrics.report_metrics("blockstore-insert-shreds");
metrics = BlockstoreInsertionMetrics::default();
ws_metrics.report_metrics("recv-window-insert-shreds");
ws_metrics = WindowServiceMetrics::default();

View File

@@ -10,7 +10,7 @@ mod tests {
solana_ledger::{
blockstore::{make_many_slot_shreds, Blockstore},
blockstore_db::{
BlockstoreAdvancedOptions, BlockstoreOptions, BlockstoreRocksFifoOptions,
BlockstoreOptions, BlockstoreRocksFifoOptions, LedgerColumnOptions,
ShredStorageType,
},
get_tmp_ledger_path,
@@ -351,13 +351,14 @@ mod tests {
&ledger_path,
if config.fifo_compaction {
BlockstoreOptions {
advanced_options: BlockstoreAdvancedOptions {
column_options: LedgerColumnOptions {
shred_storage_type: ShredStorageType::RocksFifo(
BlockstoreRocksFifoOptions {
shred_data_cf_size: config.shred_data_cf_size,
..BlockstoreRocksFifoOptions::default()
},
),
..LedgerColumnOptions::default()
},
..BlockstoreOptions::default()
}

docs/package-lock.json (generated)
View File

@@ -9216,9 +9216,9 @@
"optional": true
},
"node_modules/nanoid": {
"version": "3.1.23",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz",
"integrity": "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw==",
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.1.tgz",
"integrity": "sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw==",
"bin": {
"nanoid": "bin/nanoid.cjs"
},
@@ -22768,9 +22768,9 @@
"optional": true
},
"nanoid": {
"version": "3.1.23",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz",
"integrity": "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw=="
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.1.tgz",
"integrity": "sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw=="
},
"nanomatch": {
"version": "1.2.13",

View File

@@ -681,8 +681,8 @@ console.log(`Stake balance: ${stakeBalance}`)
// We can verify the state of our stake. This may take some time to become active
let stakeState = await connection.getStakeActivation(stakeAccount.publicKey);
console.log(`Stake Stake: ${stakeState.state}`);
// Stake State: inactive
console.log(`Stake state: ${stakeState.state}`);
// Stake state: inactive
// To delegate our stake, we get the current vote accounts and choose the first
let voteAccounts = await connection.getVoteAccounts();

View File

@@ -3059,7 +3059,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
Result:
```json
{ "jsonrpc": "2.0", "result": { "solana-core": "1.10.3" }, "id": 1 }
{ "jsonrpc": "2.0", "result": { "solana-core": "1.11.0" }, "id": 1 }
```
### getVoteAccounts

View File

@@ -6054,9 +6054,9 @@ nan@^2.12.1:
integrity sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==
nanoid@^3.1.23:
version "3.1.23"
resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.1.23.tgz#f744086ce7c2bc47ee0a8472574d5c78e4183a81"
integrity sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw==
version "3.3.1"
resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.1.tgz#6347a18cac88af88f58af0b3594b723d5e99bb35"
integrity sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw==
nanomatch@^1.2.9:
version "1.2.13"

View File

@@ -2,29 +2,31 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-dos"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
description = "Tool to send various requests to cluster in order to evaluate the effect on performance"
[dependencies]
bincode = "1.3.3"
clap = "2.33.1"
clap = {version = "3.1.5", features = ["derive", "cargo"]}
log = "0.4.14"
rand = "0.7.0"
solana-client = { path = "../client", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
serde = "1.0.136"
solana-client = { path = "../client", version = "=1.11.0" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.10.3" }
solana-local-cluster = { path = "../local-cluster", version = "=1.11.0" }

View File

@@ -1,12 +1,50 @@
//! DoS tool
//!
//! Sends requests to cluster in a loop to measure
//! the effect of handling these requests on the performance of the cluster.
//!
//! * `mode` argument defines interface to use (e.g. rpc, tvu, tpu)
//! * `data-type` argument specifies the type of the request.
//! Some request types might be used only with particular `mode` value.
//! For example, `get-account-info` is valid only with `mode=rpc`.
//!
//! Most options are provided for `data-type = transaction`.
//! These options allow composing a transaction that fails at
//! a particular stage of the processing pipeline.
//!
//! Example 1: send random transactions to TPU
//! ```bash
//! solana-dos --entrypoint 127.0.0.1:8001 --mode tpu --data-type random
//! ```
//!
//! Example 2: send unique transactions with valid recent blockhash to TPU
//! ```bash
//! solana-dos --entrypoint 127.0.0.1:8001 --mode tpu --data-type random
//! solana-dos --entrypoint 127.0.0.1:8001 --mode tpu \
//! --data-type transaction --generate-unique-transactions
//! --payer config/bootstrap-validator/identity.json \
//! --generate-valid-blockhash
//! ```
//!
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg},
clap::{crate_description, crate_name, crate_version, ArgEnum, Args, Parser},
log::*,
rand::{thread_rng, Rng},
serde::{Deserialize, Serialize},
solana_client::rpc_client::RpcClient,
solana_core::serve_repair::RepairProtocol,
solana_gossip::{contact_info::ContactInfo, gossip_service::discover},
solana_sdk::pubkey::Pubkey,
solana_sdk::{
hash::Hash,
instruction::{AccountMeta, CompiledInstruction, Instruction},
pubkey::Pubkey,
signature::{read_keypair_file, Keypair, Signature, Signer},
stake,
system_instruction::SystemInstruction,
system_program,
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
std::{
net::{SocketAddr, UdpSocket},
@@ -23,38 +61,151 @@ fn get_repair_contact(nodes: &[ContactInfo]) -> ContactInfo {
contact
}
struct TransactionGenerator {
blockhash: Hash,
last_generated: Instant,
transaction_params: TransactionParams,
cached_transaction: Option<Transaction>,
}
impl TransactionGenerator {
fn new(transaction_params: TransactionParams) -> Self {
TransactionGenerator {
blockhash: Hash::default(),
last_generated: (Instant::now() - Duration::from_secs(100)),
transaction_params,
cached_transaction: None,
}
}
fn generate(&mut self, payer: Option<&Keypair>, rpc_client: &Option<RpcClient>) -> Transaction {
if !self.transaction_params.unique_transactions && self.cached_transaction.is_some() {
return self.cached_transaction.as_ref().unwrap().clone();
}
// generate a new blockhash every 1sec
if self.transaction_params.valid_blockhash
&& self.last_generated.elapsed().as_millis() > 1000
{
self.blockhash = rpc_client.as_ref().unwrap().get_latest_blockhash().unwrap();
self.last_generated = Instant::now();
}
// in order to evaluate the performance implications of the different transactions,
// we create transactions here that are filtered out at different stages of the processing pipeline
// create an arbitrary valid instruction
let lamports = 5;
let transfer_instruction = SystemInstruction::Transfer { lamports };
let program_ids = vec![system_program::id(), stake::program::id()];
// transaction with payer, in this case signatures are valid and num_signatures is irrelevant
// random payer will cause error "attempt to debit an account but found no record of a prior credit"
// if payer is correct, it will trigger error with not enough signatures
let transaction = if let Some(payer) = payer {
let instruction = Instruction::new_with_bincode(
program_ids[0],
&transfer_instruction,
vec![
AccountMeta::new(program_ids[0], false),
AccountMeta::new(program_ids[1], false),
],
);
Transaction::new_signed_with_payer(
&[instruction],
Some(&payer.pubkey()),
&[payer],
self.blockhash,
)
} else if self.transaction_params.valid_signatures {
// Since we don't provide a payer, this transaction will end up
// filtered at legacy.rs sanitize method (banking_stage) with error "a program cannot be payer"
let kpvals: Vec<Keypair> = (0..self.transaction_params.num_signatures)
.map(|_| Keypair::new())
.collect();
let keypairs: Vec<&Keypair> = kpvals.iter().collect();
let instructions = vec![CompiledInstruction::new(
0,
&transfer_instruction,
vec![0, 1],
)];
Transaction::new_with_compiled_instructions(
&keypairs,
&[],
self.blockhash,
program_ids,
instructions,
)
} else {
// Since we provided invalid signatures
// this transaction will end up filtered at legacy.rs (banking_stage) because
// num_required_signatures == 0
let instructions = vec![CompiledInstruction::new(
0,
&transfer_instruction,
vec![0, 1],
)];
let mut tx = Transaction::new_with_compiled_instructions(
&[] as &[&Keypair; 0],
&[],
self.blockhash,
program_ids,
instructions,
);
tx.signatures = vec![Signature::new_unique(); self.transaction_params.num_signatures];
tx
};
// if we need to generate only one transaction, we cache it to reuse later
if !self.transaction_params.unique_transactions {
self.cached_transaction = Some(transaction.clone());
}
transaction
}
}
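Two control-flow ideas carry the generator above: reuse a cached payload when unique transactions are not requested, and refresh the blockhash only when the last one is older than a second. A dependency-free sketch of both, with `fetch_blockhash` and string payloads as illustrative stand-ins for the RPC call and transaction building:

```rust
use std::time::{Duration, Instant};

struct Generator {
    unique: bool,
    blockhash: u64,
    last_blockhash_refresh: Instant,
    cached: Option<String>,
    counter: u64,
}

impl Generator {
    fn new(unique: bool) -> Self {
        Self {
            unique,
            blockhash: 0,
            // Start far in the past so the first generate() refreshes immediately.
            last_blockhash_refresh: Instant::now() - Duration::from_secs(100),
            cached: None,
            counter: 0,
        }
    }

    // Stand-in for rpc_client.get_latest_blockhash().
    fn fetch_blockhash(&self) -> u64 {
        self.blockhash + 1
    }

    fn generate(&mut self) -> String {
        // Non-unique mode: hand back the cached payload once it exists.
        if !self.unique {
            if let Some(tx) = &self.cached {
                return tx.clone();
            }
        }
        // Refresh the blockhash at most once per second.
        if self.last_blockhash_refresh.elapsed() > Duration::from_secs(1) {
            self.blockhash = self.fetch_blockhash();
            self.last_blockhash_refresh = Instant::now();
        }
        self.counter += 1;
        let tx = format!("tx #{} @ blockhash {}", self.counter, self.blockhash);
        if !self.unique {
            self.cached = Some(tx.clone());
        }
        tx
    }
}

fn main() {
    let mut cached_gen = Generator::new(false);
    assert_eq!(cached_gen.generate(), cached_gen.generate()); // same payload reused

    let mut unique_gen = Generator::new(true);
    assert_ne!(unique_gen.generate(), unique_gen.generate()); // fresh payload each time
}
```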
fn run_dos(
nodes: &[ContactInfo],
iterations: usize,
entrypoint_addr: SocketAddr,
data_type: String,
data_size: usize,
mode: String,
data_input: Option<String>,
payer: Option<&Keypair>,
params: DosClientParameters,
) {
let mut target = None;
let mut rpc_client = None;
if nodes.is_empty() {
if mode == "rpc" {
rpc_client = Some(RpcClient::new_socket(entrypoint_addr));
if params.mode == Mode::Rpc {
rpc_client = Some(RpcClient::new_socket(params.entrypoint_addr));
}
target = Some(entrypoint_addr);
target = Some(params.entrypoint_addr);
} else {
info!("************ NODE ***********");
for node in nodes {
if node.gossip == entrypoint_addr {
target = match mode.as_str() {
"gossip" => Some(node.gossip),
"tvu" => Some(node.tvu),
"tvu_forwards" => Some(node.tvu_forwards),
"tpu" => Some(node.tpu),
"tpu_forwards" => Some(node.tpu_forwards),
"repair" => Some(node.repair),
"serve_repair" => Some(node.serve_repair),
"rpc" => {
info!("{:?}", node);
}
info!("ADDR = {}", params.entrypoint_addr);
for node in nodes {
if node.gossip == params.entrypoint_addr {
info!("{}", node.gossip);
target = match params.mode {
Mode::Gossip => Some(node.gossip),
Mode::Tvu => Some(node.tvu),
Mode::TvuForwards => Some(node.tvu_forwards),
Mode::Tpu => {
rpc_client = Some(RpcClient::new_socket(node.rpc));
Some(node.tpu)
}
Mode::TpuForwards => Some(node.tpu_forwards),
Mode::Repair => Some(node.repair),
Mode::ServeRepair => Some(node.serve_repair),
Mode::Rpc => {
rpc_client = Some(RpcClient::new_socket(node.rpc));
None
}
&_ => panic!("Unknown mode"),
};
break;
}
@@ -62,74 +213,83 @@ fn run_dos(
}
let target = target.expect("should have target");
info!("Targetting {}", target);
info!("Targeting {}", target);
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut data = Vec::new();
let mut transaction_generator = None;
match data_type.as_str() {
"repair_highest" => {
match params.data_type {
DataType::RepairHighest => {
let slot = 100;
let req = RepairProtocol::WindowIndexWithNonce(get_repair_contact(nodes), slot, 0, 0);
data = bincode::serialize(&req).unwrap();
}
"repair_shred" => {
DataType::RepairShred => {
let slot = 100;
let req =
RepairProtocol::HighestWindowIndexWithNonce(get_repair_contact(nodes), slot, 0, 0);
data = bincode::serialize(&req).unwrap();
}
"repair_orphan" => {
DataType::RepairOrphan => {
let slot = 100;
let req = RepairProtocol::OrphanWithNonce(get_repair_contact(nodes), slot, 0);
data = bincode::serialize(&req).unwrap();
}
"random" => {
data.resize(data_size, 0);
DataType::Random => {
data.resize(params.data_size, 0);
}
"transaction" => {
let tx = solana_perf::test_tx::test_tx();
DataType::Transaction => {
let tp = params.transaction_params;
info!("{:?}", tp);
transaction_generator = Some(TransactionGenerator::new(tp));
let tx = transaction_generator
.as_mut()
.unwrap()
.generate(payer, &rpc_client);
info!("{:?}", tx);
data = bincode::serialize(&tx).unwrap();
}
"get_account_info" => {}
"get_program_accounts" => {}
&_ => {
panic!("unknown data type");
}
DataType::GetAccountInfo => {}
DataType::GetProgramAccounts => {}
}
let mut last_log = Instant::now();
let mut count = 0;
let mut error_count = 0;
loop {
if mode == "rpc" {
match data_type.as_str() {
"get_account_info" => {
let res = rpc_client
.as_ref()
.unwrap()
.get_account(&Pubkey::from_str(data_input.as_ref().unwrap()).unwrap());
if res.is_err() {
error_count += 1;
}
}
"get_program_accounts" => {
let res = rpc_client.as_ref().unwrap().get_program_accounts(
&Pubkey::from_str(data_input.as_ref().unwrap()).unwrap(),
if params.mode == Mode::Rpc {
match params.data_type {
DataType::GetAccountInfo => {
let res = rpc_client.as_ref().unwrap().get_account(
&Pubkey::from_str(params.data_input.as_ref().unwrap()).unwrap(),
);
if res.is_err() {
error_count += 1;
}
}
&_ => {
DataType::GetProgramAccounts => {
let res = rpc_client.as_ref().unwrap().get_program_accounts(
&Pubkey::from_str(params.data_input.as_ref().unwrap()).unwrap(),
);
if res.is_err() {
error_count += 1;
}
}
_ => {
panic!("unsupported data type");
}
}
} else {
if data_type == "random" {
if params.data_type == DataType::Random {
thread_rng().fill(&mut data[..]);
}
if let Some(tg) = transaction_generator.as_mut() {
let tx = tg.generate(payer, &rpc_client);
info!("{:?}", tx);
data = bincode::serialize(&tx).unwrap();
}
let res = socket.send_to(&data, target);
if res.is_err() {
error_count += 1;
@@ -147,126 +307,173 @@ fn run_dos(
}
}
// command line parsing
#[derive(Parser)]
#[clap(name = crate_name!(),
version = crate_version!(),
about = crate_description!(),
rename_all = "kebab-case"
)]
struct DosClientParameters {
#[clap(long, arg_enum, help = "Interface to DoS")]
mode: Mode,
#[clap(long, arg_enum, help = "Type of data to send")]
data_type: DataType,
#[clap(
long = "entrypoint",
parse(try_from_str = addr_parser),
default_value = "127.0.0.1:8001",
help = "Gossip entrypoint address. Usually <ip>:8001"
)]
entrypoint_addr: SocketAddr,
#[clap(
long,
default_value = "128",
required_if_eq("data-type", "random"),
help = "Size of packet to DoS with, relevant only for data-type=random"
)]
data_size: usize,
#[clap(long, help = "Data to send [Optional]")]
data_input: Option<String>,
#[clap(long, help = "Just use entrypoint address directly")]
skip_gossip: bool,
#[clap(long, help = "Allow contacting private ip addresses")]
allow_private_addr: bool,
#[clap(flatten)]
transaction_params: TransactionParams,
}
#[derive(Args, Serialize, Deserialize, Debug, Default)]
#[clap(rename_all = "kebab-case")]
struct TransactionParams {
#[clap(
long,
default_value = "2",
help = "Number of signatures in transaction"
)]
num_signatures: usize,
#[clap(long, help = "Generate a valid blockhash for transaction")]
valid_blockhash: bool,
#[clap(long, help = "Generate valid signature(s) for transaction")]
valid_signatures: bool,
#[clap(long, help = "Generate unique transactions")]
unique_transactions: bool,
#[clap(
long = "payer",
help = "Payer's keypair file to fund transactions [Optional]"
)]
payer_filename: Option<String>,
}
#[derive(ArgEnum, Clone, Eq, PartialEq)]
enum Mode {
Gossip,
Tvu,
TvuForwards,
Tpu,
TpuForwards,
Repair,
ServeRepair,
Rpc,
}
#[derive(ArgEnum, Clone, Eq, PartialEq)]
enum DataType {
RepairHighest,
RepairShred,
RepairOrphan,
Random,
GetAccountInfo,
GetProgramAccounts,
Transaction,
}
fn addr_parser(addr: &str) -> Result<SocketAddr, &'static str> {
match solana_net_utils::parse_host_port(addr) {
Ok(v) => Ok(v),
Err(_) => Err("failed to parse entrypoint address"),
}
}
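The rewrite above moves from clap's builder API to clap 3 derive macros. A minimal standalone sketch of that style, assuming a dependency like `clap = { version = "3", features = ["derive"] }`; the `demo-dos` name and the fields shown are illustrative, not the tool's full CLI.

```rust
use clap::{ArgEnum, Parser};

#[derive(Parser, Debug)]
#[clap(name = "demo-dos", version, about = "illustrative CLI sketch", rename_all = "kebab-case")]
struct Cli {
    /// Interface to target
    #[clap(long, arg_enum)]
    mode: Mode,

    /// Size of random packets, only meaningful for --data-type random
    #[clap(long, default_value = "128")]
    data_size: usize,

    /// Optional data payload
    #[clap(long)]
    data_input: Option<String>,
}

#[derive(ArgEnum, Clone, Debug, PartialEq, Eq)]
enum Mode {
    Tpu,
    Rpc,
}

fn main() {
    // e.g. `demo-dos --mode tpu --data-size 64`
    let cli = Cli::parse();
    println!("{:?}", cli);
}
```

The `arg_enum` attribute ties the `Mode` variants to kebab-case argument values, which is what lets the handwritten `possible_values` lists from the old builder code disappear.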
/// input checks which are not covered by Clap
fn validate_input(params: &DosClientParameters) {
if params.mode == Mode::Rpc
&& (params.data_type != DataType::GetAccountInfo
&& params.data_type != DataType::GetProgramAccounts)
{
panic!("unsupported data type");
}
if params.data_type != DataType::Transaction {
let tp = &params.transaction_params;
if tp.valid_blockhash
|| tp.valid_signatures
|| tp.unique_transactions
|| tp.payer_filename.is_some()
{
println!("Arguments valid-blockhash, valid-sign, unique-trans, payer are ignored if data-type != transaction");
}
}
if params.transaction_params.payer_filename.is_some()
&& params.transaction_params.valid_signatures
{
println!("Arguments valid-signatures is ignored if payer is provided");
}
}
fn main() {
solana_logger::setup_with_default("solana=info");
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())
.arg(
Arg::with_name("entrypoint")
.long("entrypoint")
.takes_value(true)
.value_name("HOST:PORT")
.help("Gossip entrypoint address. Usually <ip>:8001"),
)
.arg(
Arg::with_name("mode")
.long("mode")
.takes_value(true)
.value_name("MODE")
.possible_values(&[
"gossip",
"tvu",
"tvu_forwards",
"tpu",
"tpu_forwards",
"repair",
"serve_repair",
"rpc",
])
.help("Interface to DoS"),
)
.arg(
Arg::with_name("data_size")
.long("data-size")
.takes_value(true)
.value_name("BYTES")
.help("Size of packet to DoS with"),
)
.arg(
Arg::with_name("data_type")
.long("data-type")
.takes_value(true)
.value_name("TYPE")
.possible_values(&[
"repair_highest",
"repair_shred",
"repair_orphan",
"random",
"get_account_info",
"get_program_accounts",
"transaction",
])
.help("Type of data to send"),
)
.arg(
Arg::with_name("data_input")
.long("data-input")
.takes_value(true)
.value_name("TYPE")
.help("Data to send"),
)
.arg(
Arg::with_name("skip_gossip")
.long("skip-gossip")
.help("Just use entrypoint address directly"),
)
.arg(
Arg::with_name("allow_private_addr")
.long("allow-private-addr")
.takes_value(false)
.help("Allow contacting private ip addresses")
.hidden(true),
)
.get_matches();
let mut entrypoint_addr = SocketAddr::from(([127, 0, 0, 1], 8001));
if let Some(addr) = matches.value_of("entrypoint") {
entrypoint_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
eprintln!("failed to parse entrypoint address: {}", e);
exit(1)
});
}
let data_size = value_t!(matches, "data_size", usize).unwrap_or(128);
let skip_gossip = matches.is_present("skip_gossip");
let mode = value_t_or_exit!(matches, "mode", String);
let data_type = value_t_or_exit!(matches, "data_type", String);
let data_input = value_t!(matches, "data_input", String).ok();
let cmd_params = DosClientParameters::parse();
validate_input(&cmd_params);
let mut nodes = vec![];
if !skip_gossip {
info!("Finding cluster entry: {:?}", entrypoint_addr);
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
if !cmd_params.skip_gossip {
info!("Finding cluster entry: {:?}", cmd_params.entrypoint_addr);
let socket_addr_space = SocketAddrSpace::new(cmd_params.allow_private_addr);
let (gossip_nodes, _validators) = discover(
None, // keypair
Some(&entrypoint_addr),
None, // num_nodes
Duration::from_secs(60), // timeout
None, // find_node_by_pubkey
Some(&entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
Some(&cmd_params.entrypoint_addr),
None, // num_nodes
Duration::from_secs(60), // timeout
None, // find_node_by_pubkey
Some(&cmd_params.entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
socket_addr_space,
)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
eprintln!(
"Failed to discover {} node: {:?}",
cmd_params.entrypoint_addr, err
);
exit(1);
});
nodes = gossip_nodes;
}
info!("done found {} nodes", nodes.len());
let payer = cmd_params
.transaction_params
.payer_filename
.as_ref()
.map(|keypair_file_name| {
read_keypair_file(&keypair_file_name)
.unwrap_or_else(|_| panic!("bad keypair {:?}", keypair_file_name))
});
run_dos(
&nodes,
0,
entrypoint_addr,
data_type,
data_size,
mode,
data_input,
);
run_dos(&nodes, 0, payer.as_ref(), cmd_params);
}
#[cfg(test)]
@@ -284,34 +491,207 @@ pub mod test {
timestamp(),
)];
let entrypoint_addr = nodes[0].gossip;
run_dos(
&nodes,
1,
entrypoint_addr,
"random".to_string(),
10,
"tvu".to_string(),
None,
DosClientParameters {
entrypoint_addr,
mode: Mode::Tvu,
data_size: 10,
data_type: DataType::Random,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams::default(),
},
);
run_dos(
&nodes,
1,
entrypoint_addr,
"repair_highest".to_string(),
10,
"repair".to_string(),
None,
DosClientParameters {
entrypoint_addr,
mode: Mode::Repair,
data_size: 10,
data_type: DataType::RepairHighest,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams::default(),
},
);
run_dos(
&nodes,
1,
entrypoint_addr,
"repair_shred".to_string(),
10,
"serve_repair".to_string(),
None,
DosClientParameters {
entrypoint_addr,
mode: Mode::ServeRepair,
data_size: 10,
data_type: DataType::RepairShred,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams::default(),
},
);
}
#[test]
#[ignore]
fn test_dos_local_cluster_transactions() {
let num_nodes = 1;
let cluster =
LocalCluster::new_with_equal_stakes(num_nodes, 100, 3, SocketAddrSpace::Unspecified);
assert_eq!(cluster.validators.len(), num_nodes);
let nodes = cluster.get_node_pubkeys();
let node = cluster.get_contact_info(&nodes[0]).unwrap().clone();
let nodes_slice = [node];
// send random transactions to TPU
// will be discarded on sigverify stage
run_dos(
&nodes_slice,
1,
None,
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 1024,
data_type: DataType::Random,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams::default(),
},
);
// send transactions to TPU with 2 random signatures
// will be filtered on dedup (because transactions are not unique)
run_dos(
&nodes_slice,
1,
None,
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 2,
valid_blockhash: false,
valid_signatures: false,
unique_transactions: false,
payer_filename: None,
},
},
);
// send *unique* transactions to TPU with 4 random signatures
// will be discarded on banking stage in legacy.rs
// ("there should be at least 1 RW fee-payer account")
run_dos(
&nodes_slice,
1,
None,
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 4,
valid_blockhash: false,
valid_signatures: false,
unique_transactions: true,
payer_filename: None,
},
},
);
// send unique transactions to TPU with 2 random signatures
// will be discarded on banking stage in legacy.rs (A program cannot be a payer)
// because we haven't provided a valid payer
run_dos(
&nodes_slice,
1,
None,
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 2,
valid_blockhash: false, // irrelevant without valid payer, because
// it will be filtered before blockhash validity checks
valid_signatures: true,
unique_transactions: true,
payer_filename: None,
},
},
);
// send unique transactions to TPU with a payer but an invalid blockhash
// will be discarded due to the invalid blockhash
run_dos(
&nodes_slice,
1,
Some(&cluster.funding_keypair),
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 2,
valid_blockhash: false,
valid_signatures: true,
unique_transactions: true,
payer_filename: None,
},
},
);
// send unique transaction to TPU with valid blockhash
// will fail with error processing Instruction 0: missing required signature for instruction
run_dos(
&nodes_slice,
1,
Some(&cluster.funding_keypair),
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 2,
valid_blockhash: true,
valid_signatures: true,
unique_transactions: true,
payer_filename: None,
},
},
);
}
@@ -330,11 +710,23 @@ pub mod test {
run_dos(
&[node],
10_000_000,
cluster.entry_point_info.gossip,
"transaction".to_string(),
1000,
"tpu".to_string(),
None,
Some(&cluster.funding_keypair),
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 2,
valid_blockhash: true,
valid_signatures: true,
unique_transactions: true,
payer_filename: None,
},
},
);
}
}
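
The main() diff above replaces the hand-built clap::App chain with DosClientParameters::parse(), and run_dos now receives the whole parsed struct (plus an optional payer) instead of separate positional arguments. As a point of reference, here is a minimal, self-contained sketch of the clap 3 derive pattern being adopted; Params, Mode, and the defaults below are illustrative stand-ins, not the dos tool's actual definitions.

// Sketch only: the pattern matches the diff, the names and defaults do not.
use clap::{ArgEnum, Parser};
use std::net::SocketAddr;

#[derive(ArgEnum, Clone, Debug)]
enum Mode {
    Gossip,
    Tpu,
    Rpc,
}

#[derive(Parser, Debug)]
struct Params {
    /// Gossip entrypoint address, e.g. 127.0.0.1:8001
    #[clap(long, default_value = "127.0.0.1:8001")]
    entrypoint_addr: SocketAddr,
    /// Interface to target
    #[clap(long, arg_enum, default_value = "tpu")]
    mode: Mode,
    /// Size of packet to send
    #[clap(long, default_value_t = 128)]
    data_size: usize,
    /// Use the entrypoint address directly instead of gossip discovery
    #[clap(long)]
    skip_gossip: bool,
}

fn main() {
    // Replaces the App::new(...).arg(...).get_matches() builder chain removed above.
    let params = Params::parse();
    println!("{:?}", params);
}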

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
version = "1.10.3"
version = "1.11.0"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ console = "0.15.0"
indicatif = "0.16.2"
log = "0.4.14"
reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
[lib]
crate-type = ["lib"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-entry"
version = "1.10.3"
version = "1.11.0"
description = "Solana Entry"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -18,16 +18,16 @@ log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.1"
serde = "1.0.136"
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
[dev-dependencies]
matches = "0.1.9"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[lib]
crate-type = ["lib"]

View File

@@ -14,6 +14,7 @@
"@cloudflare/stream-react": "^1.2.0",
"@metamask/jazzicon": "^2.0.0",
"@metaplex/js": "4.12.0",
"@project-serum/anchor": "^0.22.1",
"@project-serum/serum": "^0.13.61",
"@react-hook/debounce": "^4.0.0",
"@sentry/react": "^6.16.1",
@@ -4489,17 +4490,18 @@
}
},
"node_modules/@project-serum/anchor": {
"version": "0.11.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.11.1.tgz",
"integrity": "sha512-oIdm4vTJkUy6GmE6JgqDAuQPKI7XM4TPJkjtoIzp69RZe0iAD9JP2XHx7lV1jLdYXeYHqDXfBt3zcq7W91K6PA==",
"version": "0.22.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.22.1.tgz",
"integrity": "sha512-5pHeyvQhzLahIQ8aZymmDMZJAJFklN0joZdI+YIqFkK2uU/mlKr6rBLQjxysf/j1mLLiNG00tdyLfUtTAdQz7w==",
"dependencies": {
"@project-serum/borsh": "^0.2.2",
"@project-serum/borsh": "^0.2.5",
"@solana/web3.js": "^1.17.0",
"base64-js": "^1.5.1",
"bn.js": "^5.1.2",
"bs58": "^4.0.1",
"buffer-layout": "^1.2.0",
"buffer-layout": "^1.2.2",
"camelcase": "^5.3.1",
"cross-fetch": "^3.1.5",
"crypto-hash": "^1.3.0",
"eventemitter3": "^4.0.7",
"find": "^0.3.0",
@@ -4547,6 +4549,30 @@
"node": ">=10"
}
},
"node_modules/@project-serum/serum/node_modules/@project-serum/anchor": {
"version": "0.11.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.11.1.tgz",
"integrity": "sha512-oIdm4vTJkUy6GmE6JgqDAuQPKI7XM4TPJkjtoIzp69RZe0iAD9JP2XHx7lV1jLdYXeYHqDXfBt3zcq7W91K6PA==",
"dependencies": {
"@project-serum/borsh": "^0.2.2",
"@solana/web3.js": "^1.17.0",
"base64-js": "^1.5.1",
"bn.js": "^5.1.2",
"bs58": "^4.0.1",
"buffer-layout": "^1.2.0",
"camelcase": "^5.3.1",
"crypto-hash": "^1.3.0",
"eventemitter3": "^4.0.7",
"find": "^0.3.0",
"js-sha256": "^0.9.0",
"pako": "^2.0.3",
"snake-case": "^3.0.4",
"toml": "^3.0.0"
},
"engines": {
"node": ">=11"
}
},
"node_modules/@project-serum/serum/node_modules/@solana/spl-token": {
"version": "0.1.6",
"resolved": "https://registry.npmjs.org/@solana/spl-token/-/spl-token-0.1.6.tgz",
@@ -4594,6 +4620,11 @@
"node": ">=10"
}
},
"node_modules/@project-serum/serum/node_modules/pako": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/pako/-/pako-2.0.4.tgz",
"integrity": "sha512-v8tweI900AUkZN6heMU/4Uy4cXRc2AYNRggVmTR+dEncawDJgCdLMximOVA2p4qO57WMynangsfGRb5WD6L1Bg=="
},
"node_modules/@project-serum/sol-wallet-adapter": {
"version": "0.1.8",
"resolved": "https://registry.npmjs.org/@project-serum/sol-wallet-adapter/-/sol-wallet-adapter-0.1.8.tgz",
@@ -18707,9 +18738,9 @@
"integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ=="
},
"node_modules/nanoid": {
"version": "3.1.23",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz",
"integrity": "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw==",
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.1.tgz",
"integrity": "sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw==",
"bin": {
"nanoid": "bin/nanoid.cjs"
},
@@ -30606,17 +30637,18 @@
"peer": true
},
"@project-serum/anchor": {
"version": "0.11.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.11.1.tgz",
"integrity": "sha512-oIdm4vTJkUy6GmE6JgqDAuQPKI7XM4TPJkjtoIzp69RZe0iAD9JP2XHx7lV1jLdYXeYHqDXfBt3zcq7W91K6PA==",
"version": "0.22.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.22.1.tgz",
"integrity": "sha512-5pHeyvQhzLahIQ8aZymmDMZJAJFklN0joZdI+YIqFkK2uU/mlKr6rBLQjxysf/j1mLLiNG00tdyLfUtTAdQz7w==",
"requires": {
"@project-serum/borsh": "^0.2.2",
"@project-serum/borsh": "^0.2.5",
"@solana/web3.js": "^1.17.0",
"base64-js": "^1.5.1",
"bn.js": "^5.1.2",
"bs58": "^4.0.1",
"buffer-layout": "^1.2.0",
"buffer-layout": "^1.2.2",
"camelcase": "^5.3.1",
"cross-fetch": "^3.1.5",
"crypto-hash": "^1.3.0",
"eventemitter3": "^4.0.7",
"find": "^0.3.0",
@@ -30654,6 +30686,27 @@
"buffer-layout": "^1.2.0"
},
"dependencies": {
"@project-serum/anchor": {
"version": "0.11.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.11.1.tgz",
"integrity": "sha512-oIdm4vTJkUy6GmE6JgqDAuQPKI7XM4TPJkjtoIzp69RZe0iAD9JP2XHx7lV1jLdYXeYHqDXfBt3zcq7W91K6PA==",
"requires": {
"@project-serum/borsh": "^0.2.2",
"@solana/web3.js": "^1.17.0",
"base64-js": "^1.5.1",
"bn.js": "^5.1.2",
"bs58": "^4.0.1",
"buffer-layout": "^1.2.0",
"camelcase": "^5.3.1",
"crypto-hash": "^1.3.0",
"eventemitter3": "^4.0.7",
"find": "^0.3.0",
"js-sha256": "^0.9.0",
"pako": "^2.0.3",
"snake-case": "^3.0.4",
"toml": "^3.0.0"
}
},
"@solana/spl-token": {
"version": "0.1.6",
"resolved": "https://registry.npmjs.org/@solana/spl-token/-/spl-token-0.1.6.tgz",
@@ -30680,6 +30733,11 @@
"version": "10.0.0",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-10.0.0.tgz",
"integrity": "sha512-rlBi9d8jpv9Sf1klPjNfFAuWDjKLwTIJJ/VxtoTwIR6hnZxcEOQCZg2oIL3MWBYw5GpUDKOEnND7LXTbIpQ03Q=="
},
"pako": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/pako/-/pako-2.0.4.tgz",
"integrity": "sha512-v8tweI900AUkZN6heMU/4Uy4cXRc2AYNRggVmTR+dEncawDJgCdLMximOVA2p4qO57WMynangsfGRb5WD6L1Bg=="
}
}
},
@@ -41622,9 +41680,9 @@
"integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ=="
},
"nanoid": {
"version": "3.1.23",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz",
"integrity": "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw=="
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.1.tgz",
"integrity": "sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw=="
},
"nanomatch": {
"version": "1.2.13",

View File

@@ -9,6 +9,7 @@
"@cloudflare/stream-react": "^1.2.0",
"@metamask/jazzicon": "^2.0.0",
"@metaplex/js": "4.12.0",
"@project-serum/anchor": "^0.22.1",
"@project-serum/serum": "^0.13.61",
"@react-hook/debounce": "^4.0.0",
"@sentry/react": "^6.16.1",

View File

@@ -296,11 +296,11 @@ function isFullyInactivated(
return false;
}
const delegatedStake = stake.delegation.stake.toNumber();
const inactiveStake = activation.inactive;
const delegatedStake = stake.delegation.stake;
const inactiveStake = new BN(activation.inactive);
return (
!stake.delegation.deactivationEpoch.eq(MAX_EPOCH) &&
delegatedStake === inactiveStake
delegatedStake.eq(inactiveStake)
);
}

View File

@@ -0,0 +1,167 @@
import {
Connection,
SignatureResult,
TransactionInstruction,
} from "@solana/web3.js";
import { InstructionCard } from "./InstructionCard";
import {
BorshInstructionCoder,
Idl,
Program,
Provider,
} from "@project-serum/anchor";
import React, { useEffect, useState } from "react";
import { useCluster } from "../../providers/cluster";
import { Address } from "../common/Address";
import { snakeCase } from "snake-case";
export function GenericAnchorDetailsCard(props: {
ix: TransactionInstruction;
index: number;
result: SignatureResult;
signature: string;
innerCards?: JSX.Element[];
childIndex?: number;
}) {
const { ix, index, result, innerCards, childIndex } = props;
const cluster = useCluster();
const [idl, setIdl] = useState<Idl | null>();
useEffect(() => {
async function fetchIdl() {
if (idl) {
return;
}
// fetch on chain idl
const idl_: Idl | null = await Program.fetchIdl(ix.programId, {
connection: new Connection(cluster.url),
} as Provider);
setIdl(idl_);
}
fetchIdl();
}, [ix.programId, cluster.url, idl]);
const [programName, setProgramName] = useState<string | null>(null);
const [ixTitle, setIxTitle] = useState<string | null>(null);
const [ixAccounts, setIxAccounts] = useState<
{ name: string; isMut: boolean; isSigner: boolean; pda?: Object }[] | null
>(null);
useEffect(() => {
async function parseIxDetailsUsingCoder() {
if (!idl || (programName && ixTitle && ixAccounts)) {
return;
}
// e.g. voter_stake_registry -> voter stake registry
var _programName = idl.name.replaceAll("_", " ").trim();
// e.g. voter stake registry -> Voter Stake Registry
_programName = _programName
.toLowerCase()
.split(" ")
.map((word) => word.charAt(0).toUpperCase() + word.substring(1))
.join(" ");
setProgramName(_programName);
const coder = new BorshInstructionCoder(idl);
const decodedIx = coder.decode(ix.data);
if (!decodedIx) {
return;
}
// get ix title, pascal case it
var _ixTitle = decodedIx.name;
_ixTitle = _ixTitle.charAt(0).toUpperCase() + _ixTitle.slice(1);
setIxTitle(_ixTitle);
// get ix accounts
const idlInstructions = idl.instructions.filter(
(ix) => ix.name === decodedIx.name
);
if (idlInstructions.length === 0) {
return;
}
setIxAccounts(
idlInstructions[0].accounts as {
// type coercing since anchor doesn't export the underlying type
name: string;
isMut: boolean;
isSigner: boolean;
pda?: Object;
}[]
);
}
parseIxDetailsUsingCoder();
}, [
ix.programId,
ix.keys,
ix.data,
idl,
cluster,
programName,
ixTitle,
ixAccounts,
]);
return (
<div>
{idl && (
<InstructionCard
ix={ix}
index={index}
result={result}
title={`${programName || "Unknown"}: ${ixTitle || "Unknown"}`}
innerCards={innerCards}
childIndex={childIndex}
>
<tr key={ix.programId.toBase58()}>
<td>Program</td>
<td className="text-lg-end">
<Address pubkey={ix.programId} alignRight link />
</td>
</tr>
{ixAccounts != null &&
ix.keys.map((am, keyIndex) => (
<tr key={keyIndex}>
<td>
<div className="me-2 d-md-inline">
{/* remaining accounts would not have a name */}
{ixAccounts[keyIndex] &&
snakeCase(ixAccounts[keyIndex].name)}
{!ixAccounts[keyIndex] &&
"remaining account #" +
(keyIndex - ixAccounts.length + 1)}
</div>
{am.isWritable && (
<span className="badge bg-info-soft me-1">Writable</span>
)}
{am.isSigner && (
<span className="badge bg-info-soft me-1">Signer</span>
)}
</td>
<td>
<Address pubkey={am.pubkey} alignRight link />
</td>
</tr>
))}
</InstructionCard>
)}
{!idl && (
<InstructionCard
ix={ix}
index={index}
result={result}
title={`Unknown Program: Unknown Instruction`}
innerCards={innerCards}
childIndex={childIndex}
defaultRaw
/>
)}
</div>
);
}

View File

@@ -0,0 +1,16 @@
import { TransactionInstruction } from "@solana/web3.js";
// list of programs written in anchor
// - should have idl on-chain for GenericAnchorDetailsCard to work out of the box
// - before adding another program to this list, please make sure that the ix
// are decoding without any errors
const knownAnchorPrograms = [
// https://github.com/blockworks-foundation/voter-stake-registry
"4Q6WW2ouZ6V3iaNm56MTd5n2tnTm4C5fiH8miFHnAFHo",
];
export const isInstructionFromAnAnchorProgram = (
instruction: TransactionInstruction
) => {
return knownAnchorPrograms.includes(instruction.programId.toBase58());
};

View File

@@ -500,7 +500,7 @@ export function decodeInitOpenOrders(
openOrders: ix.keys[0].pubkey,
openOrdersOwner: ix.keys[1].pubkey,
market: ix.keys[2].pubkey,
openOrdersMarketAuthority: ix.keys[4].pubkey,
openOrdersMarketAuthority: ix.keys[4]?.pubkey,
},
};
}

View File

@@ -166,6 +166,10 @@ const BurnChecked = type({
tokenAmount: TokenAmountUi,
});
const SyncNative = type({
account: PublicKeyFromString,
});
export type TokenInstructionType = Infer<typeof TokenInstructionType>;
export const TokenInstructionType = enums([
"initializeMint",
@@ -188,6 +192,7 @@ export const TokenInstructionType = enums([
"approveChecked",
"mintToChecked",
"burnChecked",
"syncNative",
]);
export const IX_STRUCTS = {
@@ -211,6 +216,7 @@ export const IX_STRUCTS = {
approveChecked: ApproveChecked,
mintToChecked: MintToChecked,
burnChecked: BurnChecked,
syncNative: SyncNative,
};
export const IX_TITLES = {
@@ -234,4 +240,5 @@ export const IX_TITLES = {
approveChecked: "Approve (Checked)",
mintToChecked: "Mint To (Checked)",
burnChecked: "Burn (Checked)",
syncNative: "Sync Native",
};

View File

@@ -21,8 +21,8 @@ import { WormholeDetailsCard } from "components/instruction/WormholeDetailsCard"
import { UnknownDetailsCard } from "components/instruction/UnknownDetailsCard";
import { BonfidaBotDetailsCard } from "components/instruction/BonfidaBotDetails";
import {
SignatureProps,
INNER_INSTRUCTIONS_START_SLOT,
SignatureProps,
} from "pages/TransactionDetailsPage";
import { intoTransactionInstruction } from "utils/tx";
import { isSerumInstruction } from "components/instruction/serum/types";
@@ -39,10 +39,12 @@ import { BpfUpgradeableLoaderDetailsCard } from "components/instruction/bpf-upgr
import { VoteDetailsCard } from "components/instruction/vote/VoteDetailsCard";
import { isWormholeInstruction } from "components/instruction/wormhole/types";
import { AssociatedTokenDetailsCard } from "components/instruction/AssociatedTokenDetailsCard";
import { isMangoInstruction } from "components/instruction/mango/types";
import { MangoDetailsCard } from "components/instruction/MangoDetails";
import { isPythInstruction } from "components/instruction/pyth/types";
import { PythDetailsCard } from "components/instruction/pyth/PythDetailsCard";
import { isInstructionFromAnAnchorProgram } from "../instruction/anchor/types";
import { GenericAnchorDetailsCard } from "../instruction/GenericAnchorDetails";
import { isMangoInstruction } from "../instruction/mango/types";
export type InstructionDetailsProps = {
tx: ParsedTransaction;
@@ -214,6 +216,8 @@ function renderInstructionCard({
if (isBonfidaBotInstruction(transactionIx)) {
return <BonfidaBotDetailsCard key={key} {...props} />;
} else if (isInstructionFromAnAnchorProgram(transactionIx)) {
return <GenericAnchorDetailsCard key={key} {...props} />;
} else if (isMangoInstruction(transactionIx)) {
return <MangoDetailsCard key={key} {...props} />;
} else if (isSerumInstruction(transactionIx)) {

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.10.3"
version = "1.11.0"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -17,12 +17,12 @@ crossbeam-channel = "0.5"
log = "0.4.14"
serde = "1.0.136"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-cli-config = { path = "../cli-config", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-cli-config = { path = "../cli-config", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-frozen-abi"
version = "1.10.3"
version = "1.11.0"
description = "Solana Frozen ABI"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,8 +16,9 @@ lazy_static = "1.4.0"
log = "0.4.14"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_bytes = "0.11"
sha2 = "0.10.2"
solana-frozen-abi-macro = { path = "macro", version = "=1.10.3" }
solana-frozen-abi-macro = { path = "macro", version = "=1.11.0" }
thiserror = "1.0"
[target.'cfg(not(target_arch = "bpf"))'.dependencies]
@@ -26,7 +27,7 @@ im = { version = "15.0.0", features = ["rayon", "serde"] }
memmap2 = "0.5.3"
[target.'cfg(not(target_arch = "bpf"))'.dev-dependencies]
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[build-dependencies]
rustc_version = "0.4"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-frozen-abi-macro"
version = "1.10.3"
version = "1.11.0"
description = "Solana Frozen ABI Macro"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -562,6 +562,17 @@ mod tests {
#[derive(Serialize, AbiExample)]
struct TestNewtypeStruct(i8);
#[frozen_abi(digest = "Hbs1X2X7TF2gFEfsspwfZ1JKr8ZGbLY3uidQBebqcMYt")]
#[derive(Serialize, AbiExample)]
struct Foo<'a> {
#[serde(with = "serde_bytes")]
data1: Vec<u8>,
#[serde(with = "serde_bytes")]
data2: &'a [u8],
#[serde(with = "serde_bytes")]
data3: &'a Vec<u8>,
}
#[frozen_abi(digest = "5qio5qYurHDv6fq5kcwP2ue2RBEazSZF8CPk2kUuwC2j")]
#[derive(Serialize, AbiExample)]
struct TestStructReversed {

View File

@@ -410,11 +410,18 @@ lazy_static! {
impl AbiExample for &Vec<u8> {
fn example() -> Self {
info!("AbiExample for (&Vec<T>): {}", type_name::<Self>());
info!("AbiExample for (&Vec<u8>): {}", type_name::<Self>());
&*VEC_U8
}
}
impl AbiExample for &[u8] {
fn example() -> Self {
info!("AbiExample for (&[u8]): {}", type_name::<Self>());
&VEC_U8[..]
}
}
impl<T: AbiExample> AbiExample for VecDeque<T> {
fn example() -> Self {
info!("AbiExample for (Vec<T>): {}", type_name::<Self>());

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-utils"
version = "1.10.3"
version = "1.11.0"
description = "Solana Genesis Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ documentation = "https://docs.rs/solana-download-utils"
edition = "2021"
[dependencies]
solana-download-utils = { path = "../download-utils", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-download-utils = { path = "../download-utils", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
[lib]
crate-type = ["lib"]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,16 +15,16 @@ clap = "2.33.1"
serde = "1.0.136"
serde_json = "1.0.79"
serde_yaml = "0.8.23"
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-cli-config = { path = "../cli-config", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-stake-program = { path = "../programs/stake", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-cli-config = { path = "../cli-config", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
tempfile = "3.3.0"
[[bin]]

View File

@@ -13,7 +13,7 @@ use {
},
solana_entry::poh::compute_hashes_per_tick,
solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account},
solana_ledger::{blockstore::create_new_ledger, blockstore_db::BlockstoreAdvancedOptions},
solana_ledger::{blockstore::create_new_ledger, blockstore_db::LedgerColumnOptions},
solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
solana_sdk::{
account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
@@ -629,7 +629,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
&ledger_path,
&genesis_config,
max_genesis_archive_unpacked_size,
BlockstoreAdvancedOptions::default(),
LedgerColumnOptions::default(),
)?;
println!("{}", genesis_config);

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-geyser-plugin-interface"
description = "The Solana Geyser plugin interface."
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,8 +11,8 @@ documentation = "https://docs.rs/solana-geyser-plugin-interface"
[dependencies]
log = "0.4.11"
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
thiserror = "1.0.30"
[package.metadata.docs.rs]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-geyser-plugin-manager"
description = "The Solana Geyser plugin manager."
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,13 +16,13 @@ json5 = "0.4.1"
libloading = "0.7.3"
log = "0.4.11"
serde_json = "1.0.79"
solana-geyser-plugin-interface = { path = "../geyser-plugin-interface", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-rpc = { path = "../rpc", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-geyser-plugin-interface = { path = "../geyser-plugin-interface", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-rpc = { path = "../rpc", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
thiserror = "1.0.30"
[package.metadata.docs.rs]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,24 +27,24 @@ rayon = "1.5.1"
serde = "1.0.136"
serde_bytes = "0.11"
serde_derive = "1.0.103"
solana-bloom = { path = "../bloom", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.3" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-bloom = { path = "../bloom", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
thiserror = "1.0"
[dev-dependencies]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-install"
description = "The solana cluster software installer"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,12 +26,12 @@ reqwest = { version = "0.11.10", default-features = false, features = ["blocking
semver = "1.0.6"
serde = { version = "1.0.136", features = ["derive"] }
serde_yaml = "0.8.23"
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-config-program = { path = "../programs/config", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-config-program = { path = "../programs/config", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
tar = "0.4.38"
tempfile = "3.3.0"
url = "2.2.2"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "1.10.3"
version = "1.11.0"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,11 +14,11 @@ bs58 = "0.4.0"
clap = "2.33"
dirs-next = "2.0.0"
num_cpus = "1.13.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-cli-config = { path = "../cli-config", version = "=1.10.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-cli-config = { path = "../cli-config", version = "=1.11.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
tiny-bip39 = "0.8.2"
[[bin]]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -21,20 +21,20 @@ log = { version = "0.4.14" }
regex = "1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.79"
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-cli-output = { path = "../cli-output", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-stake-program = { path = "../programs/stake", version = "=1.10.3" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-cli-output = { path = "../cli-output", version = "=1.11.0" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.11.0" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
tokio = { version = "1", features = ["full"] }
[target.'cfg(not(target_env = "msvc"))'.dependencies]

View File

@@ -34,8 +34,9 @@ async fn upload(
starting_slot: Slot,
ending_slot: Option<Slot>,
force_reupload: bool,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false, None, None)
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
@@ -50,17 +51,22 @@ async fn upload(
.await
}
async fn delete_slots(slots: Vec<Slot>, dry_run: bool) -> Result<(), Box<dyn std::error::Error>> {
let read_only = dry_run;
let bigtable = solana_storage_bigtable::LedgerStorage::new(read_only, None, None)
async fn delete_slots(
slots: Vec<Slot>,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let dry_run = config.read_only;
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
solana_ledger::bigtable_delete::delete_confirmed_blocks(bigtable, slots, dry_run).await
}
async fn first_available_block() -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(true, None, None).await?;
async fn first_available_block(
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config).await?;
match bigtable.get_first_available_block().await? {
Some(block) => println!("{}", block),
None => println!("No blocks available"),
@@ -69,8 +75,12 @@ async fn first_available_block() -> Result<(), Box<dyn std::error::Error>> {
Ok(())
}
async fn block(slot: Slot, output_format: OutputFormat) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false, None, None)
async fn block(
slot: Slot,
output_format: OutputFormat,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
@@ -101,8 +111,12 @@ async fn block(slot: Slot, output_format: OutputFormat) -> Result<(), Box<dyn st
Ok(())
}
async fn blocks(starting_slot: Slot, limit: usize) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false, None, None)
async fn blocks(
starting_slot: Slot,
limit: usize,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
@@ -116,11 +130,10 @@ async fn blocks(starting_slot: Slot, limit: usize) -> Result<(), Box<dyn std::er
async fn compare_blocks(
starting_slot: Slot,
limit: usize,
credential_path: String,
config: solana_storage_bigtable::LedgerStorageConfig,
ref_config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
assert!(!credential_path.is_empty());
let owned_bigtable = solana_storage_bigtable::LedgerStorage::new(false, None, None)
let owned_bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("failed to connect to owned bigtable: {:?}", err))?;
let owned_bigtable_slots = owned_bigtable
@@ -130,10 +143,9 @@ async fn compare_blocks(
"owned bigtable {} blocks found ",
owned_bigtable_slots.len()
);
let reference_bigtable =
solana_storage_bigtable::LedgerStorage::new(false, None, Some(credential_path))
.await
.map_err(|err| format!("failed to connect to reference bigtable: {:?}", err))?;
let reference_bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(ref_config)
.await
.map_err(|err| format!("failed to connect to reference bigtable: {:?}", err))?;
let reference_bigtable_slots = reference_bigtable
.get_confirmed_blocks(starting_slot, limit)
@@ -160,8 +172,9 @@ async fn confirm(
signature: &Signature,
verbose: bool,
output_format: OutputFormat,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false, None, None)
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
@@ -211,8 +224,9 @@ pub async fn transaction_history(
verbose: bool,
show_transactions: bool,
query_chunk_size: usize,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(true, None, None).await?;
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config).await?;
let mut loaded_block: Option<(Slot, ConfirmedBlock)> = None;
while limit > 0 {
@@ -308,6 +322,15 @@ impl BigTableSubCommand for App<'_, '_> {
.about("Ledger data on a BigTable instance")
.setting(AppSettings::InferSubcommands)
.setting(AppSettings::SubcommandRequiredElseHelp)
.arg(
Arg::with_name("rpc_bigtable_instance_name")
.global(true)
.long("rpc-bigtable-instance-name")
.takes_value(true)
.value_name("INSTANCE_NAME")
.default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME)
.help("Name of the target Bigtable instance")
)
.subcommand(
SubCommand::with_name("upload")
.about("Upload the ledger to BigTable")
@@ -417,7 +440,8 @@ impl BigTableSubCommand for App<'_, '_> {
.required(true)
.default_value("1000")
.help("Maximum number of slots to check"),
).arg(
)
.arg(
Arg::with_name("reference_credential")
.long("reference-credential")
.short("c")
@@ -425,6 +449,14 @@ impl BigTableSubCommand for App<'_, '_> {
.takes_value(true)
.required(true)
.help("File path for a credential to a reference bigtable"),
)
.arg(
Arg::with_name("reference_instance_name")
.long("reference-instance-name")
.takes_value(true)
.value_name("INSTANCE_NAME")
.default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME)
.help("Name of the reference Bigtable instance to compare to")
),
)
.subcommand(
@@ -521,7 +553,28 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
let verbose = matches.is_present("verbose");
let output_format = OutputFormat::from_matches(matches, "output_format", verbose);
let future = match matches.subcommand() {
// this is kinda stupid, but there seems to be a bug in clap when a subcommand
// arg is marked both `global(true)` and `default_value("default_value")`.
// despite the "global", when the arg is specified on the subcommand, its value
// is not propagated down to the (sub)subcommand args, resulting in the default
// value when queried there. similarly, if the arg is specified on the
// (sub)subcommand, the value is not propagated back up to the subcommand args,
// again resulting in the default value. the arg having declared a
// `default_value()` obviates `is_present(...)` tests since they will always
// return true. so we concede and compare against the expected default. :/
let (subcommand, sub_matches) = matches.subcommand();
let on_command = matches
.value_of("rpc_bigtable_instance_name")
.map(|v| v != solana_storage_bigtable::DEFAULT_INSTANCE_NAME)
.unwrap_or(false);
let instance_name = if on_command {
value_t_or_exit!(matches, "rpc_bigtable_instance_name", String)
} else {
let sub_matches = sub_matches.as_ref().unwrap();
value_t_or_exit!(sub_matches, "rpc_bigtable_instance_name", String)
};
let future = match (subcommand, sub_matches) {
("upload", Some(arg_matches)) => {
let starting_slot = value_t!(arg_matches, "starting_slot", Slot).unwrap_or(0);
let ending_slot = value_t!(arg_matches, "ending_slot", Slot).ok();
@@ -531,41 +584,79 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
AccessType::TryPrimaryThenSecondary,
None,
);
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(upload(
blockstore,
starting_slot,
ending_slot,
force_reupload,
config,
))
}
("delete-slots", Some(arg_matches)) => {
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
let dry_run = !arg_matches.is_present("force");
runtime.block_on(delete_slots(slots, dry_run))
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: !arg_matches.is_present("force"),
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(delete_slots(slots, config))
}
("first-available-block", Some(_arg_matches)) => {
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: true,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(first_available_block(config))
}
("first-available-block", Some(_arg_matches)) => runtime.block_on(first_available_block()),
("block", Some(arg_matches)) => {
let slot = value_t_or_exit!(arg_matches, "slot", Slot);
runtime.block_on(block(slot, output_format))
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(block(slot, output_format, config))
}
("blocks", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let limit = value_t_or_exit!(arg_matches, "limit", usize);
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(blocks(starting_slot, limit))
runtime.block_on(blocks(starting_slot, limit, config))
}
("compare-blocks", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let limit = value_t_or_exit!(arg_matches, "limit", usize);
let reference_credential_filepath =
value_t_or_exit!(arg_matches, "reference_credential", String);
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
let credential_path = Some(value_t_or_exit!(
arg_matches,
"reference_credential",
String
));
let ref_instance_name =
value_t_or_exit!(arg_matches, "reference_instance_name", String);
let ref_config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
credential_path,
instance_name: ref_instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(compare_blocks(
starting_slot,
limit,
reference_credential_filepath,
))
runtime.block_on(compare_blocks(starting_slot, limit, config, ref_config))
}
("confirm", Some(arg_matches)) => {
let signature = arg_matches
@@ -573,8 +664,13 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
.unwrap()
.parse()
.expect("Invalid signature");
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(confirm(&signature, verbose, output_format))
runtime.block_on(confirm(&signature, verbose, output_format, config))
}
("transaction-history", Some(arg_matches)) => {
let address = pubkey_of(arg_matches, "address").unwrap();
@@ -587,6 +683,11 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
.value_of("until")
.map(|signature| signature.parse().expect("Invalid signature"));
let show_transactions = arg_matches.is_present("show_transactions");
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: true,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(transaction_history(
&address,
@@ -596,6 +697,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
verbose,
show_transactions,
query_chunk_size,
config,
))
}
_ => unreachable!(),
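
Every subcommand above now assembles a solana_storage_bigtable::LedgerStorageConfig and opens storage via LedgerStorage::new_with_config, replacing the old positional LedgerStorage::new(...) calls. A condensed sketch of the pattern follows, limited to the fields this diff actually sets (read_only, instance_name, credential_path); the helper name open_reference_storage is hypothetical.

// Sketch of the config-based constructor used throughout this file; fields not
// listed here fall back to LedgerStorageConfig::default(), as in the diff.
async fn open_reference_storage(
    instance_name: String,
    credential_path: Option<String>,
) -> Result<solana_storage_bigtable::LedgerStorage, Box<dyn std::error::Error>> {
    let config = solana_storage_bigtable::LedgerStorageConfig {
        read_only: true,
        instance_name,
        credential_path,
        ..solana_storage_bigtable::LedgerStorageConfig::default()
    };
    let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
        .await
        .map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
    Ok(bigtable)
}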

View File

@@ -25,8 +25,8 @@ use {
bank_forks_utils,
blockstore::{create_new_ledger, Blockstore, PurgeType},
blockstore_db::{
self, AccessType, BlockstoreAdvancedOptions, BlockstoreOptions, BlockstoreRecoveryMode,
Database,
self, AccessType, BlockstoreOptions, BlockstoreRecoveryMode, Database,
LedgerColumnOptions,
},
blockstore_processor::{BlockstoreProcessorError, ProcessOptions},
shred::Shred,
@@ -1719,7 +1719,7 @@ fn main() {
&output_directory,
&genesis_config,
solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
BlockstoreAdvancedOptions::default(),
LedgerColumnOptions::default(),
)
.unwrap_or_else(|err| {
eprintln!("Failed to write genesis config: {:?}", err);

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "1.10.3"
version = "1.11.0"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -32,21 +32,21 @@ reed-solomon-erasure = { version = "5.0.1", features = ["simd-accel"] }
serde = "1.0.136"
serde_bytes = "0.11.5"
sha2 = "0.10.2"
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.3" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.10.3" }
solana-storage-proto = { path = "../storage-proto", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.11.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.0" }
solana-storage-proto = { path = "../storage-proto", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
tempfile = "3.3.0"
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
@@ -63,8 +63,8 @@ features = ["lz4"]
[dev-dependencies]
assert_matches = "1.5.0"
matches = "0.1.9"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[build-dependencies]
rustc_version = "0.4"

View File

@@ -5,9 +5,9 @@ use {
crate::{
ancestor_iterator::AncestorIterator,
blockstore_db::{
columns as cf, AccessType, BlockstoreAdvancedOptions, BlockstoreOptions, Column,
ColumnName, Database, IteratorDirection, IteratorMode, LedgerColumn, Result,
ShredStorageType, WriteBatch,
columns as cf, AccessType, BlockstoreCompressionType, BlockstoreOptions, Column,
ColumnName, Database, IteratorDirection, IteratorMode, LedgerColumn,
LedgerColumnOptions, Result, ShredStorageType, WriteBatch,
},
blockstore_meta::*,
leader_schedule_cache::LeaderScheduleCache,
@@ -171,14 +171,14 @@ pub struct Blockstore {
block_height_cf: LedgerColumn<cf::BlockHeight>,
program_costs_cf: LedgerColumn<cf::ProgramCosts>,
bank_hash_cf: LedgerColumn<cf::BankHash>,
last_root: Arc<RwLock<Slot>>,
insert_shreds_lock: Arc<Mutex<()>>,
last_root: RwLock<Slot>,
insert_shreds_lock: Mutex<()>,
pub new_shreds_signals: Vec<Sender<bool>>,
pub completed_slots_senders: Vec<CompletedSlotsSender>,
pub lowest_cleanup_slot: Arc<RwLock<Slot>>,
pub lowest_cleanup_slot: RwLock<Slot>,
no_compaction: bool,
slots_stats: Arc<Mutex<SlotsStats>>,
advanced_options: BlockstoreAdvancedOptions,
slots_stats: Mutex<SlotsStats>,
column_options: LedgerColumnOptions,
}
struct SlotsStats {
@@ -521,19 +521,49 @@ impl BlockstoreRocksDbColumnFamilyMetrics {
}
macro_rules! rocksdb_metric_header {
($metric_name:literal, $cf_name:literal, $advanced_options:expr) => {
match $advanced_options.shred_storage_type {
($metric_name:literal, $cf_name:literal, $column_options:expr) => {
match $column_options.shred_storage_type {
ShredStorageType::RocksLevel =>
rocksdb_metric_header!(@all_fields $metric_name, $cf_name, "rocks_level"),
rocksdb_metric_header!(@compression_type $metric_name, $cf_name, $column_options, "rocks_level"),
ShredStorageType::RocksFifo(_) =>
rocksdb_metric_header!(@all_fields $metric_name, $cf_name, "rocks_fifo"),
rocksdb_metric_header!(@compression_type $metric_name, $cf_name, $column_options, "rocks_fifo"),
}
};
(@all_fields $metric_name:literal, $cf_name:literal, $storage_type:literal) => {
(@compression_type $metric_name:literal, $cf_name:literal, $column_options:expr, $storage_type:literal) => {
match $column_options.compression_type {
BlockstoreCompressionType::None => rocksdb_metric_header!(@all_fields
$metric_name,
$cf_name,
$storage_type,
"None"
),
BlockstoreCompressionType::Snappy => rocksdb_metric_header!(@all_fields
$metric_name,
$cf_name,
$storage_type,
"Snappy"
),
BlockstoreCompressionType::Lz4 => rocksdb_metric_header!(@all_fields
$metric_name,
$cf_name,
$storage_type,
"Lz4"
),
BlockstoreCompressionType::Zlib => rocksdb_metric_header!(@all_fields
$metric_name,
$cf_name,
$storage_type,
"Zlib"
),
}
};
(@all_fields $metric_name:literal, $cf_name:literal, $storage_type:literal, $compression_type:literal) => {
concat!($metric_name,
",cf_name=", $cf_name,
",storage=", $storage_type,
",compression=", $compression_type,
)
};
}
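
// Illustrative sketch, not part of the diff: for the slot_meta column with
// RocksLevel storage and BlockstoreCompressionType::None, the @all_fields arm
// above assembles the metric header checked below.
#[test]
fn rocksdb_metric_header_shape_sketch() {
    let header = concat!(
        "blockstore_rocksdb_cfs",
        ",cf_name=", "slot_meta",
        ",storage=", "rocks_level",
        ",compression=", "None",
    );
    assert_eq!(
        header,
        "blockstore_rocksdb_cfs,cf_name=slot_meta,storage=rocks_level,compression=None"
    );
}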
@@ -569,9 +599,9 @@ impl Blockstore {
fn do_open(ledger_path: &Path, options: BlockstoreOptions) -> Result<Blockstore> {
fs::create_dir_all(&ledger_path)?;
let blockstore_path = ledger_path.join(Self::blockstore_directory(
&options.advanced_options.shred_storage_type,
&options.column_options.shred_storage_type,
));
let advanced_options = options.advanced_options.clone();
let column_options = options.column_options.clone();
adjust_ulimit_nofile(options.enforce_ulimit_nofile)?;
@@ -615,7 +645,7 @@ impl Blockstore {
.next()
.map(|(slot, _)| slot)
.unwrap_or(0);
let last_root = Arc::new(RwLock::new(max_root));
let last_root = RwLock::new(max_root);
// Get active transaction-status index or 0
let active_transaction_status_index = db
@@ -659,12 +689,12 @@ impl Blockstore {
bank_hash_cf,
new_shreds_signals: vec![],
completed_slots_senders: vec![],
insert_shreds_lock: Arc::new(Mutex::new(())),
insert_shreds_lock: Mutex::<()>::default(),
last_root,
lowest_cleanup_slot: Arc::new(RwLock::new(0)),
lowest_cleanup_slot: RwLock::<Slot>::default(),
no_compaction: false,
slots_stats: Arc::new(Mutex::new(SlotsStats::default())),
advanced_options,
slots_stats: Mutex::<SlotsStats>::default(),
column_options,
};
if initialize_transaction_status_index {
blockstore.initialize_transaction_status_index()?;
@@ -961,101 +991,101 @@ impl Blockstore {
/// Collects and reports [`BlockstoreRocksDbColumnFamilyMetrics`] for
/// all the column families.
pub fn submit_rocksdb_cf_metrics_for_all_cfs(&self) {
let advanced_options = &self.advanced_options;
let column_options = &self.column_options;
self.submit_rocksdb_cf_metrics::<cf::SlotMeta>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"slot_meta",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::DeadSlots>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"dead_slots",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::DuplicateSlots>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"duplicate_slots",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::ErasureMeta>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"erasure_meta",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::Orphans>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"orphans",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::BankHash>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"bank_hash",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::Root>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"root",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::Index>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"index",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::ShredData>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"shred_data",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::ShredCode>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"shred_code",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::TransactionStatus>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"transaction_status",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::AddressSignatures>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"address_signature",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::TransactionMemos>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"transaction_memos",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::TransactionStatusIndex>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"transaction_status_index",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::Rewards>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"rewards",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::Blocktime>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"blocktime",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::PerfSamples>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"perf_sample",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::BlockHeight>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"block_height",
advanced_options
column_options
));
self.submit_rocksdb_cf_metrics::<cf::ProgramCosts>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"program_costs",
advanced_options
column_options
));
}
@@ -2109,7 +2139,7 @@ impl Blockstore {
ticks_per_slot: u64,
parent: Option<u64>,
is_full_slot: bool,
keypair: &Arc<Keypair>,
keypair: &Keypair,
entries: Vec<Entry>,
version: u16,
) -> Result<usize /*num of data shreds*/> {
@@ -3574,7 +3604,7 @@ impl Blockstore {
self.db.is_primary_access()
}
pub fn scan_and_fix_roots(&self, exit: &Arc<AtomicBool>) -> Result<()> {
pub fn scan_and_fix_roots(&self, exit: &AtomicBool) -> Result<()> {
let ancestor_iterator = AncestorIterator::new(self.last_root(), self)
.take_while(|&slot| slot >= self.lowest_cleanup_slot());
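The signature narrowing here, and for the keypair parameter above (&Arc<Keypair> to &Keypair, &Arc<AtomicBool> to &AtomicBool), leans on Rust's deref coercion, so callers that still own an Arc keep passing &arc unchanged. A minimal sketch under that assumption; the function name and bindings are placeholders:

use {
    solana_ledger::{blockstore::Blockstore, blockstore_db::Result},
    std::sync::{atomic::AtomicBool, Arc},
};

fn shutdown_aware_scan(blockstore: &Blockstore, exit: Arc<AtomicBool>) -> Result<()> {
    // &Arc<AtomicBool> coerces to &AtomicBool, so the existing call expression
    // compiles against the narrowed signature without changes.
    blockstore.scan_and_fix_roots(&exit)
}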
@@ -4150,20 +4180,20 @@ pub fn create_new_ledger(
ledger_path: &Path,
genesis_config: &GenesisConfig,
max_genesis_archive_unpacked_size: u64,
advanced_options: BlockstoreAdvancedOptions,
column_options: LedgerColumnOptions,
) -> Result<Hash> {
Blockstore::destroy(ledger_path)?;
genesis_config.write(ledger_path)?;
// Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
let blockstore_dir = Blockstore::blockstore_directory(&advanced_options.shred_storage_type);
let blockstore_dir = Blockstore::blockstore_directory(&column_options.shred_storage_type);
let blockstore = Blockstore::open_with_options(
ledger_path,
BlockstoreOptions {
access_type: AccessType::PrimaryOnly,
recovery_mode: None,
enforce_ulimit_nofile: false,
advanced_options: advanced_options.clone(),
column_options: column_options.clone(),
},
)?;
let ticks_per_slot = genesis_config.ticks_per_slot;
@@ -4332,7 +4362,7 @@ macro_rules! create_new_tmp_ledger {
$crate::blockstore::create_new_ledger_from_name(
$crate::tmp_ledger_name!(),
$genesis_config,
$crate::blockstore_db::BlockstoreAdvancedOptions::default(),
$crate::blockstore_db::LedgerColumnOptions::default(),
)
};
}
@@ -4343,7 +4373,7 @@ macro_rules! create_new_tmp_ledger_auto_delete {
$crate::blockstore::create_new_ledger_from_name_auto_delete(
$crate::tmp_ledger_name!(),
$genesis_config,
$crate::blockstore_db::BlockstoreAdvancedOptions::default(),
$crate::blockstore_db::LedgerColumnOptions::default(),
)
};
}
@@ -4354,10 +4384,11 @@ macro_rules! create_new_tmp_ledger_fifo_auto_delete {
$crate::blockstore::create_new_ledger_from_name_auto_delete(
$crate::tmp_ledger_name!(),
$genesis_config,
$crate::blockstore_db::BlockstoreAdvancedOptions {
$crate::blockstore_db::LedgerColumnOptions {
shred_storage_type: $crate::blockstore_db::ShredStorageType::RocksFifo(
$crate::blockstore_db::BlockstoreRocksFifoOptions::default(),
),
..$crate::blockstore_db::LedgerColumnOptions::default()
},
)
};
@@ -4388,10 +4419,10 @@ pub fn verify_shred_slots(slot: Slot, parent_slot: Slot, last_root: Slot) -> boo
pub fn create_new_ledger_from_name(
name: &str,
genesis_config: &GenesisConfig,
advanced_options: BlockstoreAdvancedOptions,
column_options: LedgerColumnOptions,
) -> (PathBuf, Hash) {
let (ledger_path, blockhash) =
create_new_ledger_from_name_auto_delete(name, genesis_config, advanced_options);
create_new_ledger_from_name_auto_delete(name, genesis_config, column_options);
(ledger_path.into_path(), blockhash)
}
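Outside of the test macros the same rename applies to direct callers: BlockstoreOptions now nests a LedgerColumnOptions instead of BlockstoreAdvancedOptions. A sketch of opening a FIFO-backed blockstore with the renamed types, mirroring the test construction later in this diff; the function name is a placeholder:

use {
    solana_ledger::{
        blockstore::Blockstore,
        blockstore_db::{
            BlockstoreOptions, BlockstoreRocksFifoOptions, LedgerColumnOptions, Result,
            ShredStorageType,
        },
    },
    std::path::Path,
};

fn open_fifo_blockstore(ledger_path: &Path) -> Result<Blockstore> {
    Blockstore::open_with_options(
        ledger_path,
        BlockstoreOptions {
            column_options: LedgerColumnOptions {
                // Store data/coding shreds with RocksDB FIFO compaction.
                shred_storage_type: ShredStorageType::RocksFifo(
                    BlockstoreRocksFifoOptions::default(),
                ),
                ..LedgerColumnOptions::default()
            },
            ..BlockstoreOptions::default()
        },
    )
}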
@@ -4402,14 +4433,14 @@ pub fn create_new_ledger_from_name(
pub fn create_new_ledger_from_name_auto_delete(
name: &str,
genesis_config: &GenesisConfig,
advanced_options: BlockstoreAdvancedOptions,
column_options: LedgerColumnOptions,
) -> (TempDir, Hash) {
let ledger_path = get_ledger_path_from_name_auto_delete(name);
let blockhash = create_new_ledger(
ledger_path.path(),
genesis_config,
MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
advanced_options,
column_options,
)
.unwrap();
(ledger_path, blockhash)
@@ -4666,6 +4697,7 @@ pub mod tests {
pubkey::Pubkey,
signature::Signature,
transaction::{Transaction, TransactionError},
transaction_context::TransactionReturnData,
},
solana_storage_proto::convert::generated,
solana_transaction_status::{InnerInstructions, Reward, Rewards, TransactionTokenBalance},
@@ -4718,10 +4750,11 @@ pub mod tests {
let blockstore = Blockstore::open_with_options(
ledger_path.path(),
BlockstoreOptions {
advanced_options: BlockstoreAdvancedOptions {
column_options: LedgerColumnOptions {
shred_storage_type: ShredStorageType::RocksFifo(
BlockstoreRocksFifoOptions::default(),
),
..LedgerColumnOptions::default()
},
..BlockstoreOptions::default()
},
@@ -6826,6 +6859,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
blockstore
@@ -6843,6 +6877,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
blockstore
@@ -6860,6 +6895,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
blockstore
@@ -6879,6 +6915,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
},
}
})
@@ -6991,6 +7028,10 @@ pub mod tests {
writable: vec![Pubkey::new_unique()],
readonly: vec![Pubkey::new_unique()],
};
let test_return_data = TransactionReturnData {
program_id: Pubkey::new_unique(),
data: vec![1, 2, 3],
};
// result not found
assert!(transaction_status_cf
@@ -7010,6 +7051,7 @@ pub mod tests {
post_token_balances: Some(post_token_balances_vec.clone()),
rewards: Some(rewards_vec.clone()),
loaded_addresses: test_loaded_addresses.clone(),
return_data: Some(test_return_data.clone()),
}
.into();
assert!(transaction_status_cf
@@ -7028,6 +7070,7 @@ pub mod tests {
post_token_balances,
rewards,
loaded_addresses,
return_data,
} = transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((0, Signature::default(), 0))
.unwrap()
@@ -7044,6 +7087,7 @@ pub mod tests {
assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
assert_eq!(rewards.unwrap(), rewards_vec);
assert_eq!(loaded_addresses, test_loaded_addresses);
assert_eq!(return_data.unwrap(), test_return_data);
// insert value
let status = TransactionStatusMeta {
@@ -7057,6 +7101,7 @@ pub mod tests {
post_token_balances: Some(post_token_balances_vec.clone()),
rewards: Some(rewards_vec.clone()),
loaded_addresses: test_loaded_addresses.clone(),
return_data: Some(test_return_data.clone()),
}
.into();
assert!(transaction_status_cf
@@ -7075,6 +7120,7 @@ pub mod tests {
post_token_balances,
rewards,
loaded_addresses,
return_data,
} = transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
0,
@@ -7097,6 +7143,7 @@ pub mod tests {
assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
assert_eq!(rewards.unwrap(), rewards_vec);
assert_eq!(loaded_addresses, test_loaded_addresses);
assert_eq!(return_data.unwrap(), test_return_data);
}
#[test]
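The recurring addition in these tests is the new return_data field on TransactionStatusMeta: an optional TransactionReturnData holding the returning program's id and its raw return bytes. A stripped-down sketch of the value being stored; the field values are arbitrary test data:

use solana_sdk::{pubkey::Pubkey, transaction_context::TransactionReturnData};

fn example_return_data() -> Option<TransactionReturnData> {
    Some(TransactionReturnData {
        program_id: Pubkey::new_unique(), // program that produced the return data
        data: vec![1, 2, 3],              // raw return bytes
    })
}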
@@ -7325,6 +7372,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
@@ -7520,6 +7568,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
@@ -7691,6 +7740,10 @@ pub mod tests {
let post_token_balances = Some(vec![]);
let rewards = Some(vec![]);
let signature = transaction.signatures[0];
let return_data = Some(TransactionReturnData {
program_id: Pubkey::new_unique(),
data: vec![1, 2, 3],
});
let status = TransactionStatusMeta {
status: Ok(()),
fee: 42,
@@ -7702,6 +7755,7 @@ pub mod tests {
post_token_balances: post_token_balances.clone(),
rewards: rewards.clone(),
loaded_addresses: LoadedAddresses::default(),
return_data: return_data.clone(),
}
.into();
blockstore
@@ -7721,6 +7775,7 @@ pub mod tests {
post_token_balances,
rewards,
loaded_addresses: LoadedAddresses::default(),
return_data,
},
}
})
@@ -7792,6 +7847,10 @@ pub mod tests {
let pre_token_balances = Some(vec![]);
let post_token_balances = Some(vec![]);
let rewards = Some(vec![]);
let return_data = Some(TransactionReturnData {
program_id: Pubkey::new_unique(),
data: vec![1, 2, 3],
});
let signature = transaction.signatures[0];
let status = TransactionStatusMeta {
status: Ok(()),
@@ -7804,6 +7863,7 @@ pub mod tests {
post_token_balances: post_token_balances.clone(),
rewards: rewards.clone(),
loaded_addresses: LoadedAddresses::default(),
return_data: return_data.clone(),
}
.into();
blockstore
@@ -7823,6 +7883,7 @@ pub mod tests {
post_token_balances,
rewards,
loaded_addresses: LoadedAddresses::default(),
return_data,
},
}
})
@@ -8582,6 +8643,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
transaction_status_cf
@@ -9139,6 +9201,10 @@ pub mod tests {
commission: None,
}]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData {
program_id: Pubkey::new_unique(),
data: vec![1, 2, 3],
}),
};
let deprecated_status: StoredTransactionStatusMeta = status.clone().try_into().unwrap();
let protobuf_status: generated::TransactionStatusMeta = status.into();


@@ -9,9 +9,10 @@ use {
self,
compaction_filter::CompactionFilter,
compaction_filter_factory::{CompactionFilterContext, CompactionFilterFactory},
ColumnFamily, ColumnFamilyDescriptor, CompactionDecision, DBCompactionStyle, DBIterator,
DBRawIterator, DBRecoveryMode, FifoCompactOptions, IteratorMode as RocksIteratorMode,
Options, WriteBatch as RWriteBatch, DB,
ColumnFamily, ColumnFamilyDescriptor, CompactionDecision, DBCompactionStyle,
DBCompressionType as RocksCompressionType, DBIterator, DBRawIterator, DBRecoveryMode,
FifoCompactOptions, IteratorMode as RocksIteratorMode, Options, WriteBatch as RWriteBatch,
DB,
},
serde::{de::DeserializeOwned, Serialize},
solana_runtime::hardened_unpack::UnpackError,
@@ -991,19 +992,49 @@ impl Default for ShredStorageType {
}
}
/// Advanced options for blockstore.
/// Each advanced option might also be used as a tag that supports
/// group-by operation when reporting Blockstore metrics.
#[derive(Clone)]
pub struct BlockstoreAdvancedOptions {
// Determine how to store both data and coding shreds. Default: RocksLevel.
pub shred_storage_type: ShredStorageType,
pub enum BlockstoreCompressionType {
None,
Snappy,
Lz4,
Zlib,
}
impl Default for BlockstoreAdvancedOptions {
impl Default for BlockstoreCompressionType {
fn default() -> Self {
Self::None
}
}
impl BlockstoreCompressionType {
fn to_rocksdb_compression_type(&self) -> RocksCompressionType {
match self {
Self::None => RocksCompressionType::None,
Self::Snappy => RocksCompressionType::Snappy,
Self::Lz4 => RocksCompressionType::Lz4,
Self::Zlib => RocksCompressionType::Zlib,
}
}
}
/// Options for LedgerColumn.
/// Each field might also be used as a tag that supports group-by operation when
/// reporting metrics.
#[derive(Clone)]
pub struct LedgerColumnOptions {
// Determine how to store both data and coding shreds. Default: RocksLevel.
pub shred_storage_type: ShredStorageType,
// Determine the way to compress column families which are eligible for
// compression.
pub compression_type: BlockstoreCompressionType,
}
impl Default for LedgerColumnOptions {
fn default() -> Self {
Self {
shred_storage_type: ShredStorageType::RocksLevel,
compression_type: BlockstoreCompressionType::default(),
}
}
}
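Together these put both knobs behind one options struct. A sketch of requesting Lz4 compression through it (a hypothetical caller; per the should_enable_compression helper further down, the setting currently only takes effect for the TransactionStatus column family):

use solana_ledger::blockstore_db::{
    BlockstoreCompressionType, BlockstoreOptions, LedgerColumnOptions,
};

fn lz4_blockstore_options() -> BlockstoreOptions {
    BlockstoreOptions {
        column_options: LedgerColumnOptions {
            compression_type: BlockstoreCompressionType::Lz4,
            ..LedgerColumnOptions::default()
        },
        ..BlockstoreOptions::default()
    }
}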
@@ -1015,7 +1046,7 @@ pub struct BlockstoreOptions {
pub recovery_mode: Option<BlockstoreRecoveryMode>,
// Whether to allow unlimited number of open files. Default: true.
pub enforce_ulimit_nofile: bool,
pub advanced_options: BlockstoreAdvancedOptions,
pub column_options: LedgerColumnOptions,
}
impl Default for BlockstoreOptions {
@@ -1025,7 +1056,7 @@ impl Default for BlockstoreOptions {
access_type: AccessType::PrimaryOnly,
recovery_mode: None,
enforce_ulimit_nofile: true,
advanced_options: BlockstoreAdvancedOptions::default(),
column_options: LedgerColumnOptions::default(),
}
}
}
@@ -1444,9 +1475,24 @@ fn get_cf_options<C: 'static + Column + ColumnName>(
});
}
process_cf_options_advanced::<C>(&mut cf_options, &options.column_options);
cf_options
}
fn process_cf_options_advanced<C: 'static + Column + ColumnName>(
cf_options: &mut Options,
column_options: &LedgerColumnOptions,
) {
if should_enable_compression::<C>() {
cf_options.set_compression_type(
column_options
.compression_type
.to_rocksdb_compression_type(),
);
}
}
/// Creates and returns the column family descriptors for both data shreds and
/// coding shreds column families.
///
@@ -1459,23 +1505,27 @@ fn new_cf_descriptor_pair_shreds<
options: &BlockstoreOptions,
oldest_slot: &OldestSlot,
) -> (ColumnFamilyDescriptor, ColumnFamilyDescriptor) {
match &options.advanced_options.shred_storage_type {
match &options.column_options.shred_storage_type {
ShredStorageType::RocksLevel => (
new_cf_descriptor::<D>(options, oldest_slot),
new_cf_descriptor::<C>(options, oldest_slot),
),
ShredStorageType::RocksFifo(fifo_options) => (
new_cf_descriptor_fifo::<D>(&fifo_options.shred_data_cf_size),
new_cf_descriptor_fifo::<C>(&fifo_options.shred_code_cf_size),
new_cf_descriptor_fifo::<D>(&fifo_options.shred_data_cf_size, &options.column_options),
new_cf_descriptor_fifo::<C>(&fifo_options.shred_code_cf_size, &options.column_options),
),
}
}
fn new_cf_descriptor_fifo<C: 'static + Column + ColumnName>(
max_cf_size: &u64,
column_options: &LedgerColumnOptions,
) -> ColumnFamilyDescriptor {
if *max_cf_size > FIFO_WRITE_BUFFER_SIZE {
ColumnFamilyDescriptor::new(C::NAME, get_cf_options_fifo::<C>(max_cf_size))
ColumnFamilyDescriptor::new(
C::NAME,
get_cf_options_fifo::<C>(max_cf_size, column_options),
)
} else {
panic!(
"{} cf_size must be greater than write buffer size {} when using ShredStorageType::RocksFifo.",
@@ -1495,7 +1545,10 @@ fn new_cf_descriptor_fifo<C: 'static + Column + ColumnName>(
/// rocksdb will start deleting the oldest SST file when the column family
/// size reaches `max_cf_size` - `FIFO_WRITE_BUFFER_SIZE` to strictly
/// maintain the size limit.
fn get_cf_options_fifo<C: 'static + Column + ColumnName>(max_cf_size: &u64) -> Options {
fn get_cf_options_fifo<C: 'static + Column + ColumnName>(
max_cf_size: &u64,
column_options: &LedgerColumnOptions,
) -> Options {
let mut options = Options::default();
options.set_max_write_buffer_number(8);
@@ -1520,6 +1573,8 @@ fn get_cf_options_fifo<C: 'static + Column + ColumnName>(max_cf_size: &u64) -> O
options.set_compaction_style(DBCompactionStyle::Fifo);
options.set_fifo_compaction_options(&fifo_compact_options);
process_cf_options_advanced::<C>(&mut options, column_options);
options
}
@@ -1576,6 +1631,11 @@ fn should_exclude_from_compaction(cf_name: &str) -> bool {
no_compaction_cfs.get(cf_name).is_some()
}
// Returns true if the column family enables compression.
fn should_enable_compression<C: 'static + Column + ColumnName>() -> bool {
C::NAME == columns::TransactionStatus::NAME
}
#[cfg(test)]
pub mod tests {
use {super::*, crate::blockstore_db::columns::ShredData};
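Since compression eligibility is keyed on the column family name, widening coverage would mean extending the new should_enable_compression predicate. A purely hypothetical variant, not part of this diff, that would also compress the AddressSignatures column family:

// Hypothetical extension for illustration only (would replace the module-private helper).
fn should_enable_compression<C: 'static + Column + ColumnName>() -> bool {
    C::NAME == columns::TransactionStatus::NAME
        || C::NAME == columns::AddressSignatures::NAME
}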


@@ -181,6 +181,7 @@ fn execute_batch(
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
timings,
);
@@ -3510,6 +3511,7 @@ pub mod tests {
false,
false,
false,
false,
&mut ExecuteTimings::default(),
);
let (err, signature) = get_first_error(&batch, fee_collection_results).unwrap();


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,25 +16,25 @@ itertools = "0.10.3"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-client = { path = "../client", version = "=1.10.3" }
solana-config-program = { path = "../programs/config", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-stake-program = { path = "../programs/stake", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-config-program = { path = "../programs/config", version = "=1.11.0" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
tempfile = "3.3.0"
[dev-dependencies]
assert_matches = "1.5.0"
gag = "1.0.0"
serial_test = "0.6.0"
solana-download-utils = { path = "../download-utils", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-download-utils = { path = "../download-utils", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -63,7 +63,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig {
accounts_shrink_ratio: config.accounts_shrink_ratio,
accounts_db_config: config.accounts_db_config.clone(),
wait_to_vote_slot: config.wait_to_vote_slot,
blockstore_advanced_options: config.blockstore_advanced_options.clone(),
ledger_column_options: config.ledger_column_options.clone(),
}
}


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2021"
name = "solana-log-analyzer"
description = "The solana cluster network analysis tool"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
[dependencies]
byte-unit = "4.0.14"
clap = "2.33.1"
clap = { version = "3.1.5", features = ["cargo"] }
serde = "1.0.136"
serde_json = "1.0.79"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[[bin]]
name = "solana-log-analyzer"


@@ -3,7 +3,7 @@ extern crate byte_unit;
use {
byte_unit::Byte,
clap::{crate_description, crate_name, value_t_or_exit, App, Arg, ArgMatches, SubCommand},
clap::{crate_description, crate_name, Arg, ArgMatches, Command},
serde::{Deserialize, Serialize},
std::{collections::HashMap, fs, ops::Sub, path::PathBuf},
};
@@ -97,7 +97,7 @@ fn map_ip_address(mappings: &[IpAddrMapping], target: String) -> String {
fn process_iftop_logs(matches: &ArgMatches) {
let mut map_list: Vec<IpAddrMapping> = vec![];
if let ("map-IP", Some(args_matches)) = matches.subcommand() {
if let Some(("map-IP", args_matches)) = matches.subcommand() {
let mut list = args_matches
.value_of("list")
.expect("Missing list of IP address mappings")
@@ -112,7 +112,7 @@ fn process_iftop_logs(matches: &ArgMatches) {
map_list = serde_json::from_str(&list).expect("Failed to parse IP address mapping list");
};
let log_path = PathBuf::from(value_t_or_exit!(matches, "file", String));
let log_path = PathBuf::from(matches.value_of_t_or_exit::<String>("file"));
let mut log = fs::read_to_string(&log_path).expect("Unable to read log file");
log.insert(0, '[');
let terminate_at = log.rfind('}').expect("Didn't find a terminating '}'") + 1;
@@ -148,7 +148,7 @@ fn process_iftop_logs(matches: &ArgMatches) {
}
fn analyze_logs(matches: &ArgMatches) {
let dir_path = PathBuf::from(value_t_or_exit!(matches, "folder", String));
let dir_path = PathBuf::from(matches.value_of_t_or_exit::<String>("folder"));
assert!(
dir_path.is_dir(),
"Need a folder that contains all log files"
@@ -196,26 +196,26 @@ fn analyze_logs(matches: &ArgMatches) {
fn main() {
solana_logger::setup();
let matches = App::new(crate_name!())
let matches = Command::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())
.subcommand(
SubCommand::with_name("iftop")
Command::new("iftop")
.about("Process iftop log file")
.arg(
Arg::with_name("file")
.short("f")
Arg::new("file")
.short('f')
.long("file")
.value_name("iftop log file")
.takes_value(true)
.help("Location of the log file generated by iftop"),
)
.subcommand(
SubCommand::with_name("map-IP")
Command::new("map-IP")
.about("Map private IP to public IP Address")
.arg(
Arg::with_name("list")
.short("l")
Arg::new("list")
.short('l')
.long("list")
.value_name("JSON string")
.takes_value(true)
@@ -225,19 +225,19 @@ fn main() {
),
)
.subcommand(
SubCommand::with_name("analyze")
Command::new("analyze")
.about("Compare processed network log files")
.arg(
Arg::with_name("folder")
.short("f")
Arg::new("folder")
.short('f')
.long("folder")
.value_name("DIR")
.takes_value(true)
.help("Location of processed log files"),
)
.arg(
Arg::with_name("all")
.short("a")
Arg::new("all")
.short('a')
.long("all")
.takes_value(false)
.help("List all differences"),
@@ -246,8 +246,8 @@ fn main() {
.get_matches();
match matches.subcommand() {
("iftop", Some(args_matches)) => process_iftop_logs(args_matches),
("analyze", Some(args_matches)) => analyze_logs(args_matches),
Some(("iftop", args_matches)) => process_iftop_logs(args_matches),
Some(("analyze", args_matches)) => analyze_logs(args_matches),
_ => {}
};
}
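The log-analyzer changes are a straight clap 2 to 3 port: App and SubCommand become Command, Arg::with_name becomes Arg::new, short takes a char instead of a &str, subcommand() returns an Option, and the value_t_or_exit! macro is replaced by ArgMatches::value_of_t_or_exit. A condensed sketch of the pattern; the binary and argument names are placeholders:

use clap::{Arg, ArgMatches, Command};

fn parse() -> ArgMatches {
    Command::new("example-tool") // was App::new(...) / SubCommand::with_name(...)
        .subcommand(
            Command::new("iftop").arg(
                Arg::new("file") // was Arg::with_name("file")
                    .short('f')  // char, not &str
                    .long("file")
                    .takes_value(true),
            ),
        )
        .get_matches()
}

fn run(matches: &ArgMatches) {
    // subcommand() now returns Option<(&str, &ArgMatches)>.
    if let Some(("iftop", args)) = matches.subcommand() {
        let file: String = args.value_of_t_or_exit("file"); // replaces value_t_or_exit!
        println!("processing {}", file);
    }
}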


@@ -1,6 +1,6 @@
[package]
name = "solana-logger"
version = "1.10.3"
version = "1.11.0"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"


@@ -1,7 +1,7 @@
[package]
name = "solana-measure"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-measure"
readme = "../README.md"
@@ -12,7 +12,7 @@ edition = "2021"
[dependencies]
log = "0.4.14"
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-merkle-root-bench"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
[dependencies]
clap = "2.33.1"
log = "0.4.14"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
[package]
name = "solana-merkle-tree"
version = "1.10.3"
version = "1.11.0"
description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,7 +11,7 @@ edition = "2021"
[dependencies]
fast-math = "0.1"
solana-program = { path = "../sdk/program", version = "=1.10.3" }
solana-program = { path = "../sdk/program", version = "=1.11.0" }
# This can go once the BPF toolchain target Rust 1.42.0+
[target.bpfel-unknown-unknown.dependencies]


@@ -1,6 +1,6 @@
[package]
name = "solana-metrics"
version = "1.10.3"
version = "1.11.0"
description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,7 +15,7 @@ gethostname = "0.2.3"
lazy_static = "1.4.0"
log = "0.4.14"
reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
[dev-dependencies]
env_logger = "0.9.0"

View File

@@ -55,6 +55,9 @@ while [[ -n $1 ]]; do
elif [[ $1 = --enable-cpi-and-log-storage ]]; then
args+=("$1")
shift
elif [[ $1 = --enable-extended-tx-metadata-storage ]]; then
args+=("$1")
shift
elif [[ $1 = --enable-rpc-bigtable-ledger-storage ]]; then
args+=("$1")
shift


@@ -141,6 +141,9 @@ while [[ -n $1 ]]; do
elif [[ $1 = --enable-cpi-and-log-storage ]]; then
args+=("$1")
shift
elif [[ $1 = --enable-extended-tx-metadata-storage ]]; then
args+=("$1")
shift
elif [[ $1 = --skip-poh-verify ]]; then
args+=("$1")
shift


@@ -3,18 +3,18 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-net-shaper"
description = "The solana cluster network shaping tool"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
clap = "2.33.1"
rand = "0.7.0"
serde = "1.0.136"
clap = { version = "3.1.5", features = ["cargo"] }
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.79"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[[bin]]
name = "solana-net-shaper"

Some files were not shown because too many files have changed in this diff.