Compare commits

...

172 Commits

Author SHA1 Message Date
dependabot[bot]
d1c06a25e7 chore: bump eslint-plugin-import from 2.25.3 to 2.25.4 in /web3.js
Bumps [eslint-plugin-import](https://github.com/import-js/eslint-plugin-import) from 2.25.3 to 2.25.4.
- [Release notes](https://github.com/import-js/eslint-plugin-import/releases)
- [Changelog](https://github.com/import-js/eslint-plugin-import/blob/main/CHANGELOG.md)
- [Commits](https://github.com/import-js/eslint-plugin-import/compare/v2.25.3...v2.25.4)

---
updated-dependencies:
- dependency-name: eslint-plugin-import
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-03-26 21:35:41 -07:00
stellaw1
c08cfafd6c feat: adds getBlockProduction RPC call 2022-03-26 18:31:40 -07:00
Brooks Prumo
31b707b625 Specify if archive size datapoint is for full or incremental snapshots (#23941) 2022-03-26 12:29:13 -05:00
steveluscher
5e08701189 feat: the search bar now auto-focuses when you first visit the site 2022-03-26 00:05:15 -07:00
Michael Vines
87e0aa1b74 improve arg documentation 2022-03-25 21:37:10 -07:00
Trent Nelson
bd27eedd15 cli: allow skipping fee-checks when writing program buffers (hidden) 2022-03-25 18:19:03 -06:00
Jeff Washington (jwash)
c24de17278 remove index hash calculation as an option (#23928) 2022-03-25 15:32:53 -05:00
Jeff Washington (jwash)
ec78702bc8 RollingBitField::get_all_less_than (#23919) 2022-03-25 15:20:22 -05:00
HaoranYi
01af40d6b6 Fix intermittent validator_exit test failure (#23594)
* run validator_exit_test sequentially

* limit validator exit run to its own serial run subset
add 10ms delay in the validator exit tests

* fix intermittent validator exit failure

* no sleep

* undo the code move
2022-03-25 14:38:19 -05:00
behzad nouri
1f9c89c1e8 expands lifetime of SlotStats (#23872)
Current slot stats are removed when the slot is full or every 30 seconds
if the slot is before root:
https://github.com/solana-labs/solana/blob/493a8e234/ledger/src/blockstore.rs#L2017-L2027

In order to track if the slot is ultimately marked as dead or rooted and
emit more metrics, this commit expands lifetime of SlotStats while
bounding total size of cache using an LRU eviction policy.
2022-03-25 19:32:22 +00:00
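A minimal sketch of the bounding technique this commit describes, assuming hypothetical types (the real cache lives in ledger/src/blockstore.rs): a map whose total size is capped by evicting the least recently used slot.

```rust
use std::collections::{HashMap, VecDeque};

type Slot = u64;

#[derive(Default)]
struct SlotStats {
    num_repaired: usize, // placeholder metric
}

/// Hypothetical LRU-bounded cache: once `capacity` slots are tracked,
/// inserting a new slot evicts the least recently used one.
struct SlotStatsCache {
    capacity: usize,
    order: VecDeque<Slot>, // least recently used at the front
    stats: HashMap<Slot, SlotStats>,
}

impl SlotStatsCache {
    fn new(capacity: usize) -> Self {
        Self { capacity, order: VecDeque::new(), stats: HashMap::new() }
    }

    fn get_or_insert(&mut self, slot: Slot) -> &mut SlotStats {
        if self.stats.contains_key(&slot) {
            // Refresh recency: move the slot to the back of the queue.
            self.order.retain(|s| *s != slot);
        } else if self.stats.len() == self.capacity {
            if let Some(evicted) = self.order.pop_front() {
                // Metrics for dead/rooted slots would be emitted here.
                self.stats.remove(&evicted);
            }
        }
        self.order.push_back(slot);
        self.stats.entry(slot).or_default()
    }
}
```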
Will Hickey
c6dda3b324 Add solana-faucet to the list of dependencies referenced by downstream projects (#23935) 2022-03-25 13:27:31 -05:00
Trent Nelson
e34c52934c ci: don't allow mergify to add automerge label to merged PRs 2022-03-25 16:19:11 +00:00
Jeff Washington (jwash)
acfd22712b RollingBitField to its own file (#23917) 2022-03-25 10:37:00 -05:00
ryleung-solana
6b85c2104c Implement forwarding via TpuConnection (#23817) 2022-03-25 11:31:40 -04:00
Steven Luscher
f44c8f296f fix: thread enforce_ulimit_nofile config down when opening blockstore (#23925) 2022-03-25 03:13:33 -05:00
steveluscher
9cf7720922 fix: when there is no instruction index, default to the current instruction by supplying u16::MAX 2022-03-24 22:55:52 -07:00
steveluscher
c73cdfd6ce fix: add TypeScript buffer type to nonce-account.ts 2022-03-24 22:55:52 -07:00
steveluscher
477355df3b fix: add TypeScript buffer type to stake-program.ts 2022-03-24 22:55:52 -07:00
steveluscher
6686b7c534 fix: add TypeScript buffer type to message.ts 2022-03-24 22:55:52 -07:00
steveluscher
741c85ca7c fix: add TypeScript buffer type to loader.ts 2022-03-24 22:55:52 -07:00
steveluscher
6bb02cdcc1 fix: add TypeScript buffer type to secp256k1-program.ts 2022-03-24 22:55:52 -07:00
steveluscher
96361295aa fix: add TypeScript buffer type to ed25519-program.ts 2022-03-24 22:55:52 -07:00
steveluscher
3333f37e88 fix: add TypeScript buffer type to vote-account.ts 2022-03-24 22:55:52 -07:00
steveluscher
b2f2a68b86 fix: fix spelling of timestamp in BlockTimestamp type 2022-03-24 22:55:52 -07:00
steveluscher
c227b8ca4d fix: add TypeScript buffer type to vote-program.ts 2022-03-24 22:55:52 -07:00
steveluscher
607a5c05de fix: add TypeScript buffer type to system-program.ts 2022-03-24 22:55:52 -07:00
steveluscher
807f88e547 fix: add TypeScript types to the rustString buffer layout helper 2022-03-24 22:55:52 -07:00
steveluscher
d34fe3dba3 fix: add TypeScript buffer type to layout.ts 2022-03-24 22:55:52 -07:00
steveluscher
b516a25132 fix: add TypeScript buffer type to instruction.ts 2022-03-24 22:55:52 -07:00
steveluscher
023fc028bc chore: Upgrade buffer-layout to v4.0.0 2022-03-24 22:55:52 -07:00
Steven Luscher
412d9be445 fix: repair web3 connection tests by making fewer assumptions about the existence of particular blocks (#23921)
* fix: repair 'get confirmed signatures for address' test in web3.js

* fix: repair 'get signatures for address' test in web3.js

* fix: repair 'get parsed confirmed transactions' test in web3.js

* fix: repair 'get transaction' test in web3.js

* fix: repair 'get confirmed transaction' test in web3.js

* fix: repair 'get block' test in web3.js

* fix: repair 'get confirmed block' test in web3.js

* fix: repair 'get block signatures' test in web3.js

* fix: repair 'get block time' test in web3.js

Co-authored-by: steveluscher <github@steveluscher.com>
2022-03-24 22:21:14 -07:00
Michael Vines
c8c3c4359f vote-authorize-voter now accepts either the vote or withdraw authority 2022-03-24 16:46:41 -07:00
Jeff Washington (jwash)
51f5524e2f make verify_accounts_package_hash like other hash calc (#23906) 2022-03-24 17:49:48 -05:00
Brian Anderson
492c54a28f Fix example mock Signer API in solana-program (#23911) 2022-03-24 17:58:51 -04:00
Jeff Washington (jwash)
55d61023f7 document 'accounts' hash (#23907) 2022-03-24 15:58:52 -05:00
HaoranYi
fedf4e984f typo (#23910) 2022-03-24 15:21:59 -05:00
Josh
9dbb950a25 feat(explorer): show ping server metrics unavailable (#23914)
* feat: show ping server metrics unavailable

* fix: formatting
2022-03-24 13:54:51 -06:00
steviez
b61c0a4a21 Add accounts arg to genesis command to dump genesis account info (#23879) 2022-03-24 14:26:08 -05:00
Alexander Meißner
140c8dd01f Refactor: Replaces KeyedAccount in get_sysvar_with_account_check (#23905)
* Replaces all use sites of get_sysvar_with_account_check by get_sysvar_with_account_check2.

* Removes get_sysvar_with_account_check.

* Renames get_sysvar_with_account_check2 to get_sysvar_with_account_check.
2022-03-24 19:30:42 +01:00
Jeff Washington (jwash)
37c36ce3fa pass stats separately from CalcAccountsHashConfig (#23892) 2022-03-24 12:48:47 -05:00
Jeff Washington (jwash)
82328fd9d8 move max_clean_root deeper in flush cache (#23869) 2022-03-24 12:45:49 -05:00
steviez
c31db81ac4 Use VoteAccountsHashMap type alias in all applicable spots (#23904) 2022-03-24 12:09:48 -05:00
Jeff Washington (jwash)
a22a2384bf fix ci test error (#23908) 2022-03-24 11:30:20 -05:00
ryleung-solana
82945ba973 Optimize TpuConnection and its implementations and refactor connection-cache to not use dyn in order to enable those changes (#23877) 2022-03-24 11:40:26 -04:00
Jeff Washington (jwash)
5b916961b5 HashCalc uses self.accounts_cache (#23890) 2022-03-24 10:34:28 -05:00
Jeff Washington (jwash)
f2aea3b7c7 flush_slot_cache takes [Slot] (#23865) 2022-03-24 10:24:36 -05:00
Jeff Washington (jwash)
9d3b17c635 HashCalc uses self.accounts_index (#23888) 2022-03-24 10:06:32 -05:00
Jeff Washington (jwash)
396b49a7c1 Start saving/loading prior_roots(_with_hash) to snapshot (#23844)
* Start saving/loading prior_roots(_with_hash) to snapshot

* Update runtime/src/accounts_index.rs

Co-authored-by: Michael Vines <mvines@gmail.com>

* Update runtime/src/accounts_index.rs

Co-authored-by: Michael Vines <mvines@gmail.com>

* update comment

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-03-24 10:06:24 -05:00
Jeff Washington (jwash)
b22165ad69 hash calc uses self.filler_account_suffix (#23887) 2022-03-24 09:58:06 -05:00
Jeff Washington (jwash)
9022931689 calc hash uses self.num_hash_scan_passes (#23883) 2022-03-24 09:44:42 -05:00
Jeff Washington (jwash)
e3eb002f66 Log storage size stats at hash calc (#23843) 2022-03-24 09:40:35 -05:00
Jeff Washington (jwash)
f1a411c897 add epoch_schedule and rent_collector to hash calc (#23857) 2022-03-24 09:39:22 -05:00
Jeff Washington (jwash)
db5d68f01f HashCalc uses self.accounts_hash_cache_path (#23882) 2022-03-24 09:31:55 -05:00
HaoranYi
90009f330b small refactor to shorten the lock on slot_under_contention hashset (#23891)
* small refactor to shorten the lock on slot_under_contention hashset

* adding comments

* comments
2022-03-24 08:20:56 -05:00
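The pattern being applied, in a generic sketch (names are placeholders, not the actual accounts-db code): hold the lock only long enough to reserve the slot, drop it before the expensive work, then reacquire it briefly to release the reservation.

```rust
use std::{collections::HashSet, sync::Mutex};

fn flush_slot(slots_under_contention: &Mutex<HashSet<u64>>, slot: u64) {
    // Short critical section: the guard is a temporary, dropped at the
    // end of this statement.
    let reserved = slots_under_contention.lock().unwrap().insert(slot);
    if !reserved {
        return; // another thread is already flushing this slot
    }

    do_expensive_flush(slot); // runs without holding the lock

    // Second short critical section: release the reservation.
    slots_under_contention.lock().unwrap().remove(&slot);
}

fn do_expensive_flush(_slot: u64) { /* ... */ }
```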
Alexander Meißner
91c2729856 Replaces keyed_account get_signers() by InstructionContext::get_signers(). (#23863) 2022-03-24 12:57:51 +01:00
Yueh-Hsuan Chiang
c83c95b56b (LedgerStore) Create ColumnMetrics trait for CF metric reporting (#23763)
This PR refactors column-family-related metrics reporting. As metric
reporting is done on a per-column-family basis, the PR creates a
ColumnMetrics trait and moves the metric reporting logic into it.

This refactoring will make future column metric reporting (such as
read PerfContext) much cleaner.
2022-03-23 20:51:49 -07:00
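A minimal sketch of that shape (all names hypothetical, not the blockstore's actual API): each column family implements the trait once, and the shared reporting logic tags datapoints with the implementor's name.

```rust
/// Hypothetical per-column-family metrics trait.
trait ColumnMetrics {
    /// Name used to tag datapoints for this column family.
    const NAME: &'static str;

    fn report_read_bytes(bytes: u64) {
        // A real implementation would submit a datapoint; printing
        // stands in for that here.
        println!("cf={} read_bytes={}", Self::NAME, bytes);
    }
}

struct TransactionStatus;
impl ColumnMetrics for TransactionStatus {
    const NAME: &'static str = "transaction_status";
}

fn main() {
    TransactionStatus::report_read_bytes(4096);
}
```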
Jeff Washington (jwash)
5a892af2fe disable 'check_hash' on accounts hash calc (#23873) 2022-03-23 21:03:31 -05:00
Jeff Washington (jwash)
3e22d4b286 calc hash uses self.thread_pool_clean (#23881) 2022-03-23 20:52:38 -05:00
Brian Anderson
6428602cd9 Make find_program_address client example runnable (#23492) 2022-03-23 19:37:12 -06:00
steveluscher
260fdf7ba3 Revert "chore: Upgrade buffer-layout package in web3.js (#23897)"
Fixing up the types is going to take me a bit longer than I anticipated, so I'll back this out for now.
2022-03-23 18:34:01 -07:00
Jack May
486f7b7673 use array access function (#23895) 2022-03-23 17:03:01 -07:00
Steven Luscher
0c0db9308b chore: Upgrade buffer-layout package in web3.js (#23897) 2022-03-23 14:56:13 -07:00
Trent Nelson
9dae5551a1 Revert transient dependency bumps from c4ecfa5 2022-03-23 21:08:26 +00:00
Josh
100fd03f3e feat(explorer): solana ping set minBarHeight (#23894) 2022-03-23 20:35:59 +00:00
dependabot[bot]
7af7c15802 chore:(deps): bump minimist from 1.2.5 to 1.2.6 in /explorer (#23886)
Bumps [minimist](https://github.com/substack/minimist) from 1.2.5 to 1.2.6.
- [Release notes](https://github.com/substack/minimist/releases)
- [Commits](https://github.com/substack/minimist/compare/1.2.5...1.2.6)

---
updated-dependencies:
- dependency-name: minimist
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-23 20:00:24 +00:00
dependabot[bot]
154b828287 chore:(deps): bump nanoid from 3.1.23 to 3.3.1 in /explorer (#23884)
Bumps [nanoid](https://github.com/ai/nanoid) from 3.1.23 to 3.3.1.
- [Release notes](https://github.com/ai/nanoid/releases)
- [Changelog](https://github.com/ai/nanoid/blob/main/CHANGELOG.md)
- [Commits](https://github.com/ai/nanoid/compare/3.1.23...3.3.1)

---
updated-dependencies:
- dependency-name: nanoid
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-23 19:59:16 +00:00
Andrey Frolov
59290c08aa fix: add type-check script to web3.js package (#23109) 2022-03-23 12:58:42 -07:00
microwavedcola1
1b7b261460 feat(explorer): render program name, ix name, and account names from on chain idl for specific anchor programs (#23499)
* show titles of ix, from idl

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* remove unused

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* remaining accounts

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* fallback

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* fix from code review: remove default for the non fallback case

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* keep camelcase

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>

* formatting

Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>
2022-03-23 12:14:26 -07:00
Jeff Washington (jwash)
dc3863ef14 flush_slot_cache_with_clean (#23868) 2022-03-23 14:09:56 -05:00
Jeff Washington (jwash)
260f899eda write cache: hashmap to set (#23866) 2022-03-23 14:05:45 -05:00
Jeff Washington (jwash)
9e61fe7583 add AccountsHashConfig to manage parameters (#23850) 2022-03-23 13:44:23 -05:00
HaoranYi
db49b826f0 separate blockstore metrics from window service metrics (#23871) 2022-03-23 13:38:17 -05:00
HaoranYi
7ff8ed869c typos (#23870) 2022-03-23 13:36:55 -05:00
Sammy
26da64184a feat(web3.js): expose rpcEndpoint in client for web3.js (#23719)
Adds a getter to the commitment class to expose the rpcEndpoint property.
2022-03-23 11:05:37 -07:00
Will Hickey
a573cfa39d Revert "Remove unneeded unit expression"
This reverts commit e8e0097046.
2022-03-23 10:22:18 -07:00
Jeff Washington (jwash)
b1280b670a calculate_accounts_hash_without_index takes &self (#23846)
* calculate_accounts_hash_without_index takes &self

* Update runtime/src/snapshot_package.rs

Co-authored-by: Brooks Prumo <brooks@prumo.org>

Co-authored-by: Brooks Prumo <brooks@prumo.org>
2022-03-23 11:57:32 -05:00
Jeff Washington (jwash)
7b89222fde don't start extra threads for shrink/clean/hash (#23858) 2022-03-23 11:53:37 -05:00
Josh
911aa5bad3 fix(explorer): can't convert too large of stake to number (#23876) 2022-03-23 09:34:43 -07:00
Josh
5541a5873b fix(explorer): serum init open orders has optional openOrdersMarketAuthority (#23875) 2022-03-23 09:32:24 -07:00
Josh
6b76391ed2 fix(explorer): add sync native to token program decode (#23874) 2022-03-23 09:31:58 -07:00
Jack May
6962a667e5 add-u8-align-check (#23860) 2022-03-23 09:16:29 -07:00
Jack May
27b66db88d Use sat math for ptr calcs (#23861) 2022-03-23 09:16:03 -07:00
Jeff Washington (jwash)
493a8e2348 remove random flushing of write cache (#23845) 2022-03-23 08:45:44 -05:00
klykov
9859eb83b5 upd Cargo.lock for bpf 2022-03-23 09:25:36 +01:00
klykov
36807d5fa3 update clap to v3: poh-bench 2022-03-23 09:25:36 +01:00
klykov
22404ca1fc update clap to v3: bench-streamer 2022-03-23 09:25:36 +01:00
klykov
01317395e9 update Cargo.lock 2022-03-23 09:25:36 +01:00
klykov
3f2971692d update clap to v3: net-utils 2022-03-23 09:25:36 +01:00
klykov
300c50798f update clap to v3: log-analyzer 2022-03-23 09:25:36 +01:00
klykov
12e24a90a0 update clap to v3: net-shaper 2022-03-23 09:25:36 +01:00
Edgar Xi
d8be0d9430 make get_protobuf_or_bincode_cells accept IntoIter on row_keys, make get_confirmed_blocks_with_data return an Iterator 2022-03-22 22:47:25 -06:00
Edgar Xi
f717fda9a3 modify get_protobuf_or_bincode_cells to accept and return an iterator 2022-03-22 22:47:25 -06:00
Edgar Xi
fbcf6a0802 use &[T] instead of Vec<T> where appropriate
clippy
2022-03-22 22:47:25 -06:00
Edgar Xi
5533e9393c appease clippy 2022-03-22 22:47:25 -06:00
Edgar Xi
f3219fb695 add get_confirmed_blocks_with_data and get_protobuf_or_bincode_cells 2022-03-22 22:47:25 -06:00
Jeff Washington (jwash)
bc35e1c5f5 snapshot code needs all storages for hash calc (#23840) 2022-03-22 21:27:54 -05:00
Justin Starry
92462ae031 Manually serialize and use send_wire_transaction for votes (#23826)
* Revert "core: partial versioned transaction support for voting service"

This reverts commit eb3df4c20e.

* Manually serialize vote tx before sending to TPU
2022-03-23 09:47:55 +08:00
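The gist of the change, sketched under assumptions (the `WireSender` trait below is a stand-in for the TPU connection interface named in this log, not the real one): serialize the vote transaction once with bincode, then hand raw bytes to the connection instead of a typed transaction.

```rust
use solana_sdk::transaction::Transaction;

// Stand-in for the connection-cache interface; not the real trait.
trait WireSender {
    fn send_wire_transaction(&self, wire: &[u8]);
}

fn send_vote(conn: &impl WireSender, vote_tx: &Transaction) {
    // Serialize up front; the send path then only moves bytes.
    let wire: Vec<u8> = bincode::serialize(vote_tx).expect("serialize vote");
    conn.send_wire_transaction(&wire);
}
```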
Alexander Meißner
9f0ca6d88a Refactor: Remove trait from nonce keyed account (#23811)
* Removes the trait `NonceKeyedAccount`.
2022-03-23 02:09:30 +01:00
Jack May
3d7c8442c7 add size check for from_raw_parts (#23781) 2022-03-22 15:20:39 -07:00
Jon Cinque
7af48465fa transaction-status: Add return data to meta (#23688)
* transaction-status: Add return data to meta

* Add return data to simulation results

* Use pretty-hex for printing return data

* Update arg name, make TransactionRecord struct

* Rename TransactionRecord -> ExecutionRecord
2022-03-22 23:17:05 +01:00
Kirill Lykov
359e2de090 ignore heavy tests in dos 2022-03-22 20:19:28 +01:00
Jeff Washington (jwash)
1089a38aaf AcctIdx: rework scan and write to disk (#23794) 2022-03-22 11:54:12 -05:00
Jeff Washington (jwash)
89ba3ff139 log fail to evict (#23815) 2022-03-22 09:19:38 -05:00
axleiro
16b73a998b Increasing timeout in local-cluster-slow by 10 min 2022-03-22 17:52:06 +05:30
axleiro
9347d57973 increasing timeout of local-cluster-slow test by 10 min 2022-03-22 17:51:13 +05:30
Yueh-Hsuan Chiang
ae75b1a25f (LedgerStore) Add compression type (#23578)
This PR adds `--rocksdb-ledger-compression` as a hidden argument to the validator
for specifying the compression algorithm for TransactionStatus.  Available compression
algorithms include `lz4`, `snappy`, `zlib`. The default value is `none`.

Experimental results show that lz4 compression achieves a ~37% size reduction
on the TransactionStatus column family, or ~8% of the overall ledger store size.
2022-03-22 02:27:09 -07:00
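A sketch of how such a flag could be parsed (the enum is hypothetical and local to this example; the validator maps the choice onto RocksDB column-family options):

```rust
/// Hypothetical mirror of the --rocksdb-ledger-compression choices.
#[derive(Debug, PartialEq)]
enum LedgerCompression {
    None, // default
    Lz4,
    Snappy,
    Zlib,
}

fn parse_compression(arg: &str) -> Result<LedgerCompression, String> {
    match arg {
        "none" => Ok(LedgerCompression::None),
        "lz4" => Ok(LedgerCompression::Lz4),
        "snappy" => Ok(LedgerCompression::Snappy),
        "zlib" => Ok(LedgerCompression::Zlib),
        other => Err(format!("unsupported compression: {}", other)),
    }
}
```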
Lijun Wang
49228573f4 Use connection cache in send transaction (#23712) 2022-03-21 23:24:21 -07:00
Trent Nelson
eb3df4c20e core: partial versioned transaction support for voting service 2022-03-21 22:59:05 -06:00
Justin Starry
016d3c450a Update TpuConnection interface to be compatible with versioned txs (#23760)
* Update TpuConnection interface to be compatible with versioned txs

* Add convenience method for sending txs

* use parallel iterator to serialize transactions
2022-03-22 09:45:22 +08:00
HaoranYi
45a7c6edfb Fix typos and a small refactor (#23805)
* fix typo

* remove packet_has_more_unprocessed_transactions function
2022-03-21 18:35:31 -05:00
Will Hickey
c4ecfa5716 Bump version to v1.11 (#23807)
* Revert crossbeam_epoch to stable. 0.9.8 only works with nightly
* Remove unneeded unit expression
2022-03-21 17:40:50 -05:00
Jeff Washington (jwash)
24f6855f86 AcctIdx: only remove a fixed number of items per write lock (#23795) 2022-03-21 16:55:04 -05:00
samkim-crypto
10eeafd3d6 zk-token-sdk: handle edge cases for transfer with fee (#23804)
* zk-token-sdk: handle edge cases for transfer with fee

* zk-token-sdk: clippy

* zk-token-sdk: clippy

* zk-token-sdk: cargo fmt
2022-03-21 16:10:33 -04:00
Brooks Prumo
cb06126388 Set accounts_data_len on feature activation (#23730) 2022-03-21 12:28:26 -05:00
Tyera Eulberg
9c60991cd3 Add ability to query bigtable via solana-test-validator, with hidden params 2022-03-21 11:26:49 -06:00
Trent Nelson
9b32b72990 bigtable: allow custom instance names 2022-03-21 11:26:49 -06:00
Trent Nelson
f513195468 bigtable: add a config ctor for LedgerStorage 2022-03-21 11:26:49 -06:00
Tyera Eulberg
63ee00e647 Refactor validator bigtable config 2022-03-21 11:26:49 -06:00
Michael Vines
99f1a43262 Add v1.10 backport label, remove v1.8 backport label 2022-03-21 09:50:55 -07:00
DimAn
739e43ba58 Add ability to get the latest incremental snapshot via RPC (#23788) 2022-03-21 11:48:49 -05:00
Lijun Wang
ae76fe2bd7 Made connection cache configurable. (#23783)
Added the command-line argument tpu-use-quic.
Changed connection cache to return different connections based on the config.
2022-03-21 09:31:37 -07:00
Pankaj Garg
5d03b188c8 Use QUIC client in voting service (#23713)
* Use QUIC client in voting service

* guard quic-client usage with a flag

* add measure to time the quic client

* move time measure outside if block

* remove quic vs UDP flag from voting service
2022-03-21 09:10:16 -07:00
Jeff Washington (jwash)
965ab9186d AcctIdx: fix infinite loop (#23806) 2022-03-21 10:58:36 -05:00
Justin Starry
15357480ec Refactor instruction compilation and update message account key ordering (#23729)
* Refactor: Make instruction compilation usable for other message versions

* apply trents feedback

* Fix tests

* Fix bpf compatibility
2022-03-21 20:53:32 +08:00
axleiro
a1a29b0b86 Increased timeout limit of coverage and stable-perf by 10 mins each (#23797)
* Increased timeout limit of coverage and stable-perf by 10 mins each

* Increasing timeout for in disk CI by 10 min
2022-03-21 15:08:23 +05:30
Jeff Washington (jwash)
258db77100 AcctIdx: factor 'scan' out of flush_internal (#23777) 2022-03-20 22:00:38 -05:00
carllin
f34434f96b Drop lock (#23765) 2022-03-20 21:27:24 -04:00
Jeff Washington (jwash)
dd69f3baf5 throttle index adding to allow disk flushing to keep up and reduce startup ram usage (#23773) 2022-03-20 19:56:20 -05:00
Brooks Prumo
335c4b668b Fix bug in bank/sysvar_cache tests (#23780) 2022-03-19 21:38:18 -05:00
Ikko Ashimine
848093b9fd Fix typo in processor.rs (#23786)
relavant -> relevant
2022-03-19 15:24:40 -05:00
Jeff Washington (jwash)
df29276eb0 AcctIdx: remove -> evict (#23775) 2022-03-18 17:13:21 -05:00
Tao Zhu
71ea05c176 replace nested for_each with flat_map 2022-03-18 16:37:41 -05:00
Tao Zhu
1c369fb55f Scan entire UnprocessedPacketBatches buffer to produce stake and locator of each packet 2022-03-18 16:37:41 -05:00
Jack May
1f052c6234 disable deprecated BPF loader deploys (#23757) 2022-03-18 14:29:49 -07:00
Jack May
7e358c654f add test to assert type assumption (#23769) 2022-03-18 14:15:59 -07:00
g1stavo
c556811c0f docs: fix stake state typo (#23776) 2022-03-18 13:45:07 -06:00
Jeff Washington (jwash)
a419374fa4 factor out function (#23742) 2022-03-18 14:10:52 -05:00
Jack May
0e64fb1fab don't rely on align_offset to check alignment (#23770) 2022-03-18 11:30:52 -07:00
Brian Anderson
fcea92ec6c Improve correctness of Rust-side type definitions for C invoke syscall (#23624)
* Make Rust definitions of C types repr(C)

* Make SolInstruction field types agree with C definitions

* Use correct SolSignerSeedsC type in SyscallInvokeSignedC

* rustfmt

* Change asserts to debug asserts in syscall.rs
2022-03-18 11:30:30 -07:00
Yueh-Hsuan Chiang
f999eef452 (LedgerStore) Rename BlockstoreAdvancedOptions to LedgerColumnOptions (#23764)
This PR renames BlockstoreAdvancedOptions to LedgerColumnOptions, as we will
pass this struct down to LedgerColumn to allow it to perform metric reporting.
2022-03-18 11:13:35 -07:00
Tao Zhu
56428be629 Do not expose the inner cost_table; encapsulating implementation details
makes future changes easier.
2022-03-18 12:58:43 -05:00
dependabot[bot]
00ddf6576c chore: bump crossbeam-channel from 0.5.2 to 0.5.3 (#23698)
* chore: bump crossbeam-channel from 0.5.2 to 0.5.3

Bumps [crossbeam-channel](https://github.com/crossbeam-rs/crossbeam) from 0.5.2 to 0.5.3.
- [Release notes](https://github.com/crossbeam-rs/crossbeam/releases)
- [Changelog](https://github.com/crossbeam-rs/crossbeam/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crossbeam-rs/crossbeam/compare/crossbeam-channel-0.5.2...crossbeam-channel-0.5.3)

---
updated-dependencies:
- dependency-name: crossbeam-channel
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <you@example.com>
2022-03-18 11:44:33 -06:00
Jeff Washington (jwash)
998e7d18f9 AcctIdx: never retry a bucket flush (#23732) 2022-03-18 12:20:42 -05:00
Brian Anderson
c9b8977226 Add crate docs for solana-program (#23363)
* Add crate docs for solana-program

* Rework solana-program docs for pr feedback

* Clarify log module docs

* Remove address lookup table program from solana-program docs
2022-03-18 08:27:51 -07:00
HaoranYi
f54e746fc5 Support u8 slice digester in frozen abi struct. (#23726)
* support u8 slice in frozen abi digester

* use slice in account struct

* add bpf cargo lock file

* no need to pass account.data to serializer

* fix comments
2022-03-18 09:31:07 -05:00
Kirill Lykov
c694703e14 address PR comments 2022-03-18 14:55:33 +01:00
Kirill Lykov
2da896fa40 add documentation 2022-03-18 14:55:33 +01:00
Kirill Lykov
7074ebf45a address PR comments 2022-03-18 14:55:33 +01:00
klykov
957bc0db6b add tests to dos tool 2022-03-18 14:55:33 +01:00
klykov
d9dbfc83d5 upd Cargo.lock 2022-03-18 14:55:33 +01:00
klykov
f5339882cb refactor cmdline interface 2022-03-18 14:55:33 +01:00
klykov
a63dee87ec add transaction parameters dump 2022-03-18 14:55:33 +01:00
klykov
1b0c9ad4c0 add option payer to dos tool 2022-03-18 14:55:33 +01:00
klykov
cf73f6dc74 fix typo in dos 2022-03-18 14:55:33 +01:00
klykov
dce5d1c1fa avoid signatures if unnecessary in dos 2022-03-18 14:55:33 +01:00
klykov
1641d1d329 fix: cache blockhash in dos tool 2022-03-18 14:55:33 +01:00
klykov
cb537e80d7 add transaction options to dos 2022-03-18 14:55:33 +01:00
klykov
d4d95f1811 add valid blockhash option to dos 2022-03-18 14:55:33 +01:00
klykov
797c3324f0 add number of signatures to dos 2022-03-18 14:55:33 +01:00
Tao Zhu
0ed23899e7 directly use compute_budget MAX_UNITS and DEFAULT_UNITS 2022-03-18 08:53:11 -05:00
Tao Zhu
a4cacf3389 add deterministic default cost 2022-03-18 08:53:11 -05:00
Trent Nelson
ce2e82cfb6 validator: --only-known-rpc requires a --known-validator ... 2022-03-18 07:02:16 +00:00
Jeff Washington (jwash)
857576d76f AcctIdx: move write to disk outside in mem write lock (#23731) 2022-03-17 23:09:41 -05:00
Brooks Prumo
7ff8c80e25 Add accounts_data_len to bank snapshot (#23714) 2022-03-17 20:14:54 -05:00
Tao Zhu
c478fe2047 add timing metrics, some renaming 2022-03-17 19:31:28 -05:00
Tao Zhu
fd515097d8 leader qos part 2: add stage to find sender stake, set to packet meta 2022-03-17 19:31:28 -05:00
Stephen Akridge
976b138e76 Add tx weighting stage 2022-03-17 19:31:28 -05:00
Jeff Washington (jwash)
664deb2157 AcctIdx: get rid of unused is_dirty (#23733) 2022-03-17 16:29:36 -05:00
Lijun Wang
8b230b86cc Use borrow instead of move in interfaces defined by TpuConnection (#23734)
* Use borrow instead of move in interfaces defined by TpuConnection to avoid data copy

* Removed a few more instances of unnecessary whole-array slicing.
2022-03-17 13:31:11 -07:00
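The before/after of that interface change, as a generic sketch (trait names are placeholders): borrowing `&[u8]` lets callers keep ownership, so nothing is cloned just to cross the trait boundary.

```rust
// Before (hypothetical): taking ownership forces callers that still
// need the buffer to clone it for every send.
trait TpuConnectionOwned {
    fn send_wire_transaction(&self, wire_transaction: Vec<u8>);
}

// After (hypothetical): borrow the bytes; no allocation or copy is
// needed at the call site.
trait TpuConnectionBorrowed {
    fn send_wire_transaction(&self, wire_transaction: &[u8]);
}
```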
behzad nouri
6b0d34d70d removes redundant Arcs from Blockstore (#23735) 2022-03-17 19:43:57 +00:00
Jeff Washington (jwash)
342f1ab1cb clean up/add comments (#23727) 2022-03-17 14:23:08 -05:00
Will Hickey
2f58c9e501 Bump version to 1.10.4 (#23743) 2022-03-17 14:02:13 -05:00
312 changed files with 25398 additions and 14361 deletions

View File

@@ -93,18 +93,11 @@ pull_request_rules:
- author=mergify[bot]
- head~=^mergify/bp/
- "#status-failure=0"
- "-merged"
actions:
label:
add:
- automerge
- name: v1.8 backport
conditions:
- label=v1.8
actions:
backport:
ignore_conflicts: true
branches:
- v1.8
- name: v1.9 backport
conditions:
- label=v1.9
@@ -113,6 +106,14 @@ pull_request_rules:
ignore_conflicts: true
branches:
- v1.9
- name: v1.10 backport
conditions:
- label=v1.10
actions:
backport:
ignore_conflicts: true
branches:
- v1.10
commands_restrictions:
# The author of copied PRs is the Mergify user.

Cargo.lock (generated): 378 lines changed

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.10.3"
version = "1.11.0"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,9 @@ lazy_static = "1.4.0"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_json = "1.0.79"
solana-config-program = { path = "../programs/config", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-config-program = { path = "../programs/config", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.11.1"

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-bench"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,11 +12,11 @@ publish = false
clap = "2.33.1"
log = "0.4.14"
rayon = "1.5.1"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -125,6 +125,7 @@ fn main() {
None,
false,
None,
None,
false,
);
time_store.stop();

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-cluster-bench"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,25 +13,25 @@ clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-faucet = { path = "../faucet", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-faucet = { path = "../faucet", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
[dev-dependencies]
solana-core = { path = "../core", version = "=1.10.3" }
solana-local-cluster = { path = "../local-cluster", version = "=1.10.3" }
solana-test-validator = { path = "../test-validator", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.11.0" }
solana-test-validator = { path = "../test-validator", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-banking-bench"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-core = { path = "../core", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-poh = { path = "../poh", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-poh = { path = "../poh", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.10.3"
version = "1.11.0"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,17 +12,17 @@ edition = "2021"
[dependencies]
borsh = "0.9.3"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.10.3" }
solana-program = { path = "../sdk/program", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-banks-interface = { path = "../banks-interface", version = "=1.11.0" }
solana-program = { path = "../sdk/program", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
tarpc = { version = "0.27.2", features = ["full"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
[dev-dependencies]
solana-banks-server = { path = "../banks-server", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-banks-server = { path = "../banks-server", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
[lib]
crate-type = ["lib"]

View File

@@ -1,5 +1,8 @@
use {
solana_sdk::{transaction::TransactionError, transport::TransportError},
solana_sdk::{
transaction::TransactionError, transaction_context::TransactionReturnData,
transport::TransportError,
},
std::io,
tarpc::client::RpcError,
thiserror::Error,
@@ -25,6 +28,7 @@ pub enum BanksClientError {
err: TransactionError,
logs: Vec<String>,
units_consumed: u64,
return_data: Option<TransactionReturnData>,
},
}

View File

@@ -247,6 +247,7 @@ impl BanksClient {
err,
logs: simulation_details.logs,
units_consumed: simulation_details.units_consumed,
return_data: simulation_details.return_data,
}),
BanksTransactionResultWithSimulation {
result: Some(result),

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.10.3"
version = "1.11.0"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,7 +11,7 @@ edition = "2021"
[dependencies]
serde = { version = "1.0.136", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
tarpc = { version = "0.27.2", features = ["full"] }
[lib]

View File

@@ -12,6 +12,7 @@ use {
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction, TransactionError},
transaction_context::TransactionReturnData,
},
};
@@ -35,6 +36,7 @@ pub struct TransactionStatus {
pub struct TransactionSimulationDetails {
pub logs: Vec<String>,
pub units_consumed: u64,
pub return_data: Option<TransactionReturnData>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.10.3"
version = "1.11.0"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,10 +13,10 @@ edition = "2021"
bincode = "1.3.3"
crossbeam-channel = "0.5"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.3" }
solana-banks-interface = { path = "../banks-interface", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.0" }
tarpc = { version = "0.27.2", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }

View File

@@ -24,7 +24,7 @@ use {
transaction::{self, SanitizedTransaction, Transaction},
},
solana_send_transaction_service::{
send_transaction_service::{SendTransactionService, TransactionInfo},
send_transaction_service::{SendTransactionService, TransactionInfo, DEFAULT_TPU_USE_QUIC},
tpu_info::NullTpuInfo,
},
std::{
@@ -266,6 +266,7 @@ impl Banks for BanksServer {
logs,
post_simulation_accounts: _,
units_consumed,
return_data,
} = self
.bank(commitment)
.simulate_transaction_unchecked(sanitized_transaction)
@@ -275,6 +276,7 @@ impl Banks for BanksServer {
simulation_details: Some(TransactionSimulationDetails {
logs,
units_consumed,
return_data,
}),
};
}
@@ -399,6 +401,7 @@ pub async fn start_tcp_server(
receiver,
5_000,
0,
DEFAULT_TPU_USE_QUIC,
);
let server = BanksServer::new(

View File

@@ -2,18 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-streamer"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
clap = "2.33.1"
crossbeam-channel = "0.5"
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
clap = { version = "3.1.5", features = ["cargo"] }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,6 @@
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, value_t, App, Arg},
clap::{crate_description, crate_name, Arg, Command},
crossbeam_channel::unbounded,
solana_streamer::{
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
@@ -57,18 +57,18 @@ fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) ->
fn main() -> Result<()> {
let mut num_sockets = 1usize;
let matches = App::new(crate_name!())
let matches = Command::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())
.arg(
Arg::with_name("num-recv-sockets")
Arg::new("num-recv-sockets")
.long("num-recv-sockets")
.value_name("NUM")
.takes_value(true)
.help("Use NUM receive sockets"),
)
.arg(
Arg::with_name("num-producers")
Arg::new("num-producers")
.long("num-producers")
.value_name("NUM")
.takes_value(true)
@@ -80,7 +80,7 @@ fn main() -> Result<()> {
num_sockets = max(num_sockets, n.to_string().parse().expect("integer"));
}
let num_producers = value_t!(matches, "num_producers", u64).unwrap_or(4);
let num_producers: u64 = matches.value_of_t("num_producers").unwrap_or(4);
let port = 0;
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
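For reference, a minimal standalone program in the clap v3 style this hunk migrates to (assuming the `3.1.5` version pinned above; `value_of_t` replaces the removed `value_t!` macro and returns a `Result`):

```rust
use clap::{Arg, Command};

fn main() {
    let matches = Command::new("bench-streamer")
        .arg(
            Arg::new("num-producers")
                .long("num-producers")
                .value_name("NUM")
                .takes_value(true)
                .help("Use NUM producer threads"),
        )
        .get_matches();

    // clap v3: typed values come from value_of_t, which returns a Result.
    let num_producers: u64 = matches.value_of_t("num-producers").unwrap_or(4);
    println!("num_producers = {}", num_producers);
}
```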

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-tps"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,23 +15,23 @@ log = "0.4.14"
rayon = "1.5.1"
serde_json = "1.0.79"
serde_yaml = "0.8.23"
solana-client = { path = "../client", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.10.3" }
solana-faucet = { path = "../faucet", version = "=1.10.3" }
solana-genesis = { path = "../genesis", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-faucet = { path = "../faucet", version = "=1.11.0" }
solana-genesis = { path = "../genesis", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[dev-dependencies]
serial_test = "0.6.0"
solana-local-cluster = { path = "../local-cluster", version = "=1.10.3" }
solana-local-cluster = { path = "../local-cluster", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-bloom"
version = "1.10.3"
version = "1.11.0"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -17,9 +17,9 @@ rand = "0.7.0"
rayon = "1.5.1"
serde = { version = "1.0.136", features = ["rc"] }
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.3" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
[lib]
crate-type = ["lib"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-bucket-map"
version = "1.10.3"
version = "1.11.0"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
@@ -15,14 +15,14 @@ log = { version = "0.4.11" }
memmap2 = "0.5.3"
modular-bitfield = "0.11.2"
rand = "0.7.0"
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
tempfile = "3.3.0"
[dev-dependencies]
fs_extra = "1.2.0"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[lib]
crate-type = ["lib"]

View File

@@ -137,7 +137,7 @@ all_test_steps() {
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
@@ -152,14 +152,14 @@ all_test_steps() {
^ci/test-coverage.sh \
^scripts/coverage-in-disk.sh \
; then
command_step coverage-in-disk ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
command_step coverage-in-disk ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
"Coverage skipped as no .rs files were modified"
fi
# Full test suite
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70
wait_step
# BPF test suite
@@ -303,7 +303,7 @@ EOF
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
40
}
pull_or_push_steps() {

View File

@@ -139,7 +139,7 @@ all_test_steps() {
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
@@ -147,7 +147,7 @@ all_test_steps() {
fi
# Full test suite
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70
wait_step
# BPF test suite
@@ -295,7 +295,7 @@ EOF
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
40
}
pull_or_push_steps() {

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.10.3"
version = "1.11.0"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,9 +13,9 @@ edition = "2021"
chrono = "0.4"
clap = "2.33.0"
rpassword = "6.0"
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.3", default-features = false }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.0", default-features = false }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
uriparse = "0.6.3"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,14 +17,15 @@ clap = "2.33.0"
console = "0.15.0"
humantime = "2.0.1"
indicatif = "0.16.2"
pretty-hex = "0.2.1"
serde = "1.0.136"
serde_json = "1.0.79"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
[dev-dependencies]

View File

@@ -2776,10 +2776,10 @@ mod tests {
let expected_msg = "AwECBwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDgTl3Dqh9\
F19Wo1Rmw0x+zMuNipG07jeiXfYPW4/Js5QEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE\
BAQEBAYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBQUFBQUFBQUFBQUFBQUFBQUF\
BQUFBQUFBQUFBQUFBQUGp9UXGSxWjuCKhF9z0peIzwNcMUWyGrNE2AYuqUAAAAAAAAAAAAAA\
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcH\
BwcCBgMDBQIEBAAAAAYCAQQMAgAAACoAAAAAAAAA"
BAQEBAUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBgYGBgYGBgYGBgYGBgYGBgYG\
BgYGBgYGBgYGBgYGBgYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAan1RcZLFaO\
4IqEX3PSl4jPA1wxRbIas0TYBi6pQAAABwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcH\
BwcCBQMEBgIEBAAAAAUCAQMMAgAAACoAAAAAAAAA"
.to_string();
let config = ReturnSignersConfig {
dump_transaction_message: true,

View File

@@ -14,6 +14,7 @@ use {
signature::Signature,
stake,
transaction::{TransactionError, TransactionVersion, VersionedTransaction},
transaction_context::TransactionReturnData,
},
solana_transaction_status::{Rewards, UiTransactionStatusMeta},
spl_memo::{id as spl_memo_id, v1::id as spl_memo_v1_id},
@@ -246,6 +247,7 @@ fn write_transaction<W: io::Write>(
write_fees(w, transaction_status.fee, prefix)?;
write_balances(w, transaction_status, prefix)?;
write_log_messages(w, transaction_status.log_messages.as_ref(), prefix)?;
write_return_data(w, transaction_status.return_data.as_ref(), prefix)?;
write_rewards(w, transaction_status.rewards.as_ref(), prefix)?;
} else {
writeln!(w, "{}Status: Unavailable", prefix)?;
@@ -576,6 +578,25 @@ fn write_balances<W: io::Write>(
Ok(())
}
fn write_return_data<W: io::Write>(
w: &mut W,
return_data: Option<&TransactionReturnData>,
prefix: &str,
) -> io::Result<()> {
if let Some(return_data) = return_data {
if !return_data.data.is_empty() {
use pretty_hex::*;
writeln!(
w,
"{}Return Data from Program {}:",
prefix, return_data.program_id
)?;
writeln!(w, "{} {:?}", prefix, return_data.data.hex_dump())?;
}
}
Ok(())
}
fn write_log_messages<W: io::Write>(
w: &mut W,
log_messages: Option<&Vec<String>>,
@@ -750,6 +771,10 @@ mod test {
commission: None,
}]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData {
program_id: Pubkey::new_from_array([2u8; 32]),
data: vec![1, 2, 3],
}),
};
let output = {
@@ -786,6 +811,9 @@ Status: Ok
Account 1 balance: ◎0.00001 -> ◎0.0000099
Log Messages:
Test message
Return Data from Program 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR:
Length: 3 (0x3) bytes
0000: 01 02 03 ...
Rewards:
Address Type Amount New Balance \0
4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi rent -◎0.000000100 ◎0.000009900 \0
@@ -820,6 +848,10 @@ Rewards:
commission: None,
}]),
loaded_addresses,
return_data: Some(TransactionReturnData {
program_id: Pubkey::new_from_array([2u8; 32]),
data: vec![1, 2, 3],
}),
};
let output = {
@@ -865,6 +897,9 @@ Status: Ok
Account 3 balance: ◎0.00002
Log Messages:
Test message
Return Data from Program 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR:
Length: 3 (0x3) bytes
0000: 01 02 03 ...
Rewards:
Address Type Amount New Balance \0
CktRuQ2mttgRGkXJtyksdKHjUdc2C4TgDzyB98oEzy8 rent -◎0.000000100 ◎0.000014900 \0

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,29 +27,29 @@ semver = "1.0.6"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_json = "1.0.79"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.3" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-cli-config = { path = "../cli-config", version = "=1.10.3" }
solana-cli-output = { path = "../cli-output", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-config-program = { path = "../programs/config", version = "=1.10.3" }
solana-faucet = { path = "../faucet", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-cli-config = { path = "../cli-config", version = "=1.11.0" }
solana-cli-output = { path = "../cli-output", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-config-program = { path = "../programs/config", version = "=1.11.0" }
solana-faucet = { path = "../faucet", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.11.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
solana_rbpf = "=0.2.24"
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
[dev-dependencies]
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-test-validator = { path = "../test-validator", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-test-validator = { path = "../test-validator", version = "=1.11.0" }
tempfile = "3.3.0"
[[bin]]

View File

@@ -162,6 +162,7 @@ pub enum CliCommand {
address: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
skip_fee_check: bool,
},
Program(ProgramCliCommand),
// Stake Commands
@@ -744,6 +745,7 @@ pub fn parse_command(
signers.push(signer);
1
});
let skip_fee_check = matches.is_present("skip_fee_check");
Ok(CliCommandInfo {
command: CliCommand::Deploy {
@@ -751,6 +753,7 @@ pub fn parse_command(
address,
use_deprecated_loader: matches.is_present("use_deprecated_loader"),
allow_excessive_balance: matches.is_present("allow_excessive_balance"),
skip_fee_check,
},
signers,
})
@@ -1129,6 +1132,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
address,
use_deprecated_loader,
allow_excessive_balance,
skip_fee_check,
} => process_deploy(
rpc_client,
config,
@@ -1136,6 +1140,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*address,
*use_deprecated_loader,
*allow_excessive_balance,
*skip_fee_check,
),
CliCommand::Program(program_subcommand) => {
process_program_subcommand(rpc_client, config, program_subcommand)
@@ -1967,6 +1972,7 @@ mod tests {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
},
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -1989,6 +1995,7 @@ mod tests {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
},
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2382,6 +2389,7 @@ mod tests {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
config.output_format = OutputFormat::JsonCompact;
let result = process_command(&config);
@@ -2402,6 +2410,7 @@ mod tests {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
assert!(process_command(&config).is_err());
}

View File

@@ -33,7 +33,7 @@ use {
rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE,
rpc_response::SlotInfo,
},
solana_program_runtime::compute_budget::ComputeBudget,
solana_program_runtime::compute_budget,
solana_remote_wallet::remote_wallet::RemoteWalletManager,
solana_sdk::{
account::from_account,
@@ -1409,7 +1409,7 @@ pub fn process_ping(
)];
if let Some(additional_fee) = additional_fee {
ixs.push(ComputeBudgetInstruction::request_units(
ComputeBudget::new(false).max_units as u32,
compute_budget::DEFAULT_UNITS,
*additional_fee,
));
}

View File

@@ -66,6 +66,7 @@ pub enum ProgramCliCommand {
is_final: bool,
max_len: Option<usize>,
allow_excessive_balance: bool,
skip_fee_check: bool,
},
WriteBuffer {
program_location: String,
@@ -73,6 +74,7 @@ pub enum ProgramCliCommand {
buffer_pubkey: Option<Pubkey>,
buffer_authority_signer_index: Option<SignerIndex>,
max_len: Option<usize>,
skip_fee_check: bool,
},
SetBufferAuthority {
buffer_pubkey: Pubkey,
@@ -114,6 +116,13 @@ impl ProgramSubCommands for App<'_, '_> {
SubCommand::with_name("program")
.about("Program management")
.setting(AppSettings::SubcommandRequiredElseHelp)
.arg(
Arg::with_name("skip_fee_check")
.long("skip-fee-check")
.hidden(true)
.takes_value(false)
.global(true)
)
.subcommand(
SubCommand::with_name("deploy")
.about("Deploy a program")
@@ -406,6 +415,12 @@ impl ProgramSubCommands for App<'_, '_> {
.long("allow-excessive-deploy-account-balance")
.takes_value(false)
.help("Use the designated program id, even if the account already holds a large balance of SOL")
)
.arg(
Arg::with_name("skip_fee_check")
.long("skip-fee-check")
.hidden(true)
.takes_value(false)
),
)
}
@@ -416,7 +431,14 @@ pub fn parse_program_subcommand(
default_signer: &DefaultSigner,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let response = match matches.subcommand() {
let (subcommand, sub_matches) = matches.subcommand();
let matches_skip_fee_check = matches.is_present("skip_fee_check");
let sub_matches_skip_fee_check = sub_matches
.map(|m| m.is_present("skip_fee_check"))
.unwrap_or(false);
let skip_fee_check = matches_skip_fee_check || sub_matches_skip_fee_check;
let response = match (subcommand, sub_matches) {
("deploy", Some(matches)) => {
let mut bulk_signers = vec![Some(
default_signer.signer_from_path(matches, wallet_manager)?,
@@ -476,6 +498,7 @@ pub fn parse_program_subcommand(
is_final: matches.is_present("final"),
max_len,
allow_excessive_balance: matches.is_present("allow_excessive_balance"),
skip_fee_check,
}),
signers: signer_info.signers,
}
@@ -521,6 +544,7 @@ pub fn parse_program_subcommand(
buffer_authority_signer_index: signer_info
.index_of_or_none(buffer_authority_pubkey),
max_len,
skip_fee_check,
}),
signers: signer_info.signers,
}
@@ -669,6 +693,7 @@ pub fn process_program_subcommand(
is_final,
max_len,
allow_excessive_balance,
skip_fee_check,
} => process_program_deploy(
rpc_client,
config,
@@ -681,6 +706,7 @@ pub fn process_program_subcommand(
*is_final,
*max_len,
*allow_excessive_balance,
*skip_fee_check,
),
ProgramCliCommand::WriteBuffer {
program_location,
@@ -688,6 +714,7 @@ pub fn process_program_subcommand(
buffer_pubkey,
buffer_authority_signer_index,
max_len,
skip_fee_check,
} => process_write_buffer(
rpc_client,
config,
@@ -696,6 +723,7 @@ pub fn process_program_subcommand(
*buffer_pubkey,
*buffer_authority_signer_index,
*max_len,
*skip_fee_check,
),
ProgramCliCommand::SetBufferAuthority {
buffer_pubkey,
@@ -793,6 +821,7 @@ fn process_program_deploy(
is_final: bool,
max_len: Option<usize>,
allow_excessive_balance: bool,
skip_fee_check: bool,
) -> ProcessResult {
let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?;
let (buffer_provided, buffer_signer, buffer_pubkey) = if let Some(i) = buffer_signer_index {
@@ -947,6 +976,7 @@ fn process_program_deploy(
&buffer_pubkey,
Some(upgrade_authority_signer),
allow_excessive_balance,
skip_fee_check,
)
} else {
do_process_program_upgrade(
@@ -957,6 +987,7 @@ fn process_program_deploy(
config.signers[upgrade_authority_signer_index],
&buffer_pubkey,
buffer_signer,
skip_fee_check,
)
};
if result.is_ok() && is_final {
@@ -983,6 +1014,7 @@ fn process_write_buffer(
buffer_pubkey: Option<Pubkey>,
buffer_authority_signer_index: Option<SignerIndex>,
max_len: Option<usize>,
skip_fee_check: bool,
) -> ProcessResult {
// Create ephemeral keypair to use for Buffer account, if not provided
let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?;
@@ -1050,6 +1082,7 @@ fn process_write_buffer(
&buffer_pubkey,
Some(buffer_authority),
true,
skip_fee_check,
);
if result.is_err() && buffer_signer_index.is_none() && buffer_signer.is_some() {
@@ -1636,6 +1669,7 @@ pub fn process_deploy(
buffer_signer_index: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
skip_fee_check: bool,
) -> ProcessResult {
// Create ephemeral keypair to use for Buffer account, if not provided
let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?;
@@ -1666,6 +1700,7 @@ pub fn process_deploy(
&buffer_signer.pubkey(),
Some(buffer_signer),
allow_excessive_balance,
skip_fee_check,
);
if result.is_err() && buffer_signer_index.is_none() {
report_ephemeral_mnemonic(words, mnemonic);
@@ -1704,6 +1739,7 @@ fn do_process_program_write_and_deploy(
buffer_pubkey: &Pubkey,
buffer_authority_signer: Option<&dyn Signer>,
allow_excessive_balance: bool,
skip_fee_check: bool,
) -> ProcessResult {
// Build messages to calculate fees
let mut messages: Vec<&Message> = Vec::new();
@@ -1834,7 +1870,9 @@ fn do_process_program_write_and_deploy(
messages.push(message);
}
check_payer(&rpc_client, config, balance_needed, &messages)?;
if !skip_fee_check {
check_payer(&rpc_client, config, balance_needed, &messages)?;
}
send_deploy_messages(
rpc_client,
@@ -1868,6 +1906,7 @@ fn do_process_program_upgrade(
upgrade_authority: &dyn Signer,
buffer_pubkey: &Pubkey,
buffer_signer: Option<&dyn Signer>,
skip_fee_check: bool,
) -> ProcessResult {
let loader_id = bpf_loader_upgradeable::id();
let data_len = program_data.len();
@@ -1967,7 +2006,10 @@ fn do_process_program_upgrade(
);
messages.push(&final_message);
check_payer(&rpc_client, config, balance_needed, &messages)?;
if !skip_fee_check {
check_payer(&rpc_client, config, balance_needed, &messages)?;
}
send_deploy_messages(
rpc_client,
config,
@@ -2255,6 +2297,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2281,6 +2324,7 @@ mod tests {
is_final: false,
max_len: Some(42),
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2309,6 +2353,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2339,6 +2384,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2368,6 +2414,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2400,6 +2447,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2427,6 +2475,7 @@ mod tests {
upgrade_authority_signer_index: 0,
is_final: true,
max_len: None,
skip_fee_check: false,
allow_excessive_balance: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
@@ -2460,6 +2509,7 @@ mod tests {
buffer_pubkey: None,
buffer_authority_signer_index: Some(0),
max_len: None,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2483,6 +2533,7 @@ mod tests {
buffer_pubkey: None,
buffer_authority_signer_index: Some(0),
max_len: Some(42),
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2509,6 +2560,7 @@ mod tests {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(0),
max_len: None,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2538,6 +2590,7 @@ mod tests {
buffer_pubkey: None,
buffer_authority_signer_index: Some(1),
max_len: None,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2572,6 +2625,7 @@ mod tests {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -3014,6 +3068,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![&default_keypair],
output_format: OutputFormat::JsonCompact,
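
The hunks above register `skip_fee_check` twice — once as a hidden global flag on the whole `program` subcommand tree and once directly on `deploy` — and then OR the two lookups together in `parse_program_subcommand`. A minimal, self-contained sketch of that clap 2.x pattern, using a hypothetical `demo` binary and a single globally-declared flag (a simplification of the actual CLI wiring, not a copy of it):

use clap::{App, Arg, SubCommand};

fn main() {
    // A flag marked .global(true) is accepted before or after the
    // subcommand name; resolving it means checking both match levels.
    let app = App::new("demo")
        .arg(
            Arg::with_name("skip_fee_check")
                .long("skip-fee-check")
                .hidden(true)
                .takes_value(false)
                .global(true),
        )
        .subcommand(SubCommand::with_name("deploy"));
    let matches = app.get_matches_from(vec!["demo", "deploy", "--skip-fee-check"]);
    let (_subcommand, sub_matches) = matches.subcommand();
    let skip_fee_check = matches.is_present("skip_fee_check")
        || sub_matches
            .map(|m| m.is_present("skip_fee_check"))
            .unwrap_or(false);
    assert!(skip_fee_check);
}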

View File

@@ -1383,17 +1383,12 @@ pub fn process_stake_authorize(
};
if let Some(authorized) = authorized {
match authorization_type {
StakeAuthorize::Staker => {
// first check authorized withdrawer
check_current_authority(&authorized.withdrawer, &authority.pubkey())
.or_else(|_| {
// ...then check authorized staker. If neither matches, error will
// print the stake key as `expected`
check_current_authority(&authorized.staker, &authority.pubkey())
})?;
}
StakeAuthorize::Staker => check_current_authority(
&[authorized.withdrawer, authorized.staker],
&authority.pubkey(),
)?,
StakeAuthorize::Withdrawer => {
check_current_authority(&authorized.withdrawer, &authority.pubkey())?;
check_current_authority(&[authorized.withdrawer], &authority.pubkey())?;
}
}
} else {
@@ -1935,7 +1930,7 @@ pub fn process_stake_set_lockup(
};
if let Some(lockup) = lockup {
if lockup.custodian != Pubkey::default() {
check_current_authority(&lockup.custodian, &custodian.pubkey())?;
check_current_authority(&[lockup.custodian], &custodian.pubkey())?;
}
} else {
return Err(CliError::RpcRequestError(format!(
@@ -2119,13 +2114,13 @@ fn get_stake_account_state(
}
pub(crate) fn check_current_authority(
account_current_authority: &Pubkey,
permitted_authorities: &[Pubkey],
provided_current_authority: &Pubkey,
) -> Result<(), CliError> {
if account_current_authority != provided_current_authority {
if !permitted_authorities.contains(provided_current_authority) {
Err(CliError::RpcRequestError(format!(
"Invalid current authority provided: {:?}, expected {:?}",
provided_current_authority, account_current_authority
"Invalid authority provided: {:?}, expected {:?}",
provided_current_authority, permitted_authorities
)))
} else {
Ok(())
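
With the slice-based signature, a single call now covers the staker case, where either the withdrawer or the staker may authorize. A short usage sketch with hypothetical keys, assuming `check_current_authority` and `CliError` are in scope as above:

use solana_sdk::pubkey::Pubkey;

fn demo_authority_check() -> Result<(), CliError> {
    let staker = Pubkey::new_unique();
    let withdrawer = Pubkey::new_unique();
    // Accepted: the provided authority matches one of the permitted set.
    check_current_authority(&[withdrawer, staker], &staker)?;
    // Rejected: an unrelated key is not in the permitted set.
    assert!(check_current_authority(&[withdrawer], &Pubkey::new_unique()).is_err());
    Ok(())
}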

View File

@@ -910,7 +910,10 @@ pub fn process_vote_authorize(
"Invalid vote account state; no authorized voters found".to_string(),
)
})?;
check_current_authority(&current_authorized_voter, &authorized.pubkey())?;
check_current_authority(
&[current_authorized_voter, vote_state.authorized_withdrawer],
&authorized.pubkey(),
)?;
if let Some(signer) = new_authorized_signer {
if signer.is_interactive() {
return Err(CliError::BadParameter(format!(
@@ -927,7 +930,7 @@ pub fn process_vote_authorize(
(new_authorized_pubkey, "new_authorized_pubkey".to_string()),
)?;
if let Some(vote_state) = vote_state {
check_current_authority(&vote_state.authorized_withdrawer, &authorized.pubkey())?
check_current_authority(&[vote_state.authorized_withdrawer], &authorized.pubkey())?
}
}
}

View File

@@ -62,6 +62,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@@ -91,6 +92,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
process_command(&config).unwrap();
let account1 = rpc_client
@@ -118,6 +120,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
process_command(&config).unwrap_err();
@@ -127,6 +130,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: true,
skip_fee_check: false,
};
process_command(&config).unwrap();
let account2 = rpc_client
@@ -193,6 +197,7 @@ fn test_cli_program_deploy_no_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@@ -218,6 +223,7 @@ fn test_cli_program_deploy_no_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap_err();
}
@@ -278,6 +284,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@@ -325,6 +332,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -366,6 +374,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
process_command(&config).unwrap();
let program_account = rpc_client.get_account(&program_pubkey).unwrap();
@@ -420,6 +429,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
let program_account = rpc_client.get_account(&program_pubkey).unwrap();
@@ -494,6 +504,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap_err();
@@ -509,6 +520,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -611,6 +623,7 @@ fn test_cli_program_close_program() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
process_command(&config).unwrap();
@@ -695,6 +708,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: None,
buffer_authority_signer_index: None,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@@ -729,6 +743,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: None,
max_len: Some(max_len),
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -790,6 +805,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -827,6 +843,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: None,
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -899,6 +916,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: None,
buffer_authority_signer_index: None,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@@ -938,6 +956,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: None,
max_len: None, //Some(max_len),
skip_fee_check: false,
});
process_command(&config).unwrap();
config.signers = vec![&keypair, &buffer_keypair];
@@ -951,6 +970,7 @@ fn test_cli_program_write_buffer() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let error = process_command(&config).unwrap_err();
@@ -1008,6 +1028,7 @@ fn test_cli_program_set_buffer_authority() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: None,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
@@ -1123,6 +1144,7 @@ fn test_cli_program_mismatch_buffer_authority() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
@@ -1145,6 +1167,7 @@ fn test_cli_program_mismatch_buffer_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap_err();
@@ -1160,6 +1183,7 @@ fn test_cli_program_mismatch_buffer_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
}
@@ -1216,6 +1240,7 @@ fn test_cli_program_show() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
@@ -1275,6 +1300,7 @@ fn test_cli_program_show() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let min_slot = rpc_client.get_slot().unwrap();
@@ -1401,6 +1427,7 @@ fn test_cli_program_dump() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client-test"
version = "1.10.3"
version = "1.11.0"
description = "Solana RPC Test"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,25 +14,25 @@ publish = false
futures-util = "0.3.21"
serde_json = "1.0.79"
serial_test = "0.6.0"
solana-client = { path = "../client", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.3" }
solana-rpc = { path = "../rpc", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-test-validator = { path = "../test-validator", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.0" }
solana-rpc = { path = "../rpc", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-test-validator = { path = "../test-validator", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
systemstat = "0.1.10"
tokio = { version = "1", features = ["full"] }
[dev-dependencies]
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.10.3"
version = "1.11.0"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -35,15 +35,16 @@ semver = "1.0.6"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_json = "1.0.79"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-faucet = { path = "../faucet", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-faucet = { path = "../faucet", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-stream = "0.1.8"
@@ -54,7 +55,7 @@ url = "2.2.2"
[dev-dependencies]
assert_matches = "1.5.0"
jsonrpc-http-server = "18.0.0"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,9 @@
use {
crate::{tpu_connection::TpuConnection, udp_client::UdpTpuConnection},
crate::{
quic_client::QuicTpuConnection, tpu_connection::TpuConnection, udp_client::UdpTpuConnection,
},
lazy_static::lazy_static,
solana_sdk::{transaction::VersionedTransaction, transport::TransportError},
std::{
collections::{hash_map::Entry, BTreeMap, HashMap},
net::{SocketAddr, UdpSocket},
@@ -11,9 +14,15 @@ use {
// Should be non-zero
static MAX_CONNECTIONS: usize = 64;
#[derive(Clone)]
enum Connection {
Udp(Arc<UdpTpuConnection>),
Quic(Arc<QuicTpuConnection>),
}
struct ConnMap {
// Keeps track of the connection associated with an addr and the last time it was used
map: HashMap<SocketAddr, (Arc<dyn TpuConnection + 'static + Sync + Send>, u64)>,
map: HashMap<SocketAddr, (Connection, u64)>,
// Helps to find the least recently used connection. The search and inserts are O(log(n))
// but since we're bounding the size of the collections, this should be constant
// (and hopefully negligible) time. In theory, we can do this in constant time
@@ -23,6 +32,7 @@ struct ConnMap {
// that seems non-"Rust-y" and low bang/buck. This is still pretty terrible though...
last_used_times: BTreeMap<u64, SocketAddr>,
ticks: u64,
use_quic: bool,
}
impl ConnMap {
@@ -31,21 +41,31 @@ impl ConnMap {
map: HashMap::new(),
last_used_times: BTreeMap::new(),
ticks: 0,
use_quic: false,
}
}
pub fn set_use_quic(&mut self, use_quic: bool) {
self.use_quic = use_quic;
}
}
lazy_static! {
static ref CONNECTION_MAP: Mutex<ConnMap> = Mutex::new(ConnMap::new());
}
pub fn set_use_quic(use_quic: bool) {
let mut map = (*CONNECTION_MAP).lock().unwrap();
map.set_use_quic(use_quic);
}
#[allow(dead_code)]
// TODO: see https://github.com/solana-labs/solana/issues/23661
// remove lazy_static and optimize and refactor this
pub fn get_connection(addr: &SocketAddr) -> Arc<dyn TpuConnection + 'static + Sync + Send> {
fn get_connection(addr: &SocketAddr) -> Connection {
let mut map = (*CONNECTION_MAP).lock().unwrap();
let ticks = map.ticks;
let use_quic = map.use_quic;
let (conn, target_ticks) = match map.map.entry(*addr) {
Entry::Occupied(mut entry) => {
let mut pair = entry.get_mut();
@@ -57,12 +77,15 @@ pub fn get_connection(addr: &SocketAddr) -> Arc<dyn TpuConnection + 'static + Sy
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
// TODO: see https://github.com/solana-labs/solana/issues/23659
// make it configurable (e.g. via the command line) whether to use UDP or Quic
let conn = Arc::new(UdpTpuConnection::new(send_socket, *addr));
let conn = if use_quic {
Connection::Quic(Arc::new(QuicTpuConnection::new(send_socket, *addr)))
} else {
Connection::Udp(Arc::new(UdpTpuConnection::new(send_socket, *addr)))
};
entry.insert((conn.clone(), ticks));
(
conn as Arc<dyn TpuConnection + 'static + Sync + Send>,
ticks,
)
(conn, ticks)
}
};
@@ -85,13 +108,69 @@ pub fn get_connection(addr: &SocketAddr) -> Arc<dyn TpuConnection + 'static + Sy
conn
}
// TODO: see https://github.com/solana-labs/solana/issues/23851
// use enum_dispatch and get rid of this tedious code.
// The main blocker to using enum_dispatch right now is that
// it doesn't work with static methods like TpuConnection::new
// which is used by thin_client. This will be eliminated soon
// once thin_client is moved to using this connection cache.
// Once that is done, we will migrate to using enum_dispatch
// This will be done in a followup to
// https://github.com/solana-labs/solana/pull/23817
pub fn send_wire_transaction_batch(
packets: &[&[u8]],
addr: &SocketAddr,
) -> Result<(), TransportError> {
let conn = get_connection(addr);
match conn {
Connection::Udp(conn) => conn.send_wire_transaction_batch(packets),
Connection::Quic(conn) => conn.send_wire_transaction_batch(packets),
}
}
pub fn send_wire_transaction(
wire_transaction: &[u8],
addr: &SocketAddr,
) -> Result<(), TransportError> {
let conn = get_connection(addr);
match conn {
Connection::Udp(conn) => conn.send_wire_transaction(wire_transaction),
Connection::Quic(conn) => conn.send_wire_transaction(wire_transaction),
}
}
pub fn serialize_and_send_transaction(
transaction: &VersionedTransaction,
addr: &SocketAddr,
) -> Result<(), TransportError> {
let conn = get_connection(addr);
match conn {
Connection::Udp(conn) => conn.serialize_and_send_transaction(transaction),
Connection::Quic(conn) => conn.serialize_and_send_transaction(transaction),
}
}
pub fn par_serialize_and_send_transaction_batch(
transactions: &[VersionedTransaction],
addr: &SocketAddr,
) -> Result<(), TransportError> {
let conn = get_connection(addr);
match conn {
Connection::Udp(conn) => conn.par_serialize_and_send_transaction_batch(transactions),
Connection::Quic(conn) => conn.par_serialize_and_send_transaction_batch(transactions),
}
}
#[cfg(test)]
mod tests {
use {
crate::connection_cache::{get_connection, CONNECTION_MAP, MAX_CONNECTIONS},
crate::{
connection_cache::{get_connection, Connection, CONNECTION_MAP, MAX_CONNECTIONS},
tpu_connection::TpuConnection,
},
rand::{Rng, SeedableRng},
rand_chacha::ChaChaRng,
std::net::SocketAddr,
std::net::{IpAddr, SocketAddr},
};
fn get_addr(rng: &mut ChaChaRng) -> SocketAddr {
@@ -105,6 +184,13 @@ mod tests {
addr_str.parse().expect("Invalid address")
}
fn ip(conn: Connection) -> IpAddr {
match conn {
Connection::Udp(conn) => conn.tpu_addr().ip(),
Connection::Quic(conn) => conn.tpu_addr().ip(),
}
}
#[test]
fn test_connection_cache() {
// Allow the test to run deterministically
@@ -120,7 +206,7 @@ mod tests {
// be lazy and not connect until first use or handle connection errors somehow
// (without crashing, as would be required in a real practical validator)
let first_addr = get_addr(&mut rng);
assert!(get_connection(&first_addr).tpu_addr().ip() == first_addr.ip());
assert!(ip(get_connection(&first_addr)) == first_addr.ip());
let addrs = (0..MAX_CONNECTIONS)
.into_iter()
.map(|_| {
@@ -133,7 +219,7 @@ mod tests {
let map = (*CONNECTION_MAP).lock().unwrap();
addrs.iter().for_each(|a| {
let conn = map.map.get(a).expect("Address not found");
assert!(a.ip() == conn.0.tpu_addr().ip());
assert!(a.ip() == ip(conn.0.clone()));
});
assert!(map.map.get(&first_addr).is_none());
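
Until the enum_dispatch migration flagged in the TODO above lands, the four wrapper functions each repeat the same two-arm match. One way to see the shape enum_dispatch would automate is a local macro that forwards a method call to whichever variant the cached Connection holds — a hypothetical sketch, not what the patch does:

// Hypothetical helper: each wrapper collapses to a one-liner, e.g.
// dispatch!(get_connection(addr), send_wire_transaction(data)).
macro_rules! dispatch {
    ($conn:expr, $method:ident($($arg:expr),*)) => {
        match $conn {
            Connection::Udp(c) => c.$method($($arg),*),
            Connection::Quic(c) => c.$method($($arg),*),
        }
    };
}

pub fn send_wire_transaction(
    wire_transaction: &[u8],
    addr: &SocketAddr,
) -> Result<(), TransportError> {
    dispatch!(get_connection(addr), send_wire_transaction(wire_transaction))
}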

View File

@@ -229,6 +229,7 @@ impl RpcSender for MockSender {
post_token_balances: None,
rewards: None,
loaded_addresses: None,
return_data: None,
}),
},
block_time: Some(1628633791),
@@ -340,6 +341,7 @@ impl RpcSender for MockSender {
logs: None,
accounts: None,
units_consumed: None,
return_data: None,
},
})?,
"getMinimumBalanceForRentExemption" => json![20],

View File

@@ -7,10 +7,8 @@ use {
futures::future::join_all,
itertools::Itertools,
quinn::{ClientConfig, Endpoint, EndpointConfig, NewConnection, WriteError},
rayon::iter::{IntoParallelIterator, ParallelIterator},
solana_sdk::{
quic::{QUIC_MAX_CONCURRENT_STREAMS, QUIC_PORT_OFFSET},
transaction::Transaction,
transport::Result as TransportResult,
},
std::{
@@ -65,21 +63,22 @@ impl TpuConnection for QuicTpuConnection {
&self.client.addr
}
fn send_wire_transaction(&self, data: Vec<u8>) -> TransportResult<()> {
fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]>,
{
let _guard = self.client.runtime.enter();
let send_buffer = self.client.send_buffer(&data[..]);
let send_buffer = self.client.send_buffer(wire_transaction);
self.client.runtime.block_on(send_buffer)?;
Ok(())
}
fn send_batch(&self, transactions: Vec<Transaction>) -> TransportResult<()> {
let buffers = transactions
.into_par_iter()
.map(|tx| bincode::serialize(&tx).expect("serialize Transaction in send_batch"))
.collect::<Vec<_>>();
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]>,
{
let _guard = self.client.runtime.enter();
let send_batch = self.client.send_batch(&buffers[..]);
let send_batch = self.client.send_batch(buffers);
self.client.runtime.block_on(send_batch)?;
Ok(())
}
@@ -162,12 +161,18 @@ impl QuicClient {
}
}
pub async fn send_buffer(&self, data: &[u8]) -> Result<(), ClientErrorKind> {
self._send_buffer(data).await?;
pub async fn send_buffer<T>(&self, data: T) -> Result<(), ClientErrorKind>
where
T: AsRef<[u8]>,
{
self._send_buffer(data.as_ref()).await?;
Ok(())
}
pub async fn send_batch(&self, buffers: &[Vec<u8>]) -> Result<(), ClientErrorKind> {
pub async fn send_batch<T>(&self, buffers: &[T]) -> Result<(), ClientErrorKind>
where
T: AsRef<[u8]>,
{
// Start off by "testing" the connection by sending the first transaction
// This will also connect to the server if not already connected
// and reconnect and retry if the first send attempt failed
@@ -182,7 +187,7 @@ impl QuicClient {
if buffers.is_empty() {
return Ok(());
}
let connection = self._send_buffer(&buffers[0][..]).await?;
let connection = self._send_buffer(buffers[0].as_ref()).await?;
// Used to avoid dereferencing the Arc multiple times below
// by just getting a reference to the NewConnection once
@@ -196,7 +201,7 @@ impl QuicClient {
join_all(
buffs
.into_iter()
.map(|buf| Self::_send_buffer_using_conn(&buf[..], connection_ref)),
.map(|buf| Self::_send_buffer_using_conn(buf.as_ref(), connection_ref)),
)
});

View File

@@ -7,6 +7,7 @@ use {
hash::Hash,
inflation::Inflation,
transaction::{Result, TransactionError},
transaction_context::TransactionReturnData,
},
solana_transaction_status::{
ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock,
@@ -347,6 +348,7 @@ pub struct RpcSimulateTransactionResult {
pub logs: Option<Vec<String>>,
pub accounts: Option<Vec<Option<UiAccount>>>,
pub units_consumed: Option<u64>,
pub return_data: Option<TransactionReturnData>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]

View File

@@ -24,7 +24,7 @@ use {
signers::Signers,
system_instruction,
timing::duration_as_ms,
transaction::{self, Transaction},
transaction::{self, Transaction, VersionedTransaction},
transport::Result as TransportResult,
},
std::{
@@ -215,10 +215,13 @@ impl<C: 'static + TpuConnection> ThinClient<C> {
let mut num_confirmed = 0;
let mut wait_time = MAX_PROCESSING_AGE;
// resend the same transaction until the transaction has no chance of succeeding
let wire_transaction =
bincode::serialize(&transaction).expect("transaction serialization failed");
while now.elapsed().as_secs() < wait_time as u64 {
if num_confirmed == 0 {
// Send the transaction if there has been no confirmation (e.g. the first time)
self.tpu_connection().send_transaction(transaction)?;
self.tpu_connection()
.send_wire_transaction(&wire_transaction)?;
}
if let Ok(confirmed_blocks) = self.poll_for_signature_confirmation(
@@ -601,12 +604,17 @@ impl<C: 'static + TpuConnection> SyncClient for ThinClient<C> {
impl<C: 'static + TpuConnection> AsyncClient for ThinClient<C> {
fn async_send_transaction(&self, transaction: Transaction) -> TransportResult<Signature> {
self.tpu_connection().send_transaction(&transaction)?;
let transaction = VersionedTransaction::from(transaction);
self.tpu_connection()
.serialize_and_send_transaction(&transaction)?;
Ok(transaction.signatures[0])
}
fn async_send_batch(&self, transactions: Vec<Transaction>) -> TransportResult<()> {
self.tpu_connection().send_batch(transactions)
let batch: Vec<VersionedTransaction> = transactions.into_iter().map(Into::into).collect();
self.tpu_connection()
.par_serialize_and_send_transaction_batch(&batch[..])?;
Ok(())
}
fn async_send_message<T: Signers>(

View File

@@ -1,21 +1,40 @@
use {
solana_sdk::{transaction::Transaction, transport::Result as TransportResult},
rayon::iter::{IntoParallelIterator, ParallelIterator},
solana_sdk::{transaction::VersionedTransaction, transport::Result as TransportResult},
std::net::{SocketAddr, UdpSocket},
};
pub trait TpuConnection {
fn new(client_socket: UdpSocket, tpu_addr: SocketAddr) -> Self
where
Self: Sized;
fn new(client_socket: UdpSocket, tpu_addr: SocketAddr) -> Self;
fn tpu_addr(&self) -> &SocketAddr;
fn send_transaction(&self, tx: &Transaction) -> TransportResult<()> {
let data = bincode::serialize(tx).expect("serialize Transaction in send_transaction");
self.send_wire_transaction(data)
fn serialize_and_send_transaction(
&self,
transaction: &VersionedTransaction,
) -> TransportResult<()> {
let wire_transaction =
bincode::serialize(transaction).expect("serialize Transaction in send_batch");
self.send_wire_transaction(&wire_transaction)
}
fn send_wire_transaction(&self, data: Vec<u8>) -> TransportResult<()>;
fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]>;
fn send_batch(&self, transactions: Vec<Transaction>) -> TransportResult<()>;
fn par_serialize_and_send_transaction_batch(
&self,
transactions: &[VersionedTransaction],
) -> TransportResult<()> {
let buffers = transactions
.into_par_iter()
.map(|tx| bincode::serialize(&tx).expect("serialize Transaction in send_batch"))
.collect::<Vec<_>>();
self.send_wire_transaction_batch(&buffers)
}
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]>;
}
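
After this change, an implementor supplies only the two wire-level methods; the serializing variants (`serialize_and_send_transaction`, `par_serialize_and_send_transaction_batch`) come for free as default methods. A hypothetical in-memory implementor, just to show the trait's division of labor (assumes the trait and `TransportResult` from this file are in scope):

struct MockTpuConnection {
    addr: SocketAddr,
}

impl TpuConnection for MockTpuConnection {
    fn new(_client_socket: UdpSocket, tpu_addr: SocketAddr) -> Self {
        Self { addr: tpu_addr }
    }
    fn tpu_addr(&self) -> &SocketAddr {
        &self.addr
    }
    fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
    where
        T: AsRef<[u8]>,
    {
        // A real transport would write these bytes to the wire.
        assert!(!wire_transaction.as_ref().is_empty());
        Ok(())
    }
    fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
    where
        T: AsRef<[u8]>,
    {
        buffers.iter().try_for_each(|b| self.send_wire_transaction(b))
    }
}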

View File

@@ -3,7 +3,9 @@
use {
crate::tpu_connection::TpuConnection,
solana_sdk::{transaction::Transaction, transport::Result as TransportResult},
core::iter::repeat,
solana_sdk::transport::Result as TransportResult,
solana_streamer::sendmmsg::batch_send,
std::net::{SocketAddr, UdpSocket},
};
@@ -24,19 +26,20 @@ impl TpuConnection for UdpTpuConnection {
&self.addr
}
fn send_wire_transaction(&self, data: Vec<u8>) -> TransportResult<()> {
self.socket.send_to(&data[..], self.addr)?;
fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]>,
{
self.socket.send_to(wire_transaction.as_ref(), self.addr)?;
Ok(())
}
fn send_batch(&self, transactions: Vec<Transaction>) -> TransportResult<()> {
transactions
.into_iter()
.map(|tx| bincode::serialize(&tx).expect("serialize Transaction in send_batch"))
.try_for_each(|buff| -> TransportResult<()> {
self.socket.send_to(&buff[..], self.addr)?;
Ok(())
})?;
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]>,
{
let pkts: Vec<_> = buffers.iter().zip(repeat(self.tpu_addr())).collect();
batch_send(&self.socket, &pkts)?;
Ok(())
}
}
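
The `zip(repeat(..))` idiom above fans a single destination address across every payload without cloning anything into per-packet structs. In isolation:

use std::{iter::repeat, net::SocketAddr};

fn demo_fan_out() {
    let payloads: Vec<Vec<u8>> = vec![b"tx1".to_vec(), b"tx2".to_vec()];
    let addr: SocketAddr = "127.0.0.1:8001".parse().unwrap();
    // repeat() is lazy and zip stops at the shorter side, so exactly
    // payloads.len() (payload, addr) pairs are produced.
    let pkts: Vec<(&Vec<u8>, &SocketAddr)> = payloads.iter().zip(repeat(&addr)).collect();
    assert_eq!(pkts.len(), 2);
}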

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"
@@ -33,30 +33,30 @@ rayon = "1.5.1"
retain_mut = "0.1.7"
serde = "1.0.136"
serde_derive = "1.0.103"
solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.10.3" }
solana-bloom = { path = "../bloom", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.3" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.3" }
solana-geyser-plugin-manager = { path = "../geyser-plugin-manager", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-poh = { path = "../poh", version = "=1.10.3" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.3" }
solana-replica-lib = { path = "../replica-lib", version = "=1.10.3" }
solana-rpc = { path = "../rpc", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.11.0" }
solana-bloom = { path = "../bloom", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.0" }
solana-geyser-plugin-manager = { path = "../geyser-plugin-manager", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-poh = { path = "../poh", version = "=1.11.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.11.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.0" }
solana-replica-lib = { path = "../replica-lib", version = "=1.11.0" }
solana-rpc = { path = "../rpc", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
sys-info = "0.9.1"
tempfile = "3.3.0"
thiserror = "1.0"
@@ -69,10 +69,10 @@ raptorq = "1.6.5"
reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde_json = "1.0.79"
serial_test = "0.6.0"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.3" }
solana-stake-program = { path = "../programs/stake", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.11.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
static_assertions = "1.1.0"
systemstat = "0.1.10"

View File

@@ -159,7 +159,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
for _ in 0..batches.len() {
if let Some(batch) = batches.pop() {
sent_len += batch.packets.len();
packet_s.send(batch).unwrap();
packet_s.send(vec![batch]).unwrap();
}
}
let mut received = 0;

View File

@@ -6,12 +6,10 @@
use {
crossbeam_channel::RecvTimeoutError,
rayon::ThreadPool,
solana_gossip::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES},
solana_measure::measure::Measure,
solana_runtime::{
accounts_db::{self, AccountsDb},
accounts_hash::HashStats,
accounts_hash::{CalcAccountsHashConfig, HashStats},
snapshot_config::SnapshotConfig,
snapshot_package::{
AccountsPackage, AccountsPackageReceiver, PendingSnapshotPackage, SnapshotPackage,
@@ -22,7 +20,6 @@ use {
solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey},
std::{
collections::{HashMap, HashSet},
path::{Path, PathBuf},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
@@ -46,7 +43,6 @@ impl AccountsHashVerifier {
halt_on_known_validators_accounts_hash_mismatch: bool,
fault_injection_rate_slots: u64,
snapshot_config: Option<SnapshotConfig>,
ledger_path: PathBuf,
) -> Self {
let exit = exit.clone();
let cluster_info = cluster_info.clone();
@@ -54,7 +50,6 @@ impl AccountsHashVerifier {
.name("solana-hash-accounts".to_string())
.spawn(move || {
let mut hashes = vec![];
let mut thread_pool = None;
loop {
if exit.load(Ordering::Relaxed) {
break;
@@ -62,11 +57,6 @@ impl AccountsHashVerifier {
match accounts_package_receiver.recv_timeout(Duration::from_secs(1)) {
Ok(accounts_package) => {
if accounts_package.hash_for_testing.is_some() && thread_pool.is_none()
{
thread_pool = Some(accounts_db::make_min_priority_thread_pool());
}
Self::process_accounts_package(
accounts_package,
&cluster_info,
@@ -77,8 +67,6 @@ impl AccountsHashVerifier {
&exit,
fault_injection_rate_slots,
snapshot_config.as_ref(),
thread_pool.as_ref(),
&ledger_path,
);
}
Err(RecvTimeoutError::Disconnected) => break,
@@ -103,10 +91,8 @@ impl AccountsHashVerifier {
exit: &Arc<AtomicBool>,
fault_injection_rate_slots: u64,
snapshot_config: Option<&SnapshotConfig>,
thread_pool: Option<&ThreadPool>,
ledger_path: &Path,
) {
Self::verify_accounts_package_hash(&accounts_package, thread_pool, ledger_path);
Self::verify_accounts_package_hash(&accounts_package);
Self::push_accounts_hashes_to_cluster(
&accounts_package,
@@ -121,25 +107,32 @@ impl AccountsHashVerifier {
Self::submit_for_packaging(accounts_package, pending_snapshot_package, snapshot_config);
}
fn verify_accounts_package_hash(
accounts_package: &AccountsPackage,
thread_pool: Option<&ThreadPool>,
ledger_path: &Path,
) {
fn verify_accounts_package_hash(accounts_package: &AccountsPackage) {
let mut measure_hash = Measure::start("hash");
if let Some(expected_hash) = accounts_package.hash_for_testing {
if let Some(expected_hash) = accounts_package.accounts_hash_for_testing {
let mut sort_time = Measure::start("sort_storages");
let sorted_storages = SortedStorages::new(&accounts_package.snapshot_storages);
let (hash, lamports) = AccountsDb::calculate_accounts_hash_without_index(
ledger_path,
&sorted_storages,
thread_pool,
HashStats::default(),
false,
None,
None, // this will fail with filler accounts
None, // this code path is only for testing, so use default # passes here
)
.unwrap();
sort_time.stop();
let mut timings = HashStats {
storage_sort_us: sort_time.as_us(),
..HashStats::default()
};
timings.calc_storage_size_quartiles(&accounts_package.snapshot_storages);
let (hash, lamports) = accounts_package
.accounts
.accounts_db
.calculate_accounts_hash_without_index(
&CalcAccountsHashConfig {
storages: &sorted_storages,
use_bg_thread_pool: true,
check_hash: false,
ancestors: None,
},
timings,
)
.unwrap();
assert_eq!(accounts_package.expected_capitalization, lamports);
assert_eq!(expected_hash, hash);
@@ -160,7 +153,7 @@ impl AccountsHashVerifier {
exit: &Arc<AtomicBool>,
fault_injection_rate_slots: u64,
) {
let hash = accounts_package.hash;
let hash = accounts_package.accounts_hash;
if fault_injection_rate_slots != 0
&& accounts_package.slot % fault_injection_rate_slots == 0
{
@@ -353,6 +346,7 @@ mod tests {
incremental_snapshot_archive_interval_slots: Slot::MAX,
..SnapshotConfig::default()
};
let accounts = Arc::new(solana_runtime::accounts::Accounts::default_for_tests());
for i in 0..MAX_SNAPSHOT_HASHES + 1 {
let accounts_package = AccountsPackage {
slot: full_snapshot_archive_interval_slots + i as u64,
@@ -360,18 +354,17 @@ mod tests {
slot_deltas: vec![],
snapshot_links: TempDir::new().unwrap(),
snapshot_storages: vec![],
hash: hash(&[i as u8]),
accounts_hash: hash(&[i as u8]),
archive_format: ArchiveFormat::TarBzip2,
snapshot_version: SnapshotVersion::default(),
snapshot_archives_dir: PathBuf::default(),
expected_capitalization: 0,
hash_for_testing: None,
accounts_hash_for_testing: None,
cluster_type: ClusterType::MainnetBeta,
snapshot_type: None,
accounts: Arc::clone(&accounts),
};
let ledger_path = TempDir::new().unwrap();
AccountsHashVerifier::process_accounts_package(
accounts_package,
&cluster_info,
@@ -382,8 +375,6 @@ mod tests {
&exit,
0,
Some(&snapshot_config),
None,
ledger_path.path(),
);
// sleep for 1ms to create a newer timestamp for gossip entry

View File

@@ -1,5 +1,5 @@
//! The `banking_stage` processes Transaction messages. It is intended to be used
//! to contruct a software pipeline. The stage uses all available CPU cores and
//! to construct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
use {
crate::{
@@ -14,6 +14,7 @@ use {
histogram::Histogram,
itertools::Itertools,
retain_mut::RetainMut,
solana_client::connection_cache::send_wire_transaction_batch,
solana_entry::entry::hash_transactions,
solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
solana_ledger::blockstore_processor::TransactionStatusSender,
@@ -51,8 +52,8 @@ use {
transaction::{
self, AddressLoader, SanitizedTransaction, TransactionError, VersionedTransaction,
},
transport::TransportError,
},
solana_streamer::sendmmsg::{batch_send, SendPktsError},
solana_transaction_status::token_balances::{
collect_token_balances, TransactionTokenBalancesSet,
},
@@ -60,7 +61,7 @@ use {
cmp,
collections::HashMap,
env,
net::{SocketAddr, UdpSocket},
net::SocketAddr,
sync::{
atomic::{AtomicU64, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
@@ -194,7 +195,7 @@ impl BankingStageStats {
}
fn report(&mut self, report_interval_ms: u64) {
// skip repoting metrics if stats is empty
// skip reporting metrics if stats is empty
if self.is_empty() {
return;
}
@@ -482,11 +483,10 @@ impl BankingStage {
/// Forwards all valid, unprocessed packets in the buffer, up to a rate limit. Returns
/// the number of successfully forwarded packets in the second part of the tuple
fn forward_buffered_packets(
socket: &std::net::UdpSocket,
tpu_forwards: &std::net::SocketAddr,
packets: Vec<&Packet>,
data_budget: &DataBudget,
) -> (std::io::Result<()>, usize) {
) -> (std::result::Result<(), TransportError>, usize) {
const INTERVAL_MS: u64 = 100;
const MAX_BYTES_PER_SECOND: usize = 10_000 * 1200;
const MAX_BYTES_PER_INTERVAL: usize = MAX_BYTES_PER_SECOND * INTERVAL_MS as usize / 1000;
@@ -502,18 +502,35 @@ impl BankingStage {
.iter()
.filter_map(|p| {
if !p.meta.forwarded() && data_budget.take(p.meta.size) {
Some((&p.data[..p.meta.size], tpu_forwards))
Some(&p.data[..p.meta.size])
} else {
None
}
})
.collect();
// TODO: see https://github.com/solana-labs/solana/issues/23819
// fix this so it returns the correct number of succeeded packets
// when there's an error sending the batch. This was left as-is for now
// in favor of shipping Quic support, which was considered higher-priority
if !packet_vec.is_empty() {
inc_new_counter_info!("banking_stage-forwarded_packets", packet_vec.len());
if let Err(SendPktsError::IoError(ioerr, num_failed)) = batch_send(socket, &packet_vec)
{
return (Err(ioerr), packet_vec.len().saturating_sub(num_failed));
let mut measure = Measure::start("banking_stage-forward-us");
let res = send_wire_transaction_batch(&packet_vec, tpu_forwards);
measure.stop();
inc_new_counter_info!(
"banking_stage-forward-us",
measure.as_us() as usize,
1000,
1000
);
if let Err(err) = res {
inc_new_counter_info!("banking_stage-forward_packets-failed-batches", 1);
return (Err(err), 0);
}
}
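
Spelling out the three constants that bound this forwarding path: MAX_BYTES_PER_SECOND is 10,000 packets of 1,200 bytes, i.e. 12 MB/s, refilled in 100 ms slices of 1.2 MB each.

const INTERVAL_MS: u64 = 100;
const MAX_BYTES_PER_SECOND: usize = 10_000 * 1200;
const MAX_BYTES_PER_INTERVAL: usize = MAX_BYTES_PER_SECOND * INTERVAL_MS as usize / 1000;

fn main() {
    // 12_000_000 bytes/s, handed out as 1_200_000-byte budgets per interval.
    assert_eq!(MAX_BYTES_PER_SECOND, 12_000_000);
    assert_eq!(MAX_BYTES_PER_INTERVAL, 1_200_000);
}
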
@@ -700,14 +717,11 @@ impl BankingStage {
// `original_unprocessed_indexes` must have remaining packets to process
// if not yet processed.
assert!(Self::packet_has_more_unprocessed_transactions(
&original_unprocessed_indexes
));
assert!(!original_unprocessed_indexes.is_empty());
true
}
}
});
proc_start.stop();
debug!(
@@ -766,7 +780,6 @@ impl BankingStage {
#[allow(clippy::too_many_arguments)]
fn process_buffered_packets(
my_pubkey: &Pubkey,
socket: &std::net::UdpSocket,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &ClusterInfo,
buffered_packet_batches: &mut UnprocessedPacketBatches,
@@ -846,7 +859,6 @@ impl BankingStage {
cluster_info,
buffered_packet_batches,
poh_recorder,
socket,
false,
data_budget,
slot_metrics_tracker,
@@ -865,7 +877,6 @@ impl BankingStage {
cluster_info,
buffered_packet_batches,
poh_recorder,
socket,
true,
data_budget,
slot_metrics_tracker,
@@ -887,7 +898,6 @@ impl BankingStage {
cluster_info: &ClusterInfo,
buffered_packet_batches: &mut UnprocessedPacketBatches,
poh_recorder: &Arc<Mutex<PohRecorder>>,
socket: &UdpSocket,
hold: bool,
data_budget: &DataBudget,
slot_metrics_tracker: &mut LeaderSlotMetricsTracker,
@@ -913,7 +923,7 @@ impl BankingStage {
Self::filter_valid_packets_for_forwarding(buffered_packet_batches.iter());
let forwardable_packets_len = forwardable_packets.len();
let (_forward_result, sucessful_forwarded_packets_count) =
Self::forward_buffered_packets(socket, &addr, forwardable_packets, data_budget);
Self::forward_buffered_packets(&addr, forwardable_packets, data_budget);
let failed_forwarded_packets_count =
forwardable_packets_len.saturating_sub(sucessful_forwarded_packets_count);
@@ -958,7 +968,6 @@ impl BankingStage {
cost_model: Arc<RwLock<CostModel>>,
) {
let recorder = poh_recorder.lock().unwrap().recorder();
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut buffered_packet_batches = UnprocessedPacketBatches::with_capacity(batch_limit);
let mut banking_stage_stats = BankingStageStats::new(id);
let qos_service = QosService::new(cost_model, id);
@@ -970,7 +979,6 @@ impl BankingStage {
|_| {
Self::process_buffered_packets(
&my_pubkey,
&socket,
poh_recorder,
cluster_info,
&mut buffered_packet_batches,
@@ -1183,6 +1191,7 @@ impl BankingStage {
MAX_PROCESSING_AGE,
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
&mut execute_and_commit_timings.execute_timings,
)
},
@@ -2015,7 +2024,7 @@ impl BankingStage {
banking_stage_stats: &mut BankingStageStats,
slot_metrics_tracker: &mut LeaderSlotMetricsTracker,
) {
if Self::packet_has_more_unprocessed_transactions(&packet_indexes) {
if !packet_indexes.is_empty() {
if unprocessed_packet_batches.len() >= batch_limit {
*dropped_packet_batches_count += 1;
if let Some(dropped_batch) = unprocessed_packet_batches.pop_front() {
@@ -2041,10 +2050,6 @@ impl BankingStage {
}
}
fn packet_has_more_unprocessed_transactions(packet_indexes: &[usize]) -> bool {
!packet_indexes.is_empty()
}
pub fn join(self) -> thread::Result<()> {
for bank_thread_hdl in self.bank_thread_hdls {
bank_thread_hdl.join()?;
@@ -2156,6 +2161,7 @@ mod tests {
log_messages: None,
inner_instructions: None,
durable_nonce_fee: None,
return_data: None,
})
}
@@ -3835,7 +3841,6 @@ mod tests {
let local_node = Node::new_localhost_with_pubkey(validator_pubkey);
let cluster_info = new_test_cluster_info(local_node.info);
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let recv_socket = &local_node.sockets.tpu_forwards[0];
let test_cases = vec![
@@ -3857,7 +3862,6 @@ mod tests {
&cluster_info,
&mut unprocessed_packet_batches,
&poh_recorder,
&send_socket,
true,
&data_budget,
&mut LeaderSlotMetricsTracker::new(0),
@@ -3935,7 +3939,6 @@ mod tests {
let local_node = Node::new_localhost_with_pubkey(validator_pubkey);
let cluster_info = new_test_cluster_info(local_node.info);
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let recv_socket = &local_node.sockets.tpu_forwards[0];
let test_cases = vec![
@@ -3969,7 +3972,6 @@ mod tests {
&cluster_info,
&mut unprocessed_packet_batches,
&poh_recorder,
&send_socket,
hold,
&DataBudget::default(),
&mut LeaderSlotMetricsTracker::new(0),

View File

@@ -10,7 +10,7 @@ use {
solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_db},
solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE,
vote_account::VoteAccount,
vote_account::VoteAccountsHashMap,
},
solana_sdk::{
clock::{Slot, UnixTimestamp},
@@ -253,7 +253,7 @@ impl Tower {
pub(crate) fn collect_vote_lockouts(
vote_account_pubkey: &Pubkey,
bank_slot: Slot,
vote_accounts: &HashMap<Pubkey, (/*stake:*/ u64, VoteAccount)>,
vote_accounts: &VoteAccountsHashMap,
ancestors: &HashMap<Slot, HashSet<Slot>>,
get_frozen_hash: impl Fn(Slot) -> Option<Hash>,
latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks,
@@ -636,7 +636,7 @@ impl Tower {
descendants: &HashMap<Slot, HashSet<u64>>,
progress: &ProgressMap,
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, VoteAccount)>,
epoch_vote_accounts: &VoteAccountsHashMap,
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
) -> SwitchForkDecision {
@@ -929,7 +929,7 @@ impl Tower {
descendants: &HashMap<Slot, HashSet<u64>>,
progress: &ProgressMap,
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, VoteAccount)>,
epoch_vote_accounts: &VoteAccountsHashMap,
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
) -> SwitchForkDecision {
@@ -1377,7 +1377,7 @@ pub mod test {
},
itertools::Itertools,
solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path},
solana_runtime::bank::Bank,
solana_runtime::{bank::Bank, vote_account::VoteAccount},
solana_sdk::{
account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
clock::Slot,
@@ -1398,7 +1398,7 @@ pub mod test {
trees::tr,
};
fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> HashMap<Pubkey, (u64, VoteAccount)> {
fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> VoteAccountsHashMap {
stake_votes
.iter()
.map(|(lamports, votes)| {

View File

@@ -134,10 +134,6 @@ impl CostUpdateService {
.upsert_instruction_cost(program_id, units);
update_count += 1;
}
debug!(
"after replayed into bank, updated cost model instruction cost table, current values: {:?}",
cost_model.read().unwrap().get_instruction_cost_table()
);
update_count
}
}
@@ -150,15 +146,10 @@ mod tests {
fn test_update_cost_model_with_empty_execute_timings() {
let cost_model = Arc::new(RwLock::new(CostModel::default()));
let mut empty_execute_timings = ExecuteTimings::default();
CostUpdateService::update_cost_model(&cost_model, &mut empty_execute_timings);
assert_eq!(
0,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
CostUpdateService::update_cost_model(&cost_model, &mut empty_execute_timings),
);
}
@@ -188,22 +179,16 @@ mod tests {
total_errored_units,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
);
assert_eq!(
Some(&expected_cost),
expected_cost,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
.find_instruction_cost(&program_key_1)
);
}
@@ -225,22 +210,16 @@ mod tests {
total_errored_units: 0,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
);
assert_eq!(
Some(&expected_cost),
expected_cost,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
.find_instruction_cost(&program_key_1)
);
}
}
@@ -264,20 +243,46 @@ mod tests {
total_errored_units: 0,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
// If `errored_txs_compute_consumed` is empty and `count == 0`, then
// nothing should be inserted into the cost model
assert!(cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.is_empty());
assert_eq!(
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
0
);
}
// set the current instruction cost to 100
let current_program_cost = 100;
{
execute_timings.details.per_program_timings.insert(
program_key_1,
ProgramTiming {
accumulated_us: 1000,
accumulated_units: current_program_cost,
count: 1,
errored_txs_compute_consumed: vec![],
total_errored_units: 0,
},
);
assert_eq!(
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
1
);
assert_eq!(
current_program_cost,
cost_model
.read()
.unwrap()
.find_instruction_cost(&program_key_1)
);
}
// Test updating cost model with only erroring compute costs where the `cost_per_error` is
// greater than the current instruction cost for the program. Should update with the
// new erroring compute costs
let cost_per_error = 1000;
// the expected cost is (previous_cost + new_cost)/2 = (100 + 1000)/2 = 550
let expected_units = 550;
{
let errored_txs_compute_consumed = vec![cost_per_error; 3];
let total_errored_units = errored_txs_compute_consumed.iter().sum();
@@ -291,29 +296,23 @@ mod tests {
total_errored_units,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
1
);
assert_eq!(
Some(&cost_per_error),
expected_units,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
.find_instruction_cost(&program_key_1)
);
}
// Test updating cost model with only erroring compute costs where the error cost is
// `smaller_cost_per_error`, less than the current instruction cost for the program.
// The cost should not decrease for these new lesser errors
let smaller_cost_per_error = cost_per_error - 10;
let smaller_cost_per_error = expected_units - 10;
{
let errored_txs_compute_consumed = vec![smaller_cost_per_error; 3];
let total_errored_units = errored_txs_compute_consumed.iter().sum();
@@ -327,22 +326,16 @@ mod tests {
total_errored_units,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
1
);
assert_eq!(
Some(&cost_per_error),
expected_units,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
.find_instruction_cost(&program_key_1)
);
}
}
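Taken together, the reworked assertions pin down the upsert semantics that `update_cost_model` (which now returns its update count) relies on: each observation is averaged with the program's previous cost, and observations derived solely from errored executions may raise the cost but never lower it. A hedged reconstruction for intuition — `upsert_cost` is a hypothetical helper, not the cost_model crate's API:

```rust
use std::collections::HashMap;

// Editor's sketch of the update rule implied by the assertions above.
fn upsert_cost(table: &mut HashMap<String, u64>, program: &str, observed: u64, errors_only: bool) {
    let current = *table.get(program).unwrap_or(&observed);
    // First insert degenerates to `observed`; later, (100 + 1000) / 2 == 550 as in the test.
    let averaged = (current + observed) / 2;
    // Error-only observations may raise the cost but never lower it:
    // max(550, (550 + 540) / 2) == 550, matching the final assertion.
    let new_cost = if errors_only { averaged.max(current) } else { averaged };
    table.insert(program.to_string(), new_cost);
}
```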

View File

@@ -0,0 +1,185 @@
use {
crossbeam_channel::{Receiver, RecvTimeoutError, Sender},
rayon::{prelude::*, ThreadPool},
solana_gossip::cluster_info::ClusterInfo,
solana_measure::measure::Measure,
solana_perf::packet::PacketBatch,
solana_rayon_threadlimit::get_thread_count,
solana_runtime::bank_forks::BankForks,
solana_sdk::timing::timestamp,
solana_streamer::streamer::{self, StreamerError},
std::{
cell::RefCell,
collections::HashMap,
net::IpAddr,
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
},
};
const IP_TO_STAKE_REFRESH_DURATION: Duration = Duration::from_secs(5);
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("transaction_sender_stake_stage_{}", ix))
.build()
.unwrap()));
pub type FindPacketSenderStakeSender = Sender<Vec<PacketBatch>>;
pub type FindPacketSenderStakeReceiver = Receiver<Vec<PacketBatch>>;
#[derive(Debug, Default)]
struct FindPacketSenderStakeStats {
last_print: u64,
refresh_ip_to_stake_time: u64,
apply_sender_stakes_time: u64,
send_batches_time: u64,
receive_batches_time: u64,
total_batches: u64,
total_packets: u64,
}
impl FindPacketSenderStakeStats {
fn report(&mut self) {
let now = timestamp();
let elapsed_ms = now - self.last_print;
if elapsed_ms > 2000 {
datapoint_info!(
"find_packet_sender_stake-services_stats",
(
"refresh_ip_to_stake_time",
self.refresh_ip_to_stake_time as i64,
i64
),
(
"apply_sender_stakes_time",
self.apply_sender_stakes_time as i64,
i64
),
("send_batches_time", self.send_batches_time as i64, i64),
(
"receive_batches_time",
self.receive_batches_time as i64,
i64
),
("total_batches", self.total_batches as i64, i64),
("total_packets", self.total_packets as i64, i64),
);
*self = FindPacketSenderStakeStats::default();
self.last_print = now;
}
}
}
pub struct FindPacketSenderStakeStage {
thread_hdl: JoinHandle<()>,
}
impl FindPacketSenderStakeStage {
pub fn new(
packet_receiver: streamer::PacketBatchReceiver,
sender: FindPacketSenderStakeSender,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<ClusterInfo>,
) -> Self {
let mut stats = FindPacketSenderStakeStats::default();
let thread_hdl = Builder::new()
.name("find-packet-sender-stake".to_string())
.spawn(move || {
let mut last_stakes = Instant::now();
let mut ip_to_stake: HashMap<IpAddr, u64> = HashMap::new();
loop {
let mut refresh_ip_to_stake_time = Measure::start("refresh_ip_to_stake_time");
Self::try_refresh_ip_to_stake(
&mut last_stakes,
&mut ip_to_stake,
bank_forks.clone(),
cluster_info.clone(),
);
refresh_ip_to_stake_time.stop();
stats.refresh_ip_to_stake_time = stats
.refresh_ip_to_stake_time
.saturating_add(refresh_ip_to_stake_time.as_us());
match streamer::recv_packet_batches(&packet_receiver) {
Ok((mut batches, num_packets, recv_duration)) => {
let num_batches = batches.len();
let mut apply_sender_stakes_time =
Measure::start("apply_sender_stakes_time");
Self::apply_sender_stakes(&mut batches, &ip_to_stake);
apply_sender_stakes_time.stop();
let mut send_batches_time = Measure::start("send_batches_time");
if let Err(e) = sender.send(batches) {
info!("Sender error: {:?}", e);
}
send_batches_time.stop();
stats.apply_sender_stakes_time = stats
.apply_sender_stakes_time
.saturating_add(apply_sender_stakes_time.as_us());
stats.send_batches_time = stats
.send_batches_time
.saturating_add(send_batches_time.as_us());
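// note: unlike the µs counters above, receive time accumulates nanoseconds here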
stats.receive_batches_time = stats
.receive_batches_time
.saturating_add(recv_duration.as_nanos() as u64);
stats.total_batches =
stats.total_batches.saturating_add(num_batches as u64);
stats.total_packets =
stats.total_packets.saturating_add(num_packets as u64);
}
Err(e) => match e {
StreamerError::RecvTimeout(RecvTimeoutError::Disconnected) => break,
StreamerError::RecvTimeout(RecvTimeoutError::Timeout) => (),
_ => error!("error: {:?}", e),
},
}
stats.report();
}
})
.unwrap();
Self { thread_hdl }
}
fn try_refresh_ip_to_stake(
last_stakes: &mut Instant,
ip_to_stake: &mut HashMap<IpAddr, u64>,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<ClusterInfo>,
) {
if last_stakes.elapsed() > IP_TO_STAKE_REFRESH_DURATION {
let root_bank = bank_forks.read().unwrap().root_bank();
let staked_nodes = root_bank.staked_nodes();
*ip_to_stake = cluster_info
.tvu_peers()
.into_iter()
.filter_map(|node| {
let stake = staked_nodes.get(&node.id)?;
Some((node.tvu.ip(), *stake))
})
.collect();
*last_stakes = Instant::now();
}
}
fn apply_sender_stakes(batches: &mut [PacketBatch], ip_to_stake: &HashMap<IpAddr, u64>) {
PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
batches
.into_par_iter()
.flat_map(|batch| batch.packets.par_iter_mut())
.for_each(|packet| {
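// senders absent from the staked-nodes map default to zero stake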
packet.meta.sender_stake =
*ip_to_stake.get(&packet.meta.addr().ip()).unwrap_or(&0);
});
})
});
}
pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}

View File

@@ -24,6 +24,7 @@ pub mod cost_update_service;
pub mod drop_bank_service;
pub mod duplicate_repair_status;
pub mod fetch_stage;
pub mod find_packet_sender_stake_stage;
pub mod fork_choice;
pub mod gen_keys;
pub mod heaviest_subtree_fork_choice;

View File

@@ -7,7 +7,7 @@ use {
},
solana_ledger::blockstore_processor::{ConfirmationProgress, ConfirmationTiming},
solana_program_runtime::timings::ExecuteTimingType,
solana_runtime::{bank::Bank, bank_forks::BankForks, vote_account::VoteAccount},
solana_runtime::{bank::Bank, bank_forks::BankForks, vote_account::VoteAccountsHashMap},
solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey},
std::{
collections::{BTreeMap, HashMap, HashSet},
@@ -516,7 +516,7 @@ impl PropagatedStats {
&mut self,
node_pubkey: &Pubkey,
vote_account_pubkeys: &[Pubkey],
epoch_vote_accounts: &HashMap<Pubkey, (u64, VoteAccount)>,
epoch_vote_accounts: &VoteAccountsHashMap,
) {
self.propagated_node_ids.insert(*node_pubkey);
for vote_account_pubkey in vote_account_pubkeys.iter() {
@@ -695,7 +695,7 @@ impl ProgressMap {
#[cfg(test)]
mod test {
use super::*;
use {super::*, solana_runtime::vote_account::VoteAccount};
#[test]
fn test_add_vote_pubkey() {

View File

@@ -2301,7 +2301,7 @@ impl ReplayStage {
}
}
// send accumulated excute-timings to cost_update_service
// send accumulated execute-timings to cost_update_service
if !execute_timings.details.per_program_timings.is_empty() {
cost_update_sender
.send(CostUpdate::ExecuteTiming {
@@ -2589,7 +2589,7 @@ impl ReplayStage {
*/
// Imagine 90% of validators voted on slot 4, but only 9% landed. If everybody that fails
// the switch theshold abandons slot 4 to build on slot 8 (because it's *currently* heavier),
// the switch threshold abandons slot 4 to build on slot 8 (because it's *currently* heavier),
// then there will be no blocks to include the votes for slot 4, and the network halts
// because 90% of validators can't vote
info!(

View File

@@ -240,7 +240,7 @@ fn retransmit(
epoch_fetch.stop();
stats.epoch_fetch += epoch_fetch.as_us();
let mut epoch_cache_update = Measure::start("retransmit_epoch_cach_update");
let mut epoch_cache_update = Measure::start("retransmit_epoch_cache_update");
maybe_reset_shreds_received_cache(shreds_received, hasher_reset_ts);
epoch_cache_update.stop();
stats.epoch_cache_update += epoch_cache_update.as_us();

View File

@@ -2,17 +2,17 @@
use {
crate::packet_hasher::PacketHasher,
crossbeam_channel::unbounded,
crossbeam_channel::{unbounded, Sender},
lru::LruCache,
solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats},
solana_perf::{
cuda_runtime::PinnedVec,
packet::{Packet, PacketBatchRecycler, PacketFlags},
packet::{Packet, PacketBatch, PacketBatchRecycler, PacketFlags},
recycler::Recycler,
},
solana_runtime::bank_forks::BankForks,
solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT},
solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender},
solana_streamer::streamer::{self, PacketBatchReceiver},
std::{
net::UdpSocket,
sync::{atomic::AtomicBool, Arc, RwLock},
@@ -65,7 +65,7 @@ impl ShredFetchStage {
// updates packets received on a channel and sends them on another channel
fn modify_packets<F>(
recvr: PacketBatchReceiver,
sendr: PacketBatchSender,
sendr: Sender<Vec<PacketBatch>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
modify: F,
@@ -125,7 +125,7 @@ impl ShredFetchStage {
stats = ShredFetchStats::default();
last_stats = Instant::now();
}
if sendr.send(packet_batch).is_err() {
if sendr.send(vec![packet_batch]).is_err() {
break;
}
}
@@ -134,7 +134,7 @@ impl ShredFetchStage {
fn packet_modifier<F>(
sockets: Vec<Arc<UdpSocket>>,
exit: &Arc<AtomicBool>,
sender: PacketBatchSender,
sender: Sender<Vec<PacketBatch>>,
recycler: Recycler<PinnedVec<Packet>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
@@ -170,7 +170,7 @@ impl ShredFetchStage {
sockets: Vec<Arc<UdpSocket>>,
forward_sockets: Vec<Arc<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
sender: &PacketBatchSender,
sender: &Sender<Vec<PacketBatch>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
exit: &Arc<AtomicBool>,
) -> Self {

View File

@@ -6,9 +6,9 @@
//! if perf-libs are available
use {
crate::sigverify,
crate::{find_packet_sender_stake_stage, sigverify},
core::time::Duration,
crossbeam_channel::{Receiver, RecvTimeoutError, SendError, Sender},
crossbeam_channel::{RecvTimeoutError, SendError, Sender},
itertools::Itertools,
solana_measure::measure::Measure,
solana_perf::{
@@ -16,7 +16,7 @@ use {
sigverify::{count_valid_packets, shrink_batches, Deduper},
},
solana_sdk::timing,
solana_streamer::streamer::{self, PacketBatchReceiver, StreamerError},
solana_streamer::streamer::{self, StreamerError},
std::{
thread::{self, Builder, JoinHandle},
time::Instant,
@@ -192,7 +192,7 @@ impl SigVerifier for DisabledSigVerifier {
impl SigVerifyStage {
#[allow(clippy::new_ret_no_self)]
pub fn new<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: Receiver<PacketBatch>,
packet_receiver: find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
verified_sender: Sender<Vec<PacketBatch>>,
verifier: T,
) -> Self {
@@ -227,12 +227,12 @@ impl SigVerifyStage {
fn verifier<T: SigVerifier>(
deduper: &Deduper,
recvr: &PacketBatchReceiver,
recvr: &find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
sendr: &Sender<Vec<PacketBatch>>,
verifier: &T,
stats: &mut SigVerifierStats,
) -> Result<()> {
let (mut batches, num_packets, recv_duration) = streamer::recv_packet_batches(recvr)?;
let (mut batches, num_packets, recv_duration) = streamer::recv_vec_packet_batches(recvr)?;
let batches_len = batches.len();
debug!(
@@ -312,7 +312,7 @@ impl SigVerifyStage {
}
fn verifier_service<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: PacketBatchReceiver,
packet_receiver: find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
verified_sender: Sender<Vec<PacketBatch>>,
verifier: &T,
) -> JoinHandle<()> {
@@ -358,7 +358,7 @@ impl SigVerifyStage {
}
fn verifier_services<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: PacketBatchReceiver,
packet_receiver: find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
verified_sender: Sender<Vec<PacketBatch>>,
verifier: T,
) -> JoinHandle<()> {
@@ -445,7 +445,7 @@ mod tests {
for _ in 0..batches.len() {
if let Some(batch) = batches.pop() {
sent_len += batch.packets.len();
packet_s.send(batch).unwrap();
packet_s.send(vec![batch]).unwrap();
}
}
let mut received = 0;

View File

@@ -10,6 +10,7 @@ use {
GossipVerifiedVoteHashSender, VerifiedVoteSender, VoteTracker,
},
fetch_stage::FetchStage,
find_packet_sender_stake_stage::FindPacketSenderStakeStage,
sigverify::TransactionSigVerifier,
sigverify_stage::SigVerifyStage,
},
@@ -55,6 +56,8 @@ pub struct Tpu {
cluster_info_vote_listener: ClusterInfoVoteListener,
broadcast_stage: BroadcastStage,
tpu_quic_t: thread::JoinHandle<()>,
find_packet_sender_stake_stage: FindPacketSenderStakeStage,
vote_find_packet_sender_stake_stage: FindPacketSenderStakeStage,
}
impl Tpu {
@@ -103,6 +106,26 @@ impl Tpu {
poh_recorder,
tpu_coalesce_ms,
);
let (find_packet_sender_stake_sender, find_packet_sender_stake_receiver) = unbounded();
let find_packet_sender_stake_stage = FindPacketSenderStakeStage::new(
packet_receiver,
find_packet_sender_stake_sender,
bank_forks.clone(),
cluster_info.clone(),
);
let (vote_find_packet_sender_stake_sender, vote_find_packet_sender_stake_receiver) =
unbounded();
let vote_find_packet_sender_stake_stage = FindPacketSenderStakeStage::new(
vote_packet_receiver,
vote_find_packet_sender_stake_sender,
bank_forks.clone(),
cluster_info.clone(),
);
let (verified_sender, verified_receiver) = unbounded();
let tpu_quic_t = solana_streamer::quic::spawn_server(
@@ -117,7 +140,7 @@ impl Tpu {
let sigverify_stage = {
let verifier = TransactionSigVerifier::default();
SigVerifyStage::new(packet_receiver, verified_sender, verifier)
SigVerifyStage::new(find_packet_sender_stake_receiver, verified_sender, verifier)
};
let (verified_tpu_vote_packets_sender, verified_tpu_vote_packets_receiver) = unbounded();
@@ -125,7 +148,7 @@ impl Tpu {
let vote_sigverify_stage = {
let verifier = TransactionSigVerifier::new_reject_non_vote();
SigVerifyStage::new(
vote_packet_receiver,
vote_find_packet_sender_stake_receiver,
verified_tpu_vote_packets_sender,
verifier,
)
@@ -179,6 +202,8 @@ impl Tpu {
cluster_info_vote_listener,
broadcast_stage,
tpu_quic_t,
find_packet_sender_stake_stage,
vote_find_packet_sender_stake_stage,
}
}
@@ -189,6 +214,8 @@ impl Tpu {
self.vote_sigverify_stage.join(),
self.cluster_info_vote_listener.join(),
self.banking_stage.join(),
self.find_packet_sender_stake_stage.join(),
self.vote_find_packet_sender_stake_stage.join(),
];
self.tpu_quic_t.join()?;
let broadcast_result = self.broadcast_stage.join();
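For orientation, a condensed sketch of the packet flow this wiring introduces (constructors as in the diff above; `packet_receiver`, `bank_forks`, `cluster_info`, and `verified_sender` are assumed to be in scope):

```rust
// FetchStage ─▶ FindPacketSenderStakeStage ─▶ SigVerifyStage ─▶ BankingStage,
// with an identical side path for vote packets.
let (stake_tx, stake_rx) = crossbeam_channel::unbounded();
let stake_stage = FindPacketSenderStakeStage::new(
    packet_receiver, // from FetchStage
    stake_tx,
    bank_forks.clone(),
    cluster_info.clone(),
);
let sigverify_stage = SigVerifyStage::new(stake_rx, verified_sender, TransactionSigVerifier::default());
```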

View File

@@ -98,7 +98,6 @@ pub struct TvuConfig {
pub accounts_hash_fault_injection_slots: u64,
pub accounts_db_caching_enabled: bool,
pub test_hash_calculation: bool,
pub use_index_hash_calculation: bool,
pub rocksdb_compaction_interval: Option<u64>,
pub rocksdb_max_compaction_jitter: Option<u64>,
pub wait_for_vote_to_start_leader: bool,
@@ -230,7 +229,6 @@ impl Tvu {
tvu_config.halt_on_known_validators_accounts_hash_mismatch,
tvu_config.accounts_hash_fault_injection_slots,
snapshot_config.clone(),
blockstore.ledger_path().to_path_buf(),
);
let (snapshot_request_sender, snapshot_request_handler) = match snapshot_config {
@@ -359,7 +357,6 @@ impl Tvu {
accounts_background_request_handler,
tvu_config.accounts_db_caching_enabled,
tvu_config.test_hash_calculation,
tvu_config.use_index_hash_calculation,
last_full_snapshot_slot,
);

View File

@@ -37,6 +37,17 @@ pub struct DeserializedPacketBatch {
pub unprocessed_packets: HashMap<usize, DeserializedPacket>,
}
/// Reference to a packet in `UnprocessedPacketBatches`, where
/// - `batch_index` locates the `DeserializedPacketBatch` within the buffer,
/// - `packet_index` locates the `packet` within `DeserializedPacketBatch.packet_batch`
#[derive(Debug, Default)]
pub struct PacketLocator {
#[allow(dead_code)]
batch_index: usize,
#[allow(dead_code)]
packet_index: usize,
}
/// Currently each banking_stage thread has an `UnprocessedPacketBatches` buffer to store
/// `PacketBatch`es received from sigverify. The banking thread continuously scans the buffer
/// to pick suitable packets to add to the block.
@@ -79,6 +90,50 @@ impl UnprocessedPacketBatches {
pub fn with_capacity(capacity: usize) -> Self {
UnprocessedPacketBatches(VecDeque::with_capacity(capacity))
}
/// Returns total number of all packets (including unprocessed and processed) in buffer
#[allow(dead_code)]
fn get_packets_count(&self) -> usize {
self.iter()
.map(|deserialized_packet_batch| deserialized_packet_batch.packet_batch.packets.len())
.sum()
}
/// Returns total number of unprocessed packets in buffer
#[allow(dead_code)]
fn get_unprocessed_packets_count(&self) -> usize {
self.iter()
.map(|deserialized_packet_batch| deserialized_packet_batch.unprocessed_packets.len())
.sum()
}
/// Iterates the inner `Vec<DeserializedPacketBatch>`.
/// Returns the flattened result of mapping each
/// `DeserializedPacketBatch` to a list of the sender stakes of the
/// batch's unprocessed packets, paired with each packet's `PacketLocator`
/// within the `Vec<DeserializedPacketBatch>`.
#[allow(dead_code)]
fn get_stakes_and_locators(&self) -> (Vec<u64>, Vec<PacketLocator>) {
self.iter()
.enumerate()
.flat_map(|(batch_index, deserialized_packet_batch)| {
let packet_batch = &deserialized_packet_batch.packet_batch;
deserialized_packet_batch
.unprocessed_packets
.keys()
.map(move |packet_index| {
let p = &packet_batch.packets[*packet_index];
(
p.meta.sender_stake,
PacketLocator {
batch_index,
packet_index: *packet_index,
},
)
})
})
.unzip()
}
}
impl DeserializedPacketBatch {
@@ -135,8 +190,8 @@ impl DeserializedPacketBatch {
Some(&packet.data[msg_start..msg_end])
}
// Returns whether the given `PacketBatch` has any more remaining unprocessed
// transactions
/// Returns whether the given `PacketBatch` has any more remaining unprocessed
/// transactions
pub fn update_buffered_packets_with_new_unprocessed(
&mut self,
_original_unprocessed_indexes: &[usize],
@@ -159,8 +214,24 @@ mod tests {
use {
super::*,
solana_sdk::{signature::Keypair, system_transaction},
std::net::IpAddr,
};
fn packet_with_sender_stake(sender_stake: u64, ip: Option<IpAddr>) -> Packet {
let tx = system_transaction::transfer(
&Keypair::new(),
&solana_sdk::pubkey::new_rand(),
1,
Hash::new_unique(),
);
let mut packet = Packet::from_data(None, &tx).unwrap();
packet.meta.sender_stake = sender_stake;
if let Some(ip) = ip {
packet.meta.addr = ip;
}
packet
}
#[test]
fn test_packet_message() {
let keypair = Keypair::new();
@@ -175,4 +246,92 @@ mod tests {
transaction.message_data()
);
}
#[test]
fn test_get_packets_count() {
// create a buffer with 3 batches, each with 2 packets, of which only the first is marked unprocessed
let batch_size = 2usize;
let batch_count = 3usize;
let unprocessed_packet_batches: UnprocessedPacketBatches = (0..batch_count)
.map(|_batch_index| {
DeserializedPacketBatch::new(
PacketBatch::new(
(0..batch_size)
.map(|packet_index| packet_with_sender_stake(packet_index as u64, None))
.collect(),
),
vec![0],
false,
)
})
.collect();
// Assert the total packet count and the unprocessed packet count
assert_eq!(
batch_size * batch_count,
unprocessed_packet_batches.get_packets_count()
);
assert_eq!(
batch_count,
unprocessed_packet_batches.get_unprocessed_packets_count()
);
}
#[test]
fn test_get_stakes_and_locators_from_empty_buffer() {
let unprocessed_packet_batches = UnprocessedPacketBatches::default();
let (stakes, locators) = unprocessed_packet_batches.get_stakes_and_locators();
assert!(stakes.is_empty());
assert!(locators.is_empty());
}
#[test]
fn test_get_stakes_and_locators() {
solana_logger::setup();
// setup senders' address and stake
let senders: Vec<(IpAddr, u64)> = vec![
(IpAddr::from([127, 0, 0, 1]), 1),
(IpAddr::from([127, 0, 0, 2]), 2),
(IpAddr::from([127, 0, 0, 3]), 3),
];
// create a buffer with 3 batches, each with 2 packets from the senders above.
// The buffer looks like:
// [127.0.0.1, 127.0.0.2]
// [127.0.0.3, 127.0.0.1]
// [127.0.0.2, 127.0.0.3]
let batch_size = 2usize;
let batch_count = 3usize;
let unprocessed_packet_batches: UnprocessedPacketBatches = (0..batch_count)
.map(|batch_index| {
DeserializedPacketBatch::new(
PacketBatch::new(
(0..batch_size)
.map(|packet_index| {
let n = (batch_index * batch_size + packet_index) % senders.len();
packet_with_sender_stake(senders[n].1, Some(senders[n].0))
})
.collect(),
),
(0..batch_size).collect(),
false,
)
})
.collect();
let (stakes, locators) = unprocessed_packet_batches.get_stakes_and_locators();
// Produced stakes and locators should both have "batch_size * batch_count" entries.
assert_eq!(batch_size * batch_count, stakes.len());
assert_eq!(batch_size * batch_count, locators.len());
// Assert stakes and locators are paired correctly
locators.iter().enumerate().for_each(|(index, locator)| {
assert_eq!(
stakes[index],
senders[(locator.batch_index * batch_size + locator.packet_index) % senders.len()]
.1
);
});
}
}
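One caveat the last test encodes implicitly: `unprocessed_packets` is a `HashMap`, so `get_stakes_and_locators` yields packets in no guaranteed order; the only invariant is that each stake is paired with the locator of the packet it came from. A standalone sketch of that check, with names mirroring the test fixtures (not part of the crate's API):

```rust
// Editor's sketch: verify stake/locator pairing independently of iteration order.
fn assert_pairing(
    stakes: &[u64],
    locators: &[PacketLocator],
    senders: &[(std::net::IpAddr, u64)],
    batch_size: usize,
) {
    for (stake, locator) in stakes.iter().zip(locators.iter()) {
        let n = (locator.batch_index * batch_size + locator.packet_index) % senders.len();
        assert_eq!(*stake, senders[n].1);
    }
}
```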

View File

@@ -38,7 +38,7 @@ use {
blockstore::{
Blockstore, BlockstoreError, BlockstoreSignals, CompletedSlotsReceiver, PurgeType,
},
blockstore_db::{BlockstoreAdvancedOptions, BlockstoreOptions, BlockstoreRecoveryMode},
blockstore_db::{BlockstoreOptions, BlockstoreRecoveryMode, LedgerColumnOptions},
blockstore_processor::{self, TransactionStatusSender},
leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache,
@@ -162,13 +162,12 @@ pub struct ValidatorConfig {
pub warp_slot: Option<Slot>,
pub accounts_db_test_hash_calculation: bool,
pub accounts_db_skip_shrink: bool,
pub accounts_db_use_index_hash_calculation: bool,
pub tpu_coalesce_ms: u64,
pub validator_exit: Arc<RwLock<Exit>>,
pub no_wait_for_vote_to_start_leader: bool,
pub accounts_shrink_ratio: AccountShrinkThreshold,
pub wait_to_vote_slot: Option<Slot>,
pub blockstore_advanced_options: BlockstoreAdvancedOptions,
pub ledger_column_options: LedgerColumnOptions,
}
impl Default for ValidatorConfig {
@@ -223,14 +222,13 @@ impl Default for ValidatorConfig {
warp_slot: None,
accounts_db_test_hash_calculation: false,
accounts_db_skip_shrink: false,
accounts_db_use_index_hash_calculation: true,
tpu_coalesce_ms: DEFAULT_TPU_COALESCE_MS,
validator_exit: Arc::new(RwLock::new(Exit::default())),
no_wait_for_vote_to_start_leader: true,
accounts_shrink_ratio: AccountShrinkThreshold::default(),
accounts_db_config: None,
wait_to_vote_slot: None,
blockstore_advanced_options: BlockstoreAdvancedOptions::default(),
ledger_column_options: LedgerColumnOptions::default(),
}
}
}
@@ -915,7 +913,6 @@ impl Validator {
accounts_hash_fault_injection_slots: config.accounts_hash_fault_injection_slots,
accounts_db_caching_enabled: config.accounts_db_caching_enabled,
test_hash_calculation: config.accounts_db_test_hash_calculation,
use_index_hash_calculation: config.accounts_db_use_index_hash_calculation,
rocksdb_compaction_interval: config.rocksdb_compaction_interval,
rocksdb_max_compaction_jitter: config.rocksdb_compaction_interval,
wait_for_vote_to_start_leader,
@@ -1297,7 +1294,8 @@ fn load_blockstore(
ledger_path,
BlockstoreOptions {
recovery_mode: config.wal_recovery_mode.clone(),
advanced_options: config.blockstore_advanced_options.clone(),
column_options: config.ledger_column_options.clone(),
enforce_ulimit_nofile: config.enforce_ulimit_nofile,
..BlockstoreOptions::default()
},
)
@@ -1331,7 +1329,7 @@ fn load_blockstore(
blockstore.clone(),
exit,
enable_rpc_transaction_history,
config.rpc_config.enable_cpi_and_log_storage,
config.rpc_config.enable_extended_tx_metadata_storage,
transaction_notifier,
)
} else {
@@ -1538,7 +1536,7 @@ fn initialize_rpc_transaction_history_services(
blockstore: Arc<Blockstore>,
exit: &Arc<AtomicBool>,
enable_rpc_transaction_history: bool,
enable_cpi_and_log_storage: bool,
enable_extended_tx_metadata_storage: bool,
transaction_notifier: Option<TransactionNotifierLock>,
) -> TransactionHistoryServices {
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
@@ -1552,7 +1550,7 @@ fn initialize_rpc_transaction_history_services(
enable_rpc_transaction_history,
transaction_notifier.clone(),
blockstore.clone(),
enable_cpi_and_log_storage,
enable_extended_tx_metadata_storage,
exit,
));
@@ -1799,7 +1797,6 @@ mod tests {
std::fs::remove_dir_all,
};
#[test]
fn validator_exit() {
solana_logger::setup();
let leader_keypair = Keypair::new();
@@ -1879,7 +1876,6 @@ mod tests {
}
}
#[test]
fn validator_parallel_exit() {
let leader_keypair = Keypair::new();
let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());
@@ -1927,6 +1923,12 @@ mod tests {
}
}
#[test]
fn test_validator_exit() {
validator_exit();
validator_parallel_exit();
}
#[test]
fn test_wait_for_supermajority() {
solana_logger::setup();

View File

@@ -1,6 +1,7 @@
use {
crate::tower_storage::{SavedTowerVersions, TowerStorage},
crossbeam_channel::Receiver,
solana_client::connection_cache::send_wire_transaction,
solana_gossip::cluster_info::ClusterInfo,
solana_measure::measure::Measure,
solana_poh::poh_recorder::PohRecorder,
@@ -86,7 +87,13 @@ impl VotingService {
} else {
crate::banking_stage::next_leader_tpu(cluster_info, poh_recorder)
};
let _ = cluster_info.send_transaction(vote_op.tx(), target_address);
let mut measure = Measure::start("vote_tx_send-ms");
let target_address = target_address.unwrap_or_else(|| cluster_info.my_contact_info().tpu);
let wire_vote_tx = bincode::serialize(vote_op.tx()).expect("vote serialization failure");
let _ = send_wire_transaction(&wire_vote_tx, &target_address);
measure.stop();
inc_new_counter_info!("vote_tx_send-ms", measure.as_ms() as usize);
match vote_op {
VoteOp::PushVote {

View File

@@ -604,7 +604,7 @@ impl WindowService {
}
if last_print.elapsed().as_secs() > 2 {
metrics.report_metrics("recv-window-insert-shreds");
metrics.report_metrics("blockstore-insert-shreds");
metrics = BlockstoreInsertionMetrics::default();
ws_metrics.report_metrics("recv-window-insert-shreds");
ws_metrics = WindowServiceMetrics::default();

View File

@@ -10,7 +10,7 @@ mod tests {
solana_ledger::{
blockstore::{make_many_slot_shreds, Blockstore},
blockstore_db::{
BlockstoreAdvancedOptions, BlockstoreOptions, BlockstoreRocksFifoOptions,
BlockstoreOptions, BlockstoreRocksFifoOptions, LedgerColumnOptions,
ShredStorageType,
},
get_tmp_ledger_path,
@@ -351,13 +351,14 @@ mod tests {
&ledger_path,
if config.fifo_compaction {
BlockstoreOptions {
advanced_options: BlockstoreAdvancedOptions {
column_options: LedgerColumnOptions {
shred_storage_type: ShredStorageType::RocksFifo(
BlockstoreRocksFifoOptions {
shred_data_cf_size: config.shred_data_cf_size,
..BlockstoreRocksFifoOptions::default()
},
),
..LedgerColumnOptions::default()
},
..BlockstoreOptions::default()
}

View File

@@ -265,8 +265,7 @@ mod tests {
// set_root should send a snapshot request
bank_forks.set_root(bank.slot(), &request_sender, None);
bank.update_accounts_hash();
snapshot_request_handler
.handle_snapshot_requests(false, false, false, 0, &mut None);
snapshot_request_handler.handle_snapshot_requests(false, false, 0, &mut None);
}
}
@@ -707,7 +706,6 @@ mod tests {
bank_forks.set_root(bank.slot(), &request_sender, None);
bank.update_accounts_hash();
snapshot_request_handler.handle_snapshot_requests(
false,
false,
false,
0,
@@ -931,7 +929,6 @@ mod tests {
true,
);
let tmpdir = TempDir::new().unwrap();
let accounts_hash_verifier = AccountsHashVerifier::new(
accounts_package_receiver,
Some(pending_snapshot_package),
@@ -941,7 +938,6 @@ mod tests {
false,
0,
Some(snapshot_test_config.snapshot_config.clone()),
tmpdir.path().to_path_buf(),
);
let accounts_background_service = AccountsBackgroundService::new(
@@ -949,7 +945,6 @@ mod tests {
&exit,
abs_request_handler,
false,
false,
true,
None,
);

View File

@@ -681,8 +681,8 @@ console.log(`Stake balance: ${stakeBalance}`)
// We can verify the state of our stake. This may take some time to become active
let stakeState = await connection.getStakeActivation(stakeAccount.publicKey);
console.log(`Stake Stake: ${stakeState.state}`);
// Stake State: inactive
console.log(`Stake state: ${stakeState.state}`);
// Stake state: inactive
// To delegate our stake, we get the current vote accounts and choose the first
let voteAccounts = await connection.getVoteAccounts();

View File

@@ -1531,7 +1531,7 @@ Returns the latest blockhash
- `RpcResponse<object>` - RpcResponse JSON object with `value` field set to a JSON object including:
- `blockhash: <string>` - a Hash as base-58 encoded string
- `lastValidBlockHeight: u64` - Slot
- `lastValidBlockHeight: <u64>` - last [block height](../../terminology.md#block-height) at which the blockhash will be valid
#### Example:
@@ -3059,7 +3059,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
Result:
```json
{ "jsonrpc": "2.0", "result": { "solana-core": "1.10.3" }, "id": 1 }
{ "jsonrpc": "2.0", "result": { "solana-core": "1.11.0" }, "id": 1 }
```
### getVoteAccounts
@@ -5147,7 +5147,7 @@ The result will be an RpcResponse JSON object with `value` set to a JSON object
- `blockhash: <string>` - a Hash as base-58 encoded string
- `feeCalculator: <object>` - FeeCalculator object, the fee schedule for this block hash
- `lastValidSlot: <u64>` - DEPRECATED - this value is inaccurate and should not be relied upon
- `lastValidBlockHeight: <u64>` - last [block height](../../terminology.md#block-height) at which a blockhash will be valid
- `lastValidBlockHeight: <u64>` - last [block height](../../terminology.md#block-height) at which the blockhash will be valid
#### Example:

View File

@@ -2,29 +2,31 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-dos"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
description = "Tool to send various requests to cluster in order to evaluate the effect on performance"
[dependencies]
bincode = "1.3.3"
clap = "2.33.1"
clap = {version = "3.1.5", features = ["derive", "cargo"]}
log = "0.4.14"
rand = "0.7.0"
solana-client = { path = "../client", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.10.3" }
solana-gossip = { path = "../gossip", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
serde = "1.0.136"
solana-client = { path = "../client", version = "=1.11.0" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-gossip = { path = "../gossip", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.10.3" }
solana-local-cluster = { path = "../local-cluster", version = "=1.11.0" }

View File

@@ -1,12 +1,50 @@
//! DoS tool
//!
//! Sends requests to the cluster in a loop to measure
//! the effect of handling these requests on the cluster's performance.
//!
//! * the `mode` argument defines the interface to use (e.g. rpc, tvu, tpu)
//! * the `data-type` argument specifies the type of the request.
//! Some request types may be used only with a particular `mode` value.
//! For example, `get-account-info` is valid only with `mode=rpc`.
//!
//! Most options apply to `data-type = transaction`.
//! These options allow composing a transaction that fails at
//! a particular stage of the processing pipeline.
//!
//! Example 1: send random transactions to TPU
//! ```bash
//! solana-dos --entrypoint 127.0.0.1:8001 --mode tpu --data-type random
//! ```
//!
//! Example 2: send unique transactions with valid recent blockhash to TPU
//! ```bash
//! solana-dos --entrypoint 127.0.0.1:8001 --mode tpu \
//! --data-type transaction --unique-transactions \
//! --payer config/bootstrap-validator/identity.json \
//! --valid-blockhash
//! ```
//!
#![allow(clippy::integer_arithmetic)]
use {
clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg},
clap::{crate_description, crate_name, crate_version, ArgEnum, Args, Parser},
log::*,
rand::{thread_rng, Rng},
serde::{Deserialize, Serialize},
solana_client::rpc_client::RpcClient,
solana_core::serve_repair::RepairProtocol,
solana_gossip::{contact_info::ContactInfo, gossip_service::discover},
solana_sdk::pubkey::Pubkey,
solana_sdk::{
hash::Hash,
instruction::{AccountMeta, CompiledInstruction, Instruction},
pubkey::Pubkey,
signature::{read_keypair_file, Keypair, Signature, Signer},
stake,
system_instruction::SystemInstruction,
system_program,
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
std::{
net::{SocketAddr, UdpSocket},
@@ -23,38 +61,151 @@ fn get_repair_contact(nodes: &[ContactInfo]) -> ContactInfo {
contact
}
struct TransactionGenerator {
blockhash: Hash,
last_generated: Instant,
transaction_params: TransactionParams,
cached_transaction: Option<Transaction>,
}
impl TransactionGenerator {
fn new(transaction_params: TransactionParams) -> Self {
TransactionGenerator {
blockhash: Hash::default(),
last_generated: (Instant::now() - Duration::from_secs(100)),
transaction_params,
cached_transaction: None,
}
}
fn generate(&mut self, payer: Option<&Keypair>, rpc_client: &Option<RpcClient>) -> Transaction {
if !self.transaction_params.unique_transactions && self.cached_transaction.is_some() {
return self.cached_transaction.as_ref().unwrap().clone();
}
// generate a new blockhash every 1sec
if self.transaction_params.valid_blockhash
&& self.last_generated.elapsed().as_millis() > 1000
{
self.blockhash = rpc_client.as_ref().unwrap().get_latest_blockhash().unwrap();
self.last_generated = Instant::now();
}
// In order to evaluate the performance implications of different transactions,
// we create transactions that are filtered out at different stages of the processing pipeline
// create an arbitrary valid instruction
let lamports = 5;
let transfer_instruction = SystemInstruction::Transfer { lamports };
let program_ids = vec![system_program::id(), stake::program::id()];
// transaction with a payer: signatures are valid and num_signatures is irrelevant;
// a random payer will cause the error "attempt to debit an account but found no record of a prior credit",
// while a correct payer will trigger an error about not enough signatures
let transaction = if let Some(payer) = payer {
let instruction = Instruction::new_with_bincode(
program_ids[0],
&transfer_instruction,
vec![
AccountMeta::new(program_ids[0], false),
AccountMeta::new(program_ids[1], false),
],
);
Transaction::new_signed_with_payer(
&[instruction],
Some(&payer.pubkey()),
&[payer],
self.blockhash,
)
} else if self.transaction_params.valid_signatures {
// Since we don't provide a payer, this transaction will end up
// filtered at legacy.rs sanitize method (banking_stage) with error "a program cannot be payer"
let kpvals: Vec<Keypair> = (0..self.transaction_params.num_signatures)
.map(|_| Keypair::new())
.collect();
let keypairs: Vec<&Keypair> = kpvals.iter().collect();
let instructions = vec![CompiledInstruction::new(
0,
&transfer_instruction,
vec![0, 1],
)];
Transaction::new_with_compiled_instructions(
&keypairs,
&[],
self.blockhash,
program_ids,
instructions,
)
} else {
// Since we provided invalid signatures
// this transaction will end up filtered at legacy.rs (banking_stage) because
// num_required_signatures == 0
let instructions = vec![CompiledInstruction::new(
0,
&transfer_instruction,
vec![0, 1],
)];
let mut tx = Transaction::new_with_compiled_instructions(
&[] as &[&Keypair; 0],
&[],
self.blockhash,
program_ids,
instructions,
);
tx.signatures = vec![Signature::new_unique(); self.transaction_params.num_signatures];
tx
};
// if we need to generate only one transaction, we cache it to reuse later
if !self.transaction_params.unique_transactions {
self.cached_transaction = Some(transaction.clone());
}
transaction
}
}
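A hedged usage sketch of the generator above; `payer: Keypair` and `rpc_client: Option<RpcClient>` are assumed to be in scope, and `rpc_client` must be `Some` whenever `valid_blockhash` is requested, since `generate` unwraps it to fetch a fresh blockhash:

```rust
// TransactionParams derives Default (see the struct definition further below).
let params = TransactionParams {
    num_signatures: 2,
    valid_blockhash: true,     // generate() refreshes the blockhash at most once per second
    unique_transactions: true, // disables the cached-transaction fast path
    ..TransactionParams::default()
};
let mut generator = TransactionGenerator::new(params);
let tx = generator.generate(Some(&payer), &rpc_client);
```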
fn run_dos(
nodes: &[ContactInfo],
iterations: usize,
entrypoint_addr: SocketAddr,
data_type: String,
data_size: usize,
mode: String,
data_input: Option<String>,
payer: Option<&Keypair>,
params: DosClientParameters,
) {
let mut target = None;
let mut rpc_client = None;
if nodes.is_empty() {
if mode == "rpc" {
rpc_client = Some(RpcClient::new_socket(entrypoint_addr));
if params.mode == Mode::Rpc {
rpc_client = Some(RpcClient::new_socket(params.entrypoint_addr));
}
target = Some(entrypoint_addr);
target = Some(params.entrypoint_addr);
} else {
info!("************ NODE ***********");
for node in nodes {
if node.gossip == entrypoint_addr {
target = match mode.as_str() {
"gossip" => Some(node.gossip),
"tvu" => Some(node.tvu),
"tvu_forwards" => Some(node.tvu_forwards),
"tpu" => Some(node.tpu),
"tpu_forwards" => Some(node.tpu_forwards),
"repair" => Some(node.repair),
"serve_repair" => Some(node.serve_repair),
"rpc" => {
info!("{:?}", node);
}
info!("ADDR = {}", params.entrypoint_addr);
for node in nodes {
if node.gossip == params.entrypoint_addr {
info!("{}", node.gossip);
target = match params.mode {
Mode::Gossip => Some(node.gossip),
Mode::Tvu => Some(node.tvu),
Mode::TvuForwards => Some(node.tvu_forwards),
Mode::Tpu => {
rpc_client = Some(RpcClient::new_socket(node.rpc));
Some(node.tpu)
}
Mode::TpuForwards => Some(node.tpu_forwards),
Mode::Repair => Some(node.repair),
Mode::ServeRepair => Some(node.serve_repair),
Mode::Rpc => {
rpc_client = Some(RpcClient::new_socket(node.rpc));
None
}
&_ => panic!("Unknown mode"),
};
break;
}
@@ -62,74 +213,83 @@ fn run_dos(
}
let target = target.expect("should have target");
info!("Targetting {}", target);
info!("Targeting {}", target);
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut data = Vec::new();
let mut transaction_generator = None;
match data_type.as_str() {
"repair_highest" => {
match params.data_type {
DataType::RepairHighest => {
let slot = 100;
let req = RepairProtocol::WindowIndexWithNonce(get_repair_contact(nodes), slot, 0, 0);
data = bincode::serialize(&req).unwrap();
}
"repair_shred" => {
DataType::RepairShred => {
let slot = 100;
let req =
RepairProtocol::HighestWindowIndexWithNonce(get_repair_contact(nodes), slot, 0, 0);
data = bincode::serialize(&req).unwrap();
}
"repair_orphan" => {
DataType::RepairOrphan => {
let slot = 100;
let req = RepairProtocol::OrphanWithNonce(get_repair_contact(nodes), slot, 0);
data = bincode::serialize(&req).unwrap();
}
"random" => {
data.resize(data_size, 0);
DataType::Random => {
data.resize(params.data_size, 0);
}
"transaction" => {
let tx = solana_perf::test_tx::test_tx();
DataType::Transaction => {
let tp = params.transaction_params;
info!("{:?}", tp);
transaction_generator = Some(TransactionGenerator::new(tp));
let tx = transaction_generator
.as_mut()
.unwrap()
.generate(payer, &rpc_client);
info!("{:?}", tx);
data = bincode::serialize(&tx).unwrap();
}
"get_account_info" => {}
"get_program_accounts" => {}
&_ => {
panic!("unknown data type");
}
DataType::GetAccountInfo => {}
DataType::GetProgramAccounts => {}
}
let mut last_log = Instant::now();
let mut count = 0;
let mut error_count = 0;
loop {
if mode == "rpc" {
match data_type.as_str() {
"get_account_info" => {
let res = rpc_client
.as_ref()
.unwrap()
.get_account(&Pubkey::from_str(data_input.as_ref().unwrap()).unwrap());
if res.is_err() {
error_count += 1;
}
}
"get_program_accounts" => {
let res = rpc_client.as_ref().unwrap().get_program_accounts(
&Pubkey::from_str(data_input.as_ref().unwrap()).unwrap(),
if params.mode == Mode::Rpc {
match params.data_type {
DataType::GetAccountInfo => {
let res = rpc_client.as_ref().unwrap().get_account(
&Pubkey::from_str(params.data_input.as_ref().unwrap()).unwrap(),
);
if res.is_err() {
error_count += 1;
}
}
&_ => {
DataType::GetProgramAccounts => {
let res = rpc_client.as_ref().unwrap().get_program_accounts(
&Pubkey::from_str(params.data_input.as_ref().unwrap()).unwrap(),
);
if res.is_err() {
error_count += 1;
}
}
_ => {
panic!("unsupported data type");
}
}
} else {
if data_type == "random" {
if params.data_type == DataType::Random {
thread_rng().fill(&mut data[..]);
}
if let Some(tg) = transaction_generator.as_mut() {
let tx = tg.generate(payer, &rpc_client);
info!("{:?}", tx);
data = bincode::serialize(&tx).unwrap();
}
let res = socket.send_to(&data, target);
if res.is_err() {
error_count += 1;
@@ -147,126 +307,173 @@ fn run_dos(
}
}
// command line parsing
#[derive(Parser)]
#[clap(name = crate_name!(),
version = crate_version!(),
about = crate_description!(),
rename_all = "kebab-case"
)]
struct DosClientParameters {
#[clap(long, arg_enum, help = "Interface to DoS")]
mode: Mode,
#[clap(long, arg_enum, help = "Type of data to send")]
data_type: DataType,
#[clap(
long = "entrypoint",
parse(try_from_str = addr_parser),
default_value = "127.0.0.1:8001",
help = "Gossip entrypoint address. Usually <ip>:8001"
)]
entrypoint_addr: SocketAddr,
#[clap(
long,
default_value = "128",
required_if_eq("data-type", "random"),
help = "Size of packet to DoS with, relevant only for data-type=random"
)]
data_size: usize,
#[clap(long, help = "Data to send [Optional]")]
data_input: Option<String>,
#[clap(long, help = "Just use entrypoint address directly")]
skip_gossip: bool,
#[clap(long, help = "Allow contacting private ip addresses")]
allow_private_addr: bool,
#[clap(flatten)]
transaction_params: TransactionParams,
}
#[derive(Args, Serialize, Deserialize, Debug, Default)]
#[clap(rename_all = "kebab-case")]
struct TransactionParams {
#[clap(
long,
default_value = "2",
help = "Number of signatures in transaction"
)]
num_signatures: usize,
#[clap(long, help = "Generate a valid blockhash for transaction")]
valid_blockhash: bool,
#[clap(long, help = "Generate valid signature(s) for transaction")]
valid_signatures: bool,
#[clap(long, help = "Generate unique transactions")]
unique_transactions: bool,
#[clap(
long = "payer",
help = "Payer's keypair file to fund transactions [Optional]"
)]
payer_filename: Option<String>,
}
#[derive(ArgEnum, Clone, Eq, PartialEq)]
enum Mode {
Gossip,
Tvu,
TvuForwards,
Tpu,
TpuForwards,
Repair,
ServeRepair,
Rpc,
}
#[derive(ArgEnum, Clone, Eq, PartialEq)]
enum DataType {
RepairHighest,
RepairShred,
RepairOrphan,
Random,
GetAccountInfo,
GetProgramAccounts,
Transaction,
}
fn addr_parser(addr: &str) -> Result<SocketAddr, &'static str> {
match solana_net_utils::parse_host_port(addr) {
Ok(v) => Ok(v),
Err(_) => Err("failed to parse entrypoint address"),
}
}
/// Input checks not covered by Clap
fn validate_input(params: &DosClientParameters) {
if params.mode == Mode::Rpc
&& (params.data_type != DataType::GetAccountInfo
&& params.data_type != DataType::GetProgramAccounts)
{
panic!("unsupported data type");
}
if params.data_type != DataType::Transaction {
let tp = &params.transaction_params;
if tp.valid_blockhash
|| tp.valid_signatures
|| tp.unique_transactions
|| tp.payer_filename.is_some()
{
println!("Arguments valid-blockhash, valid-sign, unique-trans, payer are ignored if data-type != transaction");
}
}
if params.transaction_params.payer_filename.is_some()
&& params.transaction_params.valid_signatures
{
println!("Arguments valid-signatures is ignored if payer is provided");
}
}
fn main() {
solana_logger::setup_with_default("solana=info");
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())
.arg(
Arg::with_name("entrypoint")
.long("entrypoint")
.takes_value(true)
.value_name("HOST:PORT")
.help("Gossip entrypoint address. Usually <ip>:8001"),
)
.arg(
Arg::with_name("mode")
.long("mode")
.takes_value(true)
.value_name("MODE")
.possible_values(&[
"gossip",
"tvu",
"tvu_forwards",
"tpu",
"tpu_forwards",
"repair",
"serve_repair",
"rpc",
])
.help("Interface to DoS"),
)
.arg(
Arg::with_name("data_size")
.long("data-size")
.takes_value(true)
.value_name("BYTES")
.help("Size of packet to DoS with"),
)
.arg(
Arg::with_name("data_type")
.long("data-type")
.takes_value(true)
.value_name("TYPE")
.possible_values(&[
"repair_highest",
"repair_shred",
"repair_orphan",
"random",
"get_account_info",
"get_program_accounts",
"transaction",
])
.help("Type of data to send"),
)
.arg(
Arg::with_name("data_input")
.long("data-input")
.takes_value(true)
.value_name("TYPE")
.help("Data to send"),
)
.arg(
Arg::with_name("skip_gossip")
.long("skip-gossip")
.help("Just use entrypoint address directly"),
)
.arg(
Arg::with_name("allow_private_addr")
.long("allow-private-addr")
.takes_value(false)
.help("Allow contacting private ip addresses")
.hidden(true),
)
.get_matches();
let mut entrypoint_addr = SocketAddr::from(([127, 0, 0, 1], 8001));
if let Some(addr) = matches.value_of("entrypoint") {
entrypoint_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
eprintln!("failed to parse entrypoint address: {}", e);
exit(1)
});
}
let data_size = value_t!(matches, "data_size", usize).unwrap_or(128);
let skip_gossip = matches.is_present("skip_gossip");
let mode = value_t_or_exit!(matches, "mode", String);
let data_type = value_t_or_exit!(matches, "data_type", String);
let data_input = value_t!(matches, "data_input", String).ok();
let cmd_params = DosClientParameters::parse();
validate_input(&cmd_params);
let mut nodes = vec![];
if !skip_gossip {
info!("Finding cluster entry: {:?}", entrypoint_addr);
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
if !cmd_params.skip_gossip {
info!("Finding cluster entry: {:?}", cmd_params.entrypoint_addr);
let socket_addr_space = SocketAddrSpace::new(cmd_params.allow_private_addr);
let (gossip_nodes, _validators) = discover(
None, // keypair
Some(&entrypoint_addr),
None, // num_nodes
Duration::from_secs(60), // timeout
None, // find_node_by_pubkey
Some(&entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
Some(&cmd_params.entrypoint_addr),
None, // num_nodes
Duration::from_secs(60), // timeout
None, // find_node_by_pubkey
Some(&cmd_params.entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
socket_addr_space,
)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
eprintln!(
"Failed to discover {} node: {:?}",
cmd_params.entrypoint_addr, err
);
exit(1);
});
nodes = gossip_nodes;
}
info!("done found {} nodes", nodes.len());
let payer = cmd_params
.transaction_params
.payer_filename
.as_ref()
.map(|keypair_file_name| {
read_keypair_file(&keypair_file_name)
.unwrap_or_else(|_| panic!("bad keypair {:?}", keypair_file_name))
});
run_dos(
&nodes,
0,
entrypoint_addr,
data_type,
data_size,
mode,
data_input,
);
run_dos(&nodes, 0, payer.as_ref(), cmd_params);
}
#[cfg(test)]
@@ -284,34 +491,207 @@ pub mod test {
timestamp(),
)];
let entrypoint_addr = nodes[0].gossip;
run_dos(
&nodes,
1,
entrypoint_addr,
"random".to_string(),
10,
"tvu".to_string(),
None,
DosClientParameters {
entrypoint_addr,
mode: Mode::Tvu,
data_size: 10,
data_type: DataType::Random,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams::default(),
},
);
run_dos(
&nodes,
1,
entrypoint_addr,
"repair_highest".to_string(),
10,
"repair".to_string(),
None,
DosClientParameters {
entrypoint_addr,
mode: Mode::Repair,
data_size: 10,
data_type: DataType::RepairHighest,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams::default(),
},
);
run_dos(
&nodes,
1,
entrypoint_addr,
"repair_shred".to_string(),
10,
"serve_repair".to_string(),
None,
DosClientParameters {
entrypoint_addr,
mode: Mode::ServeRepair,
data_size: 10,
data_type: DataType::RepairShred,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams::default(),
},
);
}
#[test]
#[ignore]
fn test_dos_local_cluster_transactions() {
let num_nodes = 1;
let cluster =
LocalCluster::new_with_equal_stakes(num_nodes, 100, 3, SocketAddrSpace::Unspecified);
assert_eq!(cluster.validators.len(), num_nodes);
let nodes = cluster.get_node_pubkeys();
let node = cluster.get_contact_info(&nodes[0]).unwrap().clone();
let nodes_slice = [node];
// send random transactions to TPU
// will be discarded on sigverify stage
run_dos(
&nodes_slice,
1,
None,
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 1024,
data_type: DataType::Random,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams::default(),
},
);
// send transactions to TPU with 2 random signatures
// will be filtered on dedup (because transactions are not unique)
run_dos(
&nodes_slice,
1,
None,
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 2,
valid_blockhash: false,
valid_signatures: false,
unique_transactions: false,
payer_filename: None,
},
},
);
// send *unique* transactions to TPU with 4 random signatures
// will be discarded on banking stage in legacy.rs
// ("there should be at least 1 RW fee-payer account")
run_dos(
&nodes_slice,
1,
None,
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 4,
valid_blockhash: false,
valid_signatures: false,
unique_transactions: true,
payer_filename: None,
},
},
);
// send unique transactions to TPU with 2 random signatures
// will be discarded on banking stage in legacy.rs (A program cannot be a payer)
// because we haven't provided a valid payer
run_dos(
&nodes_slice,
1,
None,
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 2,
valid_blockhash: false, // irrelevant without valid payer, because
// it will be filtered before blockhash validity checks
valid_signatures: true,
unique_transactions: true,
payer_filename: None,
},
},
);
// send unique transactions to TPU with a payer but without a valid blockhash
// will be discarded due to the invalid hash
run_dos(
&nodes_slice,
1,
Some(&cluster.funding_keypair),
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 2,
valid_blockhash: false,
valid_signatures: true,
unique_transactions: true,
payer_filename: None,
},
},
);
// send unique transactions to TPU with a valid blockhash
// will fail with error processing Instruction 0: missing required signature for instruction
run_dos(
&nodes_slice,
1,
Some(&cluster.funding_keypair),
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 2,
valid_blockhash: true,
valid_signatures: true,
unique_transactions: true,
payer_filename: None,
},
},
);
}
@@ -330,11 +710,23 @@ pub mod test {
run_dos(
&[node],
10_000_000,
cluster.entry_point_info.gossip,
"transaction".to_string(),
1000,
"tpu".to_string(),
None,
Some(&cluster.funding_keypair),
DosClientParameters {
entrypoint_addr: cluster.entry_point_info.gossip,
mode: Mode::Tpu,
data_size: 0, // irrelevant if not random
data_type: DataType::Transaction,
data_input: None,
skip_gossip: false,
allow_private_addr: false,
transaction_params: TransactionParams {
num_signatures: 2,
valid_blockhash: true,
valid_signatures: true,
unique_transactions: true,
payer_filename: None,
},
},
);
}
}

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
version = "1.10.3"
version = "1.11.0"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ console = "0.15.0"
indicatif = "0.16.2"
log = "0.4.14"
reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
[lib]
crate-type = ["lib"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-entry"
version = "1.10.3"
version = "1.11.0"
description = "Solana Entry"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -18,16 +18,16 @@ log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.1"
serde = "1.0.136"
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
[dev-dependencies]
matches = "0.1.9"
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[lib]
crate-type = ["lib"]

View File

@@ -14,6 +14,7 @@
"@cloudflare/stream-react": "^1.2.0",
"@metamask/jazzicon": "^2.0.0",
"@metaplex/js": "4.12.0",
"@project-serum/anchor": "^0.22.1",
"@project-serum/serum": "^0.13.61",
"@react-hook/debounce": "^4.0.0",
"@sentry/react": "^6.16.1",
@@ -4489,17 +4490,18 @@
}
},
"node_modules/@project-serum/anchor": {
"version": "0.11.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.11.1.tgz",
"integrity": "sha512-oIdm4vTJkUy6GmE6JgqDAuQPKI7XM4TPJkjtoIzp69RZe0iAD9JP2XHx7lV1jLdYXeYHqDXfBt3zcq7W91K6PA==",
"version": "0.22.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.22.1.tgz",
"integrity": "sha512-5pHeyvQhzLahIQ8aZymmDMZJAJFklN0joZdI+YIqFkK2uU/mlKr6rBLQjxysf/j1mLLiNG00tdyLfUtTAdQz7w==",
"dependencies": {
"@project-serum/borsh": "^0.2.2",
"@project-serum/borsh": "^0.2.5",
"@solana/web3.js": "^1.17.0",
"base64-js": "^1.5.1",
"bn.js": "^5.1.2",
"bs58": "^4.0.1",
"buffer-layout": "^1.2.0",
"buffer-layout": "^1.2.2",
"camelcase": "^5.3.1",
"cross-fetch": "^3.1.5",
"crypto-hash": "^1.3.0",
"eventemitter3": "^4.0.7",
"find": "^0.3.0",
@@ -4547,6 +4549,30 @@
"node": ">=10"
}
},
"node_modules/@project-serum/serum/node_modules/@project-serum/anchor": {
"version": "0.11.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.11.1.tgz",
"integrity": "sha512-oIdm4vTJkUy6GmE6JgqDAuQPKI7XM4TPJkjtoIzp69RZe0iAD9JP2XHx7lV1jLdYXeYHqDXfBt3zcq7W91K6PA==",
"dependencies": {
"@project-serum/borsh": "^0.2.2",
"@solana/web3.js": "^1.17.0",
"base64-js": "^1.5.1",
"bn.js": "^5.1.2",
"bs58": "^4.0.1",
"buffer-layout": "^1.2.0",
"camelcase": "^5.3.1",
"crypto-hash": "^1.3.0",
"eventemitter3": "^4.0.7",
"find": "^0.3.0",
"js-sha256": "^0.9.0",
"pako": "^2.0.3",
"snake-case": "^3.0.4",
"toml": "^3.0.0"
},
"engines": {
"node": ">=11"
}
},
"node_modules/@project-serum/serum/node_modules/@solana/spl-token": {
"version": "0.1.6",
"resolved": "https://registry.npmjs.org/@solana/spl-token/-/spl-token-0.1.6.tgz",
@@ -4594,6 +4620,11 @@
"node": ">=10"
}
},
"node_modules/@project-serum/serum/node_modules/pako": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/pako/-/pako-2.0.4.tgz",
"integrity": "sha512-v8tweI900AUkZN6heMU/4Uy4cXRc2AYNRggVmTR+dEncawDJgCdLMximOVA2p4qO57WMynangsfGRb5WD6L1Bg=="
},
"node_modules/@project-serum/sol-wallet-adapter": {
"version": "0.1.8",
"resolved": "https://registry.npmjs.org/@project-serum/sol-wallet-adapter/-/sol-wallet-adapter-0.1.8.tgz",
@@ -18534,9 +18565,9 @@
}
},
"node_modules/minimist": {
"version": "1.2.5",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
"integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw=="
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
"integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q=="
},
"node_modules/minipass": {
"version": "3.1.3",
@@ -18707,9 +18738,9 @@
"integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ=="
},
"node_modules/nanoid": {
"version": "3.1.23",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz",
"integrity": "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw==",
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.1.tgz",
"integrity": "sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw==",
"bin": {
"nanoid": "bin/nanoid.cjs"
},
@@ -30606,17 +30637,18 @@
"peer": true
},
"@project-serum/anchor": {
"version": "0.11.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.11.1.tgz",
"integrity": "sha512-oIdm4vTJkUy6GmE6JgqDAuQPKI7XM4TPJkjtoIzp69RZe0iAD9JP2XHx7lV1jLdYXeYHqDXfBt3zcq7W91K6PA==",
"version": "0.22.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.22.1.tgz",
"integrity": "sha512-5pHeyvQhzLahIQ8aZymmDMZJAJFklN0joZdI+YIqFkK2uU/mlKr6rBLQjxysf/j1mLLiNG00tdyLfUtTAdQz7w==",
"requires": {
"@project-serum/borsh": "^0.2.2",
"@project-serum/borsh": "^0.2.5",
"@solana/web3.js": "^1.17.0",
"base64-js": "^1.5.1",
"bn.js": "^5.1.2",
"bs58": "^4.0.1",
"buffer-layout": "^1.2.0",
"buffer-layout": "^1.2.2",
"camelcase": "^5.3.1",
"cross-fetch": "^3.1.5",
"crypto-hash": "^1.3.0",
"eventemitter3": "^4.0.7",
"find": "^0.3.0",
@@ -30654,6 +30686,27 @@
"buffer-layout": "^1.2.0"
},
"dependencies": {
"@project-serum/anchor": {
"version": "0.11.1",
"resolved": "https://registry.npmjs.org/@project-serum/anchor/-/anchor-0.11.1.tgz",
"integrity": "sha512-oIdm4vTJkUy6GmE6JgqDAuQPKI7XM4TPJkjtoIzp69RZe0iAD9JP2XHx7lV1jLdYXeYHqDXfBt3zcq7W91K6PA==",
"requires": {
"@project-serum/borsh": "^0.2.2",
"@solana/web3.js": "^1.17.0",
"base64-js": "^1.5.1",
"bn.js": "^5.1.2",
"bs58": "^4.0.1",
"buffer-layout": "^1.2.0",
"camelcase": "^5.3.1",
"crypto-hash": "^1.3.0",
"eventemitter3": "^4.0.7",
"find": "^0.3.0",
"js-sha256": "^0.9.0",
"pako": "^2.0.3",
"snake-case": "^3.0.4",
"toml": "^3.0.0"
}
},
"@solana/spl-token": {
"version": "0.1.6",
"resolved": "https://registry.npmjs.org/@solana/spl-token/-/spl-token-0.1.6.tgz",
@@ -30680,6 +30733,11 @@
"version": "10.0.0",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-10.0.0.tgz",
"integrity": "sha512-rlBi9d8jpv9Sf1klPjNfFAuWDjKLwTIJJ/VxtoTwIR6hnZxcEOQCZg2oIL3MWBYw5GpUDKOEnND7LXTbIpQ03Q=="
},
"pako": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/pako/-/pako-2.0.4.tgz",
"integrity": "sha512-v8tweI900AUkZN6heMU/4Uy4cXRc2AYNRggVmTR+dEncawDJgCdLMximOVA2p4qO57WMynangsfGRb5WD6L1Bg=="
}
}
},
@@ -41480,9 +41538,9 @@
}
},
"minimist": {
"version": "1.2.5",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
"integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw=="
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
"integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q=="
},
"minipass": {
"version": "3.1.3",
@@ -41622,9 +41680,9 @@
"integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ=="
},
"nanoid": {
"version": "3.1.23",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz",
"integrity": "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw=="
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.1.tgz",
"integrity": "sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw=="
},
"nanomatch": {
"version": "1.2.13",

View File

@@ -9,6 +9,7 @@
"@cloudflare/stream-react": "^1.2.0",
"@metamask/jazzicon": "^2.0.0",
"@metaplex/js": "4.12.0",
"@project-serum/anchor": "^0.22.1",
"@project-serum/serum": "^0.13.61",
"@react-hook/debounce": "^4.0.0",
"@sentry/react": "^6.16.1",

View File

@@ -42,6 +42,7 @@ export function SearchBar() {
<div className="row align-items-center">
<div className="col">
<Select
autoFocus
ref={(ref) => (selectRef.current = ref)}
options={buildOptions(
search,

View File

@@ -1,6 +1,7 @@
import React from "react";
import classNames from "classnames";
import {
PingInfo,
PingRollupInfo,
PingStatus,
useSolanaPingInfo,
@@ -107,12 +108,10 @@ const CUSTOM_TOOLTIP = function (this: any, tooltipModel: ChartTooltipModel) {
// Set Text
if (tooltipModel.body) {
const { label, value } = tooltipModel.dataPoints[0];
const { label } = tooltipModel.dataPoints[0];
const tooltipContent = tooltipEl.querySelector("div");
if (tooltipContent) {
let innerHtml = `<div class="value">${value} ms</div>`;
innerHtml += `<div class="label">${label}</div>`;
tooltipContent.innerHTML = innerHtml;
tooltipContent.innerHTML = `${label}`;
}
}
@@ -173,33 +172,56 @@ const CHART_OPTION: ChartOptions = {
function PingBarChart({ pingInfo }: { pingInfo: PingRollupInfo }) {
const [series, setSeries] = React.useState<Series>("short");
const seriesData = pingInfo[series] || [];
const maxMean = seriesData.reduce((a, b) => {
return Math.max(a, b.mean);
}, 0);
const seriesLength = seriesData.length;
const backgroundColor = (val: PingInfo) => {
if (val.submitted === 0) {
return "#08a274";
}
return val.loss > 0.5 ? "#f00" : "#00D192";
};
const chartData: Chart.ChartData = {
labels: seriesData.map((val, i) => {
return `
<p class="mb-0">${val.confirmed} of ${val.submitted} confirmed</p>
${
val.loss
? `<p class="mb-0">${val.loss.toLocaleString(undefined, {
style: "percent",
minimumFractionDigits: 2,
})} loss</p>`
: ""
if (val.submitted === 0) {
return `
<div class="label">
<p class="mb-0">Ping statistics unavailable</p>
${SERIES_INFO[series].label(seriesLength - i)}min ago
</div>
`;
}
${SERIES_INFO[series].label(seriesLength - i)}min ago
return `
<div class="value">${val.mean} ms</div>
<div class="label">
<p class="mb-0">${val.confirmed} of ${val.submitted} confirmed</p>
${
val.loss
? `<p class="mb-0">${val.loss.toLocaleString(undefined, {
style: "percent",
minimumFractionDigits: 2,
})} loss</p>`
: ""
}
${SERIES_INFO[series].label(seriesLength - i)}min ago
</div>
`;
}),
datasets: [
{
backgroundColor: seriesData.map((val) =>
val.loss > 0.5 ? "#f00" : "#00D192"
),
hoverBackgroundColor: seriesData.map((val) =>
val.loss > 0.5 ? "#f00" : "#00D192"
),
minBarLength: 2,
backgroundColor: seriesData.map(backgroundColor),
hoverBackgroundColor: seriesData.map(backgroundColor),
borderWidth: 0,
data: seriesData.map((val) => val.mean || 0),
data: seriesData.map((val) => {
if (val.submitted === 0) {
return maxMean * 0.5;
}
return val.mean || 0;
}),
},
],
};

View File

@@ -296,11 +296,11 @@ function isFullyInactivated(
return false;
}
const delegatedStake = stake.delegation.stake.toNumber();
const inactiveStake = activation.inactive;
const delegatedStake = stake.delegation.stake;
const inactiveStake = new BN(activation.inactive);
return (
!stake.delegation.deactivationEpoch.eq(MAX_EPOCH) &&
delegatedStake === inactiveStake
delegatedStake.eq(inactiveStake)
);
}
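Note on the change above: stake.delegation.stake and activation.inactive are u64 lamport amounts, and bn.js refuses to convert values above 2^53 - 1 to a JavaScript number, so the old toNumber() comparison could throw on large stakes. A minimal sketch of the failure mode, with illustrative values:

import BN from "bn.js";

// u64::MAX lamports, far beyond Number.MAX_SAFE_INTEGER (2^53 - 1)
const delegatedStake = new BN("18446744073709551615");
const inactiveStake = new BN("18446744073709551615");

// delegatedStake.toNumber() would throw:
//   "Number can only safely store up to 53 bits"
// Comparing as BN keeps full 64-bit precision:
console.log(delegatedStake.eq(inactiveStake)); // true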

View File

@@ -0,0 +1,167 @@
import {
Connection,
SignatureResult,
TransactionInstruction,
} from "@solana/web3.js";
import { InstructionCard } from "./InstructionCard";
import {
BorshInstructionCoder,
Idl,
Program,
Provider,
} from "@project-serum/anchor";
import React, { useEffect, useState } from "react";
import { useCluster } from "../../providers/cluster";
import { Address } from "../common/Address";
import { snakeCase } from "snake-case";
export function GenericAnchorDetailsCard(props: {
ix: TransactionInstruction;
index: number;
result: SignatureResult;
signature: string;
innerCards?: JSX.Element[];
childIndex?: number;
}) {
const { ix, index, result, innerCards, childIndex } = props;
const cluster = useCluster();
const [idl, setIdl] = useState<Idl | null>();
useEffect(() => {
async function fetchIdl() {
if (idl) {
return;
}
// fetch on chain idl
const idl_: Idl | null = await Program.fetchIdl(ix.programId, {
connection: new Connection(cluster.url),
} as Provider);
setIdl(idl_);
}
fetchIdl();
}, [ix.programId, cluster.url, idl]);
const [programName, setProgramName] = useState<string | null>(null);
const [ixTitle, setIxTitle] = useState<string | null>(null);
const [ixAccounts, setIxAccounts] = useState<
{ name: string; isMut: boolean; isSigner: boolean; pda?: Object }[] | null
>(null);
useEffect(() => {
async function parseIxDetailsUsingCoder() {
if (!idl || (programName && ixTitle && ixAccounts)) {
return;
}
// e.g. voter_stake_registry -> voter stake registry
var _programName = idl.name.replaceAll("_", " ").trim();
// e.g. voter stake registry -> Voter Stake Registry
_programName = _programName
.toLowerCase()
.split(" ")
.map((word) => word.charAt(0).toUpperCase() + word.substring(1))
.join(" ");
setProgramName(_programName);
const coder = new BorshInstructionCoder(idl);
const decodedIx = coder.decode(ix.data);
if (!decodedIx) {
return;
}
// get ix title, pascal case it
var _ixTitle = decodedIx.name;
_ixTitle = _ixTitle.charAt(0).toUpperCase() + _ixTitle.slice(1);
setIxTitle(_ixTitle);
// get ix accounts
const idlInstructions = idl.instructions.filter(
(ix) => ix.name === decodedIx.name
);
if (idlInstructions.length === 0) {
return;
}
setIxAccounts(
idlInstructions[0].accounts as {
// type coercing since anchor doesn't export the underlying type
name: string;
isMut: boolean;
isSigner: boolean;
pda?: Object;
}[]
);
}
parseIxDetailsUsingCoder();
}, [
ix.programId,
ix.keys,
ix.data,
idl,
cluster,
programName,
ixTitle,
ixAccounts,
]);
return (
<div>
{idl && (
<InstructionCard
ix={ix}
index={index}
result={result}
title={`${programName || "Unknown"}: ${ixTitle || "Unknown"}`}
innerCards={innerCards}
childIndex={childIndex}
>
<tr key={ix.programId.toBase58()}>
<td>Program</td>
<td className="text-lg-end">
<Address pubkey={ix.programId} alignRight link />
</td>
</tr>
{ixAccounts != null &&
ix.keys.map((am, keyIndex) => (
<tr key={keyIndex}>
<td>
<div className="me-2 d-md-inline">
{/* remaining accounts would not have a name */}
{ixAccounts[keyIndex] &&
snakeCase(ixAccounts[keyIndex].name)}
{!ixAccounts[keyIndex] &&
"remaining account #" +
(keyIndex - ixAccounts.length + 1)}
</div>
{am.isWritable && (
<span className="badge bg-info-soft me-1">Writable</span>
)}
{am.isSigner && (
<span className="badge bg-info-soft me-1">Signer</span>
)}
</td>
<td>
<Address pubkey={am.pubkey} alignRight link />
</td>
</tr>
))}
</InstructionCard>
)}
{!idl && (
<InstructionCard
ix={ix}
index={index}
result={result}
title={`Unknown Program: Unknown Instruction`}
innerCards={innerCards}
childIndex={childIndex}
defaultRaw
/>
)}
</div>
);
}
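For reference, the decode path above in isolation: fetch the program's on-chain IDL, run the raw instruction data through BorshInstructionCoder, and pair the decoded instruction with the account names its IDL declares. A minimal sketch against the anchor 0.22 API added in this change; the url parameter and early-return shape are assumptions, not a drop-in for the component:

import { Connection, TransactionInstruction } from "@solana/web3.js";
import {
  BorshInstructionCoder,
  Idl,
  Program,
  Provider,
} from "@project-serum/anchor";

async function decodeAnchorInstruction(ix: TransactionInstruction, url: string) {
  // Anchor programs may publish their IDL on-chain at an address derived
  // from the program id; fetchIdl resolves and inflates it
  const idl: Idl | null = await Program.fetchIdl(ix.programId, {
    connection: new Connection(url),
  } as Provider);
  if (!idl) return null; // no on-chain IDL published

  const decoded = new BorshInstructionCoder(idl).decode(ix.data);
  if (!decoded) return null; // data did not match any IDL instruction

  // Account metas are positional; the IDL supplies names, and any keys
  // past the IDL list are the program's "remaining accounts"
  const accounts =
    idl.instructions.find((i) => i.name === decoded.name)?.accounts ?? [];
  return { name: decoded.name, accounts };
}

The explorer only routes instructions here via the knownAnchorPrograms allowlist in the next file, so programs are opted in after their instructions are verified to decode cleanly.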

View File

@@ -0,0 +1,16 @@
import { TransactionInstruction } from "@solana/web3.js";
// list of programs written in anchor
// - should have idl on-chain for GenericAnchorDetailsCard to work out of the box
// - before adding another program to this list, please make sure that the ix
// are decoding without any errors
const knownAnchorPrograms = [
// https://github.com/blockworks-foundation/voter-stake-registry
"4Q6WW2ouZ6V3iaNm56MTd5n2tnTm4C5fiH8miFHnAFHo",
];
export const isInstructionFromAnAnchorProgram = (
instruction: TransactionInstruction
) => {
return knownAnchorPrograms.includes(instruction.programId.toBase58());
};

View File

@@ -500,7 +500,7 @@ export function decodeInitOpenOrders(
openOrders: ix.keys[0].pubkey,
openOrdersOwner: ix.keys[1].pubkey,
market: ix.keys[2].pubkey,
openOrdersMarketAuthority: ix.keys[4].pubkey,
openOrdersMarketAuthority: ix.keys[4]?.pubkey,
},
};
}
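The added ?. matters because the market authority is an optional trailing account of initOpenOrders: when it is omitted, ix.keys[4] is undefined and the old unguarded .pubkey access threw while decoding. Illustrative sketch with a hypothetical keys array:

import { AccountMeta } from "@solana/web3.js";

// An initOpenOrders instruction sent without the optional authority
const keys: AccountMeta[] = [
  /* only the required leading accounts */
];

// keys[4].pubkey   -> TypeError at runtime
const authority = keys[4]?.pubkey; // undefined instead of a crash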

View File

@@ -166,6 +166,10 @@ const BurnChecked = type({
tokenAmount: TokenAmountUi,
});
const SyncNative = type({
account: PublicKeyFromString,
});
export type TokenInstructionType = Infer<typeof TokenInstructionType>;
export const TokenInstructionType = enums([
"initializeMint",
@@ -188,6 +192,7 @@ export const TokenInstructionType = enums([
"approveChecked",
"mintToChecked",
"burnChecked",
"syncNative",
]);
export const IX_STRUCTS = {
@@ -211,6 +216,7 @@ export const IX_STRUCTS = {
approveChecked: ApproveChecked,
mintToChecked: MintToChecked,
burnChecked: BurnChecked,
syncNative: SyncNative,
};
export const IX_TITLES = {
@@ -234,4 +240,5 @@ export const IX_TITLES = {
approveChecked: "Approve (Checked)",
mintToChecked: "Mint To (Checked)",
burnChecked: "Burn (Checked)",
syncNative: "Sync Native",
};
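The three registrations above follow the explorer's pattern for token instructions: the superstruct shape in IX_STRUCTS validates the parsed info payload, the TokenInstructionType enum gates the parsed type string, and IX_TITLES supplies the display name. A sketch of the validation step, with a plain string() standing in for the explorer's PublicKeyFromString coercer:

import { create, string, type } from "superstruct";

// Stand-in for PublicKeyFromString, which coerces base58 text to a PublicKey
const SyncNative = type({ account: string() });

// create() returns the validated value, or throws a StructError on mismatch
const info = create(
  { account: "9WzDXwBbmkg8ZTbNMqUxvQRAyrZzDsGYdLVL9zYtAWWM" },
  SyncNative
);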

View File

@@ -21,8 +21,8 @@ import { WormholeDetailsCard } from "components/instruction/WormholeDetailsCard"
import { UnknownDetailsCard } from "components/instruction/UnknownDetailsCard";
import { BonfidaBotDetailsCard } from "components/instruction/BonfidaBotDetails";
import {
SignatureProps,
INNER_INSTRUCTIONS_START_SLOT,
SignatureProps,
} from "pages/TransactionDetailsPage";
import { intoTransactionInstruction } from "utils/tx";
import { isSerumInstruction } from "components/instruction/serum/types";
@@ -39,10 +39,12 @@ import { BpfUpgradeableLoaderDetailsCard } from "components/instruction/bpf-upgr
import { VoteDetailsCard } from "components/instruction/vote/VoteDetailsCard";
import { isWormholeInstruction } from "components/instruction/wormhole/types";
import { AssociatedTokenDetailsCard } from "components/instruction/AssociatedTokenDetailsCard";
import { isMangoInstruction } from "components/instruction/mango/types";
import { MangoDetailsCard } from "components/instruction/MangoDetails";
import { isPythInstruction } from "components/instruction/pyth/types";
import { PythDetailsCard } from "components/instruction/pyth/PythDetailsCard";
import { isInstructionFromAnAnchorProgram } from "../instruction/anchor/types";
import { GenericAnchorDetailsCard } from "../instruction/GenericAnchorDetails";
import { isMangoInstruction } from "../instruction/mango/types";
export type InstructionDetailsProps = {
tx: ParsedTransaction;
@@ -214,6 +216,8 @@ function renderInstructionCard({
if (isBonfidaBotInstruction(transactionIx)) {
return <BonfidaBotDetailsCard key={key} {...props} />;
} else if (isInstructionFromAnAnchorProgram(transactionIx)) {
return <GenericAnchorDetailsCard key={key} {...props} />;
} else if (isMangoInstruction(transactionIx)) {
return <MangoDetailsCard key={key} {...props} />;
} else if (isSerumInstruction(transactionIx)) {

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.10.3"
version = "1.11.0"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -17,12 +17,12 @@ crossbeam-channel = "0.5"
log = "0.4.14"
serde = "1.0.136"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-cli-config = { path = "../cli-config", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-cli-config = { path = "../cli-config", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-frozen-abi"
version = "1.10.3"
version = "1.11.0"
description = "Solana Frozen ABI"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,8 +16,9 @@ lazy_static = "1.4.0"
log = "0.4.14"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_bytes = "0.11"
sha2 = "0.10.2"
solana-frozen-abi-macro = { path = "macro", version = "=1.10.3" }
solana-frozen-abi-macro = { path = "macro", version = "=1.11.0" }
thiserror = "1.0"
[target.'cfg(not(target_arch = "bpf"))'.dependencies]
@@ -26,7 +27,7 @@ im = { version = "15.0.0", features = ["rayon", "serde"] }
memmap2 = "0.5.3"
[target.'cfg(not(target_arch = "bpf"))'.dev-dependencies]
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[build-dependencies]
rustc_version = "0.4"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-frozen-abi-macro"
version = "1.10.3"
version = "1.11.0"
description = "Solana Frozen ABI Macro"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -562,6 +562,17 @@ mod tests {
#[derive(Serialize, AbiExample)]
struct TestNewtypeStruct(i8);
#[frozen_abi(digest = "Hbs1X2X7TF2gFEfsspwfZ1JKr8ZGbLY3uidQBebqcMYt")]
#[derive(Serialize, AbiExample)]
struct Foo<'a> {
#[serde(with = "serde_bytes")]
data1: Vec<u8>,
#[serde(with = "serde_bytes")]
data2: &'a [u8],
#[serde(with = "serde_bytes")]
data3: &'a Vec<u8>,
}
#[frozen_abi(digest = "5qio5qYurHDv6fq5kcwP2ue2RBEazSZF8CPk2kUuwC2j")]
#[derive(Serialize, AbiExample)]
struct TestStructReversed {

View File

@@ -410,11 +410,18 @@ lazy_static! {
impl AbiExample for &Vec<u8> {
fn example() -> Self {
info!("AbiExample for (&Vec<T>): {}", type_name::<Self>());
info!("AbiExample for (&Vec<u8>): {}", type_name::<Self>());
&*VEC_U8
}
}
impl AbiExample for &[u8] {
fn example() -> Self {
info!("AbiExample for (&[u8]): {}", type_name::<Self>());
&VEC_U8[..]
}
}
impl<T: AbiExample> AbiExample for VecDeque<T> {
fn example() -> Self {
info!("AbiExample for (VecDeque<T>): {}", type_name::<Self>());

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-utils"
version = "1.10.3"
version = "1.11.0"
description = "Solana Genesis Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ documentation = "https://docs.rs/solana-download-utils"
edition = "2021"
[dependencies]
solana-download-utils = { path = "../download-utils", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-download-utils = { path = "../download-utils", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
[lib]
crate-type = ["lib"]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,16 +15,16 @@ clap = "2.33.1"
serde = "1.0.136"
serde_json = "1.0.79"
serde_yaml = "0.8.23"
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-cli-config = { path = "../cli-config", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-stake-program = { path = "../programs/stake", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-cli-config = { path = "../cli-config", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
tempfile = "3.3.0"
[[bin]]

View File

@@ -13,7 +13,7 @@ use {
},
solana_entry::poh::compute_hashes_per_tick,
solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account},
solana_ledger::{blockstore::create_new_ledger, blockstore_db::BlockstoreAdvancedOptions},
solana_ledger::{blockstore::create_new_ledger, blockstore_db::LedgerColumnOptions},
solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
solana_sdk::{
account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
@@ -629,7 +629,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
&ledger_path,
&genesis_config,
max_genesis_archive_unpacked_size,
BlockstoreAdvancedOptions::default(),
LedgerColumnOptions::default(),
)?;
println!("{}", genesis_config);

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-geyser-plugin-interface"
description = "The Solana Geyser plugin interface."
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,8 +11,8 @@ documentation = "https://docs.rs/solana-geyser-plugin-interface"
[dependencies]
log = "0.4.11"
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
thiserror = "1.0.30"
[package.metadata.docs.rs]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-geyser-plugin-manager"
description = "The Solana Geyser plugin manager."
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,13 +16,13 @@ json5 = "0.4.1"
libloading = "0.7.3"
log = "0.4.11"
serde_json = "1.0.79"
solana-geyser-plugin-interface = { path = "../geyser-plugin-interface", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-rpc = { path = "../rpc", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-geyser-plugin-interface = { path = "../geyser-plugin-interface", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-rpc = { path = "../rpc", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
thiserror = "1.0.30"
[package.metadata.docs.rs]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,24 +27,24 @@ rayon = "1.5.1"
serde = "1.0.136"
serde_bytes = "0.11"
serde_derive = "1.0.103"
solana-bloom = { path = "../bloom", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.3" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-net-utils = { path = "../net-utils", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-streamer = { path = "../streamer", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-bloom = { path = "../bloom", version = "=1.11.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-streamer = { path = "../streamer", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
thiserror = "1.0"
[dev-dependencies]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-install"
description = "The solana cluster software installer"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,12 +26,12 @@ reqwest = { version = "0.11.10", default-features = false, features = ["blocking
semver = "1.0.6"
serde = { version = "1.0.136", features = ["derive"] }
serde_yaml = "0.8.23"
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-client = { path = "../client", version = "=1.10.3" }
solana-config-program = { path = "../programs/config", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-client = { path = "../client", version = "=1.11.0" }
solana-config-program = { path = "../programs/config", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
tar = "0.4.38"
tempfile = "3.3.0"
url = "2.2.2"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "1.10.3"
version = "1.11.0"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,11 +14,11 @@ bs58 = "0.4.0"
clap = "2.33"
dirs-next = "2.0.0"
num_cpus = "1.13.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-cli-config = { path = "../cli-config", version = "=1.10.3" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-cli-config = { path = "../cli-config", version = "=1.11.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
tiny-bip39 = "0.8.2"
[[bin]]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.3"
version = "1.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -21,20 +21,20 @@ log = { version = "0.4.14" }
regex = "1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.79"
solana-clap-utils = { path = "../clap-utils", version = "=1.10.3" }
solana-cli-output = { path = "../cli-output", version = "=1.10.3" }
solana-core = { path = "../core", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-ledger = { path = "../ledger", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-stake-program = { path = "../programs/stake", version = "=1.10.3" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-version = { path = "../version", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
solana-cli-output = { path = "../cli-output", version = "=1.11.0" }
solana-core = { path = "../core", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-ledger = { path = "../ledger", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.11.0" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-version = { path = "../version", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
tokio = { version = "1", features = ["full"] }
[target.'cfg(not(target_env = "msvc"))'.dependencies]

View File

@@ -34,8 +34,9 @@ async fn upload(
starting_slot: Slot,
ending_slot: Option<Slot>,
force_reupload: bool,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false, None, None)
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
@@ -50,17 +51,22 @@ async fn upload(
.await
}
async fn delete_slots(slots: Vec<Slot>, dry_run: bool) -> Result<(), Box<dyn std::error::Error>> {
let read_only = dry_run;
let bigtable = solana_storage_bigtable::LedgerStorage::new(read_only, None, None)
async fn delete_slots(
slots: Vec<Slot>,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let dry_run = config.read_only;
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
solana_ledger::bigtable_delete::delete_confirmed_blocks(bigtable, slots, dry_run).await
}
async fn first_available_block() -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(true, None, None).await?;
async fn first_available_block(
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config).await?;
match bigtable.get_first_available_block().await? {
Some(block) => println!("{}", block),
None => println!("No blocks available"),
@@ -69,8 +75,12 @@ async fn first_available_block() -> Result<(), Box<dyn std::error::Error>> {
Ok(())
}
async fn block(slot: Slot, output_format: OutputFormat) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false, None, None)
async fn block(
slot: Slot,
output_format: OutputFormat,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
@@ -101,8 +111,12 @@ async fn block(slot: Slot, output_format: OutputFormat) -> Result<(), Box<dyn st
Ok(())
}
async fn blocks(starting_slot: Slot, limit: usize) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false, None, None)
async fn blocks(
starting_slot: Slot,
limit: usize,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
@@ -116,11 +130,10 @@ async fn blocks(starting_slot: Slot, limit: usize) -> Result<(), Box<dyn std::er
async fn compare_blocks(
starting_slot: Slot,
limit: usize,
credential_path: String,
config: solana_storage_bigtable::LedgerStorageConfig,
ref_config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
assert!(!credential_path.is_empty());
let owned_bigtable = solana_storage_bigtable::LedgerStorage::new(false, None, None)
let owned_bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("failed to connect to owned bigtable: {:?}", err))?;
let owned_bigtable_slots = owned_bigtable
@@ -130,10 +143,9 @@ async fn compare_blocks(
"owned bigtable {} blocks found ",
owned_bigtable_slots.len()
);
let reference_bigtable =
solana_storage_bigtable::LedgerStorage::new(false, None, Some(credential_path))
.await
.map_err(|err| format!("failed to connect to reference bigtable: {:?}", err))?;
let reference_bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(ref_config)
.await
.map_err(|err| format!("failed to connect to reference bigtable: {:?}", err))?;
let reference_bigtable_slots = reference_bigtable
.get_confirmed_blocks(starting_slot, limit)
@@ -160,8 +172,9 @@ async fn confirm(
signature: &Signature,
verbose: bool,
output_format: OutputFormat,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(false, None, None)
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config)
.await
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
@@ -211,8 +224,9 @@ pub async fn transaction_history(
verbose: bool,
show_transactions: bool,
query_chunk_size: usize,
config: solana_storage_bigtable::LedgerStorageConfig,
) -> Result<(), Box<dyn std::error::Error>> {
let bigtable = solana_storage_bigtable::LedgerStorage::new(true, None, None).await?;
let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config).await?;
let mut loaded_block: Option<(Slot, ConfirmedBlock)> = None;
while limit > 0 {
@@ -308,6 +322,15 @@ impl BigTableSubCommand for App<'_, '_> {
.about("Ledger data on a BigTable instance")
.setting(AppSettings::InferSubcommands)
.setting(AppSettings::SubcommandRequiredElseHelp)
.arg(
Arg::with_name("rpc_bigtable_instance_name")
.global(true)
.long("rpc-bigtable-instance-name")
.takes_value(true)
.value_name("INSTANCE_NAME")
.default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME)
.help("Name of the target Bigtable instance")
)
.subcommand(
SubCommand::with_name("upload")
.about("Upload the ledger to BigTable")
@@ -417,7 +440,8 @@ impl BigTableSubCommand for App<'_, '_> {
.required(true)
.default_value("1000")
.help("Maximum number of slots to check"),
).arg(
)
.arg(
Arg::with_name("reference_credential")
.long("reference-credential")
.short("c")
@@ -425,6 +449,14 @@ impl BigTableSubCommand for App<'_, '_> {
.takes_value(true)
.required(true)
.help("File path for a credential to a reference bigtable"),
)
.arg(
Arg::with_name("reference_instance_name")
.long("reference-instance-name")
.takes_value(true)
.value_name("INSTANCE_NAME")
.default_value(solana_storage_bigtable::DEFAULT_INSTANCE_NAME)
.help("Name of the reference Bigtable instance to compare to")
),
)
.subcommand(
@@ -521,7 +553,28 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
let verbose = matches.is_present("verbose");
let output_format = OutputFormat::from_matches(matches, "output_format", verbose);
let future = match matches.subcommand() {
// this is kinda stupid, but there seems to be a bug in clap when a subcommand
// arg is marked both `global(true)` and `default_value("default_value")`.
// despite the "global", when the arg is specified on the subcommand, its value
// is not propagated down to the (sub)subcommand args, resulting in the default
// value when queried there. similarly, if the arg is specified on the
// (sub)subcommand, the value is not propagated back up to the subcommand args,
// again resulting in the default value. the arg having declared a
// `default_value()` obviates `is_present(...)` tests since they will always
// return true. so we concede and compare against the expected default. :/
let (subcommand, sub_matches) = matches.subcommand();
let on_command = matches
.value_of("rpc_bigtable_instance_name")
.map(|v| v != solana_storage_bigtable::DEFAULT_INSTANCE_NAME)
.unwrap_or(false);
let instance_name = if on_command {
value_t_or_exit!(matches, "rpc_bigtable_instance_name", String)
} else {
let sub_matches = sub_matches.as_ref().unwrap();
value_t_or_exit!(sub_matches, "rpc_bigtable_instance_name", String)
};
let future = match (subcommand, sub_matches) {
("upload", Some(arg_matches)) => {
let starting_slot = value_t!(arg_matches, "starting_slot", Slot).unwrap_or(0);
let ending_slot = value_t!(arg_matches, "ending_slot", Slot).ok();
@@ -531,41 +584,79 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
AccessType::TryPrimaryThenSecondary,
None,
);
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(upload(
blockstore,
starting_slot,
ending_slot,
force_reupload,
config,
))
}
("delete-slots", Some(arg_matches)) => {
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
let dry_run = !arg_matches.is_present("force");
runtime.block_on(delete_slots(slots, dry_run))
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: !arg_matches.is_present("force"),
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(delete_slots(slots, config))
}
("first-available-block", Some(_arg_matches)) => {
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: true,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(first_available_block(config))
}
("first-available-block", Some(_arg_matches)) => runtime.block_on(first_available_block()),
("block", Some(arg_matches)) => {
let slot = value_t_or_exit!(arg_matches, "slot", Slot);
runtime.block_on(block(slot, output_format))
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(block(slot, output_format, config))
}
("blocks", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let limit = value_t_or_exit!(arg_matches, "limit", usize);
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(blocks(starting_slot, limit))
runtime.block_on(blocks(starting_slot, limit, config))
}
("compare-blocks", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let limit = value_t_or_exit!(arg_matches, "limit", usize);
let reference_credential_filepath =
value_t_or_exit!(arg_matches, "reference_credential", String);
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
let credential_path = Some(value_t_or_exit!(
arg_matches,
"reference_credential",
String
));
let ref_instance_name =
value_t_or_exit!(arg_matches, "reference_instance_name", String);
let ref_config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
credential_path,
instance_name: ref_instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(compare_blocks(
starting_slot,
limit,
reference_credential_filepath,
))
runtime.block_on(compare_blocks(starting_slot, limit, config, ref_config))
}
("confirm", Some(arg_matches)) => {
let signature = arg_matches
@@ -573,8 +664,13 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
.unwrap()
.parse()
.expect("Invalid signature");
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: false,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(confirm(&signature, verbose, output_format))
runtime.block_on(confirm(&signature, verbose, output_format, config))
}
("transaction-history", Some(arg_matches)) => {
let address = pubkey_of(arg_matches, "address").unwrap();
@@ -587,6 +683,11 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
.value_of("until")
.map(|signature| signature.parse().expect("Invalid signature"));
let show_transactions = arg_matches.is_present("show_transactions");
let config = solana_storage_bigtable::LedgerStorageConfig {
read_only: true,
instance_name,
..solana_storage_bigtable::LedgerStorageConfig::default()
};
runtime.block_on(transaction_history(
&address,
@@ -596,6 +697,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
verbose,
show_transactions,
query_chunk_size,
config,
))
}
_ => unreachable!(),

View File

@@ -25,8 +25,8 @@ use {
bank_forks_utils,
blockstore::{create_new_ledger, Blockstore, PurgeType},
blockstore_db::{
self, AccessType, BlockstoreAdvancedOptions, BlockstoreOptions, BlockstoreRecoveryMode,
Database,
self, AccessType, BlockstoreOptions, BlockstoreRecoveryMode, Database,
LedgerColumnOptions,
},
blockstore_processor::{BlockstoreProcessorError, ProcessOptions},
shred::Shred,
@@ -326,6 +326,26 @@ fn output_ledger(
}
}
fn output_account(
pubkey: &Pubkey,
account: &AccountSharedData,
modified_slot: Option<Slot>,
print_account_data: bool,
) {
println!("{}", pubkey);
println!(" balance: {} SOL", lamports_to_sol(account.lamports()));
println!(" owner: '{}'", account.owner());
println!(" executable: {}", account.executable());
if let Some(slot) = modified_slot {
println!(" slot: {}", slot);
}
println!(" rent_epoch: {}", account.rent_epoch());
println!(" data_len: {}", account.data().len());
if print_account_data {
println!(" data: '{}'", bs58::encode(account.data()).into_string());
}
}
fn render_dot(dot: String, output_file: &str, output_format: &str) -> io::Result<()> {
let mut child = Command::new("dot")
.arg(format!("-T{}", output_format))
@@ -1162,6 +1182,24 @@ fn main() {
SubCommand::with_name("genesis")
.about("Prints the ledger's genesis config")
.arg(&max_genesis_archive_unpacked_size_arg)
.arg(
Arg::with_name("accounts")
.long("accounts")
.takes_value(false)
.help("Print the ledger's genesis accounts"),
)
.arg(
Arg::with_name("no_account_data")
.long("no-account-data")
.takes_value(false)
.requires("accounts")
.help("Do not print account data when printing account contents."),
)
)
.subcommand(
SubCommand::with_name("genesis-hash")
.about("Prints the ledger's genesis hash")
.arg(&max_genesis_archive_unpacked_size_arg)
)
.subcommand(
SubCommand::with_name("parse_full_frozen")
@@ -1177,11 +1215,6 @@ fn main() {
.help("path to log file to parse"),
)
)
.subcommand(
SubCommand::with_name("genesis-hash")
.about("Prints the ledger's genesis hash")
.arg(&max_genesis_archive_unpacked_size_arg)
)
.subcommand(
SubCommand::with_name("modify-genesis")
.about("Modifies genesis parameters")
@@ -1457,11 +1490,10 @@ fn main() {
.takes_value(false)
.help("Do not print contents of each account, which is very slow with lots of accounts."),
)
.arg(
Arg::with_name("no_account_data")
.long("no-account-data")
.takes_value(false)
.help("Do not print account data when printing account contents."),
.arg(Arg::with_name("no_account_data")
.long("no-account-data")
.takes_value(false)
.help("Do not print account data when printing account contents."),
)
.arg(&max_genesis_archive_unpacked_size_arg)
).subcommand(
@@ -1690,7 +1722,21 @@ fn main() {
}
}
("genesis", Some(arg_matches)) => {
println!("{}", open_genesis_config_by(&ledger_path, arg_matches));
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let print_accounts = arg_matches.is_present("accounts");
if print_accounts {
let print_account_data = !arg_matches.is_present("no_account_data");
for (pubkey, account) in genesis_config.accounts {
output_account(
&pubkey,
&AccountSharedData::from(account),
None,
print_account_data,
);
}
} else {
println!("{}", genesis_config);
}
}
("genesis-hash", Some(arg_matches)) => {
println!(
@@ -1719,7 +1765,7 @@ fn main() {
&output_directory,
&genesis_config,
solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
BlockstoreAdvancedOptions::default(),
LedgerColumnOptions::default(),
)
.unwrap_or_else(|err| {
eprintln!("Failed to write genesis config: {:?}", err);
@@ -2560,17 +2606,7 @@ fn main() {
let print_account_data = !arg_matches.is_present("no_account_data");
let mut measure = Measure::start("printing account contents");
for (pubkey, (account, slot)) in accounts.into_iter() {
let data_len = account.data().len();
println!("{}:", pubkey);
println!(" - balance: {} SOL", lamports_to_sol(account.lamports()));
println!(" - owner: '{}'", account.owner());
println!(" - executable: {}", account.executable());
println!(" - slot: {}", slot);
println!(" - rent_epoch: {}", account.rent_epoch());
if print_account_data {
println!(" - data: '{}'", bs58::encode(account.data()).into_string());
}
println!(" - data_len: {}", data_len);
output_account(&pubkey, &account, Some(slot), print_account_data);
}
measure.stop();
info!("{}", measure);

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "1.10.3"
version = "1.11.0"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,6 +11,7 @@ edition = "2021"
[dependencies]
bincode = "1.3.3"
bitflags = "1.3.1"
byteorder = "1.4.3"
chrono = { version = "0.4.11", features = ["serde"] }
chrono-humanize = "0.2.1"
@@ -21,6 +22,7 @@ itertools = "0.10.3"
lazy_static = "1.4.0"
libc = "0.2.120"
log = { version = "0.4.14" }
lru = "0.7.3"
num-derive = "0.3"
num-traits = "0.2"
num_cpus = "1.13.1"
@@ -32,21 +34,21 @@ reed-solomon-erasure = { version = "5.0.1", features = ["simd-accel"] }
serde = "1.0.136"
serde_bytes = "0.11.5"
sha2 = "0.10.2"
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.3" }
solana-entry = { path = "../entry", version = "=1.10.3" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.3" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.3" }
solana-measure = { path = "../measure", version = "=1.10.3" }
solana-metrics = { path = "../metrics", version = "=1.10.3" }
solana-perf = { path = "../perf", version = "=1.10.3" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.3" }
solana-runtime = { path = "../runtime", version = "=1.10.3" }
solana-sdk = { path = "../sdk", version = "=1.10.3" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.10.3" }
solana-storage-proto = { path = "../storage-proto", version = "=1.10.3" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.3" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.3" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.11.0" }
solana-entry = { path = "../entry", version = "=1.11.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.0" }
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-metrics = { path = "../metrics", version = "=1.11.0" }
solana-perf = { path = "../perf", version = "=1.11.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.11.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.11.0" }
solana-runtime = { path = "../runtime", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.11.0" }
solana-storage-proto = { path = "../storage-proto", version = "=1.11.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
tempfile = "3.3.0"
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
@@ -63,8 +65,8 @@ features = ["lz4"]
[dev-dependencies]
assert_matches = "1.5.0"
matches = "0.1.9"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.3" }
solana-logger = { path = "../logger", version = "=1.10.3" }
solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
solana-logger = { path = "../logger", version = "=1.11.0" }
[build-dependencies]
rustc_version = "0.4"

View File

@@ -5,9 +5,8 @@ use {
crate::{
ancestor_iterator::AncestorIterator,
blockstore_db::{
columns as cf, AccessType, BlockstoreAdvancedOptions, BlockstoreOptions, Column,
ColumnName, Database, IteratorDirection, IteratorMode, LedgerColumn, Result,
ShredStorageType, WriteBatch,
columns as cf, AccessType, BlockstoreOptions, Column, Database, IteratorDirection,
IteratorMode, LedgerColumn, LedgerColumnOptions, Result, ShredStorageType, WriteBatch,
},
blockstore_meta::*,
leader_schedule_cache::LeaderScheduleCache,
@@ -16,6 +15,7 @@ use {
max_ticks_per_n_shreds, ErasureSetId, Result as ShredResult, Shred, ShredId, ShredType,
Shredder, SHRED_PAYLOAD_SIZE,
},
slot_stats::{ShredSource, SlotsStats},
},
bincode::deserialize,
crossbeam_channel::{bounded, Receiver, Sender, TrySendError},
@@ -50,7 +50,7 @@ use {
borrow::Cow,
cell::RefCell,
cmp,
collections::{hash_map::Entry as HashMapEntry, BTreeMap, BTreeSet, HashMap, HashSet},
collections::{hash_map::Entry as HashMapEntry, BTreeSet, HashMap, HashSet},
convert::TryInto,
fs,
io::{Error as IoError, ErrorKind},
@@ -60,7 +60,6 @@ use {
atomic::{AtomicBool, Ordering},
Arc, Mutex, RwLock, RwLockWriteGuard,
},
time::Instant,
},
tempfile::{Builder, TempDir},
thiserror::Error,
@@ -75,7 +74,6 @@ pub mod blockstore_purge;
pub const BLOCKSTORE_DIRECTORY_ROCKS_LEVEL: &str = "rocksdb";
pub const BLOCKSTORE_DIRECTORY_ROCKS_FIFO: &str = "rocksdb_fifo";
pub const BLOCKSTORE_METRICS_ERROR: i64 = -1;
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
@@ -171,34 +169,13 @@ pub struct Blockstore {
block_height_cf: LedgerColumn<cf::BlockHeight>,
program_costs_cf: LedgerColumn<cf::ProgramCosts>,
bank_hash_cf: LedgerColumn<cf::BankHash>,
last_root: Arc<RwLock<Slot>>,
insert_shreds_lock: Arc<Mutex<()>>,
last_root: RwLock<Slot>,
insert_shreds_lock: Mutex<()>,
pub new_shreds_signals: Vec<Sender<bool>>,
pub completed_slots_senders: Vec<CompletedSlotsSender>,
pub lowest_cleanup_slot: Arc<RwLock<Slot>>,
pub lowest_cleanup_slot: RwLock<Slot>,
no_compaction: bool,
slots_stats: Arc<Mutex<SlotsStats>>,
advanced_options: BlockstoreAdvancedOptions,
}
struct SlotsStats {
last_cleanup_ts: Instant,
stats: BTreeMap<Slot, SlotStats>,
}
impl Default for SlotsStats {
fn default() -> Self {
SlotsStats {
last_cleanup_ts: Instant::now(),
stats: BTreeMap::new(),
}
}
}
#[derive(Default)]
struct SlotStats {
num_repaired: usize,
num_recovered: usize,
slots_stats: Mutex<SlotsStats>,
}
pub struct IndexMetaWorkingSetEntry {
@@ -223,13 +200,6 @@ pub struct SlotMetaWorkingSetEntry {
did_insert_occur: bool,
}
#[derive(PartialEq, Debug, Clone)]
enum ShredSource {
Turbine,
Repaired,
Recovered,
}
#[derive(Default)]
pub struct BlockstoreInsertionMetrics {
pub num_shreds: usize,
@@ -258,101 +228,6 @@ pub struct BlockstoreInsertionMetrics {
num_coding_shreds_inserted: usize,
}
#[derive(Default)]
/// A metrics struct that exposes RocksDB's column family properties.
///
/// Here we only expose a subset of all the internal properties which are
/// relevant to the ledger store performance.
///
// The complete list of RocksDB internal properties can be found
// [here](https://github.com/facebook/rocksdb/blob/08809f5e6cd9cc4bc3958dd4d59457ae78c76660/include/rocksdb/db.h#L654-L689).
pub struct BlockstoreRocksDbColumnFamilyMetrics {
// Size related
// The storage size occupied by the column family.
// RocksDB's internal property key: "rocksdb.total-sst-files-size"
pub total_sst_files_size: i64,
// The memory size occupied by the column family's in-memory buffer.
// RocksDB's internal property key: "rocksdb.size-all-mem-tables"
pub size_all_mem_tables: i64,
// Snapshot related
// Number of snapshots held for the column family.
// RocksDB's internal property key: "rocksdb.num-snapshots"
pub num_snapshots: i64,
// Unix timestamp of the oldest unreleased snapshot.
// RocksDB's internal property key: "rocksdb.oldest-snapshot-time"
pub oldest_snapshot_time: i64,
// Write related
// The current actual delayed write rate. 0 means no delay.
// RocksDB's internal property key: "rocksdb.actual-delayed-write-rate"
pub actual_delayed_write_rate: i64,
// A flag indicating whether writes are stopped on this column family.
// 1 indicates writes have been stopped.
// RocksDB's internal property key: "rocksdb.is-write-stopped"
pub is_write_stopped: i64,
// Memory / block cache related
// The block cache capacity of the column family.
// RocksDB's internal property key: "rocksdb.block-cache-capacity"
pub block_cache_capacity: i64,
// The memory size used by the column family in the block cache.
// RocksDB's internal property key: "rocksdb.block-cache-usage"
pub block_cache_usage: i64,
// The memory size used by the column family in the block cache where
// entries are pinned.
// RocksDB's internal property key: "rocksdb.block-cache-pinned-usage"
pub block_cache_pinned_usage: i64,
// The estimated memory size used for reading SST tables in this column
// family such as filters and index blocks. Note that this number does not
// include the memory used in block cache.
// RocksDB's internal property key: "rocksdb.estimate-table-readers-mem"
pub estimate_table_readers_mem: i64,
// Flush and compaction
// A 1 or 0 flag indicating whether a memtable flush is pending.
// If this number is 1, it means a memtable is waiting to be flushed,
// but there might be too many L0 files that prevent it from being flushed.
// RocksDB's internal property key: "rocksdb.mem-table-flush-pending"
pub mem_table_flush_pending: i64,
// A 1 or 0 flag indicating whether a compaction job is pending.
// If this number is 1, it means some part of the column family requires
// compaction in order to maintain the shape of the LSM tree, but the compaction
// is pending because the desired compaction job is either waiting for
// other dependent compactions to finish or waiting for an available
// compaction thread.
// RocksDB's internal property key: "rocksdb.compaction-pending"
pub compaction_pending: i64,
// The number of compactions that are currently running for the column family.
// RocksDB's internal property key: "rocksdb.num-running-compactions"
pub num_running_compactions: i64,
// The number of flushes that are currently running for the column family.
// RocksDB's internal property key: "rocksdb.num-running-flushes"
pub num_running_flushes: i64,
// FIFO Compaction related
// An estimate of the oldest key timestamp in the DB. Only available
// for FIFO compaction with compaction_options_fifo.allow_compaction = false.
// RocksDB's internal property key: "rocksdb.estimate-oldest-key-time"
pub estimate_oldest_key_time: i64,
// Misc
// The accumulated number of RocksDB background errors.
// RocksDB's internal property key: "rocksdb.background-errors"
pub background_errors: i64,
}
impl SlotMetaWorkingSetEntry {
/// Construct a new SlotMetaWorkingSetEntry with the specified `new_slot_meta`
/// and `old_slot_meta`. `did_insert_occur` is set to false.
@@ -448,97 +323,6 @@ impl BlockstoreInsertionMetrics {
}
}
impl BlockstoreRocksDbColumnFamilyMetrics {
/// Report metrics with the specified metric name and column family tag.
/// The metric name and the column family tag are embedded in the parameter
/// `metric_name_and_cf_tag` with the following format.
///
/// For example, "blockstore_rocksdb_cfs,cf_name=shred_data".
pub fn report_metrics(&self, metric_name_and_cf_tag: &'static str) {
datapoint_info!(
metric_name_and_cf_tag,
// Size related
(
"total_sst_files_size",
self.total_sst_files_size as i64,
i64
),
("size_all_mem_tables", self.size_all_mem_tables as i64, i64),
// Snapshot related
("num_snapshots", self.num_snapshots as i64, i64),
(
"oldest_snapshot_time",
self.oldest_snapshot_time as i64,
i64
),
// Write related
(
"actual_delayed_write_rate",
self.actual_delayed_write_rate as i64,
i64
),
("is_write_stopped", self.is_write_stopped as i64, i64),
// Memory / block cache related
(
"block_cache_capacity",
self.block_cache_capacity as i64,
i64
),
("block_cache_usage", self.block_cache_usage as i64, i64),
(
"block_cache_pinned_usage",
self.block_cache_pinned_usage as i64,
i64
),
(
"estimate_table_readers_mem",
self.estimate_table_readers_mem as i64,
i64
),
// Flush and compaction
(
"mem_table_flush_pending",
self.mem_table_flush_pending as i64,
i64
),
("compaction_pending", self.compaction_pending as i64, i64),
(
"num_running_compactions",
self.num_running_compactions as i64,
i64
),
("num_running_flushes", self.num_running_flushes as i64, i64),
// FIFO Compaction related
(
"estimate_oldest_key_time",
self.estimate_oldest_key_time as i64,
i64
),
// Misc
("background_errors", self.background_errors as i64, i64),
);
}
}
macro_rules! rocksdb_metric_header {
($metric_name:literal, $cf_name:literal, $advanced_options:expr) => {
match $advanced_options.shred_storage_type {
ShredStorageType::RocksLevel =>
rocksdb_metric_header!(@all_fields $metric_name, $cf_name, "rocks_level"),
ShredStorageType::RocksFifo(_) =>
rocksdb_metric_header!(@all_fields $metric_name, $cf_name, "rocks_fifo"),
}
};
(@all_fields $metric_name:literal, $cf_name:literal, $storage_type:literal) => {
concat!($metric_name,
",cf_name=", $cf_name,
",storage=", $storage_type,
)
};
}
use rocksdb_metric_header;
impl Blockstore {
pub fn db(self) -> Arc<Database> {
self.db
@@ -569,9 +353,8 @@ impl Blockstore {
fn do_open(ledger_path: &Path, options: BlockstoreOptions) -> Result<Blockstore> {
fs::create_dir_all(&ledger_path)?;
let blockstore_path = ledger_path.join(Self::blockstore_directory(
&options.advanced_options.shred_storage_type,
&options.column_options.shred_storage_type,
));
let advanced_options = options.advanced_options.clone();
adjust_ulimit_nofile(options.enforce_ulimit_nofile)?;
@@ -615,7 +398,7 @@ impl Blockstore {
.next()
.map(|(slot, _)| slot)
.unwrap_or(0);
let last_root = Arc::new(RwLock::new(max_root));
let last_root = RwLock::new(max_root);
// Get active transaction-status index or 0
let active_transaction_status_index = db
@@ -659,12 +442,11 @@ impl Blockstore {
bank_hash_cf,
new_shreds_signals: vec![],
completed_slots_senders: vec![],
insert_shreds_lock: Arc::new(Mutex::new(())),
insert_shreds_lock: Mutex::<()>::default(),
last_root,
lowest_cleanup_slot: Arc::new(RwLock::new(0)),
lowest_cleanup_slot: RwLock::<Slot>::default(),
no_compaction: false,
slots_stats: Arc::new(Mutex::new(SlotsStats::default())),
advanced_options,
slots_stats: Mutex::<SlotsStats>::default(),
};
if initialize_transaction_status_index {
blockstore.initialize_transaction_status_index()?;
@@ -961,162 +743,24 @@ impl Blockstore {
/// Collects and reports [`BlockstoreRocksDbColumnFamilyMetrics`] for all
/// the column families.
pub fn submit_rocksdb_cf_metrics_for_all_cfs(&self) {
let advanced_options = &self.advanced_options;
self.submit_rocksdb_cf_metrics::<cf::SlotMeta>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"slot_meta",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::DeadSlots>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"dead_slots",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::DuplicateSlots>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"duplicate_slots",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::ErasureMeta>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"erasure_meta",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::Orphans>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"orphans",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::BankHash>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"bank_hash",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::Root>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"root",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::Index>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"index",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::ShredData>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"shred_data",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::ShredCode>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"shred_code",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::TransactionStatus>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"transaction_status",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::AddressSignatures>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"address_signature",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::TransactionMemos>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"transaction_memos",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::TransactionStatusIndex>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"transaction_status_index",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::Rewards>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"rewards",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::Blocktime>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"blocktime",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::PerfSamples>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"perf_sample",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::BlockHeight>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"block_height",
advanced_options
));
self.submit_rocksdb_cf_metrics::<cf::ProgramCosts>(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"program_costs",
advanced_options
));
}
/// Collects and reports [`BlockstoreRocksDbColumnFamilyMetrics`] for the
/// given column family.
fn submit_rocksdb_cf_metrics<C: 'static + Column + ColumnName>(
&self,
metric_name_and_cf_tag: &'static str,
) {
let cf = self.db.column::<C>();
let cf_rocksdb_metrics = BlockstoreRocksDbColumnFamilyMetrics {
total_sst_files_size: cf
.get_int_property(RocksProperties::TOTAL_SST_FILES_SIZE)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
size_all_mem_tables: cf
.get_int_property(RocksProperties::SIZE_ALL_MEM_TABLES)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
num_snapshots: cf
.get_int_property(RocksProperties::NUM_SNAPSHOTS)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
oldest_snapshot_time: cf
.get_int_property(RocksProperties::OLDEST_SNAPSHOT_TIME)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
actual_delayed_write_rate: cf
.get_int_property(RocksProperties::ACTUAL_DELAYED_WRITE_RATE)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
is_write_stopped: cf
.get_int_property(RocksProperties::IS_WRITE_STOPPED)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
block_cache_capacity: cf
.get_int_property(RocksProperties::BLOCK_CACHE_CAPACITY)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
block_cache_usage: cf
.get_int_property(RocksProperties::BLOCK_CACHE_USAGE)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
block_cache_pinned_usage: cf
.get_int_property(RocksProperties::BLOCK_CACHE_PINNED_USAGE)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
estimate_table_readers_mem: cf
.get_int_property(RocksProperties::ESTIMATE_TABLE_READERS_MEM)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
mem_table_flush_pending: cf
.get_int_property(RocksProperties::MEM_TABLE_FLUSH_PENDING)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
compaction_pending: cf
.get_int_property(RocksProperties::COMPACTION_PENDING)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
num_running_compactions: cf
.get_int_property(RocksProperties::NUM_RUNNING_COMPACTIONS)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
num_running_flushes: cf
.get_int_property(RocksProperties::NUM_RUNNING_FLUSHES)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
estimate_oldest_key_time: cf
.get_int_property(RocksProperties::ESTIMATE_OLDEST_KEY_TIME)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
background_errors: cf
.get_int_property(RocksProperties::BACKGROUND_ERRORS)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
};
cf_rocksdb_metrics.report_metrics(metric_name_and_cf_tag);
self.meta_cf.submit_rocksdb_cf_metrics();
self.dead_slots_cf.submit_rocksdb_cf_metrics();
self.duplicate_slots_cf.submit_rocksdb_cf_metrics();
self.erasure_meta_cf.submit_rocksdb_cf_metrics();
self.orphans_cf.submit_rocksdb_cf_metrics();
self.index_cf.submit_rocksdb_cf_metrics();
self.data_shred_cf.submit_rocksdb_cf_metrics();
self.code_shred_cf.submit_rocksdb_cf_metrics();
self.transaction_status_cf.submit_rocksdb_cf_metrics();
self.address_signatures_cf.submit_rocksdb_cf_metrics();
self.transaction_memos_cf.submit_rocksdb_cf_metrics();
self.transaction_status_index_cf.submit_rocksdb_cf_metrics();
self.rewards_cf.submit_rocksdb_cf_metrics();
self.blocktime_cf.submit_rocksdb_cf_metrics();
self.perf_samples_cf.submit_rocksdb_cf_metrics();
self.block_height_cf.submit_rocksdb_cf_metrics();
self.program_costs_cf.submit_rocksdb_cf_metrics();
self.bank_hash_cf.submit_rocksdb_cf_metrics();
}
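After the refactor, the aggregate reporter above is a plain fan-out over the typed `LedgerColumn` handles, with each column resolving its own metric header. A minimal usage sketch; the background thread and the ten-second interval are illustrative assumptions, not part of this change:

use std::{sync::Arc, thread, time::Duration};

use solana_ledger::blockstore::Blockstore;

// Periodically report RocksDB column-family metrics for every column family.
fn spawn_cf_metrics_reporter(blockstore: Arc<Blockstore>) {
    thread::spawn(move || loop {
        blockstore.submit_rocksdb_cf_metrics_for_all_cfs();
        thread::sleep(Duration::from_secs(10));
    });
}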
fn try_shred_recovery(
@@ -1250,13 +894,13 @@ impl Blockstore {
let mut newly_completed_data_sets: Vec<CompletedDataSetInfo> = vec![];
let mut inserted_indices = Vec::new();
for (i, (shred, is_repaired)) in shreds.into_iter().zip(is_repaired).enumerate() {
let shred_source = if is_repaired {
ShredSource::Repaired
} else {
ShredSource::Turbine
};
match shred.shred_type() {
ShredType::Data => {
let shred_source = if is_repaired {
ShredSource::Repaired
} else {
ShredSource::Turbine
};
match self.check_insert_data_shred(
shred,
&mut erasure_metas,
@@ -1295,7 +939,7 @@ impl Blockstore {
&mut index_meta_time,
handle_duplicate,
is_trusted,
is_repaired,
shred_source,
metrics,
);
}
@@ -1481,7 +1125,7 @@ impl Blockstore {
index_meta_time: &mut u64,
handle_duplicate: &F,
is_trusted: bool,
is_repaired: bool,
shred_source: ShredSource,
metrics: &mut BlockstoreInsertionMetrics,
) -> bool
where
@@ -1548,13 +1192,10 @@ impl Blockstore {
return false;
}
if is_repaired {
let mut slots_stats = self.slots_stats.lock().unwrap();
let mut e = slots_stats.stats.entry(slot).or_default();
e.num_repaired += 1;
}
self.slots_stats
.lock()
.unwrap()
.add_shred(slot, shred_source);
// insert coding shred into rocks
let result = self
.insert_coding_shred(index_meta, &shred, write_batch)
@@ -1700,7 +1341,7 @@ impl Blockstore {
just_inserted_shreds,
&self.last_root,
leader_schedule,
shred_source.clone(),
shred_source,
) {
return Err(InsertDataShredError::InvalidShred);
}
@@ -1972,49 +1613,12 @@ impl Blockstore {
end_index,
})
.collect();
if shred_source == ShredSource::Repaired || shred_source == ShredSource::Recovered {
{
let mut slots_stats = self.slots_stats.lock().unwrap();
let mut e = slots_stats.stats.entry(slot_meta.slot).or_default();
if shred_source == ShredSource::Repaired {
e.num_repaired += 1;
slots_stats.add_shred(slot_meta.slot, shred_source);
if slot_meta.is_full() {
slots_stats.set_full(slot_meta);
}
if shred_source == ShredSource::Recovered {
e.num_recovered += 1;
}
}
if slot_meta.is_full() {
let (num_repaired, num_recovered) = {
let mut slots_stats = self.slots_stats.lock().unwrap();
if let Some(e) = slots_stats.stats.remove(&slot_meta.slot) {
if slots_stats.last_cleanup_ts.elapsed().as_secs() > 30 {
let root = self.last_root();
slots_stats.stats = slots_stats.stats.split_off(&root);
slots_stats.last_cleanup_ts = Instant::now();
}
(e.num_repaired, e.num_recovered)
} else {
(0, 0)
}
};
datapoint_info!(
"shred_insert_is_full",
(
"total_time_ms",
solana_sdk::timing::timestamp() - slot_meta.first_shred_timestamp,
i64
),
("slot", slot_meta.slot, i64),
(
"last_index",
slot_meta
.last_index
.and_then(|ix| i64::try_from(ix).ok())
.unwrap_or(-1),
i64
),
("num_repaired", num_repaired, i64),
("num_recovered", num_recovered, i64),
);
}
trace!("inserted shred into slot {:?} and index {:?}", slot, index);
Ok(newly_completed_data_sets)
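The inline `shred_insert_is_full` datapoint above is folded into `SlotsStats::set_full`, together with the stats lookup it needs. One plausible shape for that method, continuing the hypothetical sketch from earlier (the real module may well track more, such as whether the slot is later marked dead or rooted):

// Hypothetical sketch only; assumes the map-based SlotsStats from above plus
// SlotMeta and datapoint_info! in scope, as they are in blockstore.rs.
impl SlotsStats {
    pub fn set_full(&mut self, slot_meta: &SlotMeta) {
        // Look up whatever repair/recovery counts were accumulated for this slot.
        let (num_repaired, num_recovered) = self
            .0
            .get(&slot_meta.slot)
            .map(|e| (e.num_repaired, e.num_recovered))
            .unwrap_or((0, 0));
        datapoint_info!(
            "shred_insert_is_full",
            ("slot", slot_meta.slot, i64),
            ("num_repaired", num_repaired, i64),
            ("num_recovered", num_recovered, i64),
        );
    }
}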
@@ -2109,7 +1713,7 @@ impl Blockstore {
ticks_per_slot: u64,
parent: Option<u64>,
is_full_slot: bool,
keypair: &Arc<Keypair>,
keypair: &Keypair,
entries: Vec<Entry>,
version: u16,
) -> Result<usize /*num of data shreds*/> {
@@ -3574,7 +3178,7 @@ impl Blockstore {
self.db.is_primary_access()
}
pub fn scan_and_fix_roots(&self, exit: &Arc<AtomicBool>) -> Result<()> {
pub fn scan_and_fix_roots(&self, exit: &AtomicBool) -> Result<()> {
let ancestor_iterator = AncestorIterator::new(self.last_root(), self)
.take_while(|&slot| slot >= self.lowest_cleanup_slot());
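Narrowing the parameter from `&Arc<AtomicBool>` to `&AtomicBool` is the more general signature: an `Arc`-holding caller can still pass its flag through deref coercion, and callers without shared ownership no longer need to allocate an `Arc` at all. An illustrative caller (the `Result` import path is assumed):

use std::sync::{atomic::AtomicBool, Arc};

use solana_ledger::{blockstore::Blockstore, blockstore_db::Result};

fn fix_roots(blockstore: &Blockstore) -> Result<()> {
    // &Arc<AtomicBool> coerces to &AtomicBool automatically.
    let exit = Arc::new(AtomicBool::new(false));
    blockstore.scan_and_fix_roots(&exit)?;

    // A caller without shared ownership can skip the Arc entirely.
    let local_exit = AtomicBool::new(false);
    blockstore.scan_and_fix_roots(&local_exit)
}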
@@ -4150,20 +3754,20 @@ pub fn create_new_ledger(
ledger_path: &Path,
genesis_config: &GenesisConfig,
max_genesis_archive_unpacked_size: u64,
advanced_options: BlockstoreAdvancedOptions,
column_options: LedgerColumnOptions,
) -> Result<Hash> {
Blockstore::destroy(ledger_path)?;
genesis_config.write(ledger_path)?;
// Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
let blockstore_dir = Blockstore::blockstore_directory(&advanced_options.shred_storage_type);
let blockstore_dir = Blockstore::blockstore_directory(&column_options.shred_storage_type);
let blockstore = Blockstore::open_with_options(
ledger_path,
BlockstoreOptions {
access_type: AccessType::PrimaryOnly,
recovery_mode: None,
enforce_ulimit_nofile: false,
advanced_options: advanced_options.clone(),
column_options: column_options.clone(),
},
)?;
let ticks_per_slot = genesis_config.ticks_per_slot;
@@ -4332,7 +3936,7 @@ macro_rules! create_new_tmp_ledger {
$crate::blockstore::create_new_ledger_from_name(
$crate::tmp_ledger_name!(),
$genesis_config,
$crate::blockstore_db::BlockstoreAdvancedOptions::default(),
$crate::blockstore_db::LedgerColumnOptions::default(),
)
};
}
@@ -4343,7 +3947,7 @@ macro_rules! create_new_tmp_ledger_auto_delete {
$crate::blockstore::create_new_ledger_from_name_auto_delete(
$crate::tmp_ledger_name!(),
$genesis_config,
$crate::blockstore_db::BlockstoreAdvancedOptions::default(),
$crate::blockstore_db::LedgerColumnOptions::default(),
)
};
}
@@ -4354,10 +3958,11 @@ macro_rules! create_new_tmp_ledger_fifo_auto_delete {
$crate::blockstore::create_new_ledger_from_name_auto_delete(
$crate::tmp_ledger_name!(),
$genesis_config,
$crate::blockstore_db::BlockstoreAdvancedOptions {
$crate::blockstore_db::LedgerColumnOptions {
shred_storage_type: $crate::blockstore_db::ShredStorageType::RocksFifo(
$crate::blockstore_db::BlockstoreRocksFifoOptions::default(),
),
..$crate::blockstore_db::LedgerColumnOptions::default()
},
)
};
@@ -4388,10 +3993,10 @@ pub fn verify_shred_slots(slot: Slot, parent_slot: Slot, last_root: Slot) -> boo
pub fn create_new_ledger_from_name(
name: &str,
genesis_config: &GenesisConfig,
advanced_options: BlockstoreAdvancedOptions,
column_options: LedgerColumnOptions,
) -> (PathBuf, Hash) {
let (ledger_path, blockhash) =
create_new_ledger_from_name_auto_delete(name, genesis_config, advanced_options);
create_new_ledger_from_name_auto_delete(name, genesis_config, column_options);
(ledger_path.into_path(), blockhash)
}
@@ -4402,14 +4007,14 @@ pub fn create_new_ledger_from_name(
pub fn create_new_ledger_from_name_auto_delete(
name: &str,
genesis_config: &GenesisConfig,
advanced_options: BlockstoreAdvancedOptions,
column_options: LedgerColumnOptions,
) -> (TempDir, Hash) {
let ledger_path = get_ledger_path_from_name_auto_delete(name);
let blockhash = create_new_ledger(
ledger_path.path(),
genesis_config,
MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
advanced_options,
column_options,
)
.unwrap();
(ledger_path, blockhash)
@@ -4666,6 +4271,7 @@ pub mod tests {
pubkey::Pubkey,
signature::Signature,
transaction::{Transaction, TransactionError},
transaction_context::TransactionReturnData,
},
solana_storage_proto::convert::generated,
solana_transaction_status::{InnerInstructions, Reward, Rewards, TransactionTokenBalance},
@@ -4718,10 +4324,11 @@ pub mod tests {
let blockstore = Blockstore::open_with_options(
ledger_path.path(),
BlockstoreOptions {
advanced_options: BlockstoreAdvancedOptions {
column_options: LedgerColumnOptions {
shred_storage_type: ShredStorageType::RocksFifo(
BlockstoreRocksFifoOptions::default(),
),
..LedgerColumnOptions::default()
},
..BlockstoreOptions::default()
},
@@ -6355,7 +5962,7 @@ pub mod tests {
panic!("no dupes");
},
false,
false,
ShredSource::Turbine,
&mut BlockstoreInsertionMetrics::default(),
));
@@ -6373,7 +5980,7 @@ pub mod tests {
counter.fetch_add(1, Ordering::Relaxed);
},
false,
false,
ShredSource::Turbine,
&mut BlockstoreInsertionMetrics::default(),
));
assert_eq!(counter.load(Ordering::Relaxed), 1);
@@ -6826,6 +6433,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
blockstore
@@ -6843,6 +6451,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
blockstore
@@ -6860,6 +6469,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
blockstore
@@ -6879,6 +6489,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
},
}
})
@@ -6991,6 +6602,10 @@ pub mod tests {
writable: vec![Pubkey::new_unique()],
readonly: vec![Pubkey::new_unique()],
};
let test_return_data = TransactionReturnData {
program_id: Pubkey::new_unique(),
data: vec![1, 2, 3],
};
// result not found
assert!(transaction_status_cf
@@ -7010,6 +6625,7 @@ pub mod tests {
post_token_balances: Some(post_token_balances_vec.clone()),
rewards: Some(rewards_vec.clone()),
loaded_addresses: test_loaded_addresses.clone(),
return_data: Some(test_return_data.clone()),
}
.into();
assert!(transaction_status_cf
@@ -7028,6 +6644,7 @@ pub mod tests {
post_token_balances,
rewards,
loaded_addresses,
return_data,
} = transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((0, Signature::default(), 0))
.unwrap()
@@ -7044,6 +6661,7 @@ pub mod tests {
assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
assert_eq!(rewards.unwrap(), rewards_vec);
assert_eq!(loaded_addresses, test_loaded_addresses);
assert_eq!(return_data.unwrap(), test_return_data);
// insert value
let status = TransactionStatusMeta {
@@ -7057,6 +6675,7 @@ pub mod tests {
post_token_balances: Some(post_token_balances_vec.clone()),
rewards: Some(rewards_vec.clone()),
loaded_addresses: test_loaded_addresses.clone(),
return_data: Some(test_return_data.clone()),
}
.into();
assert!(transaction_status_cf
@@ -7075,6 +6694,7 @@ pub mod tests {
post_token_balances,
rewards,
loaded_addresses,
return_data,
} = transaction_status_cf
.get_protobuf_or_bincode::<StoredTransactionStatusMeta>((
0,
@@ -7097,6 +6717,7 @@ pub mod tests {
assert_eq!(post_token_balances.unwrap(), post_token_balances_vec);
assert_eq!(rewards.unwrap(), rewards_vec);
assert_eq!(loaded_addresses, test_loaded_addresses);
assert_eq!(return_data.unwrap(), test_return_data);
}
#[test]
@@ -7325,6 +6946,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
@@ -7520,6 +7142,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
@@ -7691,6 +7314,10 @@ pub mod tests {
let post_token_balances = Some(vec![]);
let rewards = Some(vec![]);
let signature = transaction.signatures[0];
let return_data = Some(TransactionReturnData {
program_id: Pubkey::new_unique(),
data: vec![1, 2, 3],
});
let status = TransactionStatusMeta {
status: Ok(()),
fee: 42,
@@ -7702,6 +7329,7 @@ pub mod tests {
post_token_balances: post_token_balances.clone(),
rewards: rewards.clone(),
loaded_addresses: LoadedAddresses::default(),
return_data: return_data.clone(),
}
.into();
blockstore
@@ -7721,6 +7349,7 @@ pub mod tests {
post_token_balances,
rewards,
loaded_addresses: LoadedAddresses::default(),
return_data,
},
}
})
@@ -7792,6 +7421,10 @@ pub mod tests {
let pre_token_balances = Some(vec![]);
let post_token_balances = Some(vec![]);
let rewards = Some(vec![]);
let return_data = Some(TransactionReturnData {
program_id: Pubkey::new_unique(),
data: vec![1, 2, 3],
});
let signature = transaction.signatures[0];
let status = TransactionStatusMeta {
status: Ok(()),
@@ -7804,6 +7437,7 @@ pub mod tests {
post_token_balances: post_token_balances.clone(),
rewards: rewards.clone(),
loaded_addresses: LoadedAddresses::default(),
return_data: return_data.clone(),
}
.into();
blockstore
@@ -7823,6 +7457,7 @@ pub mod tests {
post_token_balances,
rewards,
loaded_addresses: LoadedAddresses::default(),
return_data,
},
}
})
@@ -8582,6 +8217,7 @@ pub mod tests {
post_token_balances: Some(vec![]),
rewards: Some(vec![]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData::default()),
}
.into();
transaction_status_cf
@@ -9139,6 +8775,10 @@ pub mod tests {
commission: None,
}]),
loaded_addresses: LoadedAddresses::default(),
return_data: Some(TransactionReturnData {
program_id: Pubkey::new_unique(),
data: vec![1, 2, 3],
}),
};
let deprecated_status: StoredTransactionStatusMeta = status.clone().try_into().unwrap();
let protobuf_status: generated::TransactionStatusMeta = status.into();


@@ -9,9 +9,10 @@ use {
self,
compaction_filter::CompactionFilter,
compaction_filter_factory::{CompactionFilterContext, CompactionFilterFactory},
ColumnFamily, ColumnFamilyDescriptor, CompactionDecision, DBCompactionStyle, DBIterator,
DBRawIterator, DBRecoveryMode, FifoCompactOptions, IteratorMode as RocksIteratorMode,
Options, WriteBatch as RWriteBatch, DB,
properties as RocksProperties, ColumnFamily, ColumnFamilyDescriptor, CompactionDecision,
DBCompactionStyle, DBCompressionType as RocksCompressionType, DBIterator, DBRawIterator,
DBRecoveryMode, FifoCompactOptions, IteratorMode as RocksIteratorMode, Options,
WriteBatch as RWriteBatch, DB,
},
serde::{de::DeserializeOwned, Serialize},
solana_runtime::hardened_unpack::UnpackError,
@@ -35,6 +36,8 @@ use {
thiserror::Error,
};
const BLOCKSTORE_METRICS_ERROR: i64 = -1;
// The default storage size for storing shreds when `rocksdb-shred-compaction`
// is set to `fifo` in the validator arguments. This amount of storage size
// in bytes will be equally allocated to both data shreds and coding shreds.
@@ -98,6 +101,222 @@ const PROGRAM_COSTS_CF: &str = "program_costs";
// 1 day is chosen for the same reasoning of DEFAULT_COMPACTION_SLOT_INTERVAL
const PERIODIC_COMPACTION_SECONDS: u64 = 60 * 60 * 24;
#[derive(Default)]
/// A metrics struct that exposes RocksDB's column family properties.
///
/// Here we only expose a subset of all the internal properties which are
/// relevant to the ledger store performance.
///
/// The complete list of RocksDB internal properties can be found
/// [here](https://github.com/facebook/rocksdb/blob/08809f5e6cd9cc4bc3958dd4d59457ae78c76660/include/rocksdb/db.h#L654-L689).
pub struct BlockstoreRocksDbColumnFamilyMetrics {
// Size related
// The storage size occupied by the column family.
// RocksDB's internal property key: "rocksdb.total-sst-files-size"
pub total_sst_files_size: i64,
// The memory size occupied by the column family's in-memory buffer.
// RocksDB's internal property key: "rocksdb.size-all-mem-tables"
pub size_all_mem_tables: i64,
// Snapshot related
// Number of snapshots held for the column family.
// RocksDB's internal property key: "rocksdb.num-snapshots"
pub num_snapshots: i64,
// Unix timestamp of the oldest unreleased snapshot.
// RocksDB's internal property key: "rocksdb.oldest-snapshot-time"
pub oldest_snapshot_time: i64,
// Write related
// The current actual delayed write rate. 0 means no delay.
// RocksDB's internal property key: "rocksdb.actual-delayed-write-rate"
pub actual_delayed_write_rate: i64,
// A flag indicating whether writes are stopped on this column family.
// 1 indicates writes have been stopped.
// RocksDB's internal property key: "rocksdb.is-write-stopped"
pub is_write_stopped: i64,
// Memory / block cache related
// The block cache capacity of the column family.
// RocksDB's internal property key: "rocksdb.block-cache-capacity"
pub block_cache_capacity: i64,
// The memory size used by the column family in the block cache.
// RocksDB's internal property key: "rocksdb.block-cache-usage"
pub block_cache_usage: i64,
// The memory size used by the column family in the block cache where
// entries are pinned.
// RocksDB's internal property key: "rocksdb.block-cache-pinned-usage"
pub block_cache_pinned_usage: i64,
// The estimated memory size used for reading SST tables in this column
// family such as filters and index blocks. Note that this number does not
// include the memory used in block cache.
// RocksDB's internal property key: "rocksdb.estimate-table-readers-mem"
pub estimate_table_readers_mem: i64,
// Flush and compaction
// A 1 or 0 flag indicating whether a memtable flush is pending.
// If this number is 1, it means a memtable is waiting to be flushed,
// but there might be too many L0 files that prevent it from being flushed.
// RocksDB's internal property key: "rocksdb.mem-table-flush-pending"
pub mem_table_flush_pending: i64,
// A 1 or 0 flag indicating whether a compaction job is pending.
// If this number is 1, it means some part of the column family requires
// compaction in order to maintain the shape of the LSM tree, but the compaction
// is pending because the desired compaction job is either waiting for
// other dependent compactions to finish or waiting for an available
// compaction thread.
// RocksDB's internal property key: "rocksdb.compaction-pending"
pub compaction_pending: i64,
// The number of compactions that are currently running for the column family.
// RocksDB's internal property key: "rocksdb.num-running-compactions"
pub num_running_compactions: i64,
// The number of flushes that are currently running for the column family.
// RocksDB's internal property key: "rocksdb.num-running-flushes"
pub num_running_flushes: i64,
// FIFO Compaction related
// An estimate of the oldest key timestamp in the DB. Only available
// for FIFO compaction with compaction_options_fifo.allow_compaction = false.
// RocksDB's internal property key: "rocksdb.estimate-oldest-key-time"
pub estimate_oldest_key_time: i64,
// Misc
// The accumulated number of RocksDB background errors.
// RocksDB's internal property key: "rocksdb.background-errors"
pub background_errors: i64,
}
impl BlockstoreRocksDbColumnFamilyMetrics {
/// Report metrics with the specified metric name and column family tag.
/// The metric name and the column family tag are embedded in the parameter
/// `metric_name_and_cf_tag` with the following format.
///
/// For example, "blockstore_rocksdb_cfs,cf_name=shred_data".
pub fn report_metrics(&self, metric_name_and_cf_tag: &'static str) {
datapoint_info!(
metric_name_and_cf_tag,
// Size related
(
"total_sst_files_size",
self.total_sst_files_size as i64,
i64
),
("size_all_mem_tables", self.size_all_mem_tables as i64, i64),
// Snapshot related
("num_snapshots", self.num_snapshots as i64, i64),
(
"oldest_snapshot_time",
self.oldest_snapshot_time as i64,
i64
),
// Write related
(
"actual_delayed_write_rate",
self.actual_delayed_write_rate as i64,
i64
),
("is_write_stopped", self.is_write_stopped as i64, i64),
// Memory / block cache related
(
"block_cache_capacity",
self.block_cache_capacity as i64,
i64
),
("block_cache_usage", self.block_cache_usage as i64, i64),
(
"block_cache_pinned_usage",
self.block_cache_pinned_usage as i64,
i64
),
(
"estimate_table_readers_mem",
self.estimate_table_readers_mem as i64,
i64
),
// Flush and compaction
(
"mem_table_flush_pending",
self.mem_table_flush_pending as i64,
i64
),
("compaction_pending", self.compaction_pending as i64, i64),
(
"num_running_compactions",
self.num_running_compactions as i64,
i64
),
("num_running_flushes", self.num_running_flushes as i64, i64),
// FIFO Compaction related
(
"estimate_oldest_key_time",
self.estimate_oldest_key_time as i64,
i64
),
// Misc
("background_errors", self.background_errors as i64, i64),
);
}
}
macro_rules! rocksdb_metric_header {
($metric_name:literal, $cf_name:literal, $column_options:expr) => {
match $column_options.shred_storage_type {
ShredStorageType::RocksLevel =>
rocksdb_metric_header!(@compression_type $metric_name, $cf_name, $column_options, "rocks_level"),
ShredStorageType::RocksFifo(_) =>
rocksdb_metric_header!(@compression_type $metric_name, $cf_name, $column_options, "rocks_fifo"),
}
};
(@compression_type $metric_name:literal, $cf_name:literal, $column_options:expr, $storage_type:literal) => {
match $column_options.compression_type {
BlockstoreCompressionType::None => rocksdb_metric_header!(@all_fields
$metric_name,
$cf_name,
$storage_type,
"None"
),
BlockstoreCompressionType::Snappy => rocksdb_metric_header!(@all_fields
$metric_name,
$cf_name,
$storage_type,
"Snappy"
),
BlockstoreCompressionType::Lz4 => rocksdb_metric_header!(@all_fields
$metric_name,
$cf_name,
$storage_type,
"Lz4"
),
BlockstoreCompressionType::Zlib => rocksdb_metric_header!(@all_fields
$metric_name,
$cf_name,
$storage_type,
"Zlib"
),
}
};
(@all_fields $metric_name:literal, $cf_name:literal, $storage_type:literal, $compression_type:literal) => {
concat!($metric_name,
",cf_name=", $cf_name,
",storage=", $storage_type,
",compression=", $compression_type,
)
};
}
use rocksdb_metric_header;
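The two-level dispatch above resolves every tag combination to a compile-time string: the outer match picks the storage tag, the inner one the compression tag, and `concat!` bakes each combination into a single `&'static str`; only the match on `column_options` happens at runtime. Inside this module (the macro is crate-private), the default options would produce:

// Illustration: default LedgerColumnOptions means RocksLevel storage and no
// compression, so the RocksLevel/None arms are taken.
fn demo_metric_header() {
    let column_options = LedgerColumnOptions::default();
    let header =
        rocksdb_metric_header!("blockstore_rocksdb_cfs", "shred_data", column_options);
    assert_eq!(
        header,
        "blockstore_rocksdb_cfs,cf_name=shred_data,storage=rocks_level,compression=None"
    );
}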
#[derive(Error, Debug)]
pub enum BlockstoreError {
ShredForIndexExists,
@@ -554,6 +773,13 @@ pub trait Column {
}
}
pub trait ColumnMetrics {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
);
}
pub trait ColumnName {
const NAME: &'static str;
}
@@ -638,7 +864,18 @@ impl Column for columns::TransactionStatus {
(index, Signature::default(), 0)
}
}
impl ColumnMetrics for columns::TransactionStatus {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"transaction_status",
column_options
));
}
}
impl ColumnName for columns::TransactionStatus {
const NAME: &'static str = TRANSACTION_STATUS_CF;
}
@@ -679,7 +916,18 @@ impl Column for columns::AddressSignatures {
(index, Pubkey::default(), 0, Signature::default())
}
}
impl ColumnMetrics for columns::AddressSignatures {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"address_signatures",
column_options
));
}
}
impl ColumnName for columns::AddressSignatures {
const NAME: &'static str = ADDRESS_SIGNATURES_CF;
}
@@ -710,7 +958,18 @@ impl Column for columns::TransactionMemos {
Signature::default()
}
}
impl ColumnMetrics for columns::TransactionMemos {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"transaction_memos",
column_options
));
}
}
impl ColumnName for columns::TransactionMemos {
const NAME: &'static str = TRANSACTION_MEMOS_CF;
}
@@ -741,12 +1000,35 @@ impl Column for columns::TransactionStatusIndex {
slot
}
}
impl ColumnMetrics for columns::TransactionStatusIndex {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"transaction_status_index",
column_options
));
}
}
impl ColumnName for columns::TransactionStatusIndex {
const NAME: &'static str = TRANSACTION_STATUS_INDEX_CF;
}
impl SlotColumn for columns::Rewards {}
impl ColumnMetrics for columns::Rewards {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"rewards",
column_options
));
}
}
impl ColumnName for columns::Rewards {
const NAME: &'static str = REWARDS_CF;
}
@@ -755,6 +1037,18 @@ impl ProtobufColumn for columns::Rewards {
}
impl SlotColumn for columns::Blocktime {}
impl ColumnMetrics for columns::Blocktime {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"blocktime",
column_options
));
}
}
impl ColumnName for columns::Blocktime {
const NAME: &'static str = BLOCKTIME_CF;
}
@@ -763,6 +1057,18 @@ impl TypedColumn for columns::Blocktime {
}
impl SlotColumn for columns::PerfSamples {}
impl ColumnMetrics for columns::PerfSamples {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"perf_samples",
column_options
));
}
}
impl ColumnName for columns::PerfSamples {
const NAME: &'static str = PERF_SAMPLES_CF;
}
@@ -771,6 +1077,18 @@ impl TypedColumn for columns::PerfSamples {
}
impl SlotColumn for columns::BlockHeight {}
impl ColumnMetrics for columns::BlockHeight {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"block_height",
column_options
));
}
}
impl ColumnName for columns::BlockHeight {
const NAME: &'static str = BLOCK_HEIGHT_CF;
}
@@ -778,6 +1096,19 @@ impl TypedColumn for columns::BlockHeight {
type Type = u64;
}
impl ColumnMetrics for columns::ProgramCosts {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"program_costs",
column_options
));
}
}
impl ColumnName for columns::ProgramCosts {
const NAME: &'static str = PROGRAM_COSTS_CF;
}
@@ -831,7 +1162,18 @@ impl Column for columns::ShredCode {
(slot, 0)
}
}
impl ColumnMetrics for columns::ShredCode {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"shred_code",
column_options
));
}
}
impl ColumnName for columns::ShredCode {
const NAME: &'static str = CODE_SHRED_CF;
}
@@ -861,12 +1203,35 @@ impl Column for columns::ShredData {
(slot, 0)
}
}
impl ColumnMetrics for columns::ShredData {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"shred_data",
column_options
));
}
}
impl ColumnName for columns::ShredData {
const NAME: &'static str = DATA_SHRED_CF;
}
impl SlotColumn for columns::Index {}
impl ColumnMetrics for columns::Index {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"index",
column_options
));
}
}
impl ColumnName for columns::Index {
const NAME: &'static str = INDEX_CF;
}
@@ -875,6 +1240,18 @@ impl TypedColumn for columns::Index {
}
impl SlotColumn for columns::DeadSlots {}
impl ColumnMetrics for columns::DeadSlots {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"dead_slots",
column_options
));
}
}
impl ColumnName for columns::DeadSlots {
const NAME: &'static str = DEAD_SLOTS_CF;
}
@@ -883,6 +1260,18 @@ impl TypedColumn for columns::DeadSlots {
}
impl SlotColumn for columns::DuplicateSlots {}
impl ColumnMetrics for columns::DuplicateSlots {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"duplicate_slots",
column_options
));
}
}
impl ColumnName for columns::DuplicateSlots {
const NAME: &'static str = DUPLICATE_SLOTS_CF;
}
@@ -891,6 +1280,18 @@ impl TypedColumn for columns::DuplicateSlots {
}
impl SlotColumn for columns::Orphans {}
impl ColumnMetrics for columns::Orphans {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"orphans",
column_options
));
}
}
impl ColumnName for columns::Orphans {
const NAME: &'static str = ORPHANS_CF;
}
@@ -899,6 +1300,18 @@ impl TypedColumn for columns::Orphans {
}
impl SlotColumn for columns::BankHash {}
impl ColumnMetrics for columns::BankHash {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"bank_hash",
column_options
));
}
}
impl ColumnName for columns::BankHash {
const NAME: &'static str = BANK_HASH_CF;
}
@@ -907,6 +1320,18 @@ impl TypedColumn for columns::BankHash {
}
impl SlotColumn for columns::Root {}
impl ColumnMetrics for columns::Root {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"root",
column_options
));
}
}
impl ColumnName for columns::Root {
const NAME: &'static str = ROOT_CF;
}
@@ -915,6 +1340,18 @@ impl TypedColumn for columns::Root {
}
impl SlotColumn for columns::SlotMeta {}
impl ColumnMetrics for columns::SlotMeta {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"slot_meta",
column_options
));
}
}
impl ColumnName for columns::SlotMeta {
const NAME: &'static str = META_CF;
}
@@ -948,6 +1385,18 @@ impl Column for columns::ErasureMeta {
(slot, 0)
}
}
impl ColumnMetrics for columns::ErasureMeta {
fn report_cf_metrics(
cf_metrics: BlockstoreRocksDbColumnFamilyMetrics,
column_options: &Arc<LedgerColumnOptions>,
) {
cf_metrics.report_metrics(rocksdb_metric_header!(
"blockstore_rocksdb_cfs",
"erasure_meta",
column_options
));
}
}
impl ColumnName for columns::ErasureMeta {
const NAME: &'static str = ERASURE_META_CF;
}
@@ -959,15 +1408,73 @@ impl TypedColumn for columns::ErasureMeta {
pub struct Database {
backend: Arc<Rocks>,
path: Arc<Path>,
column_options: Arc<LedgerColumnOptions>,
}
#[derive(Debug, Clone)]
pub struct LedgerColumn<C>
where
C: Column,
C: Column + ColumnName + ColumnMetrics,
{
backend: Arc<Rocks>,
column: PhantomData<C>,
pub column_options: Arc<LedgerColumnOptions>,
}
impl<C: Column + ColumnName + ColumnMetrics> LedgerColumn<C> {
pub fn submit_rocksdb_cf_metrics(&self) {
let cf_rocksdb_metrics = BlockstoreRocksDbColumnFamilyMetrics {
total_sst_files_size: self
.get_int_property(RocksProperties::TOTAL_SST_FILES_SIZE)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
size_all_mem_tables: self
.get_int_property(RocksProperties::SIZE_ALL_MEM_TABLES)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
num_snapshots: self
.get_int_property(RocksProperties::NUM_SNAPSHOTS)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
oldest_snapshot_time: self
.get_int_property(RocksProperties::OLDEST_SNAPSHOT_TIME)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
actual_delayed_write_rate: self
.get_int_property(RocksProperties::ACTUAL_DELAYED_WRITE_RATE)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
is_write_stopped: self
.get_int_property(RocksProperties::IS_WRITE_STOPPED)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
block_cache_capacity: self
.get_int_property(RocksProperties::BLOCK_CACHE_CAPACITY)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
block_cache_usage: self
.get_int_property(RocksProperties::BLOCK_CACHE_USAGE)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
block_cache_pinned_usage: self
.get_int_property(RocksProperties::BLOCK_CACHE_PINNED_USAGE)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
estimate_table_readers_mem: self
.get_int_property(RocksProperties::ESTIMATE_TABLE_READERS_MEM)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
mem_table_flush_pending: self
.get_int_property(RocksProperties::MEM_TABLE_FLUSH_PENDING)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
compaction_pending: self
.get_int_property(RocksProperties::COMPACTION_PENDING)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
num_running_compactions: self
.get_int_property(RocksProperties::NUM_RUNNING_COMPACTIONS)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
num_running_flushes: self
.get_int_property(RocksProperties::NUM_RUNNING_FLUSHES)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
estimate_oldest_key_time: self
.get_int_property(RocksProperties::ESTIMATE_OLDEST_KEY_TIME)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
background_errors: self
.get_int_property(RocksProperties::BACKGROUND_ERRORS)
.unwrap_or(BLOCKSTORE_METRICS_ERROR),
};
C::report_cf_metrics(cf_rocksdb_metrics, &self.column_options);
}
}
pub struct WriteBatch<'a> {
@@ -975,7 +1482,7 @@ pub struct WriteBatch<'a> {
map: HashMap<&'static str, &'a ColumnFamily>,
}
#[derive(Clone)]
#[derive(Debug, Clone)]
pub enum ShredStorageType {
// Stores shreds under RocksDB's default compaction (level).
RocksLevel,
@@ -991,19 +1498,49 @@ impl Default for ShredStorageType {
}
}
/// Advanced options for blockstore.
/// Each advanced option might also be used as a tag that supports
/// group-by operations when reporting Blockstore metrics.
#[derive(Clone)]
pub struct BlockstoreAdvancedOptions {
// Determine how to store both data and coding shreds. Default: RocksLevel.
pub shred_storage_type: ShredStorageType,
#[derive(Debug, Clone)]
pub enum BlockstoreCompressionType {
None,
Snappy,
Lz4,
Zlib,
}
impl Default for BlockstoreAdvancedOptions {
impl Default for BlockstoreCompressionType {
fn default() -> Self {
Self::None
}
}
impl BlockstoreCompressionType {
fn to_rocksdb_compression_type(&self) -> RocksCompressionType {
match self {
Self::None => RocksCompressionType::None,
Self::Snappy => RocksCompressionType::Snappy,
Self::Lz4 => RocksCompressionType::Lz4,
Self::Zlib => RocksCompressionType::Zlib,
}
}
}
/// Options for LedgerColumn.
/// Each field might also be used as a tag that supports group-by operations when
/// reporting metrics.
#[derive(Debug, Clone)]
pub struct LedgerColumnOptions {
// Determine how to store both data and coding shreds. Default: RocksLevel.
pub shred_storage_type: ShredStorageType,
// Determine how to compress column families which are eligible for
// compression.
pub compression_type: BlockstoreCompressionType,
}
impl Default for LedgerColumnOptions {
fn default() -> Self {
Self {
shred_storage_type: ShredStorageType::RocksLevel,
compression_type: BlockstoreCompressionType::default(),
}
}
}
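Putting the new options together: a caller that wants FIFO shred storage plus Lz4 compression for the eligible column families would build a `LedgerColumnOptions` and thread it through `BlockstoreOptions`. A sketch, assuming the usual `solana_ledger` re-export paths:

use solana_ledger::{
    blockstore::Blockstore,
    blockstore_db::{
        BlockstoreCompressionType, BlockstoreOptions, BlockstoreRocksFifoOptions,
        LedgerColumnOptions, ShredStorageType,
    },
};

fn open_with_custom_columns(
    ledger_path: &std::path::Path,
) -> Result<Blockstore, Box<dyn std::error::Error>> {
    let options = BlockstoreOptions {
        column_options: LedgerColumnOptions {
            shred_storage_type: ShredStorageType::RocksFifo(
                BlockstoreRocksFifoOptions::default(),
            ),
            compression_type: BlockstoreCompressionType::Lz4,
        },
        // Keep the default access type, recovery mode, and ulimit enforcement.
        ..BlockstoreOptions::default()
    };
    Ok(Blockstore::open_with_options(ledger_path, options)?)
}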
@@ -1015,7 +1552,7 @@ pub struct BlockstoreOptions {
pub recovery_mode: Option<BlockstoreRecoveryMode>,
// Whether to allow unlimited number of open files. Default: true.
pub enforce_ulimit_nofile: bool,
pub advanced_options: BlockstoreAdvancedOptions,
pub column_options: LedgerColumnOptions,
}
impl Default for BlockstoreOptions {
@@ -1025,12 +1562,12 @@ impl Default for BlockstoreOptions {
access_type: AccessType::PrimaryOnly,
recovery_mode: None,
enforce_ulimit_nofile: true,
advanced_options: BlockstoreAdvancedOptions::default(),
column_options: LedgerColumnOptions::default(),
}
}
}
#[derive(Clone)]
#[derive(Debug, Clone)]
pub struct BlockstoreRocksFifoOptions {
// The maximum storage size for storing data shreds in column family
// [`cf::DataShred`]. Typically, data shreds contribute around 25% of the
@@ -1065,11 +1602,13 @@ impl Default for BlockstoreRocksFifoOptions {
impl Database {
pub fn open(path: &Path, options: BlockstoreOptions) -> Result<Self> {
let column_options = Arc::new(options.column_options.clone());
let backend = Arc::new(Rocks::open(path, options)?);
Ok(Database {
backend,
path: Arc::from(path),
column_options,
})
}
@@ -1114,11 +1653,12 @@ impl Database {
pub fn column<C>(&self) -> LedgerColumn<C>
where
C: Column + ColumnName,
C: Column + ColumnName + ColumnMetrics,
{
LedgerColumn {
backend: Arc::clone(&self.backend),
column: PhantomData,
column_options: Arc::clone(&self.column_options),
}
}
@@ -1167,7 +1707,7 @@ impl Database {
impl<C> LedgerColumn<C>
where
C: Column + ColumnName,
C: Column + ColumnName + ColumnMetrics,
{
pub fn get_bytes(&self, key: C::Index) -> Result<Option<Vec<u8>>> {
self.backend.get_cf(self.handle(), &C::key(key))
@@ -1255,7 +1795,7 @@ where
impl<C> LedgerColumn<C>
where
C: TypedColumn + ColumnName,
C: TypedColumn + ColumnName + ColumnMetrics,
{
pub fn get(&self, key: C::Index) -> Result<Option<C::Type>> {
if let Some(serialized_value) = self.backend.get_cf(self.handle(), &C::key(key))? {
@@ -1281,7 +1821,7 @@ where
impl<C> LedgerColumn<C>
where
C: ProtobufColumn + ColumnName,
C: ProtobufColumn + ColumnName + ColumnMetrics,
{
pub fn get_protobuf_or_bincode<T: DeserializeOwned + Into<C::Type>>(
&self,
@@ -1444,9 +1984,24 @@ fn get_cf_options<C: 'static + Column + ColumnName>(
});
}
process_cf_options_advanced::<C>(&mut cf_options, &options.column_options);
cf_options
}
fn process_cf_options_advanced<C: 'static + Column + ColumnName>(
cf_options: &mut Options,
column_options: &LedgerColumnOptions,
) {
if should_enable_compression::<C>() {
cf_options.set_compression_type(
column_options
.compression_type
.to_rocksdb_compression_type(),
);
}
}
/// Creates and returns the column family descriptors for both the data-shred
/// and coding-shred column families.
///
@@ -1459,23 +2014,27 @@ fn new_cf_descriptor_pair_shreds<
options: &BlockstoreOptions,
oldest_slot: &OldestSlot,
) -> (ColumnFamilyDescriptor, ColumnFamilyDescriptor) {
match &options.advanced_options.shred_storage_type {
match &options.column_options.shred_storage_type {
ShredStorageType::RocksLevel => (
new_cf_descriptor::<D>(options, oldest_slot),
new_cf_descriptor::<C>(options, oldest_slot),
),
ShredStorageType::RocksFifo(fifo_options) => (
new_cf_descriptor_fifo::<D>(&fifo_options.shred_data_cf_size),
new_cf_descriptor_fifo::<C>(&fifo_options.shred_code_cf_size),
new_cf_descriptor_fifo::<D>(&fifo_options.shred_data_cf_size, &options.column_options),
new_cf_descriptor_fifo::<C>(&fifo_options.shred_code_cf_size, &options.column_options),
),
}
}
fn new_cf_descriptor_fifo<C: 'static + Column + ColumnName>(
max_cf_size: &u64,
column_options: &LedgerColumnOptions,
) -> ColumnFamilyDescriptor {
if *max_cf_size > FIFO_WRITE_BUFFER_SIZE {
ColumnFamilyDescriptor::new(C::NAME, get_cf_options_fifo::<C>(max_cf_size))
ColumnFamilyDescriptor::new(
C::NAME,
get_cf_options_fifo::<C>(max_cf_size, column_options),
)
} else {
panic!(
"{} cf_size must be greater than write buffer size {} when using ShredStorageType::RocksFifo.",
@@ -1495,7 +2054,10 @@ fn new_cf_descriptor_fifo<C: 'static + Column + ColumnName>(
/// rocksdb will start deleting the oldest SST file when the column family
/// size reaches `max_cf_size` - `FIFO_WRITE_BUFFER_SIZE` to strictly
/// maintain the size limit.
fn get_cf_options_fifo<C: 'static + Column + ColumnName>(max_cf_size: &u64) -> Options {
fn get_cf_options_fifo<C: 'static + Column + ColumnName>(
max_cf_size: &u64,
column_options: &LedgerColumnOptions,
) -> Options {
let mut options = Options::default();
options.set_max_write_buffer_number(8);
@@ -1520,6 +2082,8 @@ fn get_cf_options_fifo<C: 'static + Column + ColumnName>(max_cf_size: &u64) -> O
options.set_compaction_style(DBCompactionStyle::Fifo);
options.set_fifo_compaction_options(&fifo_compact_options);
process_cf_options_advanced::<C>(&mut options, column_options);
options
}
@@ -1576,6 +2140,11 @@ fn should_exclude_from_compaction(cf_name: &str) -> bool {
no_compaction_cfs.get(cf_name).is_some()
}
// Returns true if the column family enables compression.
fn should_enable_compression<C: 'static + Column + ColumnName>() -> bool {
C::NAME == columns::TransactionStatus::NAME
}
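With this gate, the user-selected compression currently applies to exactly one column family, `TransactionStatus`; every other column keeps RocksDB's default. Observable inside this module (the function is private) as:

// Only the transaction-status column family opts into the configured
// compression; the shred columns, for example, do not.
fn demo_compression_gate() {
    assert!(should_enable_compression::<columns::TransactionStatus>());
    assert!(!should_enable_compression::<columns::ShredData>());
}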
#[cfg(test)]
pub mod tests {
use {super::*, crate::blockstore_db::columns::ShredData};


@@ -34,7 +34,7 @@ use {
snapshot_utils,
transaction_batch::TransactionBatch,
transaction_cost_metrics_sender::TransactionCostMetricsSender,
vote_account::VoteAccount,
vote_account::VoteAccountsHashMap,
vote_sender_types::ReplayVoteSender,
},
solana_sdk::{
@@ -181,6 +181,7 @@ fn execute_batch(
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
timings,
);
@@ -1259,7 +1260,7 @@ fn load_frozen_forks(
new_root_bank.exhaustively_free_unused_resource(*last_full_snapshot_slot);
last_free = Instant::now();
new_root_bank.update_accounts_hash_with_index_option(
snapshot_config.accounts_hash_use_index,
false,
snapshot_config.accounts_hash_debug_verify,
false,
);
@@ -1348,7 +1349,7 @@ fn supermajority_root(roots: &[(Slot, u64)], total_epoch_stake: u64) -> Option<S
fn supermajority_root_from_vote_accounts(
bank_slot: Slot,
total_epoch_stake: u64,
vote_accounts: &HashMap<Pubkey, (/*stake:*/ u64, VoteAccount)>,
vote_accounts: &VoteAccountsHashMap,
) -> Option<Slot> {
let mut roots_stakes: Vec<(Slot, u64)> = vote_accounts
.iter()
@@ -1550,8 +1551,11 @@ pub mod tests {
matches::assert_matches,
rand::{thread_rng, Rng},
solana_entry::entry::{create_ticks, next_entry, next_entry_mut},
solana_runtime::genesis_utils::{
self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
solana_runtime::{
genesis_utils::{
self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
},
vote_account::VoteAccount,
},
solana_sdk::{
account::{AccountSharedData, WritableAccount},
@@ -3510,6 +3514,7 @@ pub mod tests {
false,
false,
false,
false,
&mut ExecuteTimings::default(),
);
let (err, signature) = get_first_error(&batch, fee_collection_results).unwrap();
@@ -3797,27 +3802,23 @@ pub mod tests {
#[test]
#[allow(clippy::field_reassign_with_default)]
fn test_supermajority_root_from_vote_accounts() {
let convert_to_vote_accounts =
|roots_stakes: Vec<(Slot, u64)>| -> HashMap<Pubkey, (u64, VoteAccount)> {
roots_stakes
.into_iter()
.map(|(root, stake)| {
let mut vote_state = VoteState::default();
vote_state.root_slot = Some(root);
let mut vote_account = AccountSharedData::new(
1,
VoteState::size_of(),
&solana_vote_program::id(),
);
let versioned = VoteStateVersions::new_current(vote_state);
VoteState::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap();
(
solana_sdk::pubkey::new_rand(),
(stake, VoteAccount::from(vote_account)),
)
})
.collect()
};
let convert_to_vote_accounts = |roots_stakes: Vec<(Slot, u64)>| -> VoteAccountsHashMap {
roots_stakes
.into_iter()
.map(|(root, stake)| {
let mut vote_state = VoteState::default();
vote_state.root_slot = Some(root);
let mut vote_account =
AccountSharedData::new(1, VoteState::size_of(), &solana_vote_program::id());
let versioned = VoteStateVersions::new_current(vote_state);
VoteState::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap();
(
solana_sdk::pubkey::new_rand(),
(stake, VoteAccount::from(vote_account)),
)
})
.collect()
};
let total_stake = 10;
let slot = 100;
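`VoteAccountsHashMap` replaces the spelled-out map type in the signatures above. Its definition is not part of this diff; given the signature it replaces, it is presumably an alias along these lines in `solana_runtime::vote_account`:

// Presumed definition (not shown in this diff), matching the replaced signature.
pub type VoteAccountsHashMap = HashMap<Pubkey, (/*stake:*/ u64, VoteAccount)>;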


@@ -8,6 +8,7 @@ pub mod bigtable_delete;
pub mod bigtable_upload;
pub mod bigtable_upload_service;
pub mod block_error;
mod slot_stats;
#[macro_use]
pub mod blockstore;
pub mod ancestor_iterator;
