Compare commits

403 Commits

SHA1 Message Date
fc28a8fa26 ci: remove spl downstream build tests 2022-04-16 06:36:47 +00:00
de065d865a Add Ident case (#24390) (#24401)
(cherry picked from commit a0e3e3c193)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-04-15 22:48:26 -06:00
aada9786eb rpc-pubsub: reduce metrics/log spam
(cherry picked from commit 50fba01842)
2022-04-15 22:23:01 -06:00
0269bf0f19 Bump version to 1.9.17 2022-04-15 09:30:21 +00:00
4e5edd8a46 cli: sort option for validators by version (#24236)
(cherry picked from commit 91993d89b0)

# Conflicts:
#	Cargo.lock
#	cli-output/Cargo.toml
#	programs/bpf/Cargo.lock

Co-authored-by: Trent Nelson <trent@solana.com>
2022-04-14 21:58:05 +00:00
2ac42ef4ec Remove duplicate increment (#24219) (#24225)
(cherry picked from commit ff3b6d2b8b)

Co-authored-by: carllin <carl@solana.com>
2022-04-14 13:24:33 -07:00
ad6df47c24 refactor clone 2022-04-14 13:22:52 -07:00
468d9b3933 Bump bpf-tools to v1.25 (#24289)
- Tweak linker script
  Ensure that all read only sections end up in one segment, and
  everything else in other segments. Discard .eh_frame, .hash and
  .gnu.hash since they are unused.
- Don't create invalid string slices in stdout/stderr on Solana
- Report exceeded stack size as a warning if dynamic frames are off
- Native support for signed division in SBF
  Adds BPF_SDIV, which is enabled only for the SBF subtarget.
- Introduce dynamic stack frames and the SBFv2 flag
  Dynamic stack frames are currently opt-in and enabled by setting
  cpu=sbfv2. When sbfv2 is used, ELF files are flagged with
  e_flags=EF_SBF_V2 so the runtime can detect it and react
  accordingly.

(cherry picked from commit 6b611e1c52)

Co-authored-by: Dmitri Makarov <dmakarov@alumni.stanford.edu>
2022-04-13 20:05:22 +00:00
89c0357ec8 Bump sbf-tools version to v1.24
(cherry picked from commit 689064a4f4)
2022-04-12 19:09:44 -07:00
2ae5b411f5 Double the chunk size for sending the program binary data in tx
(cherry picked from commit 03ed334ebb)

# Conflicts:
#	programs/bpf/tests/programs.rs
2022-04-12 19:09:44 -07:00
643ae053ab Bump version to v1.9.16 2022-04-09 03:08:56 +00:00
e02542003d fix merge conflicts 2022-04-08 19:22:26 -05:00
671e9cbac1 Address review comments
(cherry picked from commit a058f348a2)

# Conflicts:
#	runtime/src/cost_tracker.rs
2022-04-08 19:22:26 -05:00
55e64910f7 Unittest for cost tracker after process_and_record_transactions
(cherry picked from commit 2ed29771f2)
2022-04-08 19:22:26 -05:00
9ae13f26e5 Adjustments to cost_tracker updates
- don't store pending tx signatures and costs in CostTracker
- apply tx costs to global state immediately again
- go from commit_or_cancel to update_or_remove, where the cost tracker
  is either updated with the true costs for successful tx, or the costs
  of a retryable tx is removed
- move the function into qos_service and hold the cost tracker lock for
  the whole loop

(cherry picked from commit 924b8ea1eb)

# Conflicts:
#	core/src/qos_service.rs
#	runtime/src/cost_tracker.rs
2022-04-08 19:22:26 -05:00
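A minimal sketch of the update_or_remove flow described in the entry above, using illustrative names rather than the actual QosService/CostTracker API: a successfully committed transaction has its estimated cost replaced by its actual cost, while a retryable transaction has its estimated cost removed from the block budget.

```rust
// Illustrative stand-ins; not the upstream types.
enum ExecutionOutcome {
    Committed { actual_cost: u64 },
    Retryable,
}

struct CostTrackerSketch {
    block_cost: u64,
}

impl CostTrackerSketch {
    fn update_or_remove(&mut self, estimated_cost: u64, outcome: ExecutionOutcome) {
        match outcome {
            ExecutionOutcome::Committed { actual_cost } => {
                // Replace the estimate with the cost actually consumed.
                self.block_cost = self
                    .block_cost
                    .saturating_sub(estimated_cost)
                    .saturating_add(actual_cost);
            }
            ExecutionOutcome::Retryable => {
                // The transaction did not land in this block; release its budget.
                self.block_cost = self.block_cost.saturating_sub(estimated_cost);
            }
        }
    }
}
```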
6cb6b9206f - Only commit successfully executed transactions' cost to cost_tracker;
- In-flight transactions are held pending in cost_tracker until they are committed
  or cancelled;

(cherry picked from commit 9e07272af8)

# Conflicts:
#	core/src/qos_service.rs
#	runtime/src/cost_model.rs
#	runtime/src/cost_tracker.rs
2022-04-08 19:22:26 -05:00
a225421737 remove 'check_hash' from accounts hash calc (backport #23873) (backport #23902) (#24191)
* disable 'check_hash' on accounts hash calc (#23873) (#23902)

(cherry picked from commit 5a892af2fe)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
(cherry picked from commit f0c5962817)

# Conflicts:
#	runtime/src/accounts_db.rs

* resolve conflicts

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2022-04-08 15:44:33 -05:00
7615585464 v1.9: Bump tonic, tonic-build, prost, and etcd-client (#24159)
* Bump tonic, prost, and etcd-client

* Restore doc ignores

* Restore clippy ignore
2022-04-08 10:22:05 -06:00
1489cbf5a0 Note this is a modified backport that does not SAVE the new fields, but does load them. (#24074) (#24078)
Original:
Start saving/loading prior_roots(_with_hash) to snapshot (#23844)

    * Start saving/loading prior_roots(_with_hash) to snapshot

    * Update runtime/src/accounts_index.rs

    Co-authored-by: Michael Vines <mvines@gmail.com>

    * Update runtime/src/accounts_index.rs

    Co-authored-by: Michael Vines <mvines@gmail.com>

    * update comment

    Co-authored-by: Michael Vines <mvines@gmail.com>
    (cherry picked from commit 396b49a7c1)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
(cherry picked from commit b157a9111f)

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2022-04-08 09:55:00 -05:00
af3f26ac96 reduces gossip crds stats (#24132) (#24143)
(cherry picked from commit cd09390367)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-04-06 18:26:53 +00:00
a812f4410e Specify if archive size datapoint is for full or incremental snapshots (#23941) (#23956)
(cherry picked from commit 31b707b625)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-03-26 19:08:21 +00:00
aa26edb7fe Bump version to 1.9.15 (#23954) 2022-03-26 11:33:49 -05:00
caa55bb0f2 cli: allow skipping fee-checks when writing program buffers (hidden) 2022-03-25 19:34:26 -06:00
fb5b42cabc Add get_confirmed_blocks_with_data method (backport #23618) (#23939)
* add get_confirmed_blocks_with_data and get_protobuf_or_bincode_cells

(cherry picked from commit f3219fb695)

* appease clippy

(cherry picked from commit 5533e9393c)

* use &[T] instead of Vec<T> where appropriate

clippy

(cherry picked from commit fbcf6a0802)

* modify get_protobuf_or_bincode_cells to accept and return an iterator

(cherry picked from commit f717fda9a3)

* make get_protobuf_or_bincode_cells accept IntoIter on row_keys, make get_confirmed_blocks_with_data return an Iterator

(cherry picked from commit d8be0d9430)

Co-authored-by: Edgar Xi <edgarxi97@gmail.com>
2022-03-26 01:13:32 +00:00
0c01b236c6 ci: don't allow mergify to add automerge label to merged PRs (#23930)
(cherry picked from commit e34c52934c)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-03-25 16:20:04 +00:00
6e02c14055 docs: fix stake state typo (#23776) (#23778)
(cherry picked from commit c556811c0f)

Co-authored-by: g1stavo <gustavocsalvador@gmail.com>
2022-03-22 00:10:40 +00:00
a70bce050d Add ability to get the latest incremental snapshot via RPC (#23788) (#23808)
(cherry picked from commit 739e43ba58)

Co-authored-by: DimAn <diman@diman.io>
2022-03-21 20:07:34 +00:00
edb38c4bbd validator: --only-known-rpc requires a --known-validator ... (#23767)
(cherry picked from commit ce2e82cfb6)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-03-18 08:54:52 +00:00
2e1989bb9b validator: adds --no-incremental-snapshots NOP arg for v1.10 forward compat 2022-03-17 23:50:37 -06:00
08f4b19a21 Add accounts_data_len to bank snapshot (backport #23714) (#23758)
* Add accounts_data_len to bank snapshot (#23714)

(cherry picked from commit 7ff8c80e25)

# Conflicts:
#	runtime/src/serde_snapshot/tests.rs

* fix frozen abi hash

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-03-18 04:44:56 +00:00
da6bb66475 Pin version for publish cargo-check (#23716) (#23721)
(cherry picked from commit 330d6db19a)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-17 01:52:19 +00:00
8859f35b26 Bump version to 1.9.14 (#23703)
* Bump version to 1.9.14

* Bump openssl-src
2022-03-16 11:21:18 -06:00
3ac7e043a7 Update InvalidRentPayingAccount error (#23680) (#23693)
(cherry picked from commit 410463fb72)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-16 05:25:23 +00:00
db43c5d46d log hash and lamport result of calculate_accounts_hash_without_index (#23425) (#23676)
(cherry picked from commit d909b7c80b)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2022-03-15 19:23:13 +00:00
48dc4b3bb2 ledger tool halt at slot verify hash (#23424) (#23677)
(cherry picked from commit ef8b7d9c62)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2022-03-15 16:17:29 +00:00
b0543a1ae6 docs: update sysvar docs for load_instruction_at_checked (#22925) (#23673)
* docs: update sysvar docs for load_instruction_at_checked

Update the instruction introspection docs to use the updated load_instruction_at_checked function instead of deprecated load_instruction_at

* Update to load_current_index_checked

(cherry picked from commit 64e2d9dc47)

Co-authored-by: Zayyan Faizal <zayyanf@gmail.com>
2022-03-15 12:51:15 +00:00
8b0576d954 Rename AccountsDb plugins to Geyser plugins (backport #23604) (#23668)
* Rename AccountsDb plugins to Geyser plugins (#23604)

(cherry picked from commit 102dd68a03)

# Conflicts:
#	Cargo.lock
#	Cargo.toml
#	core/Cargo.toml
#	core/src/replay_stage.rs
#	core/src/tvu.rs
#	geyser-plugin-interface/Cargo.toml
#	geyser-plugin-manager/Cargo.toml
#	geyser-plugin-manager/src/geyser_plugin_service.rs
#	geyser-plugin-manager/src/slot_status_notifier.rs
#	validator/src/bin/solana-test-validator.rs
#	validator/src/main.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-15 05:04:17 +00:00
5c3e1967e6 Add space for keys in calculation for rent exempt in process_set_validator_info (#23485) (#23658)
* Fix `process_set_validator_info`

Add space for keys in calculation for rent exempt in
`process_set_validator_info`.

The space required for allocating the `(ConfigKeys, ValidatorInfo)`
tuple only considered space for `ValidatorInfo`.
But `config_instruction::create_account` also requires space for `n`
keys.

* Remove one clone call from closure

(cherry picked from commit 7eaec26a1c)

Co-authored-by: Enrique Fynn <me@enriquefynn.com>
2022-03-14 23:18:14 +00:00
06ebed861e Bump publish crate timeout 2022-03-13 21:01:15 -07:00
52bd1658cc Disable publish of test crates 2022-03-13 20:48:17 -07:00
07a6b597d0 Update Cargo.toml 2022-03-13 17:22:01 -07:00
857a541ddb Pin histogram crate version to fix cargo publish
(cherry picked from commit 01b6f97f0b)

# Conflicts:
#	ledger-tool/Cargo.toml
2022-03-13 17:22:01 -07:00
87fee49ed7 Add token-balance unit test 2022-03-11 20:58:00 -07:00
3ed915dcc9 Bump version to 1.9.13 (#23614) 2022-03-11 10:09:26 -06:00
3875bc91ab Revert "chore: bump dashmap from 4.0.2 to 5.1.0 (#23372) (#23521)" (#23596)
This reverts commit f56b25ac29.
2022-03-11 09:12:46 +01:00
e0f5fb887b Ensure blocks do not exceed the max accounts data size during Replay Stage (backport #23422) (#23589)
* Ensure blocks do not exceed the max accounts data size during Replay Stage (#23422)

(cherry picked from commit 3c6840050c)

# Conflicts:
#	runtime/src/bank.rs

* fix conflicts

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-03-10 18:59:46 +00:00
49952e05cf Report even if slot begins and ends in process_buffered_packets() (#23549) (#23576)
(cherry picked from commit 588414a776)

Co-authored-by: carllin <carl@solana.com>
2022-03-10 07:59:26 +00:00
4a100fbe3b Bump version to 1.9.12 (#23577) 2022-03-09 23:22:45 -06:00
607a98e9d0 remove persist_cost_table code 2022-03-09 21:06:00 -07:00
86a97563b5 Patch validator from loading persisted program costs 2022-03-09 21:06:00 -07:00
4e5d9885da Revert exponential moving average cost model changes (backport #23541) (#23543)
* Revert "fix tests after merge"

This reverts commit ba2d83f580.

(cherry picked from commit 0a17edcc1f)

* Revert "1. Persist to blockstore less frequently;"

This reverts commit 7aa1fb4e24.

(cherry picked from commit c878c9e2cb)

# Conflicts:
#	core/src/cost_update_service.rs
#	core/src/tvu.rs
#	runtime/src/cost_model.rs

* Revert "use EMA in place of Welford"

This reverts commit 6587dbfa47.

(cherry picked from commit 9acbfa5eb1)

* Revert "- estimate a program cost as 2 standard deviation above mean"

This reverts commit a25ac1c988.

(cherry picked from commit 5a0cd05866)

# Conflicts:
#	core/src/cost_update_service.rs
#	runtime/src/cost_model.rs

* fix merge conflicts

Co-authored-by: Carl Lin <carl@solana.com>
Co-authored-by: Tao Zhu <tao@solana.com>
2022-03-09 03:55:36 +00:00
714cf0eff2 solana-validator set-identity no longer writes a tower file unnecessarily (#23542)
(cherry picked from commit b719d6a2ad)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-03-09 01:55:14 +00:00
babba3b0ff Fix getting the golden snapshot hashes during bootstrap with Incremental Snapshots (#23518) (#23532)
(cherry picked from commit 9b80452c7c)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-03-08 23:07:26 +00:00
8c59fa5a73 Update regex to v1.5.5
(cherry picked from commit 536a99705b)
2022-03-08 11:53:30 -08:00
de694402ca Bump version to 1.9.11 2022-03-08 10:53:04 -08:00
f56b25ac29 chore: bump dashmap from 4.0.2 to 5.1.0 (#23372) (#23521)
* chore: bump dashmap from 4.0.2 to 5.1.0

Bumps [dashmap](https://github.com/xacrimon/dashmap) from 4.0.2 to 5.1.0.
- [Release notes](https://github.com/xacrimon/dashmap/releases)
- [Commits](https://github.com/xacrimon/dashmap/commits/v5.1.0)

---
updated-dependencies:
- dependency-name: dashmap
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
(cherry picked from commit 3a0271c113)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-08 14:05:58 +00:00
2137008532 Fix broken doc links in solana-runtime (#23504) (#23515)
(cherry picked from commit ddbf5c782f)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2022-03-08 01:02:27 +00:00
4a4e560299 Fix incorrect nonoverlapping test in sol_memcpy (backport #21007) (#23512)
* Fix incorrect nonoverlapping test in sol_memcpy (#21007)

Thanks!

(cherry picked from commit df2b448993)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs
#	sdk/program/src/program_stubs.rs
#	sdk/src/feature_set.rs

* resolve conflicts

Co-authored-by: Brian Anderson <andersrb@gmail.com>
Co-authored-by: Jack May <jack@solana.com>
2022-03-07 23:10:54 +00:00
66f85a0703 Fix backport 2022-03-03 15:12:47 -07:00
3bee925967 Resized accounts must be rent exempt 2022-03-03 10:40:46 -08:00
44109c0cd4 docs: post merge review for #23286 (#23320)
(cherry picked from commit 6a0d2fcfa7)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-03-02 20:11:03 -07:00
83281fe3ff Adds comments related to the public RPC endpoints (#22797) (#23451)
* Adds comments related to the public RPC endpoints

* Update docs/src/cluster/rpc-endpoints.md

Co-authored-by: Michael Vines <mvines@gmail.com>

Co-authored-by: Brian Long <bl@triton.one>
Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 7dbde2247d)

Co-authored-by: Brian Long <brian.long@fmadata.com>
2022-03-02 19:40:05 +00:00
2dd5b76986 Restore solana-test-validator informational output
(cherry picked from commit 9ec514f6c5)
2022-03-02 18:53:33 +01:00
0e6a849fc7 bumps up crds-shards-bits (#23220) (#23412)
The commit adjusts CRDS_SHARDS_BITS upward to be in line with mask_bits in
gossip pull request. This will avoid redundant filtering of irrelevant
crds entries when responding to pull requests.

(cherry picked from commit 1282277126)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-03-01 18:20:26 +00:00
db9826c93f Improve UX querying rpc for blocks at or before the snapshot from boot (backport #23403) (#23405)
* Improve UX querying rpc for blocks at or before the snapshot from boot (#23403)

* Bump first-available block to first complete block

* Remove obsolete purges in tests (PrimaryIndex toggling no longer in use)

* Check first-available block in Rpc check_slot_cleaned_up

(cherry picked from commit 3b5b71ce44)

# Conflicts:
#	ledger/src/blockstore.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-01 17:13:54 +00:00
46a02b7b4a Allow sub-rent-exempt-minimum transfers to 1nc1nerator (backport #23382) (#23404)
* Allow sub-rent-exempt-minimum transfers to `1nc1nerator` (#23382)

* Add failing test

* Allow small burns to incinerator

* Use check_id method

(cherry picked from commit 19448ba078)

# Conflicts:
#	runtime/src/account_rent_state.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-01 17:12:47 +00:00
83fb17c77b client: expose creating RpcClients with custom RpcSender impls
(cherry picked from commit 6666f23c01)

# Conflicts:
#	client/src/rpc_sender.rs
2022-03-01 08:52:32 -07:00
e6f6a8a1b4 docs: resolve svgbob binary (#23399)
(cherry picked from commit 5877e38baa)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-03-01 00:58:29 +00:00
6b71570f48 ci: move all formerly-default-queue jobs to solana queue (#23388)
(cherry picked from commit f814c4a082)

# Conflicts:
#	ci/buildkite-pipeline.sh

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-28 22:28:05 +00:00
34631669a4 Updating known validators in the docs for testnet (#23379)
(cherry picked from commit 22d2a40133)

Co-authored-by: tigarcia <tim.m.garcia@gmail.com>
2022-02-28 17:23:29 +00:00
a400fd4558 Align the solana validators output columns for -ud,-ut, and -um
(cherry picked from commit 87b76aeeb0)
2022-02-27 18:49:15 -08:00
f42f094dd0 Prevent new RentPaying state created by paying fees (backport #23358) (#23360)
* Prevent new RentPaying state created by paying fees (#23358)

* Add failing test

* Check fee-payer rent-state change on load

* Add more test cases

* Review comments

(cherry picked from commit 36484f4f08)

# Conflicts:
#	runtime/src/account_rent_state.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-26 21:37:06 +00:00
ef9ffffcaa followup safety checks for #23295 (#23340)
(cherry picked from commit 5e0086c1ee)

# Conflicts:
#	runtime/src/builtins.rs

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-25 08:58:25 +00:00
61fea1d2a7 Update derivation path integer sign specification (#23336) (#23339)
Previously, `ACCOUNT` and `CHANGE` were specified as being positive integers, but since both can assume a value of 0 (as in the given example), they should be specified as nonnegative integers

(cherry picked from commit d1f141484e)

Co-authored-by: alnoki <43892045+alnoki@users.noreply.github.com>
2022-02-24 20:11:12 -07:00
023ab1c427 Update the consumed compute units cost for hashing syscalls
This change prevents zero-cost computation of hash functions on an
unbounded number of zero-length slices of data. The cost for each slice
is at least equal to the base cost of a memory operation, but could be
more for longer slices.

(cherry picked from commit 0a3a18744f)
2022-02-24 17:41:22 -08:00
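A hedged sketch of the costing rule stated above: each slice is charged at least the base memory-operation cost, so hashing many zero-length slices is no longer free. The function and parameter names are illustrative, not the actual syscall code.

```rust
// Charge every slice at least the base memory-operation cost; longer slices
// pay proportionally more. Constants are supplied by the caller here.
fn hashing_syscall_cost(slice_lens: &[u64], mem_op_base_cost: u64, byte_cost: u64) -> u64 {
    slice_lens
        .iter()
        .map(|&len| mem_op_base_cost.max(len.saturating_mul(byte_cost)))
        .sum()
}
```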
f4f0b64af4 Hack fix for ICE as seen in CI (#23306)
(cherry picked from commit c81dd602c4)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-24 01:23:34 +00:00
84c57dd0a8 Bump version to v1.9.10 (#23304) 2022-02-23 11:30:21 -06:00
450404f800 v1.9: Enforce tx metadata upload to bigtable (#23212)
* Enforce tx metadata upload with static types (#23028)

* resolve conflicts

* fix test
2022-02-23 12:03:17 +08:00
8413700a2f docs: clarify spl token account creation handling for exchange integrations (#23288)
(cherry picked from commit 09d064c090)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-23 00:47:54 +00:00
215c708599 Fix builtin handling on epoch boundaries (backport #23256) (#23273)
* Fix builtin handling on epoch boundaries (#23256)

(cherry picked from commit bcda74f42f)

# Conflicts:
#	runtime/src/bank.rs
#	runtime/src/genesis_utils.rs

* fix conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-02-22 16:15:10 +00:00
c02c73fa5f Bump rbpf to v0.2.24 (#23263) (#23266)
(cherry picked from commit d0d256ee9a)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2022-02-22 11:46:05 +00:00
68d846c7a9 Revert "Add simulation detection countermeasure (backport #22880) (#23143)" (#23262)
This reverts commit 0fdbec9735.
2022-02-21 13:38:40 -07:00
08d6b9850d Bump version to 1.9.9 (#23247) 2022-02-19 21:17:11 -06:00
4ebeb33602 Skip adding builtins if they will be removed (backport #23233) (#23241)
* Skip adding builtins if they will be removed (#23233)

* Add failing test for precompile transition

* Skip adding builtins if they will be removed

* cargo clean

* nits

* fix abi check

* remove workaround

Co-authored-by: Jack May <jack@solana.com>
(cherry picked from commit 1719d2349f)

# Conflicts:
#	runtime/src/bank.rs
#	runtime/src/builtins.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
Co-authored-by: Jack May <jack@solana.com>
2022-02-19 08:28:30 +00:00
1f4ad0d1e8 Precompiles owned by the native loader (#23237) (#23240)
(cherry picked from commit 970f543ef6)

Co-authored-by: Jack May <jack@solana.com>
2022-02-19 02:45:15 +00:00
b2b92d7f5c Add --locked to spl-token-cli install (#23223) (#23225)
(cherry picked from commit c696944d36)

Co-authored-by: Will Hickey <will.hickey@solana.com>
2022-02-18 05:12:51 +00:00
02f8651a9c Fix the flaky test test_restart_tower_rollback (backport #23129) (#23155)
* Fix the flaky test test_restart_tower_rollback (#23129)

* Add flag to disable voting until a slot to avoid duplicate voting

* Fix the tower rollback test and remove it from flaky.

(cherry picked from commit ab92578b02)

* Resolve conflicts

Co-authored-by: Ashwin Sekar <ashwin@solana.com>
2022-02-17 20:31:27 +00:00
0fdbec9735 Add simulation detection countermeasure (backport #22880) (#23143)
* Add simulation detection countermeasure (#22880)

* Add simulation detection countermeasures

* Add program and test using TestValidator

* Remove incinerator deposit

* Remove incinerator

* Update Cargo.lock

* Add more features to simulation bank

* Update Cargo.lock per rebase

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
(cherry picked from commit c42b80f099)

# Conflicts:
#	programs/bpf/Cargo.lock
#	programs/bpf/Cargo.toml

* Update Cargo.lock

Co-authored-by: Michael Vines <mvines@gmail.com>
Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2022-02-17 14:45:24 +00:00
f629c71849 Add slot-based timing metrics (backport #23097) (#23210)
* Add execute timings (#23097)

(cherry picked from commit 619335df1a)

# Conflicts:
#	core/src/banking_stage.rs

* resolve conflicts

Co-authored-by: carllin <carl@solana.com>
2022-02-17 10:57:50 +00:00
43e562142f docs: remove wallet ads (#23208)
(cherry picked from commit fa680a35ea)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-17 05:54:08 +00:00
c3098e99d1 Bump version to v1.9.8 2022-02-16 21:42:57 -07:00
421ad42b12 Fix flaky optimistic confirmation tests (backport #23178) (#23200)
* Fix flaky optimistic confirmation tests (#23178)

(cherry picked from commit bca1d51735)

# Conflicts:
#	local-cluster/tests/local_cluster.rs
#	local-cluster/tests/local_cluster_flakey.rs

* Resolve conflicts

Co-authored-by: carllin <carl@solana.com>
2022-02-17 03:18:24 +00:00
08cc140d4a accounts_index: Add SPL Token account indexing for token-2022 accounts (#23043) (#23203)
(cherry picked from commit a102453bae)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-02-17 02:20:38 +00:00
2120ef5808 Fix ed25519 builtin program handling (backport #23182) (#23195)
* Fix ed25519 builtin program handling (#23182)

* Fix ed25519 builtin program handling

* Fix tests

* Add integration tests for processing transactions with ed25519 ixs

* Fix another test

* fix formatting

(cherry picked from commit 813725dfec)

* fix tests

Co-authored-by: Justin Starry <justin@solana.com>
Co-authored-by: Jack May <jack@solana.com>
2022-02-17 00:44:44 +00:00
c08af09aaa Removed solana-accountsdb-plugin-postgres from the monorepo as it has its own (#22567) (#23202)
Removed solana-accountsdb-plugin-postgres from the monorepo as it has its own standalone repo now
2022-02-16 15:52:59 -08:00
8b12749f02 forward_buffered_packets return packet count in error path (#23167) (#23187)
(cherry picked from commit 115d71536b)

Co-authored-by: Jeff Biseda <jbiseda@gmail.com>
2022-02-16 13:01:16 -08:00
e343a17ce9 Update ping to transfer to self, with rotating amount (#22657) (#22675)
* Update ping to transfer to self, with rotating amount

* Remove balance check

(cherry picked from commit 90689585ef)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-02-16 12:52:28 -07:00
3fd78ac6ea shrink batches when over 80% of the space is wasted (backport #23066) (#23189)
* shrink batches when over 80% of the space is wasted (#23066)

* shrink batches when over 80% of the space is wasted

(cherry picked from commit 83d31c9e65)

# Conflicts:
#	core/benches/sigverify_stage.rs
#	core/src/sigverify_stage.rs
#	perf/src/sigverify.rs

* fixup!

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
2022-02-16 19:30:16 +00:00
41bbc11a46 flag end-of-slot when poh bank is gone (backport #23069) (#23174)
* flag end-of-slot when poh bank is gone

(cherry picked from commit 03bf66a51b)

# Conflicts:
#	core/src/banking_stage.rs

* merge fix

Co-authored-by: Tao Zhu <tao@solana.com>
2022-02-16 18:08:03 +00:00
68934353f2 Typo fix (#23152) (#23153)
Fixed a typo in the documentation.

(cherry picked from commit bb50259956)

Co-authored-by: Jerry <jerskisnow@protonmail.com>
2022-02-16 10:53:30 -07:00
92543a3f92 fix typo in docs (#22690) (#22691)
(cherry picked from commit 2b111cd631)

Co-authored-by: Steve James <0x2t1ff@gmail.com>
2022-02-16 10:50:45 -07:00
a514aff819 Update jsonrpc-api.md (#23190) (#23192)
(cherry picked from commit aaf657297f)

Co-authored-by: gagliardetto <gagliardetto@users.noreply.github.com>
2022-02-16 17:24:23 +00:00
8d8525e4fc Allow cli users to authorize Staker signed by Withdrawer (#23146) (#23176)
(cherry picked from commit 88b66ae3a8)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-16 10:16:55 -07:00
2c1cec4e2c validator: invert vote account sanity check arg 2022-02-16 08:31:43 +00:00
7d0a0a26bb rpc: genericize client constructors 2022-02-16 08:31:43 +00:00
73016d3ed2 rpc: make getGenesisHash part of minimal api 2022-02-16 08:31:43 +00:00
9b1cb5c1b7 test-validator: use JsonRpcConfig::default_for_test for consistency 2022-02-16 08:31:43 +00:00
94f4748a34 Generate full snapshots 4x faster to keep incremental snapshots nice and small
(cherry picked from commit 577fa4ec0c)
2022-02-15 21:22:23 -08:00
8963724ed6 solana-validator set-identity now supports the --require-tower flag 2022-02-15 21:09:44 -08:00
65df58c64a Update deprecated methods and recommend getBlocksWithLimit (#23127)
(cherry picked from commit d2a407a9a7)
2022-02-15 18:01:37 -08:00
380c5da2d0 Add --skip-new-snapshot-check to exit and wait-for-restart-window subcommands
(cherry picked from commit 527f62c744)
2022-02-15 18:01:05 -08:00
7d488a6ed8 Remove references to instruction parsing in SPL Token -> Depositing section (#23161) (#23163)
(cherry picked from commit 917113914d)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-16 00:12:10 +00:00
159cfdae25 solana-validator monitor now reports identity changes (#23156)
(cherry picked from commit b44f40ee3a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-02-15 22:48:17 +00:00
1c3d09ed21 solana-validator wait-for-restart-window --min-idle-time X now works 2022-02-15 08:58:53 -08:00
2c8cfdb3f3 Add fees to tx-wide caps (backport #22081) (#23095)
* Add fees to tx-wide caps (#22081)

(cherry picked from commit 3d9874b95a)

# Conflicts:
#	runtime/src/bank.rs

* resolve

Co-authored-by: Jack May <jack@solana.com>
2022-02-15 01:36:02 +00:00
85570ac207 Add error message for readlink -f failure (#23102) (#23121)
* Add error message for readlink -f failure

(cherry picked from commit 89f5145f64)

Co-authored-by: Will Hickey <will.hickey@solana.com>
2022-02-14 20:23:26 +00:00
054b95cbe1 docs: fix broken link for "transaction-id" (#22682) (#22683)
(cherry picked from commit a300e2d2dc)

Co-authored-by: Radu Pașparugă <radupasparuga25@gmail.com>
2022-02-14 22:21:29 +08:00
b67a5bb3b9 fix typo (#23107) (#23108)
(cherry picked from commit 22a2a4252a)

Co-authored-by: thepalmtrees <96289385+thepalmtrees@users.noreply.github.com>
2022-02-13 16:15:53 +00:00
3e3fb4e296 Introduce slot-specific packet metrics (backport #22906) (#23077)
* Introduce slot-specific packet metrics (#22906)

(cherry picked from commit 2f9e30a1f7)

# Conflicts:
#	core/benches/banking_stage.rs
#	core/src/banking_stage.rs
#	core/src/qos_service.rs

* Resolve conflicts

Co-authored-by: carllin <carl@solana.com>
2022-02-13 05:44:38 +00:00
f66d8551e9 Update minimum port range due to addition of QUIC port 2022-02-12 08:49:48 -08:00
a5cb10666c Bump QUIC_PORT_OFFSET to 6 to avoid jostling around other ports (#23096)
(cherry picked from commit 817f47d970)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-02-12 02:36:04 +00:00
76384758d8 adds validator version to set_panic_hook (#23082) (#23088)
(cherry picked from commit 78089941ff)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-02-12 02:11:14 +00:00
4eca26ae50 Document message APIs (backport #22873) (#23091)
* Document message APIs (#22873)

* Document message APIs

* Ignore clippy

* Update sdk/program/src/message/mod.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Fix new_with_blockhash example

* Rename nonce_account_address in example

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit f7753ce85f)

# Conflicts:
#	sdk/program/src/message/mod.rs

* Fix conflict

Co-authored-by: Brian Anderson <andersrb@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-11 21:09:14 +00:00
2d144afec5 Bump version to 1.9.6 (#23092) 2022-02-11 15:00:06 -06:00
781609b27a uses sendmmsg in streamer (backport #23062) (#23080)
* uses sendmmsg in streamer (#23062)

packet::send_to sends packets one by one:
https://github.com/solana-labs/solana/blob/9213fcb11/streamer/src/packet.rs#L63-L75

sendmmsg uses a single system call for multiple messages:
https://github.com/solana-labs/solana/blob/9213fcb11/streamer/src/sendmmsg.rs#L94

(cherry picked from commit c078ca3fb3)

# Conflicts:
#	streamer/src/streamer.rs

* removes mergify merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-02-11 15:47:45 +00:00
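For illustration only, a minimal sketch of the per-packet path referenced above: each send_to() is its own system call, which is exactly the overhead a sendmmsg-based batch send (as wrapped by streamer/src/sendmmsg.rs) avoids by submitting many messages in one call.

```rust
use std::net::{SocketAddr, UdpSocket};

// One-by-one sending: one system call per packet.
fn send_one_by_one(socket: &UdpSocket, packets: &[(Vec<u8>, SocketAddr)]) -> std::io::Result<()> {
    for (data, addr) in packets {
        socket.send_to(data, addr)?;
    }
    Ok(())
}
// A sendmmsg-based path instead fills an array of message headers and submits
// them with a single libc::sendmmsg call, amortizing the syscall cost.
```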
5a5244ecf8 Add --full-rpc-api to run.sh (#23072) (#23079)
(cherry picked from commit 34443a238e)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2022-02-11 13:11:30 +00:00
2e60f95ab9 mention staking reward in getInflationReward doc (#23073) (#23074)
(cherry picked from commit 4bd6a231d2)

Co-authored-by: Anton <62949848+icepaq@users.noreply.github.com>
2022-02-11 04:21:29 +00:00
55179524bd Add deactivate-feature feature to test validator cli (#23041) (#23065)
Co-authored-by: Charlie You <charlie.you@hey.com>
2022-02-10 19:18:20 -08:00
4a0785ddcd Disable features programmatically in TestValidatorGenesis (#22860) (#23064)
* Supported starting test-validator and disabling features

* Enable starting test validator and removing feature accounts

* Enable deactivating feature accounts

* Enable deactivating feature accounts - updates per PR comments

* Enable deactivating feature accounts - updates per PR comments

* Added more verbosity when the key for deactivation is either not a Feature or not in genesis_config accounts

(cherry picked from commit 3c65fd7ba3)

Co-authored-by: Frank V. Castellucci <5435165+FrankC01@users.noreply.github.com>
2022-02-10 14:58:51 -08:00
4698fbc036 Move cap_accounts_data_len feature gate only around new error (#23048) (#23057)
(cherry picked from commit 0a1ab945bc)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-02-10 20:41:15 +00:00
70f76b450e Add sbf-tools version to cargo target cache name on CI agents (#23027)
(cherry picked from commit c7aa7fb66b)

Co-authored-by: Dmitri Makarov <dmakarov@alumni.stanford.edu>
2022-02-09 22:24:04 +00:00
d64eebb799 estimate a program cost as 2 standard deviation above mean (backport #22286) (#23019)
* - estimate a program cost as 2 standard deviation above mean
- replaced get_average / get_mode with get_default to assign max units to unknown program

(cherry picked from commit a25ac1c988)

# Conflicts:
#	runtime/src/cost_model.rs

* use EMA in place of Welford

(cherry picked from commit 6587dbfa47)

* 1. Persist to blockstore less frequently;
2. reduce alpha for EMA to 1 percent to have roughly 200 data points for estimation

(cherry picked from commit 7aa1fb4e24)

# Conflicts:
#	core/src/cost_update_service.rs
#	core/src/tvu.rs
#	runtime/src/cost_model.rs

* fix tests after merge

(cherry picked from commit ba2d83f580)

* fix merge

Co-authored-by: Tao Zhu <tao@solana.com>
2022-02-09 22:16:55 +00:00
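A hedged sketch of the estimation scheme described in the entry above, with illustrative names: keep an exponential moving average (EMA) of each program's observed compute units plus an exponentially weighted variance, and estimate the cost as two standard deviations above the mean. An alpha of 1 percent gives an effective window of roughly 2/alpha ≈ 200 observations, matching the commit message.

```rust
struct ProgramCostEma {
    alpha: f64,    // smoothing factor, e.g. 0.01
    mean: f64,     // EMA of observed cost
    variance: f64, // exponentially weighted variance
}

impl ProgramCostEma {
    fn observe(&mut self, cost: f64) {
        let delta = cost - self.mean;
        self.mean += self.alpha * delta;
        // Incremental exponentially weighted variance update.
        self.variance = (1.0 - self.alpha) * (self.variance + self.alpha * delta * delta);
    }

    // Conservative estimate: two standard deviations above the mean.
    fn estimated_cost(&self) -> f64 {
        self.mean + 2.0 * self.variance.sqrt()
    }
}
```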
71211e0d90 rebase 2022-02-09 10:44:09 -08:00
320fbd63c5 Prepare RPC subsystem for multiple SPL Token program ids
(cherry picked from commit 86d465c531)

# Conflicts:
#	rpc/src/rpc.rs
#	transaction-status/src/parse_instruction.rs
#	transaction-status/src/token_balances.rs
2022-02-09 10:44:09 -08:00
0fe00bab7d Return the accounts data len delta after processing messages (#22986) (#23023)
(cherry picked from commit 869cfc9a1c)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-02-09 03:06:57 +00:00
00630d9c1b monitor: Remove getMaxRetransmitSlot RPC method usage
(cherry picked from commit dcd4ea9111)
2022-02-08 11:06:12 -08:00
d05b5b0902 Add get_processed_sibling_instruction syscall (#22859) (#22956) 2022-02-08 09:21:11 -08:00
5c69af607d Put accounts data len updates behind feature gate (#22918) (#23007)
(cherry picked from commit f0f4042680)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-02-08 16:45:22 +00:00
df16a37ab5 bench should update leader schedule cache (#22991) (#22998)
(cherry picked from commit e52e48076e)

Co-authored-by: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com>
2022-02-08 15:05:57 +00:00
432eafd730 Search for consecutive ports (#22979) (#22984)
(cherry picked from commit 514aab46d9)

Co-authored-by: sakridge <sakridge@gmail.com>
2022-02-07 18:45:02 +00:00
41142a7d76 Optimize batching of transactions during replay for parallel processing (backport #22917) (#22982)
* Optimize batching of transactions during replay for parallel processing

(cherry picked from commit 4de14e530b)

* fix build

(cherry picked from commit dfef68f985)

* updates to address review feedback

(cherry picked from commit c5d8560cdb)

* suppress clippy

(cherry picked from commit a146f2d853)

Co-authored-by: Pankaj Garg <pankaj@solana.com>
2022-02-07 18:25:33 +00:00
8047601a7b Fix typo (#22973)
Fix typo

(cherry picked from commit eaf2df99c6)

Co-authored-by: wil-se <sebastiani.1753672@studenti.uniroma1.it>
2022-02-06 16:46:15 +00:00
85856a73aa Implement json output for solana ping (backport #22959) (#22968)
* Implement json output for solana ping (#22959)

(cherry picked from commit d2c89213ff)

# Conflicts:
#	cli/src/cluster_query.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-05 23:42:33 +00:00
c3890ada8e Bumps solana_rbpf to version v0.2.23 (#22954) (#22961)
(cherry picked from commit e05cf4bf97)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2022-02-05 13:08:29 +00:00
ceb253ce90 Bumps solana_rbpf to version v0.2.22 (#22923) (#22955)
* Bumps solana_rbpf to v0.2.22

* Adjusts vm::Config and feature gates.

(cherry picked from commit 96c88d1a5e)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2022-02-05 11:16:22 +00:00
dd6c365bd9 Resolve conflicts (#22905)
Co-authored-by: carllin <carl@solana.com>
2022-02-05 06:47:18 +00:00
9ea025315e removes VoteTracker::new in favor of VoteTracker::default (#22941) (#22946)
VoteTracker::new does not need a bank and so is redundant:
https://github.com/solana-labs/solana/blob/5a230f418/core/src/cluster_info_vote_listener.rs#L103-L107
(cherry picked from commit 27aaf9df85)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-02-04 20:59:01 +00:00
c43cef79b5 Add quic port for accepting transactions (#22753) (#22937)
using quinn library

streamer: Sign TLS cert with validator identity key

Handle multiple incoming chunks

(cherry picked from commit 5a230f418d)

Co-authored-by: sakridge <sakridge@gmail.com>
2022-02-04 20:53:27 +00:00
2605724aa3 Bump bpf-tools to v1.23 (#22929)
(cherry picked from commit a9d9a5095b)

Co-authored-by: Dmitri Makarov <dmakarov@alumni.stanford.edu>
2022-02-04 04:14:25 +00:00
539f303eb7 Handle accounts data size changes due to rent-collected accounts (#22412) (#22919)
Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-02-04 01:13:12 +00:00
f7091811d4 Allow buffered packets to be consumed if bank is active, regardless of leader schedule 2022-02-03 16:56:27 -06:00
15ef1827bf push live packets straight to buffer, leader only processes packets from buffer 2022-02-03 16:56:27 -06:00
85fef67213 Refactor Rent::due() with RentDue enum (#22346) (#22921)
(cherry picked from commit d90d5ee9b6)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-02-03 22:15:54 +00:00
90a70d9b5b Use lazy_rent_collection directly (#22410) (#22920)
(cherry picked from commit 9bc2592da1)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-02-03 22:07:28 +00:00
643442e830 Reject close of active vote accounts (backport #22651) (#22896)
* Reject close of active vote accounts (#22651)

* 10461 Reject close of vote accounts unless the account earned no credits in the previous epoch. This is checked by comparing the current epoch (from the clock sysvar) with the most recent epoch with credits in vote state.

(cherry picked from commit 75563f6c7b)

# Conflicts:
#	programs/vote/src/vote_processor.rs
#	sdk/src/feature_set.rs

* Resolve merge conflicts

Co-authored-by: Will Hickey <csu_hickey@yahoo.com>
Co-authored-by: Will Hickey <will.hickey@solana.com>
2022-02-03 19:59:07 +00:00
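A hedged sketch of the check described above, assuming epoch_credits entries shaped like VoteState::epoch_credits, i.e. (epoch, credits, prev_credits); the function name and exact boundary condition are illustrative.

```rust
// Closing (withdrawing the full balance of) a vote account is allowed only if
// the most recent epoch with credits is older than the previous epoch.
fn can_close_vote_account(current_epoch: u64, epoch_credits: &[(u64, u64, u64)]) -> bool {
    match epoch_credits.last() {
        // Reject when credits were earned in the current or previous epoch.
        Some(&(last_epoch_with_credits, _, _)) => {
            last_epoch_with_credits.saturating_add(1) < current_epoch
        }
        // No credits ever earned: safe to close.
        None => true,
    }
}
```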
69e207ca58 rpc: use minimal mode by default (backport #22734) (#22879)
* rpc: use minimal mode by default

(cherry picked from commit eac4a6df68)

# Conflicts:
#	local-cluster/tests/local_cluster.rs

* test-validator-bin: reinstate full rpc method set

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-03 08:25:49 +00:00
fb8db79e63 adds reverse lookup index to cluster-nodes (#22892) (#22894)
retransmit has to exclude the slot leader from the set of nodes for each shred,
which currently requires a linear scan:
https://github.com/solana-labs/solana/blob/e3b137066/core/src/cluster_nodes.rs#L238-L242

This commit adds a reverse lookup index to avoid linear scan.

(cherry picked from commit dccbddad80)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-02-02 21:15:51 +00:00
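A minimal sketch of the reverse lookup index described above, with a generic P standing in for a node pubkey: the map turns the leader lookup into an O(1) operation instead of a linear scan over the node list.

```rust
use std::collections::HashMap;

struct ClusterNodesSketch<P> {
    nodes: Vec<P>,
    index: HashMap<P, usize>, // reverse lookup: pubkey -> position in `nodes`
}

impl<P: std::hash::Hash + Eq + Clone> ClusterNodesSketch<P> {
    fn new(nodes: Vec<P>) -> Self {
        // Build the reverse index once when the node set is constructed.
        let index = nodes
            .iter()
            .enumerate()
            .map(|(i, key)| (key.clone(), i))
            .collect();
        Self { nodes, index }
    }

    // O(1), instead of self.nodes.iter().position(|k| k == leader).
    fn leader_index(&self, leader: &P) -> Option<usize> {
        self.index.get(leader).copied()
    }
}
```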
237347847b caches WeightedShuffle struct in ClusterNodes (#22877) (#22889)
Instead of reconstructing WeightedShuffle struct for each shred
broadcast or retransmit, we can use the same struct with minimal
mutations.

(cherry picked from commit e3b137066d)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-02-02 16:54:57 +00:00
4706790c20 docs-ci: prebuild cli bin with output to appease TravisCI hang check (#22884)
(cherry picked from commit 2fda90e414)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-02 09:00:48 +00:00
04281734e5 Cleanup serde snapshot common.rs (#22854) (#22863)
Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-02-02 00:35:50 +00:00
a98ca9037d More serde snapshot cleanup (backport #22449) (#22872)
* More serde snapshot cleanup (#22449)

(cherry picked from commit 2756abce39)

# Conflicts:
#	runtime/src/serde_snapshot.rs
#	runtime/src/serde_snapshot/newer.rs

* fixup

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-02-01 22:25:04 +00:00
12e40a40f5 Cleanup serde snapshot's "future" to "newer" (backport #22431) (#22870)
* Refactor serde snapshot's "future" to "newer" (#22431)

(cherry picked from commit 9c3144e286)

# Conflicts:
#	runtime/src/serde_snapshot.rs

* fixup conflicts

* fixup remove unused use

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-02-01 19:03:02 +00:00
c715bc93cf removes Rng field from WeightedShuffle struct (#22850) (#22868)
(cherry picked from commit 45e09664b8)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-02-01 17:33:52 +00:00
3aa3cd8852 Clean up before credits_auto_rewind (#22839) (#22866)
* Clean up before credits_auto_rewind

* Use `=` intead of `|=` for mutable bool

(cherry picked from commit 545c97f903)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2022-02-01 15:22:33 +00:00
f83cb74509 rpc-sts: dedupe before initial send (#22856)
(cherry picked from commit 9f1f7aff2b)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-01 01:54:54 +00:00
6c47a98945 Small punctuation fix (#22838) (#22849)
(cherry picked from commit 29bf1e2529)

Co-authored-by: Justin Kat <601027+Jkat@users.noreply.github.com>
2022-01-31 18:48:16 +00:00
4dfbb4347c includes zero weighted entries in WeightedShuffle (#22829) (#22847)
The current WeightedShuffle implementation excludes zero-weighted entries
from the shuffle:
https://github.com/solana-labs/solana/blob/13e631dcf/gossip/src/weighted_shuffle.rs#L29-L30

Though mathematically this might make more sense, for our use-cases
(turbine specifically), this results in less efficient code:
https://github.com/solana-labs/solana/blob/13e631dcf/core/src/cluster_nodes.rs#L409-L430

This commit changes the implementation so that zero weighted indices are
also included in the shuffle but appear only at the end after non-zero
weighted indices.

(cherry picked from commit 604ca9316c)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-31 18:16:40 +00:00
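A simplified sketch of the behavior described above, assuming the rand crate's 0.8-style API: non-zero-weighted indices are drawn by weighted sampling without replacement, and zero-weighted indices are appended at the end rather than dropped. This O(n²) illustration is not the actual gossip/src/weighted_shuffle.rs implementation.

```rust
use rand::seq::SliceRandom;
use rand::Rng;

fn weighted_shuffle_with_zeros<R: Rng>(rng: &mut R, weights: &[u64]) -> Vec<usize> {
    let mut nonzero: Vec<usize> = (0..weights.len()).filter(|&i| weights[i] > 0).collect();
    let mut zeros: Vec<usize> = (0..weights.len()).filter(|&i| weights[i] == 0).collect();
    let mut out = Vec::with_capacity(weights.len());
    // Weighted sampling without replacement over the non-zero entries.
    while !nonzero.is_empty() {
        let total: u64 = nonzero.iter().map(|&i| weights[i]).sum();
        let mut pick = rng.gen_range(0..total);
        let pos = nonzero
            .iter()
            .position(|&i| {
                if pick < weights[i] {
                    true
                } else {
                    pick -= weights[i];
                    false
                }
            })
            .unwrap();
        out.push(nonzero.swap_remove(pos));
    }
    // Zero-weight entries are not dropped; they go last, shuffled uniformly.
    zeros.shuffle(rng);
    out.extend(zeros);
    out
}
```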
28fc733894 add 'ticks-per-slot' to 'solana-test-validator' (#22701)
* add 'ticks-per-slot' to 'solana-test-validator'

* add input parser validator for "ticks-per-slot" argument

* fix fmt

(cherry picked from commit 0562426661)
2022-01-28 20:36:35 -08:00
93b44d8a4c Add new_from_parent() timings (#22744) (#22806)
(cherry picked from commit 94a5aee484)

Co-authored-by: carllin <carl@solana.com>
2022-01-28 03:10:53 +00:00
2804204f80 Adds TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER and TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE. (#22790) 2022-01-28 00:52:09 +01:00
4d891043d1 Update syscall base costs 2022-01-27 13:36:16 -08:00
74498650bc Always contact release.solana.com over https (#22795)
(cherry picked from commit bd86459a94)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-01-27 21:30:37 +00:00
af3b307734 solana-test-validator now supports the --rpc-pubsub-enable-vote-subscription flag
(cherry picked from commit 75658e2a96)
2022-01-27 11:19:26 -08:00
2368e09d89 Add vote account address to vote subscription
(cherry picked from commit 331b953551)

# Conflicts:
#	core/src/cluster_info_vote_listener.rs
#	rpc/src/rpc_pubsub.rs
#	rpc/src/rpc_subscriptions.rs
2022-01-27 11:19:26 -08:00
6fca541847 Restrict the Mergify copy command to core contributors (#22792)
(cherry picked from commit c0638439be)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-01-27 17:10:07 +00:00
15e9cedc0d test_ed25519 fails if we randomly select index 1 (#22780)
(cherry picked from commit c1b543c74d)

Co-authored-by: Sean Young <sean@mess.org>
2022-01-27 12:50:01 +00:00
d68a40396c Improve poh recorder metrics (#22730) (#22764)
* Improve poh recorder metrics

* Add metric for poh service send record

* feedback

* clean up

(cherry picked from commit 115b488807)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-27 08:56:41 +00:00
b0e0410003 Set the correct root in block commitment cache initialization (#22750) (#22757)
* Set the correct root in block commitment cache initialization

* clean up test

* bump

(cherry picked from commit d9c259a231)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-27 03:44:59 +00:00
d1174f677e Perf: Reduce write locks on blockhash queue (#22729) (#22751)
* Perf: Reduce write locks on blockhash queue

* Add comment about thread safety

* Add comment about write starvation

(cherry picked from commit 071e97053f)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-26 10:03:50 +00:00
cf88542254 Update vote-signing.md to remove references to anachronistic behavior (#22742)
(cherry picked from commit 8b1cde83c1)

Co-authored-by: Bryan Ischo <bryan@ischo.com>
2022-01-25 23:53:46 +00:00
99c55dbec3 Export BanksClientError (#22715) (#22732)
(cherry picked from commit f366e0f890)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-01-25 19:04:45 +00:00
bc412d51d6 Fix stable-bpf job by referencing Signature directly (#22721) 2022-01-25 02:41:36 +00:00
87c3e71bb8 spl-associated-token-account: Add feature for new program (#22648) (#22719)
* spl-associated-token-account: Add feature for new program

* Address feedback

(cherry picked from commit fc21af4e6e)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2022-01-24 19:20:30 -07:00
d0cf5bb721 Bump thread_local (#22711) (#22714)
(cherry picked from commit 1c10677f82)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-01-24 12:10:10 -07:00
fb54991901 Fix typos (#22700)
Fix typos

(cherry picked from commit fd0f5e4d12)

Co-authored-by: tanliwei <tanliwei@users.noreply.github.com>
2022-01-24 03:23:40 +00:00
9995a54be7 fix: flag was incorrect in doc (#22698)
(cherry picked from commit 714a344937)

Co-authored-by: Arash <arash@backbone.link>
2022-01-23 21:24:58 +00:00
d9a5f714e1 Refactor: Rename variables and helper method to PohRecorder (#22676) (#22688)
* Refactor: Rename leader_first_tick_height field

* Refactor: add `PohRecorder::slot_for_tick_height` helper

* Refactor: Add type for poh leader status

(cherry picked from commit 1240217a73)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-23 15:34:44 +08:00
620a80b581 sigverify -- dedupe bloom filter too slow followups 2022-01-21 23:59:41 -07:00
b354dae249 Perf: Only check executors cache for executable bpf program ids (backport #22624) (#22629)
* Perf: Only check executors cache for executable bpf program ids (#22624)

* Only check executors cache for executable bpf program ids

* switch to native loader check

* clean up tests

* fix tests

* clippy

(cherry picked from commit 7d34a7acac)

# Conflicts:
#	runtime/src/bank.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-22 14:08:15 +08:00
af7ed83285 Document transaction module (backport #22440) (#22664)
* Document transaction module (#22440)

* Document transaction module

* example_mocks is only for feature = full

(cherry picked from commit 8dd62854fa)

# Conflicts:
#	sdk/src/transaction/mod.rs

* Fix conflicts

Co-authored-by: Brian Anderson <andersrb@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-01-21 22:58:44 -07:00
8bc4cc90d2 Bump version to 1.9.6 2022-01-21 20:15:43 -07:00
39a4cc95dc v1.9: Impl get_/set_return_data syscalls for ProgramTest (#22652)
* Remove &mut self from set_return_data

* Impl get_/set_return_data for program-test SyscallStubs

* Add return_data program-test
2022-01-21 18:03:27 -07:00
187ed6a387 Remove unused fields from Bank (backport #22491) (#22630)
* Remove unused fields from Bank (#22491)

(cherry picked from commit 9977396d8f)

# Conflicts:
#	runtime/src/serde_snapshot/future.rs

* fixup the backport

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-22 00:13:32 +00:00
91bc44931f Add hidden cli option to allow validator reports replayed transaction cost metrics (backport #22369) (#22519)
* Add hidden cli option to allow validator reports replayed transaction cost metrics (#22369)

* add hidden cli option to allow validator reports replayed transaction cost detail metrics

* Update validator/src/main.rs

Co-authored-by: Michael Vines <mvines@gmail.com>

* - rebase master, using unbounded instead of channel; downgrade to datapoint_trace

* removed cli arg, prefer log at trace

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit a724fa2347)

# Conflicts:
#	core/src/tvu.rs

* fix conflict

Co-authored-by: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com>
Co-authored-by: Tao Zhu <tao@solana.com>
2022-01-21 15:05:41 -07:00
35ca3182ba Add estimated and actual block cost units metrics (backport #22326) (#22517)
* Add estimated and actual block cost units metrics (#22326)

* - report cost details for transactions selected to be packed into block;
- report estimated execution units packed into block, and actual units and time after execution

* revert reporting per-transaction details

* roll up transaction cost details (e.g. signature cost, write lock, data cost and execution costs) into block stats

* change naming from units to cu, use struct to replace tuple

(cherry picked from commit 1309a9cea0)

# Conflicts:
#	core/src/banking_stage.rs
#	core/src/qos_service.rs

* fix conflicts

Co-authored-by: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com>
Co-authored-by: Tao Zhu <tao@solana.com>
2022-01-21 15:05:19 -07:00
24345d8e63 Update introduction.md (#22623) (#22625)
A few fixes for grammatical and spelling issues.

(cherry picked from commit 373f200ab8)

Co-authored-by: filip <44206832+filipkujawa@users.noreply.github.com>
2022-01-21 13:31:47 -07:00
bf45f5b88e Faster dedup v1.9 (#22638)
Faster dedup port of #22607
2022-01-21 11:21:28 -08:00
2ddb5b27c1 Refactor: move instructions sysvar serialization out of Message (#22544) (#22595)
(cherry picked from commit 7ba57e7a7c)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-21 13:45:47 +08:00
7f10fd6a21 Refactor: move compute budget runtime logic into solana-program-runtime (backport #22543) (#22545)
* Refactor: move compute budget runtime logic into solana-program-runtime (#22543)

(cherry picked from commit cc76a73c49)

# Conflicts:
#	programs/bpf/tests/programs.rs
#	sdk/src/compute_budget.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-21 13:45:01 +08:00
a0a881594a Speed up packet dedup and fix benches (#22592) (#22613)
* Speed up packet dedup and fix benches

* fix tests

* allow int arithmetic in bench

(cherry picked from commit a2d251ce1e)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-20 22:51:58 +00:00
e9e35fd7bd system-monitor-service: support percentages from bigger numbers (#22598)
(cherry picked from commit cca3dbc76d)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-01-20 11:37:52 +00:00
66b94b86a9 banking-stage: remove unused stats fields 2022-01-20 06:09:10 +00:00
59f406d78a Refactor: move simple vote parsing to runtime (backport #22537) (#22587)
* Refactor: move simple vote parsing to runtime (#22537)

(cherry picked from commit 7f20c6149e)

# Conflicts:
#	core/src/cluster_info_vote_listener.rs
#	core/src/verified_vote_packets.rs
#	programs/vote/src/vote_transaction.rs
#	rpc/src/rpc_subscriptions.rs
#	runtime/src/bank.rs
#	runtime/src/bank_utils.rs
#	runtime/src/vote_sender_types.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-20 04:51:50 +00:00
dbf9a32883 Optimize packet dedup (#22571) (#22585)
* Use bloom filter to dedup packets

* dedup first

* Update bloom/src/bloom.rs

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* Update core/src/sigverify_stage.rs

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* Update core/src/sigverify_stage.rs

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* Update core/src/sigverify_stage.rs

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* fixup

* fixup

* fixup

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit d343713f61)

# Conflicts:
#	Cargo.lock
#	core/Cargo.toml
#	core/src/banking_stage.rs
#	core/src/sigverify_stage.rs
#	gossip/Cargo.toml
#	perf/Cargo.toml
#	programs/bpf/Cargo.lock
#	runtime/Cargo.toml

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
2022-01-20 02:51:49 +00:00
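A hedged sketch of the dedup-before-sigverify idea above, with a HashSet of packet hashes standing in for the bloom filter (which, per the newer entries earlier in this list, was later replaced by a faster dedup): duplicate packets are dropped before the expensive signature verification.

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};

// Keep only the first occurrence of each packet payload.
fn dedup_packets(packets: &[Vec<u8>]) -> Vec<&Vec<u8>> {
    let mut seen = HashSet::new();
    packets
        .iter()
        .filter(|data| {
            let mut hasher = DefaultHasher::new();
            data.hash(&mut hasher);
            // insert() returns false when the hash was already present.
            seen.insert(hasher.finish())
        })
        .collect()
}
```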
37e9076db0 Add PacketBatch packet_indexes stat (#22564) (#22575)
* collect stats on packet batch indices

* cleanup

* cleanup

* cleanup

* change name

(cherry picked from commit 650882217c)

# Conflicts:
#	core/src/banking_stage.rs

Co-authored-by: buffalu <85544055+buffalu@users.noreply.github.com>
2022-01-20 00:05:05 +00:00
f77ea5f324 improves sigverify discard_excess_packets performance (backport #22577) (#22580)
* improves sigverify discard_excess_packets performance (#22577)

As shown by the added benchmark, the current code does worse if there is a
spam address plus a lot of unique addresses.

on current master:
test bench_packet_discard_many_senders  ... bench:   1,997,960 ns/iter (+/- 103,715)
test bench_packet_discard_mixed_senders ... bench:  14,256,116 ns/iter (+/- 534,865)
test bench_packet_discard_single_sender ... bench:   1,306,809 ns/iter (+/- 61,992)

with this commit:
test bench_packet_discard_many_senders  ... bench:   1,644,025 ns/iter (+/- 83,715)
test bench_packet_discard_mixed_senders ... bench:   1,089,789 ns/iter (+/- 86,324)
test bench_packet_discard_single_sender ... bench:     955,234 ns/iter (+/- 55,953)

(cherry picked from commit dcf44d2523)

# Conflicts:
#	core/src/sigverify_stage.rs

* removes mergify merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-19 20:14:10 +00:00
c9df037dae Track discard time of excess packets in sigverify (#22554) (#22570)
* discard time histogram

* closer to the if

* update

(cherry picked from commit e616a7ebfc)

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
2022-01-19 01:52:30 +00:00
2b87d99479 Use VecDeque instead of Vec in sigverify stage (#22538) (#22550)
avoid bad performance of remove(0) for a single sender

(cherry picked from commit 49443406fd)

# Conflicts:
#	core/src/sigverify_stage.rs

Co-authored-by: sakridge <sakridge@gmail.com>
2022-01-19 01:46:34 +00:00
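A small illustration of the point above: Vec::remove(0) shifts every remaining element on each pop, while VecDeque::pop_front is constant time.

```rust
use std::collections::VecDeque;

fn drain_front_vec(mut batches: Vec<Vec<u8>>) {
    while !batches.is_empty() {
        let _batch = batches.remove(0); // shifts the whole tail each time: O(n) per pop
    }
}

fn drain_front_deque(mut batches: VecDeque<Vec<u8>>) {
    while let Some(_batch) = batches.pop_front() {
        // constant-time pop from the front
    }
}
```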
2546ef4ad6 metrics for generate new bank forks (#22492) (#22548)
* metrics for generate new bank forks

* fixed

* Apply suggestions from code review

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* --fixup

* fixup!

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit 2d94e6e5d3)

# Conflicts:
#	core/src/replay_stage.rs

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
2022-01-19 01:45:25 +00:00
96ae795758 Add more details about vote account key rotation (#22539)
(cherry picked from commit 901b2881fb)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-01-17 09:16:25 +00:00
9bddb4e437 vote account withdraw authority may change the authorized voter 2022-01-15 23:46:10 -08:00
4079f12a3e Perf: Store deserialized sysvars in the sysvars cache (backport #22455) (#22480)
* Perf: Store deserialized sysvars in the sysvars cache (#22455)

* Perf: Store deserialized sysvars in sysvars cache

* add bench

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-15 08:48:34 +00:00
e121b94524 Bugfix/block subscribe (#22516) (#22525)
* use correct operation name

* require enable_rpc_transaction_history flag when enabling block_subscription

Co-authored-by: Zano <segfaultdoctor@protonmail.com>
(cherry picked from commit 7171b3a3ac)

Co-authored-by: segfaultdoctor <zano@jito.wtf>
2022-01-15 05:05:21 +00:00
a7623ad18c Fetch sysvars from invoke context for vote program (backport #22444) (#22469)
* Fetch sysvars from invoke context for vote program (#22444)

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-15 03:56:00 +00:00
054e475c6c ABI changed - added two more vote_cost related fields to cost_tracker 2022-01-14 10:49:43 -06:00
7a421fe602 Port counting vote CUs to block cost to v1.9 2022-01-14 10:49:43 -06:00
2ef0b85829 docs: fix get fee for message docs (#22501) (#22504)
(cherry picked from commit f12a8fcd73)

Co-authored-by: Yihau Chen <a122092487@gmail.com>
2022-01-14 09:00:22 +00:00
a6b7a3b7ff Refactor: move sysvar cache to new module (backport #22448) (#22461)
* Refactor: move sysvar cache to new module

(cherry picked from commit 7171c95bdd)

# Conflicts:
#	Cargo.lock
#	program-runtime/Cargo.toml
#	program-runtime/src/invoke_context.rs
#	programs/bpf/Cargo.lock
#	programs/bpf_loader/src/syscalls.rs
#	programs/stake/src/stake_instruction.rs
#	programs/vote/src/vote_instruction.rs
#	runtime/src/message_processor.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-14 08:43:26 +00:00
9d69f2b324 Bank::get_fee_for_message is now nonce aware (backport #22494) (#22499)
* `Bank::get_fee_for_message` is now nonce aware

(cherry picked from commit 4c577d7f8c)

# Conflicts:
#	runtime/src/bank.rs
#	sdk/program/src/message/sanitized.rs

* Resolve conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-01-14 03:25:10 +00:00
4f82a4ba1f log internals (#22493) (#22497)
(cherry picked from commit eca8d21249)

Co-authored-by: carllin <carl@solana.com>
2022-01-14 02:33:36 +00:00
ed0b30efcc nit: Traceable balance checks (#22462) (#22489)
(cherry picked from commit 1632ee03da)

Co-authored-by: Jack May <jack@solana.com>
2022-01-13 19:09:00 +00:00
4ee6bc9a93 downgrade individual per-program-timing to trace to reduce writes to influx (#22471)
(cherry picked from commit 6614727be8)

Co-authored-by: Tao Zhu <tao@solana.com>
2022-01-13 02:37:47 +00:00
676c43b9d2 Fixed a merge issue (#22464)
Removed AddressLookupError
2022-01-12 13:56:39 -08:00
b1d8296498 Update docs vis-a-vis prohibition of RentPaying accounts (#22438) (#22458)
* Rent-exempt docs for exchange integrations

* Remove discussion of rent-paying accounts from developing docs

* Improve verbiage

(cherry picked from commit b27333e52d)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-01-12 19:58:05 +00:00
34984ed16e v1.9: Only examine explicit tx accounts for rent state (#22442)
* Add failing test

* Fix: only examine accounts explicitly included in a tx
2022-01-11 20:55:10 -07:00
f4d1577337 Refactor: consolidate memo extraction for each message version (#22422) (#22435)
(cherry picked from commit 35a5dd9c45)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-12 00:23:03 +00:00
58dcc451a9 Prevent rent-paying account creation (backport #22292) (#22428)
* Prevent rent-paying account creation (#22292)

* Fixup typo

* Add new feature

* Add new TransactionError

* Add framework for checking account state before and after transaction processing

* Fail transactions that leave new rent-paying accounts

* Only check rent-state of writable tx accounts

* Review comments: combine process_result success behavior; log and metrics before feature activation

* Fix tests that assume rent-exempt accounts are okay

* Remove test no longer relevant

* Remove native/sysvar special case

* Move metrics submission to report legacy->legacy rent paying transitions as well

(cherry picked from commit 637e366b18)

# Conflicts:
#	runtime/src/bank.rs
#	runtime/src/lib.rs

* Fix conflicts and rework for TransactionRefCells

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-01-11 23:17:03 +00:00
f0695ef6d9 optimizes ReadOnlyAccountsCache LRU eviction implementation (backport #22403) (#22426)
* optimizes ReadOnlyAccountsCache LRU eviction implementation (#22403)

ReadOnlyAccountsCache uses a background thread, a table scan, and a sort
to implement its LRU eviction policy:
https://github.com/solana-labs/solana/blob/eaa52bc93/runtime/src/read_only_accounts_cache.rs#L66-L73
https://github.com/solana-labs/solana/blob/eaa52bc93/runtime/src/read_only_accounts_cache.rs#L186-L191
https://github.com/solana-labs/solana/blob/eaa52bc93/runtime/src/read_only_accounts_cache.rs#L222

DashMap internally locks each shard when accessed; so a table scan in
the background thread can create a lot of lock contention.

This commit adds an index-list queue containing cached keys in the order
that they are accessed. Each hash-map entry also includes its index into
this queue.
When an item is first entered into the cache, it is added to the end of
the queue. Each time an entry is looked up from the cache, it is also
moved to the end of the queue. As a result, items in the queue are always
sorted in the order in which they were last accessed. When doing LRU
eviction, cache entries are evicted from the front of the queue.
Using index-list, all of the queue operations above are O(1) with low
overhead, so the above achieves an efficient implementation of the LRU
cache eviction policy.

(cherry picked from commit a49ef49f87)

# Conflicts:
#	Cargo.lock
#	programs/bpf/Cargo.lock
#	runtime/Cargo.toml
#	runtime/src/accounts_db.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-11 19:09:57 +00:00
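
A minimal sketch of the access-ordered eviction idea described in the commit above. The names are illustrative, not Solana APIs, and the real change uses the index-list crate for O(1) queue moves; this stand-in tracks access order in a BTreeMap (O(log n)) purely to keep the example short.

```rust
use std::collections::{BTreeMap, HashMap};

// Each entry remembers the "age" key under which it sits in `order`, so a
// lookup can move it to the back of the access order without scanning the
// whole table, and eviction always pops the least recently used entry.
struct AccessOrderedCache<K, V>
where
    K: std::hash::Hash + Eq + Clone,
{
    entries: HashMap<K, (V, u64)>, // value plus its current position in `order`
    order: BTreeMap<u64, K>,       // smallest key = least recently used
    next_age: u64,
    capacity: usize,
}

impl<K, V> AccessOrderedCache<K, V>
where
    K: std::hash::Hash + Eq + Clone,
{
    fn new(capacity: usize) -> Self {
        Self { entries: HashMap::new(), order: BTreeMap::new(), next_age: 0, capacity }
    }

    fn get(&mut self, key: &K) -> Option<&V> {
        if let Some((_, age)) = self.entries.get_mut(key) {
            // Move the entry to the back of the access order.
            let new_age = self.next_age;
            self.next_age += 1;
            let old_age = std::mem::replace(age, new_age);
            self.order.remove(&old_age);
            self.order.insert(new_age, key.clone());
        }
        self.entries.get(key).map(|(value, _)| value)
    }

    fn put(&mut self, key: K, value: V) {
        // Evict from the front of the queue until there is room.
        while !self.entries.contains_key(&key) && self.entries.len() >= self.capacity {
            let (&oldest_age, _) = self.order.iter().next().expect("order tracks entries");
            let victim = self.order.remove(&oldest_age).expect("age was present");
            self.entries.remove(&victim);
        }
        let age = self.next_age;
        self.next_age += 1;
        if let Some((_, stale_age)) = self.entries.insert(key.clone(), (value, age)) {
            // The key was re-inserted; drop its old slot in the access order.
            self.order.remove(&stale_age);
        }
        self.order.insert(age, key);
    }
}

fn main() {
    let mut cache = AccessOrderedCache::new(2);
    cache.put("a", 1);
    cache.put("b", 2);
    let _ = cache.get(&"a"); // "a" becomes the most recently used entry
    cache.put("c", 3);       // evicts "b", the least recently used entry
    assert!(cache.get(&"b").is_none());
    assert_eq!(cache.get(&"a"), Some(&1));
    assert_eq!(cache.get(&"c"), Some(&3));
}
```
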
41b0d6cca3 limits gossip vote stats to the top most voted slots (#22416) (#22418)
(cherry picked from commit 49da347d84)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-10 23:14:57 +00:00
ae77a52c97 wrap create executor timings datapoint in a module (#22398)
(cherry picked from commit 428575f9ae)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-01-09 06:41:44 +00:00
133314e58c bank: fix executor cache metrics (#22396)
(cherry picked from commit 3b4aad9df1)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-01-09 06:06:35 +00:00
cb49ae21b4 Bump version to v1.9.5 2022-01-08 21:17:51 +00:00
a9ebba5643 Clarify docs of minimum_balance (#22385) (#22387)
(cherry picked from commit 0f94e1d3a2)

Co-authored-by: Evan Conrad <evan@roomservice.dev>
2022-01-08 20:07:57 +00:00
8ce65878da improve multi executor cache addition (#22382)
Co-authored-by: Jack May <jack@solana.com>
2022-01-08 13:03:46 +00:00
a4ca18a54d add executor creation trace timings 2022-01-08 05:25:37 -07:00
7cb147fdcd Executor cache count primer (backport #22333) (#22375)
* bank: prime new executor cache entry use-counts

(cherry picked from commit 4ce48307bb)

* --amend

(cherry picked from commit ad3cb0bc93)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-01-08 11:01:34 +00:00
2d693be9fa remove per program timings from blockstore processor ledger replay (#22370) (#22372)
(cherry picked from commit 813006b33b)

Co-authored-by: carllin <carl@solana.com>
2022-01-08 08:43:48 +00:00
50e716fc80 bank: Add executors cache metrics (#22368)
(cherry picked from commit 6d76db1de5)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-01-08 01:34:53 +00:00
1f00926874 Add runtime support for address table lookups (backport #22223) (#22354) 2022-01-08 07:57:04 +08:00
662c6be51e removes CowCachedExecutors (#22343) (#22363)
Copy-on-write semantics for cached executors can be implemented by a
simple Arc<CachedExecutors> as opposed to CowCachedExecutors:
https://github.com/solana-labs/solana/blob/f1e2598ba/runtime/src/bank.rs#L244-L247

This will also avoid the need for double locking as in:
https://github.com/solana-labs/solana/blob/f1e2598ba/runtime/src/bank.rs#L3490-L3491
https://github.com/solana-labs/solana/blob/f1e2598ba/runtime/src/bank.rs#L3525-L3526

(cherry picked from commit c2389fc209)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-07 16:04:13 +00:00
9761f5b67f Add aarch64-apple-darwin publish tarball step (#22356)
(cherry picked from commit e2aa932e97)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-01-07 10:17:11 +00:00
7b1da62763 Add execute metrics (backport #22296) (#22335)
* move `ExecuteTimings` from `runtime::bank` to `program_runtime::timings`

(cherry picked from commit 7d32909e17)

# Conflicts:
#	core/Cargo.toml
#	ledger/Cargo.toml
#	programs/bpf/Cargo.lock

* Add execute metrics

(cherry picked from commit b25e4a200b)

* Add metrics for executor creation

(cherry picked from commit 848b6dfbdd)

* Add helper macro for `AddAssign`ing with saturating arithmetic

(cherry picked from commit deb9344e49)

* Use saturating_add_assign macro

(cherry picked from commit 72fc6096a0)

* Consolidate process instruction execution timings to own struct

(cherry picked from commit 390ef0fbcd)

Co-authored-by: Trent Nelson <trent@solana.com>
Co-authored-by: Carl Lin <carl@solana.com>
2022-01-07 09:11:18 +00:00
2f97fee71a Cleanup ledger-tool analyze-storage command (#22310) (#22352)
* Make ledger-tool analyze-storage use Blockstore::open()

Opening a large ledger may require setting a larger open file descriptor
limit. Blockstore::open() does this whereas the underlying Database
object that analyze-storage was opening does not.

* Move key_size call lookup to take advantage of traits

* Fix typo where analyze worked on wrong column

* Make analyze-storage analyze all columns

(cherry picked from commit 9f1f64e384)

Co-authored-by: steviez <steven@solana.com>
2022-01-07 07:47:27 +00:00
3ae674dd28 Increase timeout of local-cluster-slow CI step 2022-01-07 15:31:10 +08:00
8214bc9db4 Retain executor cache counts (#22322) (#22341)
(cherry picked from commit f1e2598baa)

Co-authored-by: Jack May <jack@solana.com>
2022-01-06 19:00:29 +00:00
1132def37c Split up local cluster tests into separate CI steps (backport #22295) (#22303)
* Split up local cluster tests into separate CI steps (#22295)

* Split up local cluster tests into separate CI steps

* Update buildkite-pipeline.sh

(cherry picked from commit 0e1afcbb26)

# Conflicts:
#	local-cluster/tests/local_cluster.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-06 17:02:45 +00:00
7267ebaaf2 Consume from AccountsDataMeter (backport #21994) (#22323)
* Consume from AccountsDataMeter (#21994)

(cherry picked from commit 1460f00e0f)

# Conflicts:
#	program-runtime/src/invoke_context.rs

* fixup! conflicts

* fix tests for v1.9

* fixup! clippy

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-06 17:01:02 +00:00
4be6e52a4f cache executors on failed transactions (backport #22308) (#22328)
* cache executors on failed transactions (#22308)

(cherry picked from commit 12e160269e)

# Conflicts:
#	program-runtime/src/invoke_context.rs
#	runtime/src/bank.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2022-01-06 09:14:48 +00:00
e7348243b4 [ledger-tool]compare_blocks (#22229) (#22330)
* 1. Made load_credentials accept a credential path as a parameter. 2. Partially implemented the bigtable comparison function

* finding missing blocks in bigtables in a specified range

* refactor compare-blocks, add unit test for missing_blocks and fmt

* compare-block: fix last-block bug

* refactor compare-block and improve wording

* Update ledger-tool/src/bigtable.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* update compare-block command-line description

* style:improve wording/naming/code style

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit d9220652ad)

Co-authored-by: pieceofr <komimi.p@gmail.com>
2022-01-06 08:55:26 +00:00
fc0c74d722 Only sum accounts data len from non-zero lamport accounts (#22309) (#22317)
(cherry picked from commit ab13e39518)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-06 02:43:54 +00:00
687cd4779e Add AccountsDataMeter to InvokeContext (#21813) (#22299)
(cherry picked from commit 800472ddf5)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-06 01:31:11 +00:00
b28d7050ab Update default --dynamic-port-range values to include some room for additional ports that may be added in the future (#22321)
(cherry picked from commit 37ebd9bd9e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-01-06 01:29:06 +00:00
6d72acfd6d --dynamic-port-range now requires at least 12 ports 2022-01-05 16:12:28 -08:00
840ec0686e Fix broken build from bpf/tests/programs.rs (#22312)
These tests were broken due to PR #22289
2022-01-05 15:06:15 -06:00
ba0188a36d Bump version to 1.9.4 (#22304) 2022-01-05 12:02:36 -06:00
05b9a2f203 fix(rpc): recreate dead and uncleaned subscriptions (#22281) (#22294)
(cherry picked from commit c1995c647b)

Co-authored-by: Nikita <bananaelecitrus@gmail.com>
2022-01-05 17:16:12 +00:00
8578429c4d Refactor: Improve type safety and readability of transaction execution (backport #22215) (#22289)
* Refactor: Improve type safety and readability of transaction execution (#22215)

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-05 23:01:15 +08:00
87f4a1f4b6 Bank gets accounts data len delta from MessageProcessor::process_message() (#22288)
(cherry picked from commit 635337d2ff)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-05 05:50:03 +00:00
17411f9b4c Add accounts_data_len to Bank (#21781) (#22285)
(cherry picked from commit eeb97fe7ce)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-05 02:24:52 +00:00
fb0e5adc7e patches bug in recv_mmsg when npkts != nrecv (backport #22276) (#22280)
* removes total-size from return value of recv_mmsg

(cherry picked from commit 4b24499916)

* patches bug in recv_mmsg when npkts != nrecv

If recv_mmsg receives 2 packets where the first one is filtered out,
then it returns npkts == 1:
https://github.com/solana-labs/solana/blob/01a096adc/streamer/src/recvmmsg.rs#L104-L115

But then streamer::packet::recv_from will erroneously keep the 1st
packet and drop the 2nd one:
https://github.com/solana-labs/solana/blob/01a096adc/streamer/src/packet.rs#L34-L49

To avoid this bug, this commit updates recv_mmsg to always return the
total number of received packets. If the socket address cannot be
correctly obtained, it is left as the default value, which is UNSPECIFIED:
https://github.com/solana-labs/solana/blob/01a096adc/sdk/src/packet.rs#L145

(cherry picked from commit 379feecae5)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-04 23:42:52 +00:00
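
An illustrative sketch (not the streamer API) of the fix described above: report the total number of packet slots filled, and leave a slot whose source address could not be resolved at an unspecified default instead of shrinking the count, so the caller never truncates away a valid packet.

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn unspecified() -> SocketAddr {
    SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)
}

// Hypothetical receive helper: `raw` stands in for packets read off a socket,
// where the source address may or may not have been resolved.
fn receive_batch(
    raw: &[(Option<SocketAddr>, &'static str)],
) -> (usize, Vec<(SocketAddr, &'static str)>) {
    let packets: Vec<_> = raw
        .iter()
        .map(|(addr, payload)| (addr.unwrap_or_else(unspecified), *payload))
        .collect();
    // Return the *total* number received, not just the count with a usable address.
    (packets.len(), packets)
}

fn main() {
    // The first packet arrives without a resolvable source address.
    let sender = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), 8000);
    let raw = [(None, "first"), (Some(sender), "second")];

    let (nrecv, packets) = receive_batch(&raw);
    assert_eq!(nrecv, 2);
    // Truncating to nrecv keeps both packets, so the caller cannot keep the
    // address-less packet while dropping the valid one.
    assert_eq!(packets.len(), nrecv);
    assert_eq!(packets[1].1, "second");
}
```
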
f4ded6fb6b Updates to the address lookup table proposal (#22269) (#22270)
Co-authored-by: Justin Starry <justin@solana.com>
2022-01-04 23:38:51 +00:00
f89bf7b939 Compute accounts data len during generate_index() (#21757) (#22277)
(cherry picked from commit ec7e17787e)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-04 22:44:51 +00:00
c99aed4abf Update address map proposal to improve dev experience (#21576) (#22283)
* Update address map proposal to improve dev experience

* another revision to match implementation

(cherry picked from commit 0224a8b127)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-04 22:11:59 +00:00
edfd8c1717 Fix program log filtering (#22133) (#22151)
(cherry picked from commit c7b0917e1a)

Co-authored-by: Jack May <jack@solana.com>
2022-01-04 21:56:49 +00:00
09dbf069e8 Add test to enforce that program id account info for CPI is optional (#22069) (#22103)
* Update tests to demonstrate that program id account info for CPI is optional

* Clean up comments that say that program id account info is required

(cherry picked from commit ec7536faf6)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-04 21:47:48 +00:00
9764d4349b Add return types to generate_index() (#21735) (#22275)
(cherry picked from commit 1528f85112)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-04 18:27:40 +00:00
d84b994451 shrinks size of Packet.Meta (backport #22224) (#22273)
* removes seed and slot fields from Packet.Meta

507367e6ac
updated window-service to send shreds (as opposed to packets) to
retransmit-stage and so seed and slot fields in Packet.Meta are unused:
https://github.com/solana-labs/solana/blob/d6ec103be/sdk/src/packet.rs#L27-L28

(cherry picked from commit aa9f7ed7e8)

* uses std::net::IpAddr type for Packet.Meta.addr

(cherry picked from commit 73a7741c49)

# Conflicts:
#	streamer/src/streamer.rs

* adds bitflags to Packet.Meta

Instead of a separate bool field for each flag, all the flags can be
encoded type-safely as bitflags in a single u8:
https://github.com/solana-labs/solana/blob/d6ec103be/sdk/src/packet.rs#L19-L31

(cherry picked from commit 01a096adc8)

# Conflicts:
#	sdk/Cargo.toml

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-04 17:38:29 +00:00
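
A sketch of the flag-packing idea above, assuming the bitflags crate (1.x macro syntax) and a hypothetical flag set rather than the exact Packet.Meta fields.

```rust
use bitflags::bitflags;

bitflags! {
    // Several per-packet booleans packed into a single type-safe byte.
    struct PacketFlags: u8 {
        const DISCARD        = 0b0000_0001;
        const FORWARDED      = 0b0000_0010;
        const REPAIR         = 0b0000_0100;
        const SIMPLE_VOTE_TX = 0b0000_1000;
    }
}

fn main() {
    let mut flags = PacketFlags::empty();
    flags.insert(PacketFlags::FORWARDED);
    flags.set(PacketFlags::DISCARD, true);

    assert!(flags.contains(PacketFlags::FORWARDED | PacketFlags::DISCARD));
    assert!(!flags.contains(PacketFlags::REPAIR));
    // All of the flags still fit in a single byte.
    assert_eq!(std::mem::size_of::<PacketFlags>(), 1);
}
```
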
185f52b712 Lower vote-only-mode to 400 (#22210) (#22272)
(cherry picked from commit 2486e21ffe)

Co-authored-by: sakridge <sakridge@gmail.com>
2022-01-04 15:15:16 +00:00
3b59f67562 Limit number of accounts that a transaction can lock (backport #22201) (#22263)
* Limit number of accounts that a transaction can lock (#22201)

(cherry picked from commit 2b5e00d36d)

# Conflicts:
#	accountsdb-plugin-postgres/src/postgres_client/postgres_client_transaction.rs
#	runtime/src/accounts.rs
#	runtime/src/bank.rs
#	sdk/src/feature_set.rs
#	sdk/src/transaction/error.rs
#	storage-proto/proto/transaction_by_addr.proto
#	storage-proto/src/convert.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-04 11:34:34 +00:00
7d2589e2ac Documentation typos (#22262) (#22268)
* Fix typo markdown link

* Add missing punctuation full stop

(cherry picked from commit 9665da9d0b)

Co-authored-by: glihm <dev@glihm.net>
2022-01-04 11:15:06 +00:00
77558c315d Fixed issue #22124 -- missing historical data if slot updated later. (#22193) (#22259)
* Fixed issue #22124 -- missing historical data if slot updated later.

* Fixed a couple of comments

(cherry picked from commit 5b6027bef0)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2022-01-04 07:18:58 +00:00
464d533da3 Flip iter operations to keep associated address/header/packets together (#22245) (#22257)
Flip iter operations to keep associated address/header/packets together

Before this change, if cast_socket_addr() returned a None for any
address/header pair, the subsequent zip() would misalign the
address/header pair and packet. So, this change zips all three together,
then does filter_map() to keep things aligned.

Additionally, compute total_size inline to avoid running through packets
a second time.

(cherry picked from commit 20b61e28b6)

Co-authored-by: steviez <steven@solana.com>
2022-01-04 07:06:06 +00:00
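
A small sketch of the alignment idea above with illustrative data: zipping all three streams before filtering means an unparsable address drops its header and packet along with it, so later pairs never shift out of alignment.

```rust
// Stand-in for cast_socket_addr(): parsing fails for an empty address.
fn parse_addr(raw: &str) -> Option<String> {
    if raw.is_empty() {
        None
    } else {
        Some(raw.to_string())
    }
}

fn main() {
    let raw_addrs = ["1.2.3.4", "", "5.6.7.8"];
    let headers = ["h0", "h1", "h2"];
    let packets = ["p0", "p1", "p2"];

    // Zip first, filter second: the bad address at index 1 removes h1 and p1
    // with it instead of pairing h1 with the packet that follows.
    let aligned: Vec<(String, &str, &str)> = raw_addrs
        .iter()
        .zip(headers.iter())
        .zip(packets.iter())
        .filter_map(|((raw, hdr), pkt)| parse_addr(raw).map(|addr| (addr, *hdr, *pkt)))
        .collect();

    assert_eq!(aligned.len(), 2);
    assert_eq!(aligned[1], ("5.6.7.8".to_string(), "h2", "p2"));
}
```
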
f8bf478fde Fix bug, add error specific timings (#22225) (#22252)
(cherry picked from commit 005592998d)

Co-authored-by: carllin <carl@solana.com>
2022-01-04 02:53:59 +00:00
35fb47d1ce removes epoch_authorized_voters from VoteTracker (backport #22192) (#22248)
* removes epoch_authorized_voters from VoteTracker (#22192)

https://github.com/solana-labs/solana/pull/22169
verifies authorized-voter early on in vote-listener pipeline; and so
VoteTracker no longer needs to maintain and check for epoch authorized
voters.

(cherry picked from commit 69d71f8f86)

# Conflicts:
#	core/src/cluster_info_vote_listener.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-04 01:51:24 +00:00
5bd27dd175 Correctly set CI_COMMIT when Buildkite provides HEAD instead of a real commit 2022-01-03 17:39:49 -08:00
794f28d9ab Switch from arm64-apple-darwin to aarch64-apple-darwin to align with Rust's target names 2022-01-03 17:16:49 -08:00
d7a673f7f5 Add support for arm64-apple-darwin release/channel artifacts 2022-01-03 17:16:34 -08:00
b3fa1288aa Use experimential docker virtualization framework for arm64
(cherry picked from commit ed0b47c6f8)
2022-01-03 16:54:37 -08:00
3e4e2e9113 Prevent lookup tables from being closed during deactivation slot (#22221) (#22247)
(cherry picked from commit bbe5b66324)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-03 23:32:03 +00:00
fd4754e5a9 Correctly set CI_OS_NAME for macOs buildkite agents 2022-01-03 12:54:57 -08:00
0a9460ed8b re-calibrate limit based on mainnet data (backport #21995) (#22212)
* re-calibrate limit based on mainnet data, see issue #21917

(cherry picked from commit d743c2917c)

# Conflicts:
#	runtime/src/block_cost_limits.rs

* set secp256k1 cost similar to sigverify

(cherry picked from commit a2a7e91ad6)

* removes backport merge conflicts

Co-authored-by: Tao Zhu <tao@solana.com>
2022-01-03 19:22:10 +00:00
478c641cb5 Fix token-balance owner type in docs (#22240) (#22242)
(cherry picked from commit 9029b46570)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-01-03 18:28:54 +00:00
735f000952 Remove Xargo.toml reference (#22239)
(cherry picked from commit 56fd32bda2)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-01-03 17:39:19 +00:00
264bb903a3 Bump rbpf to v0.2.21 (#22216) (#22217)
(cherry picked from commit 9139be89b7)
2022-01-01 20:42:53 +00:00
7c5d3e5874 Exit early on BigTable error (#22200) (#22209)
(cherry picked from commit 0b1b36f088)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-01-01 00:45:18 +00:00
70d5b6aeaf Bump solana_rbpf to version v0.2.20 (#22164) (#22207)
(cherry picked from commit 8a43e2d889)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-31 21:57:20 +00:00
ca451ea23e checks for authorized voter early on in the vote-listener pipeline (backport #22169) (#22206)
* checks for authorized voter early on in the vote-listener pipeline (#22169)

Before votes are verified as signed by the authorized voter, they might
be dropped in verified-vote-packets code. If there are enough spam votes
from unauthorized voters, this may potentially drop valid votes but keep
the false ones.
https://github.com/solana-labs/solana/blob/57986f982/core/src/verified_vote_packets.rs#L165-L168

(cherry picked from commit c0c6038654)

# Conflicts:
#	core/src/cluster_info_vote_listener.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-31 21:46:12 +00:00
113d261a2c Count compute units even when transaction errors (backport #22182) (#22199)
* Count compute units even when transaction errors (#22182)

(cherry picked from commit d06e6c7425)

# Conflicts:
#	program-runtime/src/invoke_context.rs
#	runtime/src/cost_model.rs
#	runtime/src/message_processor.rs

* Resolve conflicts

Co-authored-by: carllin <carl@solana.com>
2021-12-31 21:14:00 +00:00
c6ab915668 chore: update transaction error links in docs (#22189) (#22197)
(cherry picked from commit 4e4577afbe)

Co-authored-by: Jacob Creech <82475023+jacobcreech@users.noreply.github.com>
2021-12-30 22:32:10 +00:00
d5c0ffc11f Update install/src/command.rs
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit 29edb130cc)
2021-12-30 07:30:17 -08:00
6a2b62de62 Add connect timeout and change overall timeout to None
(cherry picked from commit 3c1416091e)
2021-12-30 07:30:17 -08:00
4645be3e52 fix: Installer increase download req timeout from 30 seconds to 6 minutes
(cherry picked from commit a1912f8400)
2021-12-30 07:30:17 -08:00
7efd0391e9 Revert "Count compute units even when transaction errors (backport #22059) (#22154)" (#22175)
This reverts commit 401c542d2a.
2021-12-30 02:39:25 -05:00
6a556c5adb Stream additional block metadata via plugin (#22023) (#22179)
* Stream additional block metadata through plugin
blockhash, block_height, block_time, rewards are streamed

(cherry picked from commit f14928a970)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-30 05:44:12 +00:00
0cd45400ca Add docs for notifying transactions via plugin (#22097) (#22178)
* Added documentations for streaming transactions via plugin

* Updated comments for transaction info

* Updated doc on transaction format

* Removed a white space

* Apply suggestions from code review from Tyera

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 135af08b8b)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-30 05:10:32 +00:00
531f36c571 Don't forward packets received from TPU forwards port (#22078) (#22171)
* Don't forward packets received from TPU forwards port

* Add banking stage test

(cherry picked from commit b1d9a2e60e)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-30 10:42:28 +08:00
9c9d3e8b6b discards serialized gossip crds votes if cannot parse tx (backport #22129) (#22172)
* discards serialized gossip crds votes if cannot parse tx (#22129)

(cherry picked from commit c9c78622a8)

# Conflicts:
#	gossip/src/crds_value.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-29 22:38:12 +00:00
74b98c2dd4 get_signatures_for_address does not correctly account for result sets that span local and Bigtable sources (#22115) (#22168)
* get_signatures_for_address does not correctly account for result sets that span Blockstore and Bigtable.

This causes Bigtable to return `RowNotFound` until the new tx is uploaded.

Check that `before` exists in Bigtable, and if not, set it to `None` to return the full data set.

References #21442
Closes #22110

* Differentiate between before sig not found and no newer signatures

* Dedupe bigtable results to account for potential upload race

Co-authored-by: Tyera Eulberg <tyera@solana.com>
(cherry picked from commit bac6821e19)

Co-authored-by: Omar Kilani <omar.kilani@gmail.com>
2021-12-29 19:52:36 +00:00
9fb67f9b07 Prevent log spam (#22148) (#22152)
(cherry picked from commit f061059e45)

Co-authored-by: carllin <carl@solana.com>
2021-12-29 08:28:48 +00:00
401c542d2a Count compute units even when transaction errors (backport #22059) (#22154)
* Count compute units even when transaction errors (#22059)

(cherry picked from commit eaa8c67bde)

# Conflicts:
#	program-runtime/src/invoke_context.rs
#	runtime/src/bank.rs
#	runtime/src/message_processor.rs

* Fix merge conflicts

Co-authored-by: carllin <carl@solana.com>
2021-12-29 08:04:12 +00:00
14ed446923 cargo-build-bpf: Add Windows support (#20276) (#22155)
* cargo-build-bpf: Add Windows support

* Update error message

(cherry picked from commit 57986f982a)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-12-29 03:07:39 +00:00
adc584ee22 Add (preflight) simulation to BanksClient (#22084) (#22149)
* Add more-legitimate conversion from legacy Transaction to SanitizedTransaction

* Add Banks method with preflight checks

* Expose BanksClient method with preflight checks

* Unwrap simulation err

* Add Bank simulation method that works on unfrozen Banks

* Add simpler api

* Better name: BanksTransactionResultWithSimulation

(cherry picked from commit 422a095647)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-28 22:04:27 +00:00
810ca36eae skip reporting all-zero stats (#22054)
(cherry picked from commit 9c5d82557a)

Co-authored-by: Tao Zhu <tao@solana.com>
2021-12-28 07:03:13 +00:00
16f821ea8c Ensure AncestorHashesService selects an open port (#21919) (#21997)
(cherry picked from commit 7f6fb6937a)

Co-authored-by: carllin <wumu727@gmail.com>
2021-12-28 06:46:59 +00:00
584e9bfbe7 docs: fix typo (#22116) (#22118)
(cherry picked from commit f643a8b425)

Co-authored-by: Samuel Oloruntoba <git@kayandra.co>
2021-12-26 04:36:49 +00:00
3ad4c3306c Add PubsubClient::vote_subscribe (#22114)
(cherry picked from commit 0a0fc85282)

Co-authored-by: Kirill Fomichev <fanatid@ya.ru>
2021-12-25 23:19:46 +00:00
be0bcd85ed tracks erasure coding shreds' indices explicitly (#21822) (#22094)
The indices for erasure coding shreds are tied to data shreds:
https://github.com/solana-labs/solana/blob/90f41fd9b/ledger/src/shred.rs#L921

However with the upcoming changes to erasure schema, there will be more
erasure coding shreds than data shreds and we can no longer infer coding
shreds indices from data shreds.

The commit adds constructs to track coding shreds indices explicitly.

(cherry picked from commit 65d59f4ef0)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-23 19:38:50 +00:00
8708186760 Update checks.rs
(cherry picked from commit d06c04d02c)
2021-12-23 07:00:26 -08:00
8f3e37c174 Remove msg spam from deploying
(cherry picked from commit 52c1eb0160)
2021-12-23 07:00:26 -08:00
7d61935bf1 Bump bpf-tools to v1.21 (#22083)
(cherry picked from commit 7cc6262b5a)

Co-authored-by: Dmitri Makarov <dmakarov@alumni.stanford.edu>
2021-12-23 03:38:04 +00:00
a70eb098f4 Fix transaction pk violation (#22057) (#22076)
* Handle PK violation issue for transaction notification. The transaction might be replayed due to
validator restart.

(cherry picked from commit d6de4a2f4e)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-23 00:33:35 +00:00
f31593bfbe Update jsonrpc-api.md to document 'owner' property (#22074) (#22077)
* Update jsonrpc-api.md to document 'owner' property

Documents 'owner' property on the token balances struct.

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 67c8034fe5)

Co-authored-by: splintred <91386295+splintred@users.noreply.github.com>
2021-12-23 00:13:30 +00:00
8f26c71964 Fixed a typo in the SQL statement (#21872) (#22075)
* Fixed a typo in the SQL statement

* Fixed additional two errors in the postgres database objects

(cherry picked from commit b610e5503e)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-22 22:55:58 +00:00
9fbaaa5102 Increment execution timings on errors as well (#22053) (#22072)
(cherry picked from commit 37f6777ceb)

Co-authored-by: carllin <carl@solana.com>
2021-12-22 22:50:19 +00:00
78e7913352 Bump version to 1.9.3 (#22065) 2021-12-22 11:41:03 -06:00
f58b87befe v1.9: bump tarpc from 0.26.2 to 0.27.2 and add BanksClientError (#22055)
* chore: bump tarpc from 0.26.2 to 0.27.2

Bumps [tarpc](https://github.com/google/tarpc) from 0.26.2 to 0.27.2.
- [Release notes](https://github.com/google/tarpc/releases)
- [Changelog](https://github.com/google/tarpc/blob/master/RELEASES.md)
- [Commits](https://github.com/google/tarpc/commits)

---
updated-dependencies:
- dependency-name: tarpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

* Accommodate breaking changes

* Reword incorrect error message

* Add error module

* Revert client Error type to io::Error; easy transition to BanksClientError

* Bump tracing crates in programs

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2021-12-22 03:41:16 +00:00
1a2823b875 chore: bump lru from 0.7.0 to 0.7.1 (#22018) (#22056)
Bumps [lru](https://github.com/jeromefroe/lru-rs) from 0.7.0 to 0.7.1.
- [Release notes](https://github.com/jeromefroe/lru-rs/releases)
- [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/jeromefroe/lru-rs/compare/0.7.0...0.7.1)

---
updated-dependencies:
- dependency-name: lru
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
(cherry picked from commit 69d0b08dd8)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-12-21 16:27:43 -07:00
75fe0d3ecf Fix #21986 (#22035) (#22049)
* Partial revert "Updates documentation around what needs to be passed in CPI. (#21633)"

* Enforces the program_id being passed explicitly by removing it from get_instruction_keyed_accounts().

* instruction_accounts => instructions_account

(cherry picked from commit ba8e15848e)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-21 17:54:18 +00:00
c296a6c9ed The sidebar for the plugin doc was showing the item as "Overview"; corrected the styles (#22033) (#22040)
(cherry picked from commit 2347f65133)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-21 02:58:53 +00:00
57e5406476 Add deactivation cooldown before address lookup tables can be closed (#22011) (#22036)
(cherry picked from commit f5d1115468)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-21 02:10:14 +00:00
4f57c4a4fe Fix weird formatting of bullets (#22013) (#22030)
(cherry picked from commit 116517fb6d)

Co-authored-by: Kardashev <96332127+0xkardashev@users.noreply.github.com>
2021-12-20 20:41:18 +00:00
c4b3b2865d Update program close docs (#22026) (#22027)
(cherry picked from commit b8eff3456c)

Co-authored-by: Jack May <jack@solana.com>
2021-12-20 18:55:39 +00:00
f58c375b1f typo: lanaguage -> language (#22009) (#22015)
(cherry picked from commit e92a81b741)

Co-authored-by: Peter Johnson <peter@geocode.earth>
2021-12-20 07:34:31 +00:00
bf41c53f11 chore: add blockSubscribe api docs (#22002) (#22008)
Co-authored-by: Zano <segfaultdoctor@protonmail.com>
(cherry picked from commit df6a4930b9)

Co-authored-by: segfaultdoctor <seg@jito.network>
2021-12-19 16:48:36 +00:00
e3a4b98432 removes Select in favor of recv_timeout/try_iter (#21981) (#22001)
crossbeam_channel::Select::ready_timeout might return with success spuriously.

(cherry picked from commit 7476dfeec0)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-18 19:37:07 +00:00
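
A minimal sketch, assuming the crossbeam-channel crate, of the recv_timeout plus try_iter pattern the commit above switches to: block once for the first item, then drain whatever is already queued without going through Select.

```rust
use crossbeam_channel::{unbounded, Receiver, RecvTimeoutError};
use std::time::Duration;

// Wait (briefly) for one item, then collect everything else already buffered.
fn drain_batch(rx: &Receiver<u64>) -> Result<Vec<u64>, RecvTimeoutError> {
    let first = rx.recv_timeout(Duration::from_millis(100))?;
    let mut batch = vec![first];
    batch.extend(rx.try_iter()); // non-blocking drain of the backlog
    Ok(batch)
}

fn main() {
    let (tx, rx) = unbounded();
    for v in 0..5u64 {
        tx.send(v).unwrap();
    }
    assert_eq!(drain_batch(&rx).unwrap(), vec![0, 1, 2, 3, 4]);
}
```
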
91657ba8fe new net-stats require a new table (#21996) (#22000)
(cherry picked from commit 3fe942ab30)

Co-authored-by: Jeff Biseda <jbiseda@gmail.com>
2021-12-18 10:26:16 +00:00
35ee48bec9 RPC Block Subscription (backport #21787) (#21992)
* RPC Block Subscription (#21787)

* add stuff

* compiling

* add notify block

* wip

* feat: add blockSubscribe pubsub method

* address PR comments

Co-authored-by: Lucas B <buffalu@jito.network>
Co-authored-by: Zano <segfaultdoctor@protonmail.com>
(cherry picked from commit 76098dd42a)

# Conflicts:
#	Cargo.lock
#	client-test/Cargo.toml
#	rpc/src/rpc_subscriptions.rs

* Fix conflicts

Co-authored-by: segfaultdoctor <seg@jito.network>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-12-18 01:43:37 +00:00
02cfa85214 Update to reed-solomon-erasure 5.0.1, to get simd-accel on M1 macs (#21990)
(cherry picked from commit 5f054cd51b)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-12-18 00:52:21 +00:00
02be3a6568 Check file size of snapshot_version when unarchiving snapshot (#21925) (#21983)
(cherry picked from commit 0f6e8d3385)

Co-authored-by: mooori <moritz.zielke@gmail.com>
2021-12-17 21:02:53 +00:00
b20fae5a09 simplifies ShredIndex api (#21932) (#21959)
(cherry picked from commit efd64a3862)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-17 19:50:49 +00:00
e572678176 removes next_shred_index from return value of entries to shreds api (#21961) (#21980)
next-shred-index is already readily available from returned data shreds.
The commit simplifies the api for upcoming changes to erasure coding
schema which will require explicit tracking of indices for coding shreds
as well as data shreds.

(cherry picked from commit 89d66c3210)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-17 17:57:57 +00:00
f4521002b9 Clean up demote program write lock feature (backport #21949) (#21969)
* Clean up demote program write lock feature (#21949)

* Clean up demote program write lock feature

* fix test

(cherry picked from commit 6ff0be6a82)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs
#	runtime/src/accounts.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-17 04:45:22 +00:00
0c5a2bcd5a Update getSignaturesForAddress and getConfirmedSignaturesForAddress2 RPC call description (#21955) (#21960)
* Update jsonrpc-api.md

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Wrap 80chars

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 3398f5a2f5)

Co-authored-by: jdcaballerov <743513+jdcaballerov@users.noreply.github.com>
2021-12-16 20:59:51 +00:00
c25d16bf0d adds ErasureSetId identifying erasure coding sets of shreds (backport #21928) (#21946)
* adds ErasureSetId identifying erasure coding sets of shreds (#21928)

(cherry picked from commit 8183f28636)

# Conflicts:
#	ledger/src/blockstore.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-16 17:49:39 +00:00
301e38044a Fixes the calculation of the "compute_meter_consumption" across process_instruction() and process_message(). (#21944) (#21945)
(cherry picked from commit 49cb161203)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-16 16:28:28 +00:00
bfa6302985 Bump version to 1.9.2 2021-12-15 16:18:14 -08:00
b66e2ae353 add caching_enabled option to test-validator
(cherry picked from commit 5fb7da12f2)
2021-12-15 16:11:51 -08:00
3967dc8685 rebase 2021-12-15 15:33:45 -08:00
569c83295d Update argument name
(cherry picked from commit ed924e3bc4)
2021-12-15 15:33:45 -08:00
a462c58594 Add option to load accounts from file
This introduces the `--clone-from-file` option for
solana-test-validator. It allows specifying any number of files
(without extension) containing account info and data, which will be
loaded at genesis. This is similar to `--bpf-program` for program loading.

The files will be searched for in the CWD or in `tests/fixtures`.

Example: `solana-test-validator --clone-from-file SRM_token USD_token`
(cherry picked from commit 9b06d64eb8)

# Conflicts:
#	test-validator/Cargo.toml
2021-12-15 15:33:45 -08:00
7dba8bb49f Add complete account dump to file
This commit introduces the ability to dump the complete content of an
account to a JSON file (compact or not depending on the provided format
option).

Example:

```sh
solana account -u m \
  --output json-compact \
  --output-file SRM_token.json \
  SRMuApVNdxXokk5GT7XD5cUUgXMBCoAz2LHeuAoKWRt
```

Note: Behavior remains untouched if format option `--output` is not
provided (only account data gets written to file).

(cherry picked from commit 0e9e67b65d)
2021-12-15 15:33:45 -08:00
c907d4444d add accountsdb-plugin-config to test-validator (#21918)
(cherry picked from commit c2a94a8fb0)

Co-authored-by: Kirill Fomichev <fanatid@ya.ru>
2021-12-15 09:48:12 +00:00
b4c847557b Restore solana_validator::test_validator export
(cherry picked from commit e124659aca)
2021-12-15 00:29:04 -08:00
de48347078 Add json support for feature sets; also print output after feature list (#21905) (#21914)
* Add json support for feature sets; also print output after feature list

* Move stringifying into Display implementation

(cherry picked from commit dcd2854829)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-15 07:08:39 +00:00
9f173d3717 Add helper crate to generate syscalls.txt 2021-12-14 21:34:36 -08:00
dcd76e484f Update openssl-src package to resolve cargo audit complaint
(cherry picked from commit 7ba27e5cae)
2021-12-14 19:09:26 -08:00
2246135654 Document solana_program::instruction (#21817) (#21906)
* Document solana_program::instruction

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit dcb5849484)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-15 00:55:56 +00:00
41ea597256 Fix subtraction overflow (#21871) (#21901)
(cherry picked from commit cb395abff7)

Co-authored-by: carllin <carl@solana.com>
2021-12-14 23:22:47 +00:00
fb955bd4ec Update Cargo.toml 2021-12-14 14:18:20 -08:00
5c3fbb384f Futures 0.3.18 has been yanked, back off to .17
(cherry picked from commit 2a6dcb2ffd)

# Conflicts:
#	ledger/Cargo.toml
2021-12-14 14:18:20 -08:00
a056fd88cb uses Option<Slot> for SlotMeta.parent_slot (backport #21808) (#21899)
* uses Option<Slot> for SlotMeta.parent_slot (#21808)

SlotMeta.parent_slot for the head of a detached chain of slots is
unknown and that is indicated by u64::MAX which lacks type-safety:
https://github.com/solana-labs/solana/blob/6c108c8fc/ledger/src/blockstore_meta.rs#L203-L205

The commit changes the type to Option<Slot>. Backward compatibility is
maintained by customizing serde serialize/deserialize implementations.

(cherry picked from commit 8d980f07ba)

# Conflicts:
#	ledger-tool/src/main.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-14 21:42:57 +00:00
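
A sketch of the backward-compatibility idea above, assuming serde with the derive feature (and serde_json purely for illustration; the blockstore itself uses bincode): the in-memory field becomes a type-safe Option<u64>, while the serialized form keeps the legacy u64::MAX sentinel. Names are illustrative, not the real blockstore types.

```rust
use serde::{Deserialize, Deserializer, Serialize, Serializer};

mod parent_slot_compat {
    use super::*;

    // None is written as the legacy sentinel so old readers still work.
    pub fn serialize<S: Serializer>(v: &Option<u64>, s: S) -> Result<S::Ok, S::Error> {
        s.serialize_u64(v.unwrap_or(u64::MAX))
    }

    // The sentinel is mapped back to None on the way in.
    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Option<u64>, D::Error> {
        let raw = u64::deserialize(d)?;
        Ok(if raw == u64::MAX { None } else { Some(raw) })
    }
}

#[derive(Serialize, Deserialize, Debug)]
struct SlotMetaLike {
    #[serde(with = "parent_slot_compat")]
    parent_slot: Option<u64>,
}

fn main() {
    let detached = SlotMetaLike { parent_slot: None };
    let wire = serde_json::to_string(&detached).unwrap();
    // None round-trips through the legacy u64::MAX sentinel on the wire.
    assert_eq!(wire, format!("{{\"parent_slot\":{}}}", u64::MAX));

    let back: SlotMetaLike = serde_json::from_str(&wire).unwrap();
    assert_eq!(back.parent_slot, None);
}
```
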
2f1816d1db adds ShredId uniquely identifying each shred (backport #21820) (#21897)
* adds ShredId uniquely identifying each shred (#21820)

(cherry picked from commit 4ceb2689f5)

# Conflicts:
#	ledger/src/blockstore.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-14 21:03:08 +00:00
2cd2f3ba7b Bump rbpf to v0.2.19 (#21880) (#21891)
* Bump rbpf to v0.2.19

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
(cherry picked from commit 509bcd2e74)

Co-authored-by: Jack May <jack@solana.com>
2021-12-14 20:30:31 +00:00
135dfdbf1e Don't publish rbpf-cli to crates.io 2021-12-14 12:12:19 -08:00
fad4bfdf2a Don't publish poh-bench to crates.io 2021-12-14 12:10:03 -08:00
a9d4728c35 Deserialize accounts before acquiring stakes cache lock (#21733) (#21889)
* Deserialize stored accounts before locking stakes cache

* fix test

(cherry picked from commit 2bbe1d875a)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-14 16:47:01 +00:00
3977bcde63 Add missing word "that" (#21878) (#21884)
(cherry picked from commit 746869fdac)

Co-authored-by: Raza <42661870+AlmostEfficient@users.noreply.github.com>
2021-12-14 14:44:48 +00:00
cf2a9de19c Add solana-cli-config link to rust-api.md (#21840) (#21874)
(cherry picked from commit 033106ed81)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-14 08:22:13 +00:00
5e2b12aee5 Restore ALL behavior; add enum variant, comments, and help text to make behavior clearer (#21854) (#21863)
(cherry picked from commit bed1b143a5)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-14 05:21:25 +00:00
6c329e2fd3 Fixup RPC docs (backport #21858) (#21864)
* Remove old notes referring to EOL versions

(cherry picked from commit eebaf89874)

* Add notes about new v1.9 rpc apis

(cherry picked from commit fd212fd2a4)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-12-14 02:46:44 +00:00
0376045c7d cli: Order displayed feature list by status (#21810) (#21830)
(cherry picked from commit 1149c1880d)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-13 14:39:47 +00:00
c1f54c22ed Remove the 5 integer msg! form
(cherry picked from commit c5c699a918)
2021-12-11 12:47:43 -08:00
0576d133ad Add Accountsdb plugin documentations (#21746) (#21799)
Add the public-facing documentation about the plugin framework: explaining the interface, how to load a plugin, and the example PostgreSQL plugin implementation.
Updated the Rust documentation for the plugin interfaces for accounts and slot.
These changes are targeted at v1.8. Information about transactions will be updated later.
2021-12-11 11:04:22 -08:00
9956afb2bd uses Option<u64> for SlotMeta.last_index (#21775) (#21806)
SlotMeta.last_index may be unknown and current code is using u64::MAX to
indicate that:
https://github.com/solana-labs/solana/blob/6c108c8fc/ledger/src/blockstore_meta.rs#L169-L174

This lacks type-safety and can introduce bugs if not always checked for.
Several instances of slot_meta.last_index + 1 are also subject to
overflow.

This commit updates the type to Option<u64>. Backward compatibility is
maintained by customizing serde serialize/deserialize implementations.

(cherry picked from commit e08139f949)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-11 17:39:05 +00:00
01941cf3de Rename Packets to PacketBatch (backport #21794) (#21805)
* Rename Packets to PacketBatch (#21794)

(cherry picked from commit 254ef3e7b6)

# Conflicts:
#	core/src/verified_vote_packets.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-11 16:53:23 +00:00
4b63d51e3e Bump version to 1.9.1 (#21802) 2021-12-11 12:50:36 +00:00
5bf4445ae6 Add address lookup table program (backport #21616) (#21789)
* Add address lookup table program (#21616)

* Add address lookup table program

* feedback

(cherry picked from commit 9b41ddd9ba)

# Conflicts:
#	runtime/Cargo.toml

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-11 05:26:46 +00:00
7782d34bbf Add StakesCache struct to abstract away locking (#21738) (#21796) 2021-12-10 22:38:04 -05:00
2c4765e75a Bump solana_rbpf to version v0.2.18 (#21774) (#21786)
(cherry picked from commit a5a0dabe7b)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-11 02:38:03 +00:00
e71ea19e60 adds back ErasureMeta::first_coding_index field (#21623) (#21785)
https://github.com/solana-labs/solana/pull/16646
removed first_coding_index since the field is currently redundant and
always equal to fec_set_index.
However, with upcoming changes to the erasure coding schema, this will no
longer be the same as fec_set_index, and so requires a separate field to
represent it.

(cherry picked from commit 49ba09b333)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-10 23:14:10 +00:00
ed0040d555 Update to Rust 1.57.0 (#21779)
(cherry picked from commit 15a9fa6f53)

Co-authored-by: Steven Czabaniuk <steven@solana.com>
2021-12-10 22:23:48 +00:00
da9e6826ac Move type alias and use it more broadly (#21763) (#21777)
(cherry picked from commit 350845c513)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-10 21:44:41 +00:00
68fc72a7f4 Add more reporting for invalid stake cache members and prune them (#21654) (#21741)
* Add more reporting for invalid stake cache members

* feedback

(cherry picked from commit 6fc329180b)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-10 18:30:16 +00:00
2a6bb2b954 Migrate from address maps to address lookup tables (#21634) (#21773)
* Migrate from address maps to address lookup tables

* update sanitize error

* cargo fmt

* update abi

(cherry picked from commit 6c108c8fc3)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-10 18:10:37 +00:00
ef51778c78 Nits in message-processor (#21755) (#21762)
* Fixup typo

* Simplify types slightly

(cherry picked from commit c1386d66e6)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-10 11:24:20 -05:00
abecf292a3 Expand docs for Pubkey::create_program_address (#21750) (#21759)
* Expand docs for Pubkey::create_program_address

* Update sdk/program/src/pubkey.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 6919c4863b)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-10 11:23:54 -05:00
a31660815f rebase 2021-12-09 18:41:47 -08:00
539ad4bea6 Remove libcurl to prevent wasm-pack segfault in libssl
(cherry picked from commit f32216588d)
2021-12-09 18:41:47 -08:00
85f601993f Cargo.lock
(cherry picked from commit f4babb7566)

# Conflicts:
#	Cargo.lock
#	programs/bpf/Cargo.lock
2021-12-09 18:41:47 -08:00
b0754cc575 Add initial wasm bindings for Instruction, SystemProgram and Transaction
(cherry picked from commit a35df1cb02)
2021-12-09 18:41:47 -08:00
effd0b2547 Add wasm bindings for Hash
(cherry picked from commit 03a956e8d9)
2021-12-09 18:41:47 -08:00
8836069719 Add wasm bindings for Pubkey and Keypair
(cherry picked from commit 488dc37fec)
2021-12-09 18:41:47 -08:00
2698a5c705 AcctIdx: env var to enable testing of disk buckets (#21494) (#21723)
(cherry picked from commit 54862eba0d)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2021-12-09 23:39:06 +00:00
dd157fd47f Fixed minor issues with the cluster overview docs which had confused some (#21744) (#21745)
new users.

(cherry picked from commit 6d18b6bab5)

Co-authored-by: bji <bryan@ischo.com>
2021-12-09 20:41:21 +00:00
8cacf82cb8 adds more sanity checks to shreds (#21675) (#21734)
(cherry picked from commit 8063273d09)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-09 18:44:43 +00:00
8ee5fbc5c0 simulateTransaction now returns the correct error code if accounts are provided as input (#21716)
(cherry picked from commit 824994db69)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-12-09 01:12:42 +00:00
f2a6b94e5c SDK: Add stdlib.h include to pull in abort() (#21700) (#21705)
(cherry picked from commit 923720f529)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-12-08 17:31:11 +00:00
ef970bb14a - Implicitly fixes invoke_context.return_data not being reset between instructions in process_message. (#21671) (#21684)
- Lets InvokeContext::process_cross_program_instruction() handle the first invocation depth too.
- Marks InvokeContext::verify(), InvokeContext::verify_and_update() and InvokeContext::process_executable_chain() private.
- Renames InvokeContext::process_cross_program_instruction() to InvokeContext::process_instruction().
- Removes InvokeContext::new_mock_with_sysvars().

(cherry picked from commit 1df88837c8)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-08 10:48:49 +00:00
cabd851904 Avoid entropy sources when constructing a solana_program::message::Message.
The solana-program crate can be used in certain embedded environments (HSMs) where
the source of entropy, whether used for cryptographic purposes or not, is tightly
controlled. In these cases, using the default OS source of entropy is not always
acceptable. Thus, using the default Rust stdlib entropy source for seeding its
default hasher is prohibited. This means any HashMap/HashSet must be able
to be constructed and used with a custom hasher implementation.

This commit removes the use of Itertools::unique() to dedupe Instructions that are
being compiled into a new Message, which uses a default-configured HashMap
under-the-hood. Instead, we use a BTreeSet which does not invoke any entropy
source in order to seed a hash implementation.

(cherry picked from commit 4da435f2a0)
2021-12-07 22:36:21 -08:00
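
A small sketch of the entropy-free deduplication idea above with illustrative data: a BTreeSet orders by Ord and needs no hasher seed, unlike the randomly seeded default hasher behind std HashMap/HashSet.

```rust
use std::collections::BTreeSet;

// Deduplicate while preserving first-seen order, without touching any
// entropy source for hasher seeding.
fn dedupe_preserving_order<T: Ord + Clone>(items: &[T]) -> Vec<T> {
    let mut seen = BTreeSet::new();
    let mut out = Vec::new();
    for item in items {
        // insert() returns true only the first time a value is seen.
        if seen.insert(item.clone()) {
            out.push(item.clone());
        }
    }
    out
}

fn main() {
    let keys = ["alice", "bob", "alice", "carol", "bob"];
    assert_eq!(dedupe_preserving_order(&keys), vec!["alice", "bob", "carol"]);
}
```
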
2d2ef59550 Ensure we have keys to activate these features (#21669) (#21674)
(cherry picked from commit 45e56c599d)

Co-authored-by: Sean Young <sean@mess.org>
2021-12-07 23:24:11 +00:00
b7b56d5016 Docs: Solflare web/app updates (#21540) (#21668)
* Update Solflare description

* Add Solflare to mobile wallets

* Sort mobile wallets alphabetically

* Sort web wallets alphabetically

* Update docs/src/wallet-guide/apps.md

* Update docs/src/wallet-guide/apps.md

* Update docs/src/wallet-guide/web-wallets.md

* Update docs/src/wallet-guide/web-wallets.md

* Update docs/src/wallet-guide/apps.md

Co-authored-by: Justin Starry <justin.m.starry@gmail.com>
(cherry picked from commit a2477c1f32)

Co-authored-by: Boris Vujicic <turshija@gmail.com>
2021-12-07 16:44:28 +00:00
18e3a635b4 docs: Fix SOL staked formula (#21615) (#21667)
Fix the formula on the proposal page: https://docs.solana.com/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards

(cherry picked from commit b57097ef18)

Co-authored-by: Melroy van den Berg <melroy@melroy.org>
2021-12-07 16:01:12 +00:00
2b4347d502 Add option to reclaim accounts-cluster-bench accounts/lamports (backport #21656) (#21658)
* Add option to reclaim accounts-cluster-bench accounts/lamports (#21656)

* Add option to reclaim accounts-cluster-bench accounts/lamports

* lint

(cherry picked from commit 205fd95722)

# Conflicts:
#	accounts-cluster-bench/Cargo.toml

* Fix conflict

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-12-07 09:18:48 +00:00
87accd16d8 Fixup flaky tests (#21617) (#21647)
* Fixup flaky tests

* Fixup listeners

(cherry picked from commit f493a88258)

Co-authored-by: carllin <carl@solana.com>
2021-12-07 03:54:14 +00:00
0e969015fc Add offline and fee-payer utilities to CLI vote module (#21579) (#21649)
* create-vote-account: add offline, nonce, fee_payer capabilities

* vote-authorize: add offline, nonce, fee-payer

* vote-update-things: add offline, nonce, fee-payer

* withdraw-vote: add offline, nonce, fee-payer

* close-vote-acct: add fee-payer

* Allow WithdrawVoteAccount to empty the account, since offline operations cannot perform account state queries as in CloseVoteAccount

* Fix lint

* Update offline-signing docs

* Add some parse unit tests

* Add offline integration test

(cherry picked from commit 873fe81bc0)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-07 01:51:02 +00:00
46935c022e Ensure that StakeDelegations and StakeHistory serde (#21640) (#21653)
Add tests to StakeDelegations and StakeHistory to ensure that the outer
types serialize and deserialize correctly to/from the inner types.

(cherry picked from commit da4015a959)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2021-12-07 01:44:49 +00:00
8a7106bc08 Remove activated feature for filtering invalid stakes from rewards (#21641) (#21651)
(cherry picked from commit a1adcb23b6)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-07 00:58:31 +00:00
89d2f34a03 Reject vote withdraws that create non-rent-exempt accounts (backport #21639) (#21645)
* Reject vote withdraws that create non-rent-exempt accounts (#21639)

* Reject vote withdraws that create non-rent-exempt accounts

* fix mocked instruction test

(cherry picked from commit e123883b26)

# Conflicts:
#	sdk/src/feature_set.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-07 00:42:01 +00:00
b3fa1e4550 Move transaction error code into new module (#21635) (#21638)
(cherry picked from commit 3dab1e711d)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-06 20:11:20 +00:00
58c755e1d4 Rework docs for Pubkey::find_program_address and friends (#21528) (#21637)
* Rework docs for Pubkey::find_program_address and friends

* Remove circular dependency

* Minor tweaks

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Sort solana-program dev-dependencies

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit d1c101cde2)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-06 19:04:35 +00:00
60085305b4 Fix spelling of 'Borsh' (#21624)
(cherry picked from commit f3c2803af9)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-06 05:31:28 +00:00
b4c8e095bd adds back position field to coding-shred-header (#21600) (#21620)
https://github.com/solana-labs/solana/pull/17004
removed position field from coding-shred-header because as it stands the
field is redundant and unused.
However, with the upcoming changes to erasure coding schema this field
will no longer be redundant and needs to be populated.

(cherry picked from commit cd17f63d81)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-05 16:40:22 +00:00
3e28ffa884 Bump RpcClient node versions (#21612) (#21613)
* Bump blockhash/fee api check versions

* Bump snapshot api check version

(cherry picked from commit 3e5a5a834f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-05 01:08:22 +00:00
573 changed files with 37931 additions and 18885 deletions

@@ -12,7 +12,8 @@ export PS4="++"
# Restore target/ from the previous CI build on this machine
#
eval "$(ci/channel-info.sh)"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
eval "$(ci/sbf-tools-info.sh)"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"
(
set -x
MAX_CACHE_SIZE=18 # gigabytes

.gitignore (vendored): 1 line changed

@@ -4,6 +4,7 @@
/solana-metrics/
/solana-metrics.tar.bz2
/target/
/test-ledger/
**/*.rs.bk
.cargo

@@ -93,6 +93,7 @@ pull_request_rules:
- author=mergify[bot]
- head~=^mergify/bp/
- "#status-failure=0"
- "-merged"
actions:
label:
add:
@@ -113,3 +114,10 @@ pull_request_rules:
ignore_conflicts: true
branches:
- v1.9
commands_restrictions:
# The author of copied PRs is the Mergify user.
# Restrict `copy` access to Core Contributors
copy:
conditions:
- author=@core-contributors

Cargo.lock (generated): 1375 lines changed; file diff suppressed because it is too large.

@@ -1,17 +1,16 @@
[workspace]
members = [
"accountsdb-plugin-interface",
"accountsdb-plugin-manager",
"accountsdb-plugin-postgres",
"accounts-cluster-bench",
"bench-streamer",
"bench-tps",
"account-decoder",
"accounts-bench",
"accounts-cluster-bench",
"banking-bench",
"banks-client",
"banks-interface",
"banks-server",
"bench-streamer",
"bench-tps",
"bucket_map",
"bloom",
"clap-utils",
"cli-config",
"cli-output",
@@ -26,6 +25,8 @@ members = [
"validator",
"genesis",
"genesis-utils",
"geyser-plugin-interface",
"geyser-plugin-manager",
"gossip",
"install",
"keygen",
@@ -46,7 +47,11 @@ members = [
"poh",
"poh-bench",
"program-test",
"programs/address-lookup-table",
"programs/address-lookup-table-tests",
"programs/ed25519-tests",
"programs/bpf_loader",
"programs/bpf_loader/gen-syscall-list",
"programs/compute-budget",
"programs/config",
"programs/stake",
@@ -65,7 +70,6 @@ members = [
"tokens",
"transaction-dos",
"transaction-status",
"account-decoder",
"upload-perf",
"net-utils",
"version",

@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.9.0"
version = "1.9.17"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,9 @@ lazy_static = "1.4.0"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-config-program = { path = "../programs/config", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-config-program = { path = "../programs/config", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.17" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.9.0"

@@ -5,7 +5,7 @@ use {
parse_nonce::parse_nonce,
parse_stake::parse_stake,
parse_sysvar::parse_sysvar,
parse_token::{parse_token, spl_token_id},
parse_token::{parse_token, spl_token_ids},
parse_vote::parse_vote,
},
inflector::Inflector,
@@ -21,7 +21,6 @@ lazy_static! {
static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id();
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
let mut m = HashMap::new();
@ -31,7 +30,9 @@ lazy_static! {
);
m.insert(*CONFIG_PROGRAM_ID, ParsableAccount::Config);
m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
for spl_token_id in spl_token_ids() {
m.insert(spl_token_id, ParsableAccount::SplToken);
}
m.insert(*STAKE_PROGRAM_ID, ParsableAccount::Stake);
m.insert(*SYSVAR_PROGRAM_ID, ParsableAccount::Sysvar);
m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);


@ -40,7 +40,7 @@ pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, P
.iter()
.map(|entry| UiRecentBlockhashesEntry {
blockhash: entry.blockhash.to_string(),
fee_calculator: entry.fee_calculator.clone().into(),
fee_calculator: entry.fee_calculator.into(),
})
.collect();
SysvarAccountType::RecentBlockhashes(recent_blockhashes)


@ -15,16 +15,31 @@ use {
// A helper function to convert spl_token::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_id() -> Pubkey {
fn spl_token_id() -> Pubkey {
Pubkey::new_from_array(spl_token::id().to_bytes())
}
// Returns all known SPL Token program ids
pub fn spl_token_ids() -> Vec<Pubkey> {
vec![spl_token_id()]
}
// Check if the provided program id is a known SPL Token program id
pub fn is_known_spl_token_id(program_id: &Pubkey) -> bool {
*program_id == spl_token_id()
}
// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_native_mint() -> Pubkey {
Pubkey::new_from_array(spl_token::native_mint::id().to_bytes())
}
// The program id of the `spl_token_native_mint` account
pub fn spl_token_native_mint_program_id() -> Pubkey {
spl_token_id()
}
// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
SplTokenPubkey::new_from_array(pubkey.to_bytes())
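A small, hedged usage sketch of the list-based helpers introduced above (illustrative only; it assumes they are consumed through `solana_account_decoder::parse_token`, where the diff declares them as public functions):

```rust
// Illustrative only; exercises the helpers shown in the hunk above.
use solana_account_decoder::parse_token::{is_known_spl_token_id, spl_token_ids};
use solana_sdk::pubkey::Pubkey;

fn main() {
    // Every id reported by spl_token_ids() is accepted by is_known_spl_token_id().
    for id in spl_token_ids() {
        assert!(is_known_spl_token_id(&id));
    }
    // An arbitrary pubkey is not a known SPL Token program id.
    assert!(!is_known_spl_token_id(&Pubkey::new_unique()));
}
```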


@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-bench"
version = "1.9.0"
version = "1.9.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -11,11 +11,11 @@ publish = false
[dependencies]
log = "0.4.14"
rayon = "1.5.1"
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.17" }
solana-runtime = { path = "../runtime", version = "=1.9.17" }
solana-measure = { path = "../measure", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-version = { path = "../version", version = "=1.9.17" }
clap = "2.33.1"
[package.metadata.docs.rs]


@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-cluster-bench"
version = "1.9.0"
version = "1.9.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -13,24 +13,25 @@ clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.17" }
solana-client = { path = "../client", version = "=1.9.17" }
solana-core = { path = "../core", version = "=1.9.17" }
solana-faucet = { path = "../faucet", version = "=1.9.17" }
solana-gossip = { path = "../gossip", version = "=1.9.17" }
solana-logger = { path = "../logger", version = "=1.9.17" }
solana-measure = { path = "../measure", version = "=1.9.17" }
solana-net-utils = { path = "../net-utils", version = "=1.9.17" }
solana-runtime = { path = "../runtime", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-streamer = { path = "../streamer", version = "=1.9.17" }
solana-test-validator = { path = "../test-validator", version = "=1.9.17" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.17" }
solana-version = { path = "../version", version = "=1.9.17" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.17" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@ -23,6 +23,7 @@ use {
solana_streamer::socket::SocketAddrSpace,
solana_transaction_status::parse_token::spl_token_instruction,
std::{
cmp::min,
net::SocketAddr,
process::exit,
sync::{
@ -156,24 +157,30 @@ fn make_create_message(
fn make_close_message(
keypair: &Keypair,
base_keypair: &Keypair,
max_closed_seed: Arc<AtomicU64>,
max_created: Arc<AtomicU64>,
max_closed: Arc<AtomicU64>,
num_instructions: usize,
balance: u64,
spl_token: bool,
) -> Message {
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.map(|_| {
.filter_map(|_| {
let program_id = if spl_token {
inline_spl_token::id()
} else {
system_program::id()
};
let seed = max_closed_seed.fetch_add(1, Ordering::Relaxed).to_string();
let max_created_seed = max_created.load(Ordering::Relaxed);
let max_closed_seed = max_closed.load(Ordering::Relaxed);
if max_closed_seed >= max_created_seed {
return None;
}
let seed = max_closed.fetch_add(1, Ordering::Relaxed).to_string();
let address =
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
if spl_token {
spl_token_instruction(
Some(spl_token_instruction(
spl_token::instruction::close_account(
&spl_token::id(),
&spl_token_pubkey(&address),
@ -182,16 +189,16 @@ fn make_close_message(
&[],
)
.unwrap(),
)
))
} else {
system_instruction::transfer_with_seed(
Some(system_instruction::transfer_with_seed(
&address,
&base_keypair.pubkey(),
seed,
&program_id,
&keypair.pubkey(),
balance,
)
))
}
})
.collect();
@ -211,6 +218,7 @@ fn run_accounts_bench(
maybe_lamports: Option<u64>,
num_instructions: usize,
mint: Option<Pubkey>,
reclaim_accounts: bool,
) {
assert!(num_instructions > 0);
let client =
@ -350,6 +358,7 @@ fn run_accounts_bench(
let message = make_close_message(
payer_keypairs[0],
&base_keypair,
seed_tracker.max_created.clone(),
seed_tracker.max_closed.clone(),
1,
min_balance,
@ -372,7 +381,7 @@ fn run_accounts_bench(
}
count += 1;
if last_log.elapsed().as_millis() > 3000 {
if last_log.elapsed().as_millis() > 3000 || count >= iterations {
info!(
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
@ -387,6 +396,83 @@ fn run_accounts_bench(
}
}
executor.close();
if reclaim_accounts {
let executor = TransactionExecutor::new(entrypoint_addr);
loop {
let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
let max_created_seed = seed_tracker.max_created.load(Ordering::Relaxed);
if latest_blockhash.elapsed().as_millis() > 10_000 {
blockhash = client.get_latest_blockhash().expect("blockhash");
latest_blockhash = Instant::now();
}
message.recent_blockhash = blockhash;
let fee = client
.get_fee_for_message(&message)
.expect("get_fee_for_message");
let sigs_len = executor.num_outstanding();
if sigs_len < batch_size && max_closed_seed < max_created_seed {
let num_to_close = min(
batch_size - sigs_len,
(max_created_seed - max_closed_seed) as usize,
);
if num_to_close >= payer_keypairs.len() {
info!("closing {} accounts", num_to_close);
let chunk_size = num_to_close / payer_keypairs.len();
info!("{:?} chunk_size", chunk_size);
if chunk_size > 0 {
for (i, keypair) in payer_keypairs.iter().enumerate() {
let txs: Vec<_> = (0..chunk_size)
.into_par_iter()
.filter_map(|_| {
let message = make_close_message(
keypair,
&base_keypair,
seed_tracker.max_created.clone(),
seed_tracker.max_closed.clone(),
num_instructions,
min_balance,
mint.is_some(),
);
if message.instructions.is_empty() {
return None;
}
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
Some(Transaction::new(&signers, message, blockhash))
})
.collect();
balances[i] = balances[i].saturating_sub(fee * txs.len() as u64);
info!("close txs: {}", txs.len());
let new_ids = executor.push_transactions(txs);
info!("close ids: {}", new_ids.len());
tx_sent_count += new_ids.len();
total_accounts_closed += (num_instructions * new_ids.len()) as u64;
}
}
}
} else {
let _ = executor.drain_cleared();
}
count += 1;
if last_log.elapsed().as_millis() > 3000 || max_closed_seed >= max_created_seed {
info!(
"total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
total_accounts_closed, tx_sent_count, count, balances
);
last_log = Instant::now();
}
if max_closed_seed >= max_created_seed {
break;
}
if executor.num_outstanding() >= batch_size {
sleep(Duration::from_millis(500));
}
}
executor.close();
}
}
fn main() {
@ -462,7 +548,7 @@ fn main() {
.long("iterations")
.takes_value(true)
.value_name("NUM")
.help("Number of iterations to make"),
.help("Number of iterations to make. 0 = unlimited iterations."),
)
.arg(
Arg::with_name("check_gossip")
@ -475,6 +561,12 @@ fn main() {
.takes_value(true)
.help("Mint address to initialize account"),
)
.arg(
Arg::with_name("reclaim_accounts")
.long("reclaim-accounts")
.takes_value(false)
.help("Reclaim accounts after session ends; incompatible with --iterations 0"),
)
.get_matches();
let skip_gossip = !matches.is_present("check_gossip");
@ -556,6 +648,7 @@ fn main() {
lamports,
num_instructions,
mint,
matches.is_present("reclaim_accounts"),
);
}
@ -564,18 +657,24 @@ pub mod test {
use {
super::*,
solana_core::validator::ValidatorConfig,
solana_faucet::faucet::run_local_faucet,
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_measure::measure::Measure,
solana_sdk::poh_config::PohConfig,
solana_sdk::{native_token::sol_to_lamports, poh_config::PohConfig},
solana_test_validator::TestValidator,
spl_token::{
solana_program::program_pack::Pack,
state::{Account, Mint},
},
};
#[test]
fn test_accounts_cluster_bench() {
solana_logger::setup();
let validator_config = ValidatorConfig::default();
let validator_config = ValidatorConfig::default_for_test();
let num_nodes = 1;
let mut config = ClusterConfig {
cluster_lamports: 10_000_000,
@ -605,6 +704,108 @@ pub mod test {
maybe_lamports,
num_instructions,
None,
false,
);
start.stop();
info!("{}", start);
}
#[test]
fn test_create_then_reclaim_spl_token_accounts() {
solana_logger::setup();
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
// Created funder
let funder = Keypair::new();
let latest_blockhash = rpc_client.get_latest_blockhash().unwrap();
let signature = rpc_client
.request_airdrop_with_blockhash(
&funder.pubkey(),
sol_to_lamports(1.0),
&latest_blockhash,
)
.unwrap();
rpc_client
.confirm_transaction_with_spinner(
&signature,
&latest_blockhash,
CommitmentConfig::confirmed(),
)
.unwrap();
// Create Mint
let spl_mint_keypair = Keypair::new();
let spl_mint_len = Mint::get_packed_len();
let spl_mint_rent = rpc_client
.get_minimum_balance_for_rent_exemption(spl_mint_len)
.unwrap();
let transaction = Transaction::new_signed_with_payer(
&[
system_instruction::create_account(
&funder.pubkey(),
&spl_mint_keypair.pubkey(),
spl_mint_rent,
spl_mint_len as u64,
&inline_spl_token::id(),
),
spl_token_instruction(
spl_token::instruction::initialize_mint(
&spl_token::id(),
&spl_token_pubkey(&spl_mint_keypair.pubkey()),
&spl_token_pubkey(&spl_mint_keypair.pubkey()),
None,
2,
)
.unwrap(),
),
],
Some(&funder.pubkey()),
&[&funder, &spl_mint_keypair],
latest_blockhash,
);
let _sig = rpc_client
.send_and_confirm_transaction(&transaction)
.unwrap();
let account_len = Account::get_packed_len();
let minimum_balance = rpc_client
.get_minimum_balance_for_rent_exemption(account_len)
.unwrap();
let iterations = 5;
let batch_size = 100;
let close_nth_batch = 0;
let num_instructions = 4;
let mut start = Measure::start("total accounts run");
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
run_accounts_bench(
test_validator
.rpc_url()
.replace("http://", "")
.parse()
.unwrap(),
faucet_addr,
&[&keypair0, &keypair1, &keypair2],
iterations,
Some(account_len as u64),
batch_size,
close_nth_batch,
Some(minimum_balance),
num_instructions,
Some(spl_mint_keypair.pubkey()),
true,
);
start.stop();
info!("{}", start);


@ -1,20 +0,0 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
</a>
</p>
# Solana AccountsDb Plugin Interface
This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see `accountsdb_plugin_interface.rs` for the interface definition.
The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
instantiates the implementation of the interface.
The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves account data into an
external PostgreSQL database.
More information about Solana is available in the [Solana documentation](https://docs.solana.com/).
Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)
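For orientation, a minimal sketch of such a plugin crate might look like the following. It is illustrative only: `MyPlugin` is a hypothetical name, and it assumes that the `AccountsDbPlugin` callbacks other than `name()` carry default implementations in the interface crate, so only the callbacks of interest need to be overridden.

```rust
// Hypothetical example, not part of this diff. Uses the trait and types
// exported by `solana-accountsdb-plugin-interface`, as referenced elsewhere
// in this change set.
use solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
    AccountsDbPlugin, ReplicaAccountInfoVersions, Result,
};

#[derive(Debug, Default)]
pub struct MyPlugin;

impl AccountsDbPlugin for MyPlugin {
    fn name(&self) -> &'static str {
        "MyPlugin"
    }

    // Called on every account update; a real plugin would persist the data
    // to its external store here.
    fn update_account(
        &mut self,
        _account: ReplicaAccountInfoVersions,
        slot: u64,
        is_startup: bool,
    ) -> Result<()> {
        eprintln!("account updated at slot {} (startup: {})", slot, is_startup);
        Ok(())
    }
}

/// Entry point the validator resolves from the `cdylib`; ownership of the
/// boxed plugin is transferred to the loader.
#[no_mangle]
#[allow(improper_ctypes_definitions)]
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    let plugin: Box<dyn AccountsDbPlugin> = Box::new(MyPlugin::default());
    Box::into_raw(plugin)
}
```

Built as a `cdylib`, this library could then be pointed at from a plugin config file, which is the pattern the PostgreSQL example crate below follows.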


@ -1 +0,0 @@
pub mod accountsdb_plugin_interface;


@ -1,31 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
[dependencies]
bs58 = "0.4.0"
crossbeam-channel = "0.5"
libloading = "0.7.2"
log = "0.4.11"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
thiserror = "1.0.30"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@ -1,6 +0,0 @@
pub mod accounts_update_notifier;
pub mod accountsdb_plugin_manager;
pub mod accountsdb_plugin_service;
pub mod slot_status_notifier;
pub mod slot_status_observer;
pub mod transaction_notifier;


@ -1,39 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.9.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
log = "0.4.14"
postgres = { version = "0.19.2", features = ["with-chrono-0_4"] }
postgres-types = { version = "0.2.2", features = ["derive"] }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
thiserror = "1.0.30"
tokio-postgres = "0.7.4"
[dev-dependencies]
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@ -1,5 +0,0 @@
This is an example implementation of the AccountsDb plugin for a PostgreSQL database.
Please see `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.
To create the schema objects for the database, please use `scripts/create_schema.sql`.
`scripts/drop_schema.sql` can be used to tear down the schema objects.


@ -1,183 +0,0 @@
/**
* This plugin implementation for PostgreSQL requires the following tables
*/
-- The table storing accounts
CREATE TABLE account (
pubkey BYTEA PRIMARY KEY,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);
-- The table storing slot information
CREATE TABLE slot (
slot BIGINT PRIMARY KEY,
parent BIGINT,
status VARCHAR(16) NOT NULL,
updated_on TIMESTAMP NOT NULL
);
-- Types for Transactions
Create TYPE "TransactionErrorCode" AS ENUM (
'AccountInUse',
'AccountLoadedTwice',
'AccountNotFound',
'ProgramAccountNotFound',
'InsufficientFundsForFee',
'InvalidAccountForFee',
'AlreadyProcessed',
'BlockhashNotFound',
'InstructionError',
'CallChainTooDeep',
'MissingSignatureForFee',
'InvalidAccountIndex',
'SignatureFailure',
'InvalidProgramForExecution',
'SanitizeFailure',
'ClusterMaintenance',
'AccountBorrowOutstanding',
'WouldExceedMaxAccountCostLimit',
'WouldExceedMaxBlockCostLimit',
'UnsupportedVersion',
'InvalidWritableAccount'
);
CREATE TYPE "TransactionError" AS (
error_code "TransactionErrorCode",
error_detail VARCHAR(256)
);
CREATE TYPE "CompiledInstruction" AS (
program_id_index SMALLINT,
accounts SMALLINT[],
data BYTEA
);
CREATE TYPE "InnerInstructions" AS (
index SMALLINT,
instructions "CompiledInstruction"[]
);
CREATE TYPE "TransactionTokenBalance" AS (
account_index SMALLINT,
mint VARCHAR(44),
ui_token_amount DOUBLE PRECISION,
owner VARCHAR(44)
);
Create TYPE "RewardType" AS ENUM (
'Fee',
'Rent',
'Staking',
'Voting'
);
CREATE TYPE "Reward" AS (
pubkey VARCHAR(44),
lamports BIGINT,
post_balance BIGINT,
reward_type "RewardType",
commission SMALLINT
);
CREATE TYPE "TransactionStatusMeta" AS (
error "TransactionError",
fee BIGINT,
pre_balances BIGINT[],
post_balances BIGINT[],
inner_instructions "InnerInstructions"[],
log_messages TEXT[],
pre_token_balances "TransactionTokenBalance"[],
post_token_balances "TransactionTokenBalance"[],
rewards "Reward"[]
);
CREATE TYPE "TransactionMessageHeader" AS (
num_required_signatures SMALLINT,
num_readonly_signed_accounts SMALLINT,
num_readonly_unsigned_accounts SMALLINT
);
CREATE TYPE "TransactionMessage" AS (
header "TransactionMessageHeader",
account_keys BYTEA[],
recent_blockhash BYTEA,
instructions "CompiledInstruction"[]
);
CREATE TYPE "AddressMapIndexes" AS (
writable SMALLINT[],
readonly SMALLINT[]
);
CREATE TYPE "TransactionMessageV0" AS (
header "TransactionMessageHeader",
account_keys BYTEA[],
recent_blockhash BYTEA,
instructions "CompiledInstruction"[],
address_map_indexes "AddressMapIndexes"[]
);
CREATE TYPE "MappedAddresses" AS (
writable BYTEA[],
readonly BYTEA[]
);
CREATE TYPE "MappedMessage" AS (
message "TransactionMessageV0",
mapped_addresses "MappedAddresses"
);
-- The table storing transactions
CREATE TABLE transaction (
slot BIGINT NOT NULL,
signature BYTEA NOT NULL,
is_vote BOOL NOT NULL,
message_type SMALLINT, -- 0: legacy, 1: v0 message
legacy_message "TransactionMessage",
v0_mapped_message "MappedMessage",
signatures BYTEA[],
message_hash BYTEA,
meta "TransactionStatusMeta",
updated_on TIMESTAMP NOT NULL,
CONSTRAINT transaction_pk PRIMARY KEY (slot, signature)
);
/**
* The following is for keeping historical data for accounts and is not required for plugin to work.
*/
-- The table storing historical data for accounts
CREATE TABLE account_audit (
pubkey BYTEA,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);
CREATE INDEX account_audit_account_key ON account_audit (pubkey, write_version);
CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
BEGIN
INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
RETURN NEW;
END;
$audit_account_update$ LANGUAGE plpgsql;
CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
FOR EACH ROW EXECUTE PROCEDURE audit_account_update();


@ -1,25 +0,0 @@
/**
* Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
*/
DROP TRIGGER account_update_trigger ON account;
DROP FUNCTION audit_account_update;
DROP TABLE account_audit;
DROP TABLE account;
DROP TABLE slot;
DROP TABLE transaction;
DROP TYPE "TransactionError" CASCADE;
DROP TYPE "TransactionErrorCode" CASCADE;
DROP TYPE "MappedMessage" CASCADE;
DROP TYPE "MappedAddresses" CASCADE;
DROP TYPE "TransactionMessageV0" CASCADE;
DROP TYPE "AddressMapIndexes" CASCADE;
DROP TYPE "TransactionMessage" CASCADE;
DROP TYPE "TransactionMessageHeader" CASCADE;
DROP TYPE "TransactionStatusMeta" CASCADE;
DROP TYPE "RewardType" CASCADE;
DROP TYPE "Reward" CASCADE;
DROP TYPE "TransactionTokenBalance" CASCADE;
DROP TYPE "InnerInstructions" CASCADE;
DROP TYPE "CompiledInstruction" CASCADE;


@ -1,802 +0,0 @@
# This is a reference configuration file for the PostgreSQL database, version 14.
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '/var/lib/postgresql/14/main' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
listen_addresses = '*'
port = 5433 # (change requires restart)
max_connections = 200 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 1GB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
wal_level = minimal # minimal, replica, or logical
# (change requires restart)
fsync = off # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
synchronous_commit = off # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
full_page_writes = off # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enable compression of full-page writes
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = -1 # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '14/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8' # locale for system error message
# strings
lc_monetary = 'C.UTF-8' # locale for monetary formatting
lc_numeric = 'C.UTF-8' # locale for number formatting
lc_time = 'C.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here


@ -1,74 +0,0 @@
use {log::*, std::collections::HashSet};
#[derive(Debug)]
pub(crate) struct AccountsSelector {
pub accounts: HashSet<Vec<u8>>,
pub owners: HashSet<Vec<u8>>,
pub select_all_accounts: bool,
}
impl AccountsSelector {
pub fn default() -> Self {
AccountsSelector {
accounts: HashSet::default(),
owners: HashSet::default(),
select_all_accounts: true,
}
}
pub fn new(accounts: &[String], owners: &[String]) -> Self {
info!(
"Creating AccountsSelector from accounts: {:?}, owners: {:?}",
accounts, owners
);
let select_all_accounts = accounts.iter().any(|key| key == "*");
if select_all_accounts {
return AccountsSelector {
accounts: HashSet::default(),
owners: HashSet::default(),
select_all_accounts,
};
}
let accounts = accounts
.iter()
.map(|key| bs58::decode(key).into_vec().unwrap())
.collect();
let owners = owners
.iter()
.map(|key| bs58::decode(key).into_vec().unwrap())
.collect();
AccountsSelector {
accounts,
owners,
select_all_accounts,
}
}
pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
}
/// Check if any account is of interest at all
pub fn is_enabled(&self) -> bool {
self.select_all_accounts || !self.accounts.is_empty() || !self.owners.is_empty()
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn test_create_accounts_selector() {
AccountsSelector::new(
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
&[],
);
AccountsSelector::new(
&[],
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
);
}
}


@ -1,437 +0,0 @@
use solana_measure::measure::Measure;
/// Main entry for the PostgreSQL plugin
use {
crate::{
accounts_selector::AccountsSelector,
postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
transaction_selector::TransactionSelector,
},
bs58,
log::*,
serde_derive::{Deserialize, Serialize},
serde_json,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions,
ReplicaTransactionInfoVersions, Result, SlotStatus,
},
solana_metrics::*,
std::{fs::File, io::Read},
thiserror::Error,
};
#[derive(Default)]
pub struct AccountsDbPluginPostgres {
client: Option<ParallelPostgresClient>,
accounts_selector: Option<AccountsSelector>,
transaction_selector: Option<TransactionSelector>,
}
impl std::fmt::Debug for AccountsDbPluginPostgres {
fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountsDbPluginPostgresConfig {
pub host: Option<String>,
pub user: Option<String>,
pub port: Option<u16>,
pub connection_str: Option<String>,
pub threads: Option<usize>,
pub batch_size: Option<usize>,
pub panic_on_db_errors: Option<bool>,
}
#[derive(Error, Debug)]
pub enum AccountsDbPluginPostgresError {
#[error("Error connecting to the backend data store. Error message: ({msg})")]
DataStoreConnectionError { msg: String },
#[error("Error preparing data store schema. Error message: ({msg})")]
DataSchemaError { msg: String },
#[error("Error preparing data store schema. Error message: ({msg})")]
ConfigurationError { msg: String },
}
impl AccountsDbPlugin for AccountsDbPluginPostgres {
fn name(&self) -> &'static str {
"AccountsDbPluginPostgres"
}
/// Do initialization for the PostgreSQL plugin.
///
/// # Format of the config file:
/// * The `accounts_selector` section allows the user to control account selection.
/// "accounts_selector" : {
/// "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
/// }
/// or:
/// "accounts_selector" = {
/// "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
/// }
/// Accounts satisfying either the accounts condition or the owners condition will be selected.
/// When only owners is specified,
/// all accounts belonging to the owners will be streamed.
/// The accounts field supports a wildcard to select all accounts:
/// "accounts_selector" : {
/// "accounts" : \["*"\],
/// }
/// * "host", optional, specifies the PostgreSQL server.
/// * "user", optional, specifies the PostgreSQL user.
/// * "port", optional, specifies the PostgreSQL server's port.
/// * "connection_str", optional, the custom PostgreSQL connection string.
/// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
/// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
/// `host` and `user` must be given.
/// * "threads", optional, specifies the number of worker threads for the plugin. Each thread
/// maintains a PostgreSQL connection to the server. The default is '10'.
/// * "batch_size", optional, specifies the batch size of bulk inserts when the AccountsDb is restored
/// from a snapshot. The default is '10'.
/// * "panic_on_db_errors", optional, controls whether to panic when there are errors replicating data to the
/// PostgreSQL database. The default is 'false'.
/// * "transaction_selector", optional, controls if and which transactions to store. If this field is missing,
/// no transactions are stored.
/// "transaction_selector" : {
/// "mentions" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
/// }
/// The `mentions` field supports wildcards to select all transactions or all 'vote' transactions:
/// For example, to select all transactions:
/// "transaction_selector" : {
/// "mentions" : \["*"\],
/// }
/// To select all vote transactions:
/// "transaction_selector" : {
/// "mentions" : \["all_votes"\],
/// }
/// # Examples
///
/// {
/// "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
/// "host": "host_foo",
/// "user": "solana",
/// "threads": 10,
/// "accounts_selector" : {
/// "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
/// }
/// }
fn on_load(&mut self, config_file: &str) -> Result<()> {
solana_logger::setup_with_default("info");
info!(
"Loading plugin {:?} from config_file {:?}",
self.name(),
config_file
);
let mut file = File::open(config_file)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));
self.transaction_selector = Some(Self::create_transaction_selector_from_config(&result));
let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
serde_json::from_str(&contents);
match result {
Err(err) => {
return Err(AccountsDbPluginError::ConfigFileReadError {
msg: format!(
"The config file is not in the JSON format expected: {:?}",
err
),
})
}
Ok(config) => {
let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
self.client = Some(client);
}
}
Ok(())
}
fn on_unload(&mut self) {
info!("Unloading plugin: {:?}", self.name());
match &mut self.client {
None => {}
Some(client) => {
client.join().unwrap();
}
}
}
fn update_account(
&mut self,
account: ReplicaAccountInfoVersions,
slot: u64,
is_startup: bool,
) -> Result<()> {
let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
match account {
ReplicaAccountInfoVersions::V0_0_1(account) => {
let mut measure_select =
Measure::start("accountsdb-plugin-postgres-update-account-select");
if let Some(accounts_selector) = &self.accounts_selector {
if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
return Ok(());
}
} else {
return Ok(());
}
measure_select.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-select-us",
measure_select.as_us() as usize,
100000,
100000
);
debug!(
"Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
bs58::encode(account.pubkey).into_string(),
bs58::encode(account.owner).into_string(),
slot,
self.accounts_selector.as_ref().unwrap()
);
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database."
.to_string(),
},
)));
}
Some(client) => {
let mut measure_update =
Measure::start("accountsdb-plugin-postgres-update-account-client");
let result = { client.update_account(account, slot, is_startup) };
measure_update.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-client-us",
measure_update.as_us() as usize,
100000,
100000
);
if let Err(err) = result {
return Err(AccountsDbPluginError::AccountsUpdateError {
msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
});
}
}
}
}
}
measure_all.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-main-us",
measure_all.as_us() as usize,
100000,
100000
);
Ok(())
}
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<()> {
info!("Updating slot {:?} at with status {:?}", slot, status);
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => {
let result = client.update_slot_status(slot, parent, status);
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
});
}
}
}
Ok(())
}
fn notify_end_of_startup(&mut self) -> Result<()> {
info!("Notifying the end of startup for accounts notifications");
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => {
let result = client.notify_end_of_startup();
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
});
}
}
}
Ok(())
}
fn notify_transaction(
&mut self,
transaction_info: ReplicaTransactionInfoVersions,
slot: u64,
) -> Result<()> {
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => match transaction_info {
ReplicaTransactionInfoVersions::V0_0_1(transaction_info) => {
if let Some(transaction_selector) = &self.transaction_selector {
if !transaction_selector.is_transaction_selected(
transaction_info.is_vote,
transaction_info.transaction.message().account_keys_iter(),
) {
return Ok(());
}
} else {
return Ok(());
}
let result = client.log_transaction_info(transaction_info, slot);
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to persist the transaction info to the PostgreSQL database. Error: {:?}", err)
});
}
}
},
}
Ok(())
}
/// Check if the plugin is interested in account data
/// Default is true -- if the plugin is not interested in
/// account data, please return false.
fn account_data_notifications_enabled(&self) -> bool {
self.accounts_selector
.as_ref()
.map_or_else(|| false, |selector| selector.is_enabled())
}
/// Check if the plugin is interested in transaction data
fn transaction_notifications_enabled(&self) -> bool {
self.transaction_selector
.as_ref()
.map_or_else(|| false, |selector| selector.is_enabled())
}
}
impl AccountsDbPluginPostgres {
fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
let accounts_selector = &config["accounts_selector"];
if accounts_selector.is_null() {
AccountsSelector::default()
} else {
let accounts = &accounts_selector["accounts"];
let accounts: Vec<String> = if accounts.is_array() {
accounts
.as_array()
.unwrap()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
} else {
Vec::default()
};
let owners = &accounts_selector["owners"];
let owners: Vec<String> = if owners.is_array() {
owners
.as_array()
.unwrap()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
} else {
Vec::default()
};
AccountsSelector::new(&accounts, &owners)
}
}
fn create_transaction_selector_from_config(config: &serde_json::Value) -> TransactionSelector {
let transaction_selector = &config["transaction_selector"];
if transaction_selector.is_null() {
TransactionSelector::default()
} else {
let accounts = &transaction_selector["mentions"];
let accounts: Vec<String> = if accounts.is_array() {
accounts
.as_array()
.unwrap()
.iter()
.map(|val| val.as_str().unwrap().to_string())
.collect()
} else {
Vec::default()
};
TransactionSelector::new(&accounts)
}
}
pub fn new() -> Self {
Self::default()
}
}
#[no_mangle]
#[allow(improper_ctypes_definitions)]
/// # Safety
///
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
let plugin = AccountsDbPluginPostgres::new();
let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
Box::into_raw(plugin)
}
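// Illustrative sketch (not part of this crate): how a host process could load this plugin
// at runtime. It assumes the `libloading` crate and a hypothetical path to the built
// shared object; the validator's plugin manager performs the equivalent steps internally.
//
//     use libloading::{Library, Symbol};
//
//     unsafe {
//         let lib = Library::new("libsolana_accountsdb_plugin_postgres.so").unwrap();
//         let constructor: Symbol<unsafe extern "C" fn() -> *mut dyn AccountsDbPlugin> =
//             lib.get(b"_create_plugin").unwrap();
//         let mut plugin = Box::from_raw(constructor());
//         plugin.on_load("accountsdb_plugin_config.json").unwrap();
//     }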
#[cfg(test)]
pub(crate) mod tests {
use {super::*, serde_json};
#[test]
fn test_accounts_selector_from_config() {
let config = "{\"accounts_selector\" : { \
\"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
}}";
let config: serde_json::Value = serde_json::from_str(config).unwrap();
AccountsDbPluginPostgres::create_accounts_selector_from_config(&config);
}
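// Illustrative sketch (not in the original test suite): the "transaction_selector" section
// documented in on_load is parsed the same way; "mentions" may contain pubkeys, "*"/"all"
// for all transactions, or "all_votes" for vote transactions only.
#[test]
fn test_transaction_selector_from_config() {
let config = "{\"transaction_selector\" : { \
\"mentions\" : [\"*\"] \
}}";
let config: serde_json::Value = serde_json::from_str(config).unwrap();
AccountsDbPluginPostgres::create_transaction_selector_from_config(&config);
}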
}

View File

@ -1,4 +0,0 @@
pub mod accounts_selector;
pub mod accountsdb_plugin_postgres;
pub mod postgres_client;
pub mod transaction_selector;

View File

@ -1,905 +0,0 @@
#![allow(clippy::integer_arithmetic)]
mod postgres_client_transaction;
/// A concurrent implementation for writing accounts to PostgreSQL in parallel.
use {
crate::accountsdb_plugin_postgres::{
AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
},
chrono::Utc,
crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
log::*,
postgres::{Client, NoTls, Statement},
postgres_client_transaction::LogTransactionRequest,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_sdk::timing::AtomicInterval,
std::{
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
Arc, Mutex,
},
thread::{self, sleep, Builder, JoinHandle},
time::Duration,
},
tokio_postgres::types,
};
/// The maximum number of asynchronous requests allowed in the channel, to avoid excessive
/// memory usage. The downside: once this threshold is reached, subsequent calls can block.
const MAX_ASYNC_REQUESTS: usize = 40960;
const DEFAULT_POSTGRES_PORT: u16 = 5432;
const DEFAULT_THREADS_COUNT: usize = 100;
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
const ACCOUNT_COLUMN_COUNT: usize = 9;
const DEFAULT_PANIC_ON_DB_ERROR: bool = false;
struct PostgresSqlClientWrapper {
client: Client,
update_account_stmt: Statement,
bulk_account_insert_stmt: Statement,
update_slot_with_parent_stmt: Statement,
update_slot_without_parent_stmt: Statement,
update_transaction_log_stmt: Statement,
}
pub struct SimplePostgresClient {
batch_size: usize,
pending_account_updates: Vec<DbAccountInfo>,
client: Mutex<PostgresSqlClientWrapper>,
}
struct PostgresClientWorker {
client: SimplePostgresClient,
/// Indicates whether the accounts notification during startup is done.
is_startup_done: bool,
}
impl Eq for DbAccountInfo {}
#[derive(Clone, PartialEq, Debug)]
pub struct DbAccountInfo {
pub pubkey: Vec<u8>,
pub lamports: i64,
pub owner: Vec<u8>,
pub executable: bool,
pub rent_epoch: i64,
pub data: Vec<u8>,
pub slot: i64,
pub write_version: i64,
}
pub(crate) fn abort() -> ! {
#[cfg(not(test))]
{
// standard error is usually redirected to a log file, cry for help on standard output as
// well
eprintln!("Validator process aborted. The validator log may contain further details");
std::process::exit(1);
}
#[cfg(test)]
panic!("process::exit(1) is intercepted for friendly test failure...");
}
impl DbAccountInfo {
fn new<T: ReadableAccountInfo>(account: &T, slot: u64) -> DbAccountInfo {
let data = account.data().to_vec();
Self {
pubkey: account.pubkey().to_vec(),
lamports: account.lamports() as i64,
owner: account.owner().to_vec(),
executable: account.executable(),
rent_epoch: account.rent_epoch() as i64,
data,
slot: slot as i64,
write_version: account.write_version(),
}
}
}
pub trait ReadableAccountInfo: Sized {
fn pubkey(&self) -> &[u8];
fn owner(&self) -> &[u8];
fn lamports(&self) -> i64;
fn executable(&self) -> bool;
fn rent_epoch(&self) -> i64;
fn data(&self) -> &[u8];
fn write_version(&self) -> i64;
}
impl ReadableAccountInfo for DbAccountInfo {
fn pubkey(&self) -> &[u8] {
&self.pubkey
}
fn owner(&self) -> &[u8] {
&self.owner
}
fn lamports(&self) -> i64 {
self.lamports
}
fn executable(&self) -> bool {
self.executable
}
fn rent_epoch(&self) -> i64 {
self.rent_epoch
}
fn data(&self) -> &[u8] {
&self.data
}
fn write_version(&self) -> i64 {
self.write_version
}
}
impl<'a> ReadableAccountInfo for ReplicaAccountInfo<'a> {
fn pubkey(&self) -> &[u8] {
self.pubkey
}
fn owner(&self) -> &[u8] {
self.owner
}
fn lamports(&self) -> i64 {
self.lamports as i64
}
fn executable(&self) -> bool {
self.executable
}
fn rent_epoch(&self) -> i64 {
self.rent_epoch as i64
}
fn data(&self) -> &[u8] {
self.data
}
fn write_version(&self) -> i64 {
self.write_version as i64
}
}
pub trait PostgresClient {
fn join(&mut self) -> thread::Result<()> {
Ok(())
}
fn update_account(
&mut self,
account: DbAccountInfo,
is_startup: bool,
) -> Result<(), AccountsDbPluginError>;
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError>;
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError>;
fn log_transaction(
&mut self,
transaction_log_info: LogTransactionRequest,
) -> Result<(), AccountsDbPluginError>;
}
impl SimplePostgresClient {
fn connect_to_db(
config: &AccountsDbPluginPostgresConfig,
) -> Result<Client, AccountsDbPluginError> {
let port = config.port.unwrap_or(DEFAULT_POSTGRES_PORT);
let connection_str = if let Some(connection_str) = &config.connection_str {
connection_str.clone()
} else {
if config.host.is_none() || config.user.is_none() {
let msg = format!(
"\"connection_str\": {:?}, or \"host\": {:?} \"user\": {:?} must be specified",
config.connection_str, config.host, config.user
);
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::ConfigurationError { msg },
)));
}
format!(
"host={} user={} port={}",
config.host.as_ref().unwrap(),
config.user.as_ref().unwrap(),
port
)
};
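// For example (illustrative values): with host "localhost", user "solana" and the default
// port, the generated string is "host=localhost user=solana port=5432". When the config
// provides `connection_str`, that value is used verbatim instead.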
match Client::connect(&connection_str, NoTls) {
Err(err) => {
let msg = format!(
"Error in connecting to the PostgreSQL database: {:?} connection_str: {:?}",
err, connection_str
);
error!("{}", msg);
Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError { msg },
)))
}
Ok(client) => Ok(client),
}
}
fn build_bulk_account_insert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let batch_size = config
.batch_size
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
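// The statement built below is one multi-row upsert. For an illustrative batch_size of 2
// (ACCOUNT_COLUMN_COUNT is 9) it expands to roughly:
//   INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch,
//       data, write_version, updated_on)
//   VALUES ($1, ..., $9), ($10, ..., $18)
//   ON CONFLICT (pubkey) DO UPDATE SET ... WHERE acct.slot < excluded.slot OR
//       (acct.slot = excluded.slot AND acct.write_version < excluded.write_version)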
let mut stmt = String::from("INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) VALUES");
for j in 0..batch_size {
let row = j * ACCOUNT_COLUMN_COUNT;
let val_str = format!(
"(${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${})",
row + 1,
row + 2,
row + 3,
row + 4,
row + 5,
row + 6,
row + 7,
row + 8,
row + 9,
);
if j == 0 {
stmt = format!("{} {}", &stmt, val_str);
} else {
stmt = format!("{}, {}", &stmt, val_str);
}
}
let handle_conflict = "ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
stmt = format!("{} {}", stmt, handle_conflict);
info!("{}", stmt);
let bulk_stmt = client.prepare(&stmt);
match bulk_stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(update_account_stmt) => Ok(update_account_stmt),
}
}
fn build_single_account_upsert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
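// Upsert keyed on pubkey: an existing row is only overwritten when the incoming update is
// newer, i.e. it has a higher slot, or the same slot with a higher write_version; stale
// updates are filtered out by the WHERE clause.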
let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) \
ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the accounts update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(update_account_stmt) => Ok(update_account_stmt),
}
}
fn build_slot_upsert_statement_with_parent(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO slot (slot, parent, status, updated_on) \
VALUES ($1, $2, $3, $4) \
ON CONFLICT (slot) DO UPDATE SET parent=excluded.parent, status=excluded.status, updated_on=excluded.updated_on";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}
fn build_slot_upsert_statement_without_parent(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO slot (slot, status, updated_on) \
VALUES ($1, $2, $3) \
ON CONFLICT (slot) DO UPDATE SET status=excluded.status, updated_on=excluded.updated_on";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the slot update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}
/// Internal function for updating or inserting a single account
fn upsert_account_internal(
account: &DbAccountInfo,
statement: &Statement,
client: &mut Client,
) -> Result<(), AccountsDbPluginError> {
let lamports = account.lamports() as i64;
let rent_epoch = account.rent_epoch() as i64;
let updated_on = Utc::now().naive_utc();
let result = client.query(
statement,
&[
&account.pubkey(),
&account.slot,
&account.owner(),
&lamports,
&account.executable(),
&rent_epoch,
&account.data(),
&account.write_version(),
&updated_on,
],
);
if let Err(err) = result {
let msg = format!(
"Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
err
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
Ok(())
}
/// Update or insert a single account
fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
let client = self.client.get_mut().unwrap();
let statement = &client.update_account_stmt;
let client = &mut client.client;
Self::upsert_account_internal(account, statement, client)
}
/// Insert accounts in batch to reduce network overhead
fn insert_accounts_in_batch(
&mut self,
account: DbAccountInfo,
) -> Result<(), AccountsDbPluginError> {
self.pending_account_updates.push(account);
if self.pending_account_updates.len() == self.batch_size {
let mut measure = Measure::start("accountsdb-plugin-postgres-prepare-values");
let mut values: Vec<&(dyn types::ToSql + Sync)> =
Vec::with_capacity(self.batch_size * ACCOUNT_COLUMN_COUNT);
let updated_on = Utc::now().naive_utc();
for j in 0..self.batch_size {
let account = &self.pending_account_updates[j];
values.push(&account.pubkey);
values.push(&account.slot);
values.push(&account.owner);
values.push(&account.lamports);
values.push(&account.executable);
values.push(&account.rent_epoch);
values.push(&account.data);
values.push(&account.write_version);
values.push(&updated_on);
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-prepare-values-us",
measure.as_us() as usize,
10000,
10000
);
let mut measure = Measure::start("accountsdb-plugin-postgres-update-account");
let client = self.client.get_mut().unwrap();
let result = client
.client
.query(&client.bulk_account_insert_stmt, &values);
self.pending_account_updates.clear();
if let Err(err) = result {
let msg = format!(
"Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
err
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-us",
measure.as_us() as usize,
10000,
10000
);
inc_new_counter_debug!(
"accountsdb-plugin-postgres-update-account-count",
self.batch_size,
10000,
10000
);
}
Ok(())
}
/// Flush any leftover accounts in the batch that were not written out as part of a full batch
fn flush_buffered_writes(&mut self) -> Result<(), AccountsDbPluginError> {
if self.pending_account_updates.is_empty() {
return Ok(());
}
let client = self.client.get_mut().unwrap();
let statement = &client.update_account_stmt;
let client = &mut client.client;
for account in self.pending_account_updates.drain(..) {
Self::upsert_account_internal(&account, statement, client)?;
}
Ok(())
}
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
info!("Creating SimplePostgresClient...");
let mut client = Self::connect_to_db(config)?;
let bulk_account_insert_stmt =
Self::build_bulk_account_insert_statement(&mut client, config)?;
let update_account_stmt = Self::build_single_account_upsert_statement(&mut client, config)?;
let update_slot_with_parent_stmt =
Self::build_slot_upsert_statement_with_parent(&mut client, config)?;
let update_slot_without_parent_stmt =
Self::build_slot_upsert_statement_without_parent(&mut client, config)?;
let update_transaction_log_stmt =
Self::build_transaction_info_upsert_statement(&mut client, config)?;
let batch_size = config
.batch_size
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
info!("Created SimplePostgresClient.");
Ok(Self {
batch_size,
pending_account_updates: Vec::with_capacity(batch_size),
client: Mutex::new(PostgresSqlClientWrapper {
client,
update_account_stmt,
bulk_account_insert_stmt,
update_slot_with_parent_stmt,
update_slot_without_parent_stmt,
update_transaction_log_stmt,
}),
})
}
}
impl PostgresClient for SimplePostgresClient {
fn update_account(
&mut self,
account: DbAccountInfo,
is_startup: bool,
) -> Result<(), AccountsDbPluginError> {
trace!(
"Updating account {} with owner {} at slot {}",
bs58::encode(account.pubkey()).into_string(),
bs58::encode(account.owner()).into_string(),
account.slot,
);
if !is_startup {
return self.upsert_account(&account);
}
self.insert_accounts_in_batch(account)
}
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError> {
info!("Updating slot {:?} at with status {:?}", slot, status);
let slot = slot as i64; // postgres only supports i64
let parent = parent.map(|parent| parent as i64);
let updated_on = Utc::now().naive_utc();
let status_str = status.as_str();
let client = self.client.get_mut().unwrap();
let result = match parent {
Some(parent) => client.client.execute(
&client.update_slot_with_parent_stmt,
&[&slot, &parent, &status_str, &updated_on],
),
None => client.client.execute(
&client.update_slot_without_parent_stmt,
&[&slot, &status_str, &updated_on],
),
};
match result {
Err(err) => {
let msg = format!(
"Failed to persist the update of slot to the PostgreSQL database. Error: {:?}",
err
);
error!("{:?}", msg);
return Err(AccountsDbPluginError::SlotStatusUpdateError { msg });
}
Ok(rows) => {
assert_eq!(1, rows, "Expected one row to be updated at a time");
}
}
Ok(())
}
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
self.flush_buffered_writes()
}
fn log_transaction(
&mut self,
transaction_log_info: LogTransactionRequest,
) -> Result<(), AccountsDbPluginError> {
self.log_transaction_impl(transaction_log_info)
}
}
struct UpdateAccountRequest {
account: DbAccountInfo,
is_startup: bool,
}
struct UpdateSlotRequest {
slot: u64,
parent: Option<u64>,
slot_status: SlotStatus,
}
#[warn(clippy::large_enum_variant)]
enum DbWorkItem {
UpdateAccount(Box<UpdateAccountRequest>),
UpdateSlot(Box<UpdateSlotRequest>),
LogTransaction(Box<LogTransactionRequest>),
}
impl PostgresClientWorker {
fn new(config: AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
let result = SimplePostgresClient::new(&config);
match result {
Ok(client) => Ok(PostgresClientWorker {
client,
is_startup_done: false,
}),
Err(err) => {
error!("Error in creating SimplePostgresClient: {}", err);
Err(err)
}
}
}
fn do_work(
&mut self,
receiver: Receiver<DbWorkItem>,
exit_worker: Arc<AtomicBool>,
is_startup_done: Arc<AtomicBool>,
startup_done_count: Arc<AtomicUsize>,
panic_on_db_errors: bool,
) -> Result<(), AccountsDbPluginError> {
while !exit_worker.load(Ordering::Relaxed) {
let mut measure = Measure::start("accountsdb-plugin-postgres-worker-recv");
let work = receiver.recv_timeout(Duration::from_millis(500));
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-postgres-worker-recv-us",
measure.as_us() as usize,
100000,
100000
);
match work {
Ok(work) => match work {
DbWorkItem::UpdateAccount(request) => {
if let Err(err) = self
.client
.update_account(request.account, request.is_startup)
{
error!("Failed to update account: ({})", err);
if panic_on_db_errors {
abort();
}
}
}
DbWorkItem::UpdateSlot(request) => {
if let Err(err) = self.client.update_slot_status(
request.slot,
request.parent,
request.slot_status,
) {
error!("Failed to update slot: ({})", err);
if panic_on_db_errors {
abort();
}
}
}
DbWorkItem::LogTransaction(transaction_log_info) => {
self.client.log_transaction(*transaction_log_info)?;
}
},
Err(err) => match err {
RecvTimeoutError::Timeout => {
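// A receive timeout doubles as a quiet point: once the plugin signals that startup
// account notifications are complete, each worker flushes its buffered startup
// batch exactly once and counts itself as done.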
if !self.is_startup_done && is_startup_done.load(Ordering::Relaxed) {
if let Err(err) = self.client.notify_end_of_startup() {
error!("Error in notifying end of startup: ({})", err);
if panic_on_db_errors {
abort();
}
}
self.is_startup_done = true;
startup_done_count.fetch_add(1, Ordering::Relaxed);
}
continue;
}
_ => {
error!("Error in receiving the item {:?}", err);
if panic_on_db_errors {
abort();
}
break;
}
},
}
}
Ok(())
}
}
pub struct ParallelPostgresClient {
workers: Vec<JoinHandle<Result<(), AccountsDbPluginError>>>,
exit_worker: Arc<AtomicBool>,
is_startup_done: Arc<AtomicBool>,
startup_done_count: Arc<AtomicUsize>,
initialized_worker_count: Arc<AtomicUsize>,
sender: Sender<DbWorkItem>,
last_report: AtomicInterval,
}
impl ParallelPostgresClient {
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
info!("Creating ParallelPostgresClient...");
let (sender, receiver) = bounded(MAX_ASYNC_REQUESTS);
let exit_worker = Arc::new(AtomicBool::new(false));
let mut workers = Vec::default();
let is_startup_done = Arc::new(AtomicBool::new(false));
let startup_done_count = Arc::new(AtomicUsize::new(0));
let worker_count = config.threads.unwrap_or(DEFAULT_THREADS_COUNT);
let initialized_worker_count = Arc::new(AtomicUsize::new(0));
for i in 0..worker_count {
let cloned_receiver = receiver.clone();
let exit_clone = exit_worker.clone();
let is_startup_done_clone = is_startup_done.clone();
let startup_done_count_clone = startup_done_count.clone();
let initialized_worker_count_clone = initialized_worker_count.clone();
let config = config.clone();
let worker = Builder::new()
.name(format!("worker-{}", i))
.spawn(move || -> Result<(), AccountsDbPluginError> {
let panic_on_db_errors = *config
.panic_on_db_errors
.as_ref()
.unwrap_or(&DEFAULT_PANIC_ON_DB_ERROR);
let result = PostgresClientWorker::new(config);
match result {
Ok(mut worker) => {
initialized_worker_count_clone.fetch_add(1, Ordering::Relaxed);
worker.do_work(
cloned_receiver,
exit_clone,
is_startup_done_clone,
startup_done_count_clone,
panic_on_db_errors,
)?;
Ok(())
}
Err(err) => {
error!("Error when making connection to database: ({})", err);
if panic_on_db_errors {
abort();
}
Err(err)
}
}
})
.unwrap();
workers.push(worker);
}
info!("Created ParallelPostgresClient.");
Ok(Self {
last_report: AtomicInterval::default(),
workers,
exit_worker,
is_startup_done,
startup_done_count,
initialized_worker_count,
sender,
})
}
pub fn join(&mut self) -> thread::Result<()> {
self.exit_worker.store(true, Ordering::Relaxed);
while !self.workers.is_empty() {
let worker = self.workers.pop();
if worker.is_none() {
break;
}
let worker = worker.unwrap();
let result = worker.join().unwrap();
if result.is_err() {
error!("The worker thread has failed: {:?}", result);
}
}
Ok(())
}
pub fn update_account(
&mut self,
account: &ReplicaAccountInfo,
slot: u64,
is_startup: bool,
) -> Result<(), AccountsDbPluginError> {
if self.last_report.should_update(30000) {
datapoint_debug!(
"postgres-plugin-stats",
("message-queue-length", self.sender.len() as i64, i64),
);
}
let mut measure = Measure::start("accountsdb-plugin-posgres-create-work-item");
let wrk_item = DbWorkItem::UpdateAccount(Box::new(UpdateAccountRequest {
account: DbAccountInfo::new(account, slot),
is_startup,
}));
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-posgres-create-work-item-us",
measure.as_us() as usize,
100000,
100000
);
let mut measure = Measure::start("accountsdb-plugin-posgres-send-msg");
if let Err(err) = self.sender.send(wrk_item) {
return Err(AccountsDbPluginError::AccountsUpdateError {
msg: format!(
"Failed to update the account {:?}, error: {:?}",
bs58::encode(account.pubkey()).into_string(),
err
),
});
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-posgres-send-msg-us",
measure.as_us() as usize,
100000,
100000
);
Ok(())
}
pub fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<(), AccountsDbPluginError> {
if let Err(err) = self
.sender
.send(DbWorkItem::UpdateSlot(Box::new(UpdateSlotRequest {
slot,
parent,
slot_status: status,
})))
{
return Err(AccountsDbPluginError::SlotStatusUpdateError {
msg: format!("Failed to update the slot {:?}, error: {:?}", slot, err),
});
}
Ok(())
}
pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
info!("Notifying the end of startup");
// Ensure all items in the queue have been received by the workers
while !self.sender.is_empty() {
sleep(Duration::from_millis(100));
}
self.is_startup_done.store(true, Ordering::Relaxed);
// Wait for all worker threads to be done with flushing
while self.startup_done_count.load(Ordering::Relaxed)
!= self.initialized_worker_count.load(Ordering::Relaxed)
{
info!(
"Startup done count: {}, good worker thread count: {}",
self.startup_done_count.load(Ordering::Relaxed),
self.initialized_worker_count.load(Ordering::Relaxed)
);
sleep(Duration::from_millis(100));
}
info!("Done with notifying the end of startup");
Ok(())
}
}
pub struct PostgresClientBuilder {}
impl PostgresClientBuilder {
pub fn build_pararallel_postgres_client(
config: &AccountsDbPluginPostgresConfig,
) -> Result<ParallelPostgresClient, AccountsDbPluginError> {
ParallelPostgresClient::new(config)
}
pub fn build_simple_postgres_client(
config: &AccountsDbPluginPostgresConfig,
) -> Result<SimplePostgresClient, AccountsDbPluginError> {
SimplePostgresClient::new(config)
}
}
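// Illustrative usage sketch (not part of this module): building a parallel client from a
// JSON config and pushing a slot update through it. The field names follow the config
// documented in accountsdb_plugin_postgres.rs; the exact config struct fields are assumed here.
//
//     let config: AccountsDbPluginPostgresConfig = serde_json::from_str(
//         r#"{ "host": "localhost", "user": "solana", "threads": 4 }"#,
//     ).unwrap();
//     let mut client = PostgresClientBuilder::build_pararallel_postgres_client(&config).unwrap();
//     client.update_slot_status(42, Some(41), SlotStatus::Processed).unwrap();
//     client.join().unwrap();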

View File

@ -1,194 +0,0 @@
/// The transaction selector is responsible for filtering transactions
/// in the plugin framework.
use {log::*, solana_sdk::pubkey::Pubkey, std::collections::HashSet};
pub(crate) struct TransactionSelector {
pub mentioned_addresses: HashSet<Vec<u8>>,
pub select_all_transactions: bool,
pub select_all_vote_transactions: bool,
}
#[allow(dead_code)]
impl TransactionSelector {
pub fn default() -> Self {
Self {
mentioned_addresses: HashSet::default(),
select_all_transactions: false,
select_all_vote_transactions: false,
}
}
/// Create a selector based on the mentioned addresses
/// To select all transactions use ["*"] or ["all"]
/// To select all vote transactions, use ["all_votes"]
/// To select transactions mentioning specific addresses use ["<pubkey1>", "<pubkey2>", ...]
pub fn new(mentioned_addresses: &[String]) -> Self {
info!(
"Creating TransactionSelector from addresses: {:?}",
mentioned_addresses
);
let select_all_transactions = mentioned_addresses
.iter()
.any(|key| key == "*" || key == "all");
if select_all_transactions {
return Self {
mentioned_addresses: HashSet::default(),
select_all_transactions,
select_all_vote_transactions: true,
};
}
let select_all_vote_transactions = mentioned_addresses.iter().any(|key| key == "all_votes");
if select_all_vote_transactions {
return Self {
mentioned_addresses: HashSet::default(),
select_all_transactions,
select_all_vote_transactions: true,
};
}
let mentioned_addresses = mentioned_addresses
.iter()
.map(|key| bs58::decode(key).into_vec().unwrap())
.collect();
Self {
mentioned_addresses,
select_all_transactions: false,
select_all_vote_transactions: false,
}
}
/// Check if a transaction is of interest.
pub fn is_transaction_selected(
&self,
is_vote: bool,
mentioned_addresses: Box<dyn Iterator<Item = &Pubkey> + '_>,
) -> bool {
if !self.is_enabled() {
return false;
}
if self.select_all_transactions || (self.select_all_vote_transactions && is_vote) {
return true;
}
for address in mentioned_addresses {
if self.mentioned_addresses.contains(address.as_ref()) {
return true;
}
}
false
}
/// Check if any transaction is of interest at all
pub fn is_enabled(&self) -> bool {
self.select_all_transactions
|| self.select_all_vote_transactions
|| !self.mentioned_addresses.is_empty()
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
#[test]
fn test_select_transaction() {
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let selector = TransactionSelector::new(&[pubkey1.to_string()]);
assert!(selector.is_enabled());
let addresses = [pubkey1];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey2];
assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey1, pubkey2];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
}
#[test]
fn test_select_all_transaction_using_wildcard() {
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let selector = TransactionSelector::new(&["*".to_string()]);
assert!(selector.is_enabled());
let addresses = [pubkey1];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey2];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey1, pubkey2];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
}
#[test]
fn test_select_all_transaction_all() {
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let selector = TransactionSelector::new(&["all".to_string()]);
assert!(selector.is_enabled());
let addresses = [pubkey1];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey2];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey1, pubkey2];
assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
}
#[test]
fn test_select_all_vote_transaction() {
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let selector = TransactionSelector::new(&["all_votes".to_string()]);
assert!(selector.is_enabled());
let addresses = [pubkey1];
assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey2];
assert!(selector.is_transaction_selected(true, Box::new(addresses.iter())));
let addresses = [pubkey1, pubkey2];
assert!(selector.is_transaction_selected(true, Box::new(addresses.iter())));
}
#[test]
fn test_select_no_transaction() {
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let selector = TransactionSelector::new(&[]);
assert!(!selector.is_enabled());
let addresses = [pubkey1];
assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));
let addresses = [pubkey2];
assert!(!selector.is_transaction_selected(true, Box::new(addresses.iter())));
let addresses = [pubkey1, pubkey2];
assert!(!selector.is_transaction_selected(true, Box::new(addresses.iter())));
}
}

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-banking-bench"
version = "1.9.0"
version = "1.9.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-core = { path = "../core", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-ledger = { path = "../ledger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-poh = { path = "../poh", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.17" }
solana-gossip = { path = "../gossip", version = "=1.9.17" }
solana-ledger = { path = "../ledger", version = "=1.9.17" }
solana-logger = { path = "../logger", version = "=1.9.17" }
solana-measure = { path = "../measure", version = "=1.9.17" }
solana-perf = { path = "../perf", version = "=1.9.17" }
solana-poh = { path = "../poh", version = "=1.9.17" }
solana-runtime = { path = "../runtime", version = "=1.9.17" }
solana-streamer = { path = "../streamer", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-version = { path = "../version", version = "=1.9.17" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -11,9 +11,10 @@ use {
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
leader_schedule_cache::LeaderScheduleCache,
},
solana_measure::measure::Measure,
solana_perf::packet::to_packets_chunked,
solana_perf::packet::to_packet_batches,
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
@ -212,14 +213,19 @@ fn main() {
bank.clear_signatures();
}
let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(
&bank,
&blockstore,
None,
Some(leader_schedule_cache.clone()),
);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
@ -332,6 +338,7 @@ fn main() {
poh_recorder.lock().unwrap().set_bank(&bank);
assert!(poh_recorder.lock().unwrap().bank().is_some());
if bank.slot() > 32 {
leader_schedule_cache.set_root(&bank);
bank_forks.set_root(root, &AbsRequestSender::default(), None);
root += 1;
}
@ -364,7 +371,7 @@ fn main() {
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
}
start += chunk_len;

View File

@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.9.0"
version = "1.9.17"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -12,16 +12,17 @@ edition = "2021"
[dependencies]
borsh = "0.9.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" }
solana-program = { path = "../sdk/program", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.17" }
solana-program = { path = "../sdk/program", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
tarpc = { version = "0.27.2", features = ["full"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-banks-server = { path = "../banks-server", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.17" }
solana-banks-server = { path = "../banks-server", version = "=1.9.17" }
[lib]
crate-type = ["lib"]

banks-client/src/error.rs (new file, 73 lines)
View File

@ -0,0 +1,73 @@
use {
solana_sdk::{transaction::TransactionError, transport::TransportError},
std::io,
tarpc::client::RpcError,
thiserror::Error,
};
/// Errors from BanksClient
#[derive(Error, Debug)]
pub enum BanksClientError {
#[error("client error: {0}")]
ClientError(&'static str),
#[error(transparent)]
Io(#[from] io::Error),
#[error(transparent)]
RpcError(#[from] RpcError),
#[error("transport transaction error: {0}")]
TransactionError(#[from] TransactionError),
#[error("simulation error: {err:?}, logs: {logs:?}, units_consumed: {units_consumed:?}")]
SimulationError {
err: TransactionError,
logs: Vec<String>,
units_consumed: u64,
},
}
impl BanksClientError {
pub fn unwrap(&self) -> TransactionError {
match self {
BanksClientError::TransactionError(err)
| BanksClientError::SimulationError { err, .. } => err.clone(),
_ => panic!("unexpected transport error"),
}
}
}
impl From<BanksClientError> for io::Error {
fn from(err: BanksClientError) -> Self {
match err {
BanksClientError::ClientError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
BanksClientError::Io(err) => err,
BanksClientError::RpcError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
BanksClientError::TransactionError(err) => {
Self::new(io::ErrorKind::Other, err.to_string())
}
BanksClientError::SimulationError { err, .. } => {
Self::new(io::ErrorKind::Other, err.to_string())
}
}
}
}
impl From<BanksClientError> for TransportError {
fn from(err: BanksClientError) -> Self {
match err {
BanksClientError::ClientError(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::Io(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::RpcError(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::TransactionError(err) => Self::TransactionError(err),
BanksClientError::SimulationError { err, .. } => Self::TransactionError(err),
}
}
}

View File

@ -5,11 +5,12 @@
//! but they are undocumented, may change over time, and are generally more
//! cumbersome to use.
pub use crate::error::BanksClientError;
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
use {
borsh::BorshDeserialize,
futures::{future::join_all, Future, FutureExt},
solana_banks_interface::{BanksRequest, BanksResponse},
futures::{future::join_all, Future, FutureExt, TryFutureExt},
solana_banks_interface::{BanksRequest, BanksResponse, BanksTransactionResultWithSimulation},
solana_program::{
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
rent::Rent, sysvar::Sysvar,
@ -22,7 +23,7 @@ use {
transaction::{self, Transaction},
transport,
},
std::io::{self, Error, ErrorKind},
std::io,
tarpc::{
client::{self, NewClient, RequestDispatch},
context::{self, Context},
@ -33,6 +34,8 @@ use {
tokio_serde::formats::Bincode,
};
mod error;
// This exists only for backward compatibility
pub trait BanksClientExt {}
@ -58,7 +61,10 @@ impl BanksClient {
ctx: Context,
transaction: Transaction,
) -> impl Future<Output = io::Result<()>> + '_ {
self.inner.send_transaction_with_context(ctx, transaction)
self.inner
.send_transaction_with_context(ctx, transaction)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
#[deprecated(
@ -73,6 +79,8 @@ impl BanksClient {
#[allow(deprecated)]
self.inner
.get_fees_with_commitment_and_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn get_transaction_status_with_context(
@ -82,6 +90,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
self.inner
.get_transaction_status_with_context(ctx, signature)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn get_slot_with_context(
@ -89,7 +99,10 @@ impl BanksClient {
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_slot_with_context(ctx, commitment)
self.inner
.get_slot_with_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn get_block_height_with_context(
@ -97,7 +110,10 @@ impl BanksClient {
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_block_height_with_context(ctx, commitment)
self.inner
.get_block_height_with_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn process_transaction_with_commitment_and_context(
@ -108,6 +124,24 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
self.inner
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn process_transaction_with_preflight_and_commitment_and_context(
&mut self,
ctx: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<BanksTransactionResultWithSimulation, BanksClientError>> + '_
{
self.inner
.process_transaction_with_preflight_and_commitment_and_context(
ctx,
transaction,
commitment,
)
.map_err(Into::into)
}
pub fn get_account_with_commitment_and_context(
@ -118,6 +152,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
self.inner
.get_account_with_commitment_and_context(ctx, address, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
/// Send a transaction and return immediately. The server will resend the
@ -148,9 +184,13 @@ impl BanksClient {
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(T::id()).map(|result| {
let sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
.ok_or(BanksClientError::ClientError("Sysvar not present"))
.map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
from_account::<T, _>(&sysvar)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
.ok_or(BanksClientError::ClientError(
"Failed to deserialize sysvar",
))
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
})
}
@ -164,7 +204,8 @@ impl BanksClient {
/// method to get both a blockhash and the blockhash's last valid slot.
#[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")]
pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
self.get_latest_blockhash()
#[allow(deprecated)]
self.get_fees().map(|result| Ok(result?.1))
}
/// Send a transaction and return after the transaction has been rejected or
@ -178,11 +219,60 @@ impl BanksClient {
ctx.deadline += Duration::from_secs(50);
self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.map(|result| match result? {
None => {
Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into())
}
None => Err(BanksClientError::ClientError(
"invalid blockhash or fee-payer",
)),
Some(transaction_result) => Ok(transaction_result?),
})
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
}
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
/// after the transaction has been rejected or reached the given level of commitment.
pub fn process_transaction_with_preflight_and_commitment(
&mut self,
transaction: Transaction,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
let mut ctx = context::current();
ctx.deadline += Duration::from_secs(50);
self.process_transaction_with_preflight_and_commitment_and_context(
ctx,
transaction,
commitment,
)
.map(|result| match result? {
BanksTransactionResultWithSimulation {
result: None,
simulation_details: _,
} => Err(BanksClientError::ClientError(
"invalid blockhash or fee-payer",
)),
BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: Some(simulation_details),
} => Err(BanksClientError::SimulationError {
err,
logs: simulation_details.logs,
units_consumed: simulation_details.units_consumed,
}),
BanksTransactionResultWithSimulation {
result: Some(result),
simulation_details: _,
} => result.map_err(Into::into),
})
}
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
/// after the transaction has been finalized or rejected.
pub fn process_transaction_with_preflight(
&mut self,
transaction: Transaction,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
self.process_transaction_with_preflight_and_commitment(
transaction,
CommitmentLevel::default(),
)
}
/// Send a transaction and return until the transaction has been finalized or rejected.
@ -255,10 +345,12 @@ impl BanksClient {
address: Pubkey,
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Account not found"))?;
let account = result?
.ok_or(BanksClientError::ClientError("Account not found"))
.map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
T::unpack_from_slice(&account.data)
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Failed to deserialize account"))
.map_err(|_| BanksClientError::ClientError("Failed to deserialize account"))
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
})
}
@ -269,9 +361,8 @@ impl BanksClient {
address: Pubkey,
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))?;
T::try_from_slice(&account.data)
let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
T::try_from_slice(&account.data).map_err(Into::into)
})
}
@ -330,7 +421,8 @@ impl BanksClient {
.map(|result| {
result?
.map(|x| x.0)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))
.ok_or(BanksClientError::ClientError("valid blockhash not found"))
.map_err(Into::into)
})
}
@ -348,6 +440,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
self.inner
.get_latest_blockhash_with_commitment_and_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn get_fee_for_message_with_commitment_and_context(
@ -358,6 +452,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<u64>>> + '_ {
self.inner
.get_fee_for_message_with_commitment_and_context(ctx, commitment, message)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
}
@ -399,7 +495,7 @@ mod tests {
}
#[test]
fn test_banks_server_transfer_via_server() -> io::Result<()> {
fn test_banks_server_transfer_via_server() -> Result<(), BanksClientError> {
// This test shows the preferred way to interact with BanksServer.
// It creates a runtime explicitly (no globals via tokio macros) and calls
// `runtime.block_on()` just once, to run all the async code.
@ -432,7 +528,7 @@ mod tests {
}
#[test]
fn test_banks_server_transfer_via_client() -> io::Result<()> {
fn test_banks_server_transfer_via_client() -> Result<(), BanksClientError> {
// The caller may not want to hold the connection open until the transaction
// is processed (or blockhash expires). In this test, we verify the
// server-side functionality is available to the client.

View File

@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.9.0"
version = "1.9.17"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -11,8 +11,8 @@ edition = "2021"
[dependencies]
serde = { version = "1.0.130", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
tarpc = { version = "0.27.2", features = ["full"] }
[lib]
crate-type = ["lib"]

View File

@ -30,6 +30,19 @@ pub struct TransactionStatus {
pub confirmation_status: Option<TransactionConfirmationStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TransactionSimulationDetails {
pub logs: Vec<String>,
pub units_consumed: u64,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BanksTransactionResultWithSimulation {
pub result: Option<transaction::Result<()>>,
pub simulation_details: Option<TransactionSimulationDetails>,
}
#[tarpc::service]
pub trait Banks {
async fn send_transaction_with_context(transaction: Transaction);
@ -44,6 +57,10 @@ pub trait Banks {
-> Option<TransactionStatus>;
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
async fn process_transaction_with_preflight_and_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,
) -> BanksTransactionResultWithSimulation;
async fn process_transaction_with_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,

View File

@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.9.0"
version = "1.9.17"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -12,11 +12,11 @@ edition = "2021"
[dependencies]
bincode = "1.3.3"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.17" }
solana-runtime = { path = "../runtime", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.17" }
tarpc = { version = "0.27.2", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
tokio-stream = "0.1"

View File

@ -2,9 +2,14 @@ use {
bincode::{deserialize, serialize},
futures::{future, prelude::stream::StreamExt},
solana_banks_interface::{
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
Banks, BanksRequest, BanksResponse, BanksTransactionResultWithSimulation,
TransactionConfirmationStatus, TransactionSimulationDetails, TransactionStatus,
},
solana_runtime::{
bank::{Bank, TransactionSimulationResult},
bank_forks::BankForks,
commitment::BlockCommitmentCache,
},
solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache},
solana_sdk::{
account::Account,
clock::Slot,
@ -15,7 +20,7 @@ use {
message::{Message, SanitizedMessage},
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
transaction::{self, SanitizedTransaction, Transaction},
},
solana_send_transaction_service::{
send_transaction_service::{SendTransactionService, TransactionInfo},
@ -35,7 +40,7 @@ use {
tarpc::{
context::Context,
serde_transport::tcp,
server::{self, Channel, Incoming},
server::{self, incoming::Incoming, Channel},
transport::{self, channel::UnboundedChannel},
ClientMessage, Response,
},
@ -242,6 +247,47 @@ impl Banks for BanksServer {
self.bank(commitment).block_height()
}
async fn process_transaction_with_preflight_and_commitment_and_context(
self,
ctx: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> BanksTransactionResultWithSimulation {
let sanitized_transaction =
match SanitizedTransaction::try_from_legacy_transaction(transaction.clone()) {
Err(err) => {
return BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: None,
};
}
Ok(tx) => tx,
};
if let TransactionSimulationResult {
result: Err(err),
logs,
post_simulation_accounts: _,
units_consumed,
} = self
.bank(commitment)
.simulate_transaction_unchecked(sanitized_transaction)
{
return BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: Some(TransactionSimulationDetails {
logs,
units_consumed,
}),
};
}
BanksTransactionResultWithSimulation {
result: self
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.await,
simulation_details: None,
}
}
async fn process_transaction_with_commitment_and_context(
self,
_: Context,
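For illustration, a minimal sketch of how a caller might unpack the BanksTransactionResultWithSimulation returned by the new preflight method. The helper name report_preflight and the log formatting are invented; only the struct and its result/simulation_details fields come from the interface used above, and those fields are assumed to be public.

use solana_banks_interface::BanksTransactionResultWithSimulation;

// Hypothetical helper: summarize a preflight outcome for the user.
fn report_preflight(outcome: BanksTransactionResultWithSimulation) {
    let BanksTransactionResultWithSimulation {
        result,
        simulation_details,
    } = outcome;
    // In the server code above, simulation_details is only populated when the
    // transaction sanitized cleanly but the unchecked simulation failed.
    if let Some(details) = simulation_details {
        eprintln!("simulation consumed {} compute units", details.units_consumed);
        for log in details.logs {
            eprintln!("  {}", log);
        }
    }
    match result {
        Some(Err(err)) => eprintln!("preflight rejected the transaction: {:?}", err),
        Some(Ok(())) => println!("transaction processed"),
        None => println!("no status available at the requested commitment"),
    }
}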

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-streamer"
version = "1.9.0"
version = "1.9.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -10,11 +10,11 @@ publish = false
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.17" }
solana-streamer = { path = "../streamer", version = "=1.9.17" }
solana-logger = { path = "../logger", version = "=1.9.17" }
solana-net-utils = { path = "../net-utils", version = "=1.9.17" }
solana-version = { path = "../version", version = "=1.9.17" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -2,8 +2,8 @@
use {
clap::{crate_description, crate_name, App, Arg},
solana_streamer::{
packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketReceiver},
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketBatchReceiver},
},
std::{
cmp::max,
@ -20,19 +20,19 @@ use {
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut msgs = Packets::default();
msgs.packets.resize(10, Packet::default());
for w in msgs.packets.iter_mut() {
let mut packet_batch = PacketBatch::default();
packet_batch.packets.resize(10, Packet::default());
for w in packet_batch.packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(addr);
}
let msgs = Arc::new(msgs);
let packet_batch = Arc::new(packet_batch);
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let mut num = 0;
for p in &msgs.packets {
for p in &packet_batch.packets {
let a = p.meta.addr();
assert!(p.meta.size <= PACKET_DATA_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
@ -42,14 +42,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
})
}
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let timer = Duration::new(1, 0);
if let Ok(msgs) = r.recv_timeout(timer) {
rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
if let Ok(packet_batch) = r.recv_timeout(timer) {
rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
}
})
}
@ -81,7 +81,7 @@ fn main() -> Result<()> {
let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
for _ in 0..num_sockets {
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
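As a small standalone sketch of the renamed PacketBatch API exercised by the producer above; the address and batch size are arbitrary, and only the types and methods shown in the diff are assumed.

use {
    solana_streamer::packet::{Packet, PacketBatch, PACKET_DATA_SIZE},
    std::net::SocketAddr,
};

fn main() {
    let addr: SocketAddr = "127.0.0.1:8001".parse().unwrap();
    // Build a batch of ten maximum-size packets addressed to one peer,
    // mirroring what producer() does before looping over send_to().
    let mut packet_batch = PacketBatch::default();
    packet_batch.packets.resize(10, Packet::default());
    for p in packet_batch.packets.iter_mut() {
        p.meta.size = PACKET_DATA_SIZE;
        p.meta.set_addr(&addr);
    }
    assert_eq!(packet_batch.packets.len(), 10);
}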

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-tps"
version = "1.9.0"
version = "1.9.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -14,23 +14,23 @@ log = "0.4.14"
rayon = "1.5.1"
serde_json = "1.0.72"
serde_yaml = "0.8.21"
solana-core = { path = "../core", version = "=1.9.0" }
solana-genesis = { path = "../genesis", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.17" }
solana-genesis = { path = "../genesis", version = "=1.9.17" }
solana-client = { path = "../client", version = "=1.9.17" }
solana-faucet = { path = "../faucet", version = "=1.9.17" }
solana-gossip = { path = "../gossip", version = "=1.9.17" }
solana-logger = { path = "../logger", version = "=1.9.17" }
solana-metrics = { path = "../metrics", version = "=1.9.17" }
solana-measure = { path = "../measure", version = "=1.9.17" }
solana-net-utils = { path = "../net-utils", version = "=1.9.17" }
solana-runtime = { path = "../runtime", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-streamer = { path = "../streamer", version = "=1.9.17" }
solana-version = { path = "../version", version = "=1.9.17" }
[dev-dependencies]
serial_test = "0.5.1"
solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.17" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -21,7 +21,7 @@ pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
fn main() {
solana_logger::setup_with_default("solana=info");
solana_metrics::set_panic_hook("bench-tps");
solana_metrics::set_panic_hook("bench-tps", /*version:*/ None);
let matches = cli::build_args(solana_version::version!()).get_matches();
let cli_config = cli::extract_args(&matches);

View File

@ -31,7 +31,7 @@ fn test_bench_tps_local_cluster(config: Config) {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default(),
&ValidatorConfig::default_for_test(),
NUM_NODES,
),
native_instruction_processors,

32
bloom/Cargo.toml Normal file
View File

@ -0,0 +1,32 @@
[package]
name = "solana-bloom"
version = "1.9.17"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bloom"
edition = "2021"
[dependencies]
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
rand = "0.7.0"
serde = { version = "1.0.133", features = ["rc"] }
rayon = "1.5.1"
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.17" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
log = "0.4.14"
[lib]
crate-type = ["lib"]
name = "solana_bloom"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[build-dependencies]
rustc_version = "0.4"

View File

@ -5,7 +5,7 @@ use {
bv::BitVec,
fnv::FnvHasher,
rand::Rng,
solana_runtime::bloom::{AtomicBloom, Bloom, BloomHashIndex},
solana_bloom::bloom::{AtomicBloom, Bloom, BloomHashIndex},
solana_sdk::{
hash::{hash, Hash},
signature::Signature,

1
bloom/build.rs Symbolic link
View File

@ -0,0 +1 @@
../frozen-abi/build.rs

View File

@ -101,7 +101,7 @@ impl<T: BloomHashIndex> Bloom<T> {
}
}
fn pos(&self, key: &T, k: u64) -> u64 {
key.hash_at_index(k) % self.bits.len()
key.hash_at_index(k).wrapping_rem(self.bits.len())
}
pub fn clear(&mut self) {
self.bits = BitVec::new_fill(false, self.bits.len());
@ -111,7 +111,7 @@ impl<T: BloomHashIndex> Bloom<T> {
for k in &self.keys {
let pos = self.pos(key, *k);
if !self.bits.get(pos) {
self.num_bits_set += 1;
self.num_bits_set = self.num_bits_set.saturating_add(1);
self.bits.set(pos, true);
}
}
@ -164,21 +164,26 @@ impl<T: BloomHashIndex> From<Bloom<T>> for AtomicBloom<T> {
impl<T: BloomHashIndex> AtomicBloom<T> {
fn pos(&self, key: &T, hash_index: u64) -> (usize, u64) {
let pos = key.hash_at_index(hash_index) % self.num_bits;
let pos = key.hash_at_index(hash_index).wrapping_rem(self.num_bits);
// Divide by 64 to figure out which of the
// AtomicU64 bit chunks we need to modify.
let index = pos >> 6;
let index = pos.wrapping_shr(6);
// (pos & 63) is equivalent to mod 64 so that we can find
// the index of the bit within the AtomicU64 to modify.
let mask = 1u64 << (pos & 63);
let mask = 1u64.wrapping_shl(u32::try_from(pos & 63).unwrap());
(index as usize, mask)
}
pub fn add(&self, key: &T) {
/// Adds an item to the bloom filter and returns true if the item
/// was not in the filter before.
pub fn add(&self, key: &T) -> bool {
let mut added = false;
for k in &self.keys {
let (index, mask) = self.pos(key, *k);
self.bits[index].fetch_or(mask, Ordering::Relaxed);
let prev_val = self.bits[index].fetch_or(mask, Ordering::Relaxed);
added = added || prev_val & mask == 0u64;
}
added
}
pub fn contains(&self, key: &T) -> bool {
@ -189,6 +194,12 @@ impl<T: BloomHashIndex> AtomicBloom<T> {
})
}
pub fn clear_for_tests(&mut self) {
self.bits.iter().for_each(|bit| {
bit.store(0u64, Ordering::Relaxed);
});
}
// Only for tests and simulations.
pub fn mock_clone(&self) -> Self {
Self {
@ -320,7 +331,9 @@ mod test {
assert_eq!(bloom.keys.len(), 3);
assert_eq!(bloom.num_bits, 6168);
assert_eq!(bloom.bits.len(), 97);
hash_values.par_iter().for_each(|v| bloom.add(v));
hash_values.par_iter().for_each(|v| {
bloom.add(v);
});
let bloom: Bloom<Hash> = bloom.into();
assert_eq!(bloom.keys.len(), 3);
assert_eq!(bloom.bits.len(), 6168);
@ -362,7 +375,9 @@ mod test {
}
// Round trip, re-inserting the same hash values.
let bloom: AtomicBloom<_> = bloom.into();
hash_values.par_iter().for_each(|v| bloom.add(v));
hash_values.par_iter().for_each(|v| {
bloom.add(v);
});
for hash_value in &hash_values {
assert!(bloom.contains(hash_value));
}
@ -380,7 +395,9 @@ mod test {
let bloom: AtomicBloom<_> = bloom.into();
assert_eq!(bloom.num_bits, 9731);
assert_eq!(bloom.bits.len(), (9731 + 63) / 64);
more_hash_values.par_iter().for_each(|v| bloom.add(v));
more_hash_values.par_iter().for_each(|v| {
bloom.add(v);
});
for hash_value in &hash_values {
assert!(bloom.contains(hash_value));
}
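A standalone re-derivation of the word/bit arithmetic AtomicBloom::pos now performs with wrapping operators; the function below is illustrative rather than part of the crate's API, but the math matches the diff above.

// Map a hash value onto (AtomicU64 word index, bit mask within that word).
fn word_and_mask(hash_at_index: u64, num_bits: u64) -> (usize, u64) {
    let pos = hash_at_index.wrapping_rem(num_bits); // bit position in [0, num_bits)
    let index = pos.wrapping_shr(6) as usize; // pos / 64 selects the AtomicU64
    let mask = 1u64.wrapping_shl((pos & 63) as u32); // pos % 64 selects the bit
    (index, mask)
}

fn main() {
    let (index, mask) = word_and_mask(12_345, 6_168);
    // fetch_or(mask) returns the previous word value; if `prev & mask == 0`
    // the bit was newly set, which is how add() now reports insertion.
    println!("word {}, mask {:#018x}", index, mask);
}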

5
bloom/src/lib.rs Normal file
View File

@ -0,0 +1,5 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
pub mod bloom;
#[macro_use]
extern crate solana_frozen_abi_macro;

View File

@ -1,6 +1,6 @@
[package]
name = "solana-bucket-map"
version = "1.9.0"
version = "1.9.17"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
@ -12,11 +12,11 @@ edition = "2021"
[dependencies]
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
memmap2 = "0.5.0"
log = { version = "0.4.11" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.17" }
rand = "0.7.0"
fs_extra = "1.2.0"
tempfile = "3.2.0"

View File

@ -9,5 +9,8 @@ for a in "$@"; do
fi
done
set -x
set -ex
if [[ ! -f sdk/bpf/syscalls.txt ]]; then
"$here"/cargo build --manifest-path "$here"/programs/bpf_loader/gen-syscall-list/Cargo.toml
fi
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"

View File

@ -102,6 +102,8 @@ command_step() {
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
agents:
- "queue=solana"
EOF
}
@ -168,7 +170,7 @@ all_test_steps() {
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=default"
- "queue=solana"
EOF
else
annotate --style info \
@ -221,11 +223,26 @@ EOF
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
agents:
- "queue=solana"
EOF
else
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi
# Wasm support
if affects \
^ci/test-wasm.sh \
^ci/test-stable.sh \
^sdk/ \
; then
command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
else
annotate --style info \
"wasm skipped as no relevant files were modified"
fi
# Benches...
if affects \
.rs$ \
@ -243,7 +260,15 @@ EOF
command_step "local-cluster" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
50
40
command_step "local-cluster-flakey" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
10
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
}
pull_or_push_steps() {

View File

@ -16,6 +16,11 @@ steps:
- command: "ci/publish-crate.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 240
timeout_in_minutes: 360
name: "publish crate"
branches: "!master"
- command: "ci/publish-tarball.sh"
agents:
- "queue=release-build-aarch64-apple-darwin"
timeout_in_minutes: 60
name: "publish tarball (aarch64-apple-darwin)"

View File

@ -1,4 +1,4 @@
FROM solanalabs/rust:1.56.1
FROM solanalabs/rust:1.57.0
ARG date
RUN set -x \

View File

@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.56.1
FROM rust:1.57.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
@ -11,6 +11,7 @@ RUN set -x \
&& apt-get install apt-transport-https \
&& echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \
&& apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \
&& curl -fsSL https://deb.nodesource.com/setup_current.x | bash - \
&& apt update \
&& apt install -y \
buildkite-agent \
@ -19,15 +20,20 @@ RUN set -x \
lcov \
libudev-dev \
mscgen \
nodejs \
net-tools \
rsync \
sudo \
golang \
unzip \
\
&& apt remove -y libcurl4-openssl-dev \
&& rm -rf /var/lib/apt/lists/* \
&& node --version \
&& npm --version \
&& rustup component add rustfmt \
&& rustup component add clippy \
&& rustup target add wasm32-unknown-unknown \
&& cargo install cargo-audit \
&& cargo install mdbook \
&& cargo install mdbook-linkcheck \

View File

@ -23,6 +23,9 @@ if [[ -n $CI ]]; then
elif [[ -n $BUILDKITE ]]; then
export CI_BRANCH=$BUILDKITE_BRANCH
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
if [[ $BUILDKITE_COMMIT = HEAD ]]; then
BUILDKITE_COMMIT="$(git rev-parse HEAD)"
fi
export CI_COMMIT=$BUILDKITE_COMMIT
export CI_JOB_ID=$BUILDKITE_JOB_ID
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
@ -35,7 +38,18 @@ if [[ -n $CI ]]; then
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
export CI_PULL_REQUEST=
fi
export CI_OS_NAME=linux
case "$(uname -s)" in
Linux)
export CI_OS_NAME=linux
;;
Darwin)
export CI_OS_NAME=osx
;;
*)
;;
esac
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
# The solana-secondary pipeline should use the slug of the pipeline that
# triggered it

View File

@ -70,7 +70,7 @@ for Cargo_toml in $Cargo_tomls; do
rm -rf crate-test
"$cargo" stable init crate-test
cd crate-test/
echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
echo "${crate_name} = \"=${expectedCrateVersion}\"" >> Cargo.toml
echo "[workspace]" >> Cargo.toml
"$cargo" stable check
) && really_uploaded=1

View File

@ -39,7 +39,11 @@ fi
case "$CI_OS_NAME" in
osx)
TARGET=x86_64-apple-darwin
_cputype="$(uname -m)"
if [[ $_cputype = arm64 ]]; then
_cputype=aarch64
fi
TARGET=${_cputype}-apple-darwin
;;
linux)
TARGET=x86_64-unknown-linux-gnu
@ -146,7 +150,7 @@ elif [[ -n $BUILDKITE ]]; then
cat > release.solana.com-install <<EOF
SOLANA_RELEASE=$CHANNEL_OR_TAG
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
SOLANA_DOWNLOAD_ROOT=https://release.solana.com
EOF
cat install/solana-install-init.sh >> release.solana.com-install

View File

@ -27,6 +27,8 @@ steps+=(test-stable-perf)
steps+=(test-downstream-builds)
steps+=(test-bench)
steps+=(test-local-cluster)
steps+=(test-local-cluster-flakey)
steps+=(test-local-cluster-slow)
step_index=0
if [[ -n "$1" ]]; then

View File

@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.56.1
stable_version=1.57.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2021-11-30
nightly_version=2021-12-03
fi

24
ci/sbf-tools-info.sh Executable file
View File

@ -0,0 +1,24 @@
#!/usr/bin/env bash
#
# Finds the version of sbf-tools used by this source tree.
#
# stdout of this script may be eval-ed.
#
here="$(dirname "$0")"
SBF_TOOLS_VERSION=unknown
cargo_build_bpf_main="${here}/../sdk/cargo-build-bpf/src/main.rs"
if [[ -f "${cargo_build_bpf_main}" ]]; then
version=$(sed -e 's/^.*bpf_tools_version\s*=\s*"\(v[0-9.]\+\)".*/\1/;t;d' "${cargo_build_bpf_main}")
if [[ ${version} != '' ]]; then
SBF_TOOLS_VERSION="${version}"
else
echo '--- unable to parse SBF_TOOLS_VERSION'
fi
else
echo "--- '${cargo_build_bpf_main}' not present"
fi
echo SBF_TOOLS_VERSION="${SBF_TOOLS_VERSION}"

View File

@ -0,0 +1 @@
test-stable.sh

View File

@ -0,0 +1 @@
test-stable.sh

View File

@ -100,7 +100,30 @@ test-stable-perf)
;;
test-local-cluster)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-flakey)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-slow)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-wasm)
_ node --version
_ npm --version
for dir in sdk/{program,}; do
if [[ -r "$dir"/package.json ]]; then
pushd "$dir"
_ npm install
_ npm test
popd
fi
done
exit 0
;;
*)

1
ci/test-wasm.sh Symbolic link
View File

@ -0,0 +1 @@
test-stable.sh

View File

@ -19,13 +19,24 @@ upload-ci-artifact() {
upload-s3-artifact() {
echo "--- artifact: $1 to $2"
(
set -x
docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
args=(
--rm
--env AWS_ACCESS_KEY_ID
--env AWS_SECRET_ACCESS_KEY
--volume "$PWD:/solana"
)
if [[ $(uname -m) = arm64 ]]; then
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
args+=(
--platform linux/amd64
)
fi
args+=(
eremite/aws-cli:2018.12.18
/usr/bin/s3cmd --acl-public put "$1" "$2"
)
set -x
docker run "${args[@]}"
)
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.9.0"
version = "1.9.17"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -12,9 +12,9 @@ edition = "2021"
[dependencies]
clap = "2.33.0"
rpassword = "5.0"
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.17" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
uriparse = "0.6.3"

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -17,14 +17,15 @@ console = "0.15.0"
humantime = "2.0.1"
Inflector = "0.11.4"
indicatif = "0.16.2"
semver = "1.0.6"
serde = "1.0.130"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.17" }
solana-client = { path = "../client", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.17" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.17" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
[package.metadata.docs.rs]

View File

@ -46,6 +46,8 @@ use {
},
};
static CHECK_MARK: Emoji = Emoji("✅ ", "");
static CROSS_MARK: Emoji = Emoji("❌ ", "");
static WARNING: Emoji = Emoji("⚠️", "!");
#[derive(PartialEq, Debug)]
@ -99,7 +101,7 @@ impl OutputFormat {
pub struct CliAccount {
#[serde(flatten)]
pub keyed_account: RpcKeyedAccount,
#[serde(skip_serializing)]
#[serde(skip_serializing, skip_deserializing)]
pub use_lamports_unit: bool,
}
@ -354,6 +356,7 @@ pub enum CliValidatorsSortOrder {
SkipRate,
Stake,
VoteAccount,
Version,
}
#[derive(Serialize, Deserialize)]
@ -391,19 +394,19 @@ impl fmt::Display for CliValidators {
) -> fmt::Result {
fn non_zero_or_dash(v: u64, max_v: u64) -> String {
if v == 0 {
"- ".into()
" - ".into()
} else if v == max_v {
format!("{:>8} ( 0)", v)
format!("{:>9} ( 0)", v)
} else if v > max_v.saturating_sub(100) {
format!("{:>8} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
format!("{:>9} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
} else {
format!("{:>8} ", v)
format!("{:>9} ", v)
}
}
writeln!(
f,
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {}",
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {:>22} ({:.2}%)",
if validator.delinquent {
WARNING.to_string()
} else {
@ -417,19 +420,19 @@ impl fmt::Display for CliValidators {
if let Some(skip_rate) = validator.skip_rate {
format!("{:.2}%", skip_rate)
} else {
"- ".to_string()
"- ".to_string()
},
validator.epoch_credits,
validator.version,
if validator.activated_stake > 0 {
format!(
"{} ({:.2}%)",
build_balance_message(validator.activated_stake, use_lamports_unit, true),
100. * validator.activated_stake as f64 / total_active_stake as f64,
)
} else {
"-".into()
},
build_balance_message_with_config(
validator.activated_stake,
&BuildBalanceMessageConfig {
use_lamports_unit,
trim_trailing_zeros: false,
..BuildBalanceMessageConfig::default()
}
),
100. * validator.activated_stake as f64 / total_active_stake as f64,
)
}
@ -439,13 +442,13 @@ impl fmt::Display for CliValidators {
0
};
let header = style(format!(
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
" ",
"Identity",
"Vote Account",
"Commission",
"Last Vote ",
"Root Slot ",
"Last Vote ",
"Root Slot ",
"Skip Rate",
"Credits",
"Version",
@ -492,6 +495,22 @@ impl fmt::Display for CliValidators {
CliValidatorsSortOrder::Stake => {
sorted_validators.sort_by_key(|a| a.activated_stake);
}
CliValidatorsSortOrder::Version => {
sorted_validators.sort_by(|a, b| {
use std::cmp::Ordering;
let a_version = semver::Version::parse(a.version.as_str()).ok();
let b_version = semver::Version::parse(b.version.as_str()).ok();
match (a_version, b_version) {
(None, None) => a.version.cmp(&b.version),
(None, Some(_)) => Ordering::Less,
(Some(_), None) => Ordering::Greater,
(Some(va), Some(vb)) => match va.cmp(&vb) {
Ordering::Equal => a.activated_stake.cmp(&b.activated_stake),
ordering => ordering,
},
}
});
}
}
if self.validators_reverse_sort {
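A self-contained sketch of the version ordering the new CliValidatorsSortOrder::Version arm applies. It relies on the semver crate added to cli-output above; the stake tie-break from the real code is omitted and the sample strings are invented.

use std::cmp::Ordering;

// Unparseable versions sort before parseable ones; otherwise order by semver.
fn compare_versions(a: &str, b: &str) -> Ordering {
    let va = semver::Version::parse(a).ok();
    let vb = semver::Version::parse(b).ok();
    match (va, vb) {
        (None, None) => a.cmp(b),
        (None, Some(_)) => Ordering::Less,
        (Some(_), None) => Ordering::Greater,
        (Some(va), Some(vb)) => va.cmp(&vb),
    }
}

fn main() {
    let mut versions = vec!["1.9.17", "unknown", "1.10.0", "1.9.5"];
    versions.sort_by(|a, b| compare_versions(a, b));
    assert_eq!(versions, ["unknown", "1.9.5", "1.9.17", "1.10.0"]);
}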
@ -2523,6 +2542,172 @@ impl fmt::Display for CliGossipNodes {
impl QuietDisplay for CliGossipNodes {}
impl VerboseDisplay for CliGossipNodes {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliPing {
pub source_pubkey: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub fixed_blockhash: Option<String>,
#[serde(skip_serializing)]
pub blockhash_from_cluster: bool,
pub pings: Vec<CliPingData>,
pub transaction_stats: CliPingTxStats,
#[serde(skip_serializing_if = "Option::is_none")]
pub confirmation_stats: Option<CliPingConfirmationStats>,
}
impl fmt::Display for CliPing {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Source Account:", &self.source_pubkey)?;
if let Some(fixed_blockhash) = &self.fixed_blockhash {
let blockhash_origin = if self.blockhash_from_cluster {
"fetched from cluster"
} else {
"supplied from cli arguments"
};
writeln!(
f,
"Fixed blockhash is used: {} ({})",
fixed_blockhash, blockhash_origin
)?;
}
writeln!(f)?;
for ping in &self.pings {
write!(f, "{}", ping)?;
}
writeln!(f)?;
writeln!(f, "--- transaction statistics ---")?;
write!(f, "{}", self.transaction_stats)?;
if let Some(confirmation_stats) = &self.confirmation_stats {
write!(f, "{}", confirmation_stats)?;
}
Ok(())
}
}
impl QuietDisplay for CliPing {}
impl VerboseDisplay for CliPing {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliPingData {
pub success: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub signature: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ms: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
#[serde(skip_serializing)]
pub print_timestamp: bool,
pub timestamp: String,
pub sequence: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub lamports: Option<u64>,
}
impl fmt::Display for CliPingData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let (mark, msg) = if let Some(signature) = &self.signature {
if self.success {
(
CHECK_MARK,
format!(
"{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
self.lamports.unwrap(),
self.sequence,
self.ms.unwrap(),
signature
),
)
} else if let Some(error) = &self.error {
(
CROSS_MARK,
format!(
"Transaction failed: seq={:<3} error={:?} signature={}",
self.sequence, error, signature
),
)
} else {
(
CROSS_MARK,
format!(
"Confirmation timeout: seq={:<3} signature={}",
self.sequence, signature
),
)
}
} else {
(
CROSS_MARK,
format!(
"Submit failed: seq={:<3} error={:?}",
self.sequence,
self.error.as_ref().unwrap(),
),
)
};
writeln!(
f,
"{}{}{}",
if self.print_timestamp {
&self.timestamp
} else {
""
},
mark,
msg
)
}
}
impl QuietDisplay for CliPingData {}
impl VerboseDisplay for CliPingData {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliPingTxStats {
pub num_transactions: u32,
pub num_transaction_confirmed: u32,
}
impl fmt::Display for CliPingTxStats {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"{} transactions submitted, {} transactions confirmed, {:.1}% transaction loss",
self.num_transactions,
self.num_transaction_confirmed,
(100.
- f64::from(self.num_transaction_confirmed) / f64::from(self.num_transactions)
* 100.)
)
}
}
impl QuietDisplay for CliPingTxStats {}
impl VerboseDisplay for CliPingTxStats {}
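A quick usage sketch of the new ping statistics type; it assumes CliPingTxStats is re-exported from the solana-cli-output crate root like the other Cli* types, and the numbers are invented.

use solana_cli_output::CliPingTxStats;

fn main() {
    let stats = CliPingTxStats {
        num_transactions: 10,
        num_transaction_confirmed: 9,
    };
    // Display should yield:
    // "10 transactions submitted, 9 transactions confirmed, 10.0% transaction loss"
    print!("{}", stats);
}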
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliPingConfirmationStats {
pub min: f64,
pub mean: f64,
pub max: f64,
pub std_dev: f64,
}
impl fmt::Display for CliPingConfirmationStats {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"confirmation min/mean/max/stddev = {:.0}/{:.0}/{:.0}/{:.0} ms",
self.min, self.mean, self.max, self.std_dev,
)
}
}
impl QuietDisplay for CliPingConfirmationStats {}
impl VerboseDisplay for CliPingConfirmationStats {}
#[cfg(test)]
mod tests {
use {

View File

@ -139,7 +139,7 @@ fn format_account_mode(message: &Message, index: usize) -> String {
} else {
"-"
},
if message.is_writable(index, /*demote_program_write_locks=*/ true) {
if message.is_writable(index) {
"w" // comment for consistent rust fmt (no joking; lol)
} else {
"-"

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.17"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -26,29 +26,29 @@ semver = "1.0.4"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-cli-config = { path = "../cli-config", version = "=1.9.0" }
solana-cli-output = { path = "../cli-output", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-config-program = { path = "../programs/config", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" }
solana_rbpf = "=0.2.16"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.17" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.17" }
solana-cli-config = { path = "../cli-config", version = "=1.9.17" }
solana-cli-output = { path = "../cli-output", version = "=1.9.17" }
solana-client = { path = "../client", version = "=1.9.17" }
solana-config-program = { path = "../programs/config", version = "=1.9.17" }
solana-faucet = { path = "../faucet", version = "=1.9.17" }
solana-logger = { path = "../logger", version = "=1.9.17" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.17" }
solana_rbpf = "=0.2.24"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.17" }
solana-version = { path = "../version", version = "=1.9.17" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.17" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
[dev-dependencies]
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-test-validator = { path = "../test-validator", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.17" }
solana-test-validator = { path = "../test-validator", version = "=1.9.17" }
tempfile = "3.2.0"
[[bin]]

View File

@ -98,10 +98,7 @@ pub fn get_fee_for_messages(
) -> Result<u64, CliError> {
Ok(messages
.iter()
.map(|message| {
println!("msg {:?}", message.recent_blockhash);
rpc_client.get_fee_for_message(message)
})
.map(|message| rpc_client.get_fee_for_message(message))
.collect::<Result<Vec<_>, _>>()?
.iter()
.sum())

View File

@ -83,7 +83,6 @@ pub enum CliCommand {
filter: RpcTransactionLogsFilter,
},
Ping {
lamports: u64,
interval: Duration,
count: Option<u64>,
timeout: Duration,
@ -162,6 +161,7 @@ pub enum CliCommand {
address: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
skip_fee_check: bool,
},
Program(ProgramCliCommand),
// Stake Commands
@ -298,7 +298,13 @@ pub enum CliCommand {
authorized_voter: Option<Pubkey>,
authorized_withdrawer: Pubkey,
commission: u8,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
ShowVoteAccount {
pubkey: Pubkey,
@ -310,19 +316,32 @@ pub enum CliCommand {
destination_account_pubkey: Pubkey,
withdraw_authority: SignerIndex,
withdraw_amount: SpendAmount,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
CloseVoteAccount {
vote_account_pubkey: Pubkey,
destination_account_pubkey: Pubkey,
withdraw_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
VoteAuthorize {
vote_account_pubkey: Pubkey,
new_authorized_pubkey: Pubkey,
vote_authorize: VoteAuthorize,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
authorized: SignerIndex,
new_authorized: Option<SignerIndex>,
},
@ -330,13 +349,25 @@ pub enum CliCommand {
vote_account_pubkey: Pubkey,
new_identity_account: SignerIndex,
withdraw_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
VoteUpdateCommission {
vote_account_pubkey: Pubkey,
commission: u8,
withdraw_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
// Wallet Commands
Address,
@ -713,6 +744,7 @@ pub fn parse_command(
signers.push(signer);
1
});
let skip_fee_check = matches.is_present("skip_fee_check");
Ok(CliCommandInfo {
command: CliCommand::Deploy {
@ -720,6 +752,7 @@ pub fn parse_command(
address,
use_deprecated_loader: matches.is_present("use_deprecated_loader"),
allow_excessive_balance: matches.is_present("allow_excessive_balance"),
skip_fee_check,
},
signers,
})
@ -942,7 +975,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::LiveSlots => process_live_slots(config),
CliCommand::Logs { filter } => process_logs(config, filter),
CliCommand::Ping {
lamports,
interval,
count,
timeout,
@ -951,7 +983,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_ping(
&rpc_client,
config,
*lamports,
interval,
count,
timeout,
@ -1098,6 +1129,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
address,
use_deprecated_loader,
allow_excessive_balance,
skip_fee_check,
} => process_deploy(
rpc_client,
config,
@ -1105,6 +1137,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*address,
*use_deprecated_loader,
*allow_excessive_balance,
*skip_fee_check,
),
CliCommand::Program(program_subcommand) => {
process_program_subcommand(rpc_client, config, program_subcommand)
@ -1384,7 +1417,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
authorized_voter,
authorized_withdrawer,
commission,
sign_only,
dump_transaction_message,
blockhash_query,
ref nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_create_vote_account(
&rpc_client,
config,
@ -1394,7 +1433,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
authorized_voter,
*authorized_withdrawer,
*commission,
*sign_only,
*dump_transaction_message,
blockhash_query,
nonce_account.as_ref(),
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::ShowVoteAccount {
pubkey: vote_account_pubkey,
@ -1412,7 +1457,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
withdraw_authority,
withdraw_amount,
destination_account_pubkey,
sign_only,
dump_transaction_message,
blockhash_query,
ref nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_withdraw_from_vote_account(
&rpc_client,
config,
@ -1420,13 +1471,20 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*withdraw_authority,
*withdraw_amount,
destination_account_pubkey,
*sign_only,
*dump_transaction_message,
blockhash_query,
nonce_account.as_ref(),
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::CloseVoteAccount {
vote_account_pubkey,
withdraw_authority,
destination_account_pubkey,
memo,
fee_payer,
} => process_close_vote_account(
&rpc_client,
config,
@ -1434,12 +1492,19 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*withdraw_authority,
destination_account_pubkey,
memo.as_ref(),
*fee_payer,
),
CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey,
vote_authorize,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
memo,
fee_payer,
authorized,
new_authorized,
} => process_vote_authorize(
@ -1450,33 +1515,63 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*vote_authorize,
*authorized,
*new_authorized,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account,
withdraw_authority,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_vote_update_validator(
&rpc_client,
config,
vote_account_pubkey,
*new_identity_account,
*withdraw_authority,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::VoteUpdateCommission {
vote_account_pubkey,
commission,
withdraw_authority,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_vote_update_commission(
&rpc_client,
config,
vote_account_pubkey,
*commission,
*withdraw_authority,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
// Wallet Commands
@ -1874,6 +1969,7 @@ mod tests {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
},
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@ -1896,6 +1992,7 @@ mod tests {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
},
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@ -1975,7 +2072,13 @@ mod tests {
authorized_voter: Some(bob_pubkey),
authorized_withdrawer: bob_pubkey,
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
let result = process_command(&config);
@ -2006,7 +2109,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_authorized_pubkey,
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
@ -2019,7 +2128,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
let result = process_command(&config);
assert!(result.is_ok());
@ -2195,7 +2310,13 @@ mod tests {
authorized_voter: Some(bob_pubkey),
authorized_withdrawer: bob_pubkey,
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
assert!(process_command(&config).is_err());
@ -2204,7 +2325,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_authorized_pubkey: bob_pubkey,
vote_authorize: VoteAuthorize::Voter,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
@ -2214,7 +2341,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_identity_account: 1,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
assert!(process_command(&config).is_err());
@ -2253,6 +2386,7 @@ mod tests {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
config.output_format = OutputFormat::JsonCompact;
let result = process_command(&config);
@ -2273,6 +2407,7 @@ mod tests {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
assert!(process_command(&config).is_err());
}

View File

@ -4,7 +4,7 @@ use {
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
},
clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand},
console::{style, Emoji},
console::style,
serde::{Deserialize, Serialize},
solana_clap_utils::{
input_parsers::*,
@ -15,7 +15,7 @@ use {
solana_cli_output::{
display::{
build_balance_message, format_labeled_address, new_spinner_progress_bar,
println_name_value, println_transaction, unix_timestamp_to_string, writeln_name_value,
println_transaction, unix_timestamp_to_string, writeln_name_value,
},
*,
},
@ -43,13 +43,13 @@ use {
message::Message,
native_token::lamports_to_sol,
nonce::State as NonceState,
pubkey::{self, Pubkey},
pubkey::Pubkey,
rent::Rent,
rpc_port::DEFAULT_RPC_PORT_STR,
signature::Signature,
slot_history,
stake::{self, state::StakeState},
system_instruction, system_program,
system_instruction,
sysvar::{
self,
slot_history::SlotHistory,
@ -74,9 +74,6 @@ use {
thiserror::Error,
};
static CHECK_MARK: Emoji = Emoji("✅ ", "");
static CROSS_MARK: Emoji = Emoji("❌ ", "");
pub trait ClusterQuerySubCommands {
fn cluster_query_subcommands(self) -> Self;
}
@ -262,15 +259,6 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.takes_value(false)
.help("Print timestamp (unix time + microseconds as in gettimeofday) before each line"),
)
.arg(
Arg::with_name("lamports")
.long("lamports")
.value_name("NUMBER")
.takes_value(true)
.default_value("1")
.validator(is_amount)
.help("Number of lamports to transfer for each transaction"),
)
.arg(
Arg::with_name("timeout")
.short("t")
@ -381,6 +369,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
"root",
"skip-rate",
"stake",
"version",
"vote-account",
])
.default_value("stake")
@ -515,7 +504,6 @@ pub fn parse_cluster_ping(
default_signer: &DefaultSigner,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let lamports = value_t_or_exit!(matches, "lamports", u64);
let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64));
let count = if matches.is_present("count") {
Some(value_t_or_exit!(matches, "count", u64))
@ -527,7 +515,6 @@ pub fn parse_cluster_ping(
let print_timestamp = matches.is_present("print_timestamp");
Ok(CliCommandInfo {
command: CliCommand::Ping {
lamports,
interval,
count,
timeout,
@ -652,6 +639,7 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
"skip-rate" => CliValidatorsSortOrder::SkipRate,
"stake" => CliValidatorsSortOrder::Stake,
"vote-account" => CliValidatorsSortOrder::VoteAccount,
"version" => CliValidatorsSortOrder::Version,
_ => unreachable!(),
};
@ -1358,40 +1346,34 @@ pub fn process_get_transaction_count(rpc_client: &RpcClient, _config: &CliConfig
pub fn process_ping(
rpc_client: &RpcClient,
config: &CliConfig,
lamports: u64,
interval: &Duration,
count: &Option<u64>,
timeout: &Duration,
fixed_blockhash: &Option<Hash>,
print_timestamp: bool,
) -> ProcessResult {
println_name_value("Source Account:", &config.signers[0].pubkey().to_string());
println!();
let (signal_sender, signal_receiver) = std::sync::mpsc::channel();
ctrlc::set_handler(move || {
let _ = signal_sender.send(());
})
.expect("Error setting Ctrl-C handler");
let mut cli_pings = vec![];
let mut submit_count = 0;
let mut confirmed_count = 0;
let mut confirmation_time: VecDeque<u64> = VecDeque::with_capacity(1024);
let mut blockhash = rpc_client.get_latest_blockhash()?;
let mut blockhash_transaction_count = 0;
let mut lamports = 0;
let mut blockhash_acquired = Instant::now();
let mut blockhash_from_cluster = false;
if let Some(fixed_blockhash) = fixed_blockhash {
let blockhash_origin = if *fixed_blockhash != Hash::default() {
if *fixed_blockhash != Hash::default() {
blockhash = *fixed_blockhash;
"supplied from cli arguments"
} else {
"fetched from cluster"
};
println!(
"Fixed blockhash is used: {} ({})",
blockhash, blockhash_origin
);
blockhash_from_cluster = true;
}
}
'mainloop: for seq in 0..count.unwrap_or(std::u64::MAX) {
let now = Instant::now();
@ -1399,15 +1381,12 @@ pub fn process_ping(
// Fetch a new blockhash every minute
let new_blockhash = rpc_client.get_new_latest_blockhash(&blockhash)?;
blockhash = new_blockhash;
blockhash_transaction_count = 0;
lamports = 0;
blockhash_acquired = Instant::now();
}
let seed =
&format!("{}{}", blockhash_transaction_count, blockhash)[0..pubkey::MAX_SEED_LEN];
let to = Pubkey::create_with_seed(&config.signers[0].pubkey(), seed, &system_program::id())
.unwrap();
blockhash_transaction_count += 1;
let to = config.signers[0].pubkey();
lamports += 1;
let build_message = |lamports| {
let ix = system_instruction::transfer(&config.signers[0].pubkey(), &to, lamports);
@ -1430,11 +1409,7 @@ pub fn process_ping(
.duration_since(UNIX_EPOCH)
.unwrap()
.as_micros();
if print_timestamp {
format!("[{}.{:06}] ", micros / 1_000_000, micros % 1_000_000)
} else {
String::new()
}
format!("[{}.{:06}] ", micros / 1_000_000, micros % 1_000_000)
};
match rpc_client.send_transaction(&tx) {
@ -1448,35 +1423,51 @@ pub fn process_ping(
Ok(()) => {
let elapsed_time_millis = elapsed_time.as_millis() as u64;
confirmation_time.push_back(elapsed_time_millis);
println!(
"{}{}{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
timestamp(),
CHECK_MARK, lamports, seq, elapsed_time_millis, signature
);
let cli_ping_data = CliPingData {
success: true,
signature: Some(signature.to_string()),
ms: Some(elapsed_time_millis),
error: None,
timestamp: timestamp(),
print_timestamp,
sequence: seq,
lamports: Some(lamports),
};
eprint!("{}", cli_ping_data);
cli_pings.push(cli_ping_data);
confirmed_count += 1;
}
Err(err) => {
println!(
"{}{}Transaction failed: seq={:<3} error={:?} signature={}",
timestamp(),
CROSS_MARK,
seq,
err,
signature
);
let cli_ping_data = CliPingData {
success: false,
signature: Some(signature.to_string()),
ms: None,
error: Some(err.to_string()),
timestamp: timestamp(),
print_timestamp,
sequence: seq,
lamports: None,
};
eprint!("{}", cli_ping_data);
cli_pings.push(cli_ping_data);
}
}
break;
}
if elapsed_time >= *timeout {
println!(
"{}{}Confirmation timeout: seq={:<3} signature={}",
timestamp(),
CROSS_MARK,
seq,
signature
);
let cli_ping_data = CliPingData {
success: false,
signature: Some(signature.to_string()),
ms: None,
error: None,
timestamp: timestamp(),
print_timestamp,
sequence: seq,
lamports: None,
};
eprint!("{}", cli_ping_data);
cli_pings.push(cli_ping_data);
break;
}
@ -1490,13 +1481,18 @@ pub fn process_ping(
}
}
Err(err) => {
println!(
"{}{}Submit failed: seq={:<3} error={:?}",
timestamp(),
CROSS_MARK,
seq,
err
);
let cli_ping_data = CliPingData {
success: false,
signature: None,
ms: None,
error: Some(err.to_string()),
timestamp: timestamp(),
print_timestamp,
sequence: seq,
lamports: None,
};
eprint!("{}", cli_ping_data);
cli_pings.push(cli_ping_data);
}
}
submit_count += 1;
@ -1506,28 +1502,34 @@ pub fn process_ping(
}
}
println!();
println!("--- transaction statistics ---");
println!(
"{} transactions submitted, {} transactions confirmed, {:.1}% transaction loss",
submit_count,
confirmed_count,
(100. - f64::from(confirmed_count) / f64::from(submit_count) * 100.)
);
if !confirmation_time.is_empty() {
let transaction_stats = CliPingTxStats {
num_transactions: submit_count,
num_transaction_confirmed: confirmed_count,
};
let confirmation_stats = if !confirmation_time.is_empty() {
let samples: Vec<f64> = confirmation_time.iter().map(|t| *t as f64).collect();
let dist = criterion_stats::Distribution::from(samples.into_boxed_slice());
let mean = dist.mean();
println!(
"confirmation min/mean/max/stddev = {:.0}/{:.0}/{:.0}/{:.0} ms",
dist.min(),
Some(CliPingConfirmationStats {
min: dist.min(),
mean,
dist.max(),
dist.std_dev(Some(mean))
);
}
max: dist.max(),
std_dev: dist.std_dev(Some(mean)),
})
} else {
None
};
Ok("".to_string())
let cli_ping = CliPing {
source_pubkey: config.signers[0].pubkey().to_string(),
fixed_blockhash: fixed_blockhash.map(|_| blockhash.to_string()),
blockhash_from_cluster,
pings: cli_pings,
transaction_stats,
confirmation_stats,
};
Ok(config.output_format.formatted_string(&cli_ping))
}
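For reference, a minimal sketch of the confirmation-latency summary that process_ping now folds into CliPingConfirmationStats; the sample durations are invented and the criterion_stats calls mirror the ones used above.

fn main() {
    let confirmation_time_ms: Vec<u64> = vec![420, 515, 480, 610];
    let samples: Vec<f64> = confirmation_time_ms.iter().map(|t| *t as f64).collect();
    let dist = criterion_stats::Distribution::from(samples.into_boxed_slice());
    let mean = dist.mean();
    println!(
        "confirmation min/mean/max/stddev = {:.0}/{:.0}/{:.0}/{:.0} ms",
        dist.min(),
        mean,
        dist.max(),
        dist.std_dev(Some(mean)),
    );
}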
pub fn parse_logs(
@ -2128,7 +2130,7 @@ pub fn process_calculate_rent(
timing::years_as_slots(1.0, &seconds_per_tick, clock::DEFAULT_TICKS_PER_SLOT);
let slots_per_epoch = epoch_schedule.slots_per_epoch as f64;
let years_per_epoch = slots_per_epoch / slots_per_year;
let (lamports_per_epoch, _) = rent.due(0, data_length, years_per_epoch);
let lamports_per_epoch = rent.due(0, data_length, years_per_epoch).lamports();
let cli_rent_calculation = CliRentCalculation {
lamports_per_byte_year: rent.lamports_per_byte_year,
lamports_per_epoch,
@ -2304,7 +2306,6 @@ mod tests {
parse_command(&test_ping, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Ping {
lamports: 1,
interval: Duration::from_secs(1),
count: Some(2),
timeout: Duration::from_secs(3),

View File

@ -5,7 +5,7 @@ use {
},
clap::{App, AppSettings, Arg, ArgMatches, SubCommand},
console::style,
serde::{Deserialize, Serialize},
serde::{Deserialize, Deserializer, Serialize, Serializer},
solana_clap_utils::{input_parsers::*, input_validators::*, keypair::*},
solana_cli_output::{QuietDisplay, VerboseDisplay},
solana_client::{client_error::ClientError, rpc_client::RpcClient},
@ -23,6 +23,7 @@ use {
cmp::Ordering,
collections::{HashMap, HashSet},
fmt,
str::FromStr,
sync::Arc,
},
};
@ -45,7 +46,7 @@ pub enum FeatureCliCommand {
},
}
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase", tag = "status", content = "sinceSlot")]
pub enum CliFeatureStatus {
Inactive,
@ -53,7 +54,29 @@ pub enum CliFeatureStatus {
Active(Slot),
}
#[derive(Serialize, Deserialize)]
impl PartialOrd for CliFeatureStatus {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for CliFeatureStatus {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Self::Inactive, Self::Inactive) => Ordering::Equal,
(Self::Inactive, _) => Ordering::Greater,
(_, Self::Inactive) => Ordering::Less,
(Self::Pending, Self::Pending) => Ordering::Equal,
(Self::Pending, _) => Ordering::Greater,
(_, Self::Pending) => Ordering::Less,
(Self::Active(self_active_slot), Self::Active(other_active_slot)) => {
self_active_slot.cmp(other_active_slot)
}
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct CliFeature {
pub id: String,
@ -62,11 +85,28 @@ pub struct CliFeature {
pub status: CliFeatureStatus,
}
impl PartialOrd for CliFeature {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for CliFeature {
fn cmp(&self, other: &Self) -> Ordering {
match self.status.cmp(&other.status) {
Ordering::Equal => self.id.cmp(&other.id),
ordering => ordering,
}
}
}
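To make the effect of the new Ord impls concrete, a self-contained stand-in (the enum below mirrors CliFeatureStatus rather than importing it): sorting ascending lists Active features first, ordered by activation slot, then Pending, then Inactive.

use std::cmp::Ordering;

#[derive(Debug, PartialEq, Eq)]
enum Status {
    Inactive,
    Pending,
    Active(u64),
}

impl PartialOrd for Status {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Status {
    fn cmp(&self, other: &Self) -> Ordering {
        // Same arm ordering as CliFeatureStatus above: Inactive sorts last.
        match (self, other) {
            (Self::Inactive, Self::Inactive) => Ordering::Equal,
            (Self::Inactive, _) => Ordering::Greater,
            (_, Self::Inactive) => Ordering::Less,
            (Self::Pending, Self::Pending) => Ordering::Equal,
            (Self::Pending, _) => Ordering::Greater,
            (_, Self::Pending) => Ordering::Less,
            (Self::Active(a), Self::Active(b)) => a.cmp(b),
        }
    }
}

fn main() {
    let mut statuses = vec![
        Status::Inactive,
        Status::Active(200),
        Status::Pending,
        Status::Active(100),
    ];
    statuses.sort();
    assert_eq!(
        statuses,
        [Status::Active(100), Status::Active(200), Status::Pending, Status::Inactive]
    );
}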
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeatures {
pub features: Vec<CliFeature>,
pub feature_activation_allowed: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub cluster_feature_sets: Option<CliClusterFeatureSets>,
#[serde(skip)]
pub inactive: bool,
}
@ -93,11 +133,16 @@ impl fmt::Display for CliFeatures {
CliFeatureStatus::Inactive => style("inactive".to_string()).red(),
CliFeatureStatus::Pending => style("activation pending".to_string()).yellow(),
CliFeatureStatus::Active(activation_slot) =>
style(format!("active since slot {}", activation_slot)).green(),
style(format!("active since slot {:>9}", activation_slot)).green(),
},
feature.description,
)?;
}
if let Some(feature_sets) = &self.cluster_feature_sets {
write!(f, "{}", feature_sets)?;
}
if self.inactive && !self.feature_activation_allowed {
writeln!(
f,
@ -114,6 +159,191 @@ impl fmt::Display for CliFeatures {
impl QuietDisplay for CliFeatures {}
impl VerboseDisplay for CliFeatures {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliClusterFeatureSets {
pub tool_feature_set: u32,
pub feature_sets: Vec<CliFeatureSet>,
#[serde(skip)]
pub stake_allowed: bool,
#[serde(skip)]
pub rpc_allowed: bool,
}
impl fmt::Display for CliClusterFeatureSets {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut tool_feature_set_matches_cluster = false;
let software_versions_title = "Software Version";
let feature_set_title = "Feature Set";
let stake_percent_title = "Stake";
let rpc_percent_title = "RPC";
let mut max_software_versions_len = software_versions_title.len();
let mut max_feature_set_len = feature_set_title.len();
let mut max_stake_percent_len = stake_percent_title.len();
let mut max_rpc_percent_len = rpc_percent_title.len();
let feature_sets: Vec<_> = self
.feature_sets
.iter()
.map(|feature_set_info| {
let me = if self.tool_feature_set == feature_set_info.feature_set {
tool_feature_set_matches_cluster = true;
true
} else {
false
};
let software_versions: Vec<_> = feature_set_info
.software_versions
.iter()
.map(ToString::to_string)
.collect();
let software_versions = software_versions.join(", ");
let feature_set = if feature_set_info.feature_set == 0 {
"unknown".to_string()
} else {
feature_set_info.feature_set.to_string()
};
let stake_percent = format!("{:.2}%", feature_set_info.stake_percent);
let rpc_percent = format!("{:.2}%", feature_set_info.rpc_percent);
max_software_versions_len = max_software_versions_len.max(software_versions.len());
max_feature_set_len = max_feature_set_len.max(feature_set.len());
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
(
software_versions,
feature_set,
stake_percent,
rpc_percent,
me,
)
})
.collect();
if !tool_feature_set_matches_cluster {
writeln!(
f,
"\n{}",
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
.bold())?;
} else {
if !self.stake_allowed {
write!(
f,
"\n{}",
style("To activate features the stake must be >= 95%")
.bold()
.red()
)?;
}
if !self.rpc_allowed {
write!(
f,
"\n{}",
style("To activate features the RPC nodes must be >= 95%")
.bold()
.red()
)?;
}
}
writeln!(
f,
"\n\n{}",
style(format!("Tool Feature Set: {}", self.tool_feature_set)).bold()
)?;
writeln!(
f,
"{}",
style(format!(
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
max_software_versions_len,
software_versions_title,
max_feature_set_len,
feature_set_title,
max_stake_percent_len,
stake_percent_title,
max_rpc_percent_len,
rpc_percent_title,
))
.bold(),
)?;
for (software_versions, feature_set, stake_percent, rpc_percent, me) in feature_sets {
writeln!(
f,
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
max_software_versions_len,
software_versions,
max_feature_set_len,
feature_set,
max_stake_percent_len,
stake_percent,
max_rpc_percent_len,
rpc_percent,
if me { "<-- me" } else { "" },
)?;
}
writeln!(f)
}
}
impl QuietDisplay for CliClusterFeatureSets {}
impl VerboseDisplay for CliClusterFeatureSets {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeatureSet {
software_versions: Vec<CliVersion>,
feature_set: u32,
stake_percent: f64,
rpc_percent: f32,
}
#[derive(Eq, PartialEq, Ord, PartialOrd)]
struct CliVersion(Option<semver::Version>);
impl fmt::Display for CliVersion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let s = match &self.0 {
None => "unknown".to_string(),
Some(version) => version.to_string(),
};
write!(f, "{}", s)
}
}
impl FromStr for CliVersion {
type Err = semver::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let version_option = if s == "unknown" {
None
} else {
Some(semver::Version::from_str(s)?)
};
Ok(CliVersion(version_option))
}
}
impl Serialize for CliVersion {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
impl<'de> Deserialize<'de> for CliVersion {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s: &str = Deserialize::deserialize(deserializer)?;
CliVersion::from_str(s).map_err(serde::de::Error::custom)
}
}
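For reference, a small usage sketch (not part of the change) of the `CliVersion` wrapper above: the literal string "unknown" maps to an absent version, anything else must parse as semver, and both render back out unchanged.

// Illustrative only; relies on the CliVersion type defined above.
fn cli_version_roundtrip() -> Result<(), semver::Error> {
    use std::str::FromStr;
    let known = CliVersion::from_str("1.9.17")?; // Some(semver::Version)
    let unknown = CliVersion::from_str("unknown")?; // None
    assert_eq!(known.to_string(), "1.9.17");
    assert_eq!(unknown.to_string(), "unknown");
    assert!(known > unknown); // derived Ord: Some(_) sorts above None
    Ok(())
}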
pub trait FeatureSubCommands {
fn feature_subcommands(self) -> Self;
}
@ -330,7 +560,10 @@ fn feature_set_stats(rpc_client: &RpcClient) -> Result<FeatureSetStats, ClientEr
}
// Feature activation is only allowed when 95% of the active stake is on the current feature set
fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<bool, ClientError> {
fn feature_activation_allowed(
rpc_client: &RpcClient,
quiet: bool,
) -> Result<(bool, Option<CliClusterFeatureSets>), ClientError> {
let my_feature_set = solana_version::Version::default().feature_set;
let feature_set_stats = feature_set_stats(rpc_client)?;
@ -346,54 +579,43 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
)
.unwrap_or((false, false));
if !quiet {
if feature_set_stats.get(&my_feature_set).is_none() {
println!(
"{}",
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
.bold());
} else {
if !stake_allowed {
print!(
"\n{}",
style("To activate features the stake must be >= 95%")
.bold()
.red()
);
}
if !rpc_allowed {
print!(
"\n{}",
style("To activate features the RPC nodes must be >= 95%")
.bold()
.red()
);
}
}
println!(
"\n\n{}",
style(format!("Tool Feature Set: {}", my_feature_set)).bold()
);
let mut feature_set_stats = feature_set_stats.into_iter().collect::<Vec<_>>();
feature_set_stats.sort_by(|l, r| {
match l.1.software_versions[0]
.cmp(&r.1.software_versions[0])
let cluster_feature_sets = if quiet {
None
} else {
let mut feature_sets = feature_set_stats
.into_iter()
.map(
|(
feature_set,
FeatureSetStatsEntry {
stake_percent,
rpc_nodes_percent: rpc_percent,
software_versions,
},
)| {
CliFeatureSet {
software_versions: software_versions.into_iter().map(CliVersion).collect(),
feature_set,
stake_percent,
rpc_percent,
}
},
)
.collect::<Vec<_>>();
feature_sets.sort_by(|l, r| {
match l.software_versions[0]
.cmp(&r.software_versions[0])
.reverse()
{
Ordering::Equal => {
match l
.1
.stake_percent
.partial_cmp(&r.1.stake_percent)
.partial_cmp(&r.stake_percent)
.unwrap()
.reverse()
{
Ordering::Equal => {
l.1.rpc_nodes_percent
.partial_cmp(&r.1.rpc_nodes_percent)
.unwrap()
.reverse()
l.rpc_percent.partial_cmp(&r.rpc_percent).unwrap().reverse()
}
o => o,
}
@ -401,96 +623,15 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
o => o,
}
});
Some(CliClusterFeatureSets {
tool_feature_set: my_feature_set,
feature_sets,
stake_allowed,
rpc_allowed,
})
};
let software_versions_title = "Software Version";
let feature_set_title = "Feature Set";
let stake_percent_title = "Stake";
let rpc_percent_title = "RPC";
let mut stats_output = Vec::new();
let mut max_software_versions_len = software_versions_title.len();
let mut max_feature_set_len = feature_set_title.len();
let mut max_stake_percent_len = stake_percent_title.len();
let mut max_rpc_percent_len = rpc_percent_title.len();
for (
feature_set,
FeatureSetStatsEntry {
stake_percent,
rpc_nodes_percent,
software_versions,
},
) in feature_set_stats.into_iter()
{
let me = feature_set == my_feature_set;
let feature_set = if feature_set == 0 {
"unknown".to_string()
} else {
feature_set.to_string()
};
let stake_percent = format!("{:.2}%", stake_percent);
let rpc_percent = format!("{:.2}%", rpc_nodes_percent);
let mut has_unknown = false;
let mut software_versions = software_versions
.iter()
.filter_map(|v| {
if v.is_none() {
has_unknown = true;
}
v.as_ref()
})
.map(ToString::to_string)
.collect::<Vec<_>>();
if has_unknown {
software_versions.push("unknown".to_string());
}
let software_versions = software_versions.join(", ");
max_software_versions_len = max_software_versions_len.max(software_versions.len());
max_feature_set_len = max_feature_set_len.max(feature_set.len());
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
stats_output.push((
software_versions,
feature_set,
stake_percent,
rpc_percent,
me,
));
}
println!(
"{}",
style(format!(
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
max_software_versions_len,
software_versions_title,
max_feature_set_len,
feature_set_title,
max_stake_percent_len,
stake_percent_title,
max_rpc_percent_len,
rpc_percent_title,
))
.bold(),
);
for (software_versions, feature_set, stake_percent, rpc_percent, me) in stats_output {
println!(
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
max_software_versions_len,
software_versions,
max_feature_set_len,
feature_set,
max_stake_percent_len,
stake_percent,
max_rpc_percent_len,
rpc_percent,
if me { "<-- me" } else { "" },
);
}
println!();
}
Ok(stake_allowed && rpc_allowed)
Ok((stake_allowed && rpc_allowed, cluster_feature_sets))
}
fn status_from_account(account: Account) -> Option<CliFeatureStatus> {
@ -550,10 +691,14 @@ fn process_status(
});
}
let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?;
features.sort_unstable();
let (feature_activation_allowed, cluster_feature_sets) =
feature_activation_allowed(rpc_client, features.len() <= 1)?;
let feature_set = CliFeatures {
features,
feature_activation_allowed,
cluster_feature_sets,
inactive,
};
Ok(config.output_format.formatted_string(&feature_set))
@ -577,7 +722,7 @@ fn process_activate(
}
}
if !feature_activation_allowed(rpc_client, false)? {
if !feature_activation_allowed(rpc_client, false)?.0 {
match force {
ForceActivation::Almost =>
return Err("Add force argument once more to override the sanity check to force feature activation ".into()),


@ -65,6 +65,7 @@ pub enum ProgramCliCommand {
is_final: bool,
max_len: Option<usize>,
allow_excessive_balance: bool,
skip_fee_check: bool,
},
WriteBuffer {
program_location: String,
@ -72,6 +73,7 @@ pub enum ProgramCliCommand {
buffer_pubkey: Option<Pubkey>,
buffer_authority_signer_index: Option<SignerIndex>,
max_len: Option<usize>,
skip_fee_check: bool,
},
SetBufferAuthority {
buffer_pubkey: Pubkey,
@ -113,6 +115,13 @@ impl ProgramSubCommands for App<'_, '_> {
SubCommand::with_name("program")
.about("Program management")
.setting(AppSettings::SubcommandRequiredElseHelp)
.arg(
Arg::with_name("skip_fee_check")
.long("skip-fee-check")
.hidden(true)
.takes_value(false)
.global(true)
)
.subcommand(
SubCommand::with_name("deploy")
.about("Deploy a program")
@ -405,6 +414,12 @@ impl ProgramSubCommands for App<'_, '_> {
.long("allow-excessive-deploy-account-balance")
.takes_value(false)
.help("Use the designated program id, even if the account already holds a large balance of SOL")
)
.arg(
Arg::with_name("skip_fee_check")
.long("skip-fee-check")
.hidden(true)
.takes_value(false)
),
)
}
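As I read it, the hidden flag works because clap propagates arguments marked `.global(true)` down into each subcommand's matches; the parser below still checks `is_present` on both the outer and the subcommand matches and ORs the results. A tiny hypothetical app showing the same wiring (sketch only, clap 2 API; `demo` and `deploy` are placeholder names):

use clap::{App, Arg, SubCommand};

fn demo_app() -> App<'static, 'static> {
    App::new("demo")
        .arg(
            Arg::with_name("skip_fee_check")
                .long("skip-fee-check")
                .hidden(true) // not shown in --help
                .takes_value(false)
                .global(true), // propagated to every subcommand's matches
        )
        .subcommand(SubCommand::with_name("deploy"))
}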
@ -415,7 +430,14 @@ pub fn parse_program_subcommand(
default_signer: &DefaultSigner,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let response = match matches.subcommand() {
let (subcommand, sub_matches) = matches.subcommand();
let matches_skip_fee_check = matches.is_present("skip_fee_check");
let sub_matches_skip_fee_check = sub_matches
.map(|m| m.is_present("skip_fee_check"))
.unwrap_or(false);
let skip_fee_check = matches_skip_fee_check || sub_matches_skip_fee_check;
let response = match (subcommand, sub_matches) {
("deploy", Some(matches)) => {
let mut bulk_signers = vec![Some(
default_signer.signer_from_path(matches, wallet_manager)?,
@ -475,6 +497,7 @@ pub fn parse_program_subcommand(
is_final: matches.is_present("final"),
max_len,
allow_excessive_balance: matches.is_present("allow_excessive_balance"),
skip_fee_check,
}),
signers: signer_info.signers,
}
@ -520,6 +543,7 @@ pub fn parse_program_subcommand(
buffer_authority_signer_index: signer_info
.index_of_or_none(buffer_authority_pubkey),
max_len,
skip_fee_check,
}),
signers: signer_info.signers,
}
@ -668,6 +692,7 @@ pub fn process_program_subcommand(
is_final,
max_len,
allow_excessive_balance,
skip_fee_check,
} => process_program_deploy(
rpc_client,
config,
@ -680,6 +705,7 @@ pub fn process_program_subcommand(
*is_final,
*max_len,
*allow_excessive_balance,
*skip_fee_check,
),
ProgramCliCommand::WriteBuffer {
program_location,
@ -687,6 +713,7 @@ pub fn process_program_subcommand(
buffer_pubkey,
buffer_authority_signer_index,
max_len,
skip_fee_check,
} => process_write_buffer(
rpc_client,
config,
@ -695,6 +722,7 @@ pub fn process_program_subcommand(
*buffer_pubkey,
*buffer_authority_signer_index,
*max_len,
*skip_fee_check,
),
ProgramCliCommand::SetBufferAuthority {
buffer_pubkey,
@ -792,6 +820,7 @@ fn process_program_deploy(
is_final: bool,
max_len: Option<usize>,
allow_excessive_balance: bool,
skip_fee_check: bool,
) -> ProcessResult {
let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?;
let (buffer_provided, buffer_signer, buffer_pubkey) = if let Some(i) = buffer_signer_index {
@ -946,6 +975,7 @@ fn process_program_deploy(
&buffer_pubkey,
Some(upgrade_authority_signer),
allow_excessive_balance,
skip_fee_check,
)
} else {
do_process_program_upgrade(
@ -956,6 +986,7 @@ fn process_program_deploy(
config.signers[upgrade_authority_signer_index],
&buffer_pubkey,
buffer_signer,
skip_fee_check,
)
};
if result.is_ok() && is_final {
@ -982,6 +1013,7 @@ fn process_write_buffer(
buffer_pubkey: Option<Pubkey>,
buffer_authority_signer_index: Option<SignerIndex>,
max_len: Option<usize>,
skip_fee_check: bool,
) -> ProcessResult {
// Create ephemeral keypair to use for Buffer account, if not provided
let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?;
@ -1049,6 +1081,7 @@ fn process_write_buffer(
&buffer_pubkey,
Some(buffer_authority),
true,
skip_fee_check,
);
if result.is_err() && buffer_signer_index.is_none() && buffer_signer.is_some() {
@ -1635,6 +1668,7 @@ pub fn process_deploy(
buffer_signer_index: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
skip_fee_check: bool,
) -> ProcessResult {
// Create ephemeral keypair to use for Buffer account, if not provided
let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?;
@ -1665,6 +1699,7 @@ pub fn process_deploy(
&buffer_signer.pubkey(),
Some(buffer_signer),
allow_excessive_balance,
skip_fee_check,
);
if result.is_err() && buffer_signer_index.is_none() {
report_ephemeral_mnemonic(words, mnemonic);
@ -1703,6 +1738,7 @@ fn do_process_program_write_and_deploy(
buffer_pubkey: &Pubkey,
buffer_authority_signer: Option<&dyn Signer>,
allow_excessive_balance: bool,
skip_fee_check: bool,
) -> ProcessResult {
// Build messages to calculate fees
let mut messages: Vec<&Message> = Vec::new();
@ -1833,7 +1869,9 @@ fn do_process_program_write_and_deploy(
messages.push(message);
}
check_payer(&rpc_client, config, balance_needed, &messages)?;
if !skip_fee_check {
check_payer(&rpc_client, config, balance_needed, &messages)?;
}
send_deploy_messages(
rpc_client,
@ -1867,6 +1905,7 @@ fn do_process_program_upgrade(
upgrade_authority: &dyn Signer,
buffer_pubkey: &Pubkey,
buffer_signer: Option<&dyn Signer>,
skip_fee_check: bool,
) -> ProcessResult {
let loader_id = bpf_loader_upgradeable::id();
let data_len = program_data.len();
@ -1966,7 +2005,10 @@ fn do_process_program_upgrade(
);
messages.push(&final_message);
check_payer(&rpc_client, config, balance_needed, &messages)?;
if !skip_fee_check {
check_payer(&rpc_client, config, balance_needed, &messages)?;
}
send_deploy_messages(
rpc_client,
config,
@ -1997,10 +2039,7 @@ fn read_and_verify_elf(program_location: &str) -> Result<Vec<u8>, Box<dyn std::e
&program_data,
Some(verifier::check),
Config {
reject_unresolved_syscalls: true,
verify_mul64_imm_nonzero: false,
verify_shift32_imm: true,
reject_section_virtual_address_file_offset_mismatch: true,
reject_broken_elfs: true,
..Config::default()
},
register_syscalls(&mut invoke_context).unwrap(),
@ -2256,6 +2295,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@ -2282,6 +2322,7 @@ mod tests {
is_final: false,
max_len: Some(42),
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@ -2310,6 +2351,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@ -2340,6 +2382,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@ -2369,6 +2412,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@ -2401,6 +2445,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@ -2428,6 +2473,7 @@ mod tests {
upgrade_authority_signer_index: 0,
is_final: true,
max_len: None,
skip_fee_check: false,
allow_excessive_balance: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
@ -2461,6 +2507,7 @@ mod tests {
buffer_pubkey: None,
buffer_authority_signer_index: Some(0),
max_len: None,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@ -2484,6 +2531,7 @@ mod tests {
buffer_pubkey: None,
buffer_authority_signer_index: Some(0),
max_len: Some(42),
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@ -2510,6 +2558,7 @@ mod tests {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(0),
max_len: None,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@ -2539,6 +2588,7 @@ mod tests {
buffer_pubkey: None,
buffer_authority_signer_index: Some(1),
max_len: None,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@ -2573,6 +2623,7 @@ mod tests {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@ -3015,6 +3066,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![&default_keypair],
output_format: OutputFormat::JsonCompact,


@ -16,6 +16,7 @@ use {
pub enum SpendAmount {
All,
Some(u64),
RentExempt,
}
impl Default for SpendAmount {
@ -90,6 +91,7 @@ where
0,
from_pubkey,
fee_pubkey,
0,
build_message,
)?;
Ok((message, spend))
@ -97,6 +99,12 @@ where
let from_balance = rpc_client
.get_balance_with_commitment(from_pubkey, commitment)?
.value;
let from_rent_exempt_minimum = if amount == SpendAmount::RentExempt {
let data = rpc_client.get_account_data(from_pubkey)?;
rpc_client.get_minimum_balance_for_rent_exemption(data.len())?
} else {
0
};
let (message, SpendAndFee { spend, fee }) = resolve_spend_message(
rpc_client,
amount,
@ -104,6 +112,7 @@ where
from_balance,
from_pubkey,
fee_pubkey,
from_rent_exempt_minimum,
build_message,
)?;
if from_pubkey == fee_pubkey {
@ -140,6 +149,7 @@ fn resolve_spend_message<F>(
from_balance: u64,
from_pubkey: &Pubkey,
fee_pubkey: &Pubkey,
from_rent_exempt_minimum: u64,
build_message: F,
) -> Result<(Message, SpendAndFee), CliError>
where
@ -176,5 +186,20 @@ where
},
))
}
SpendAmount::RentExempt => {
let mut lamports = if from_pubkey == fee_pubkey {
from_balance.saturating_sub(fee)
} else {
from_balance
};
lamports = lamports.saturating_sub(from_rent_exempt_minimum);
Ok((
build_message(lamports),
SpendAndFee {
spend: lamports,
fee,
},
))
}
}
}
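To make the new `RentExempt` arm concrete: when the sender is also the fee payer, the spendable amount is the balance minus the fee minus the account's rent-exempt minimum, saturating at zero. A stand-alone sketch with made-up numbers (not from the change):

// Illustrative only; mirrors the arithmetic of the RentExempt arm above.
fn rent_exempt_spend(from_balance: u64, fee: u64, rent_exempt_minimum: u64, from_is_fee_payer: bool) -> u64 {
    let lamports = if from_is_fee_payer {
        from_balance.saturating_sub(fee)
    } else {
        from_balance
    };
    lamports.saturating_sub(rent_exempt_minimum)
}

#[test]
fn rent_exempt_spend_example() {
    // 10_000_000 lamports in the account, a 5_000 lamport fee, and a rent-exempt
    // minimum of 890_880 lamports leave 9_104_120 lamports free to move.
    assert_eq!(rent_exempt_spend(10_000_000, 5_000, 890_880, true), 9_104_120);
}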


@ -1384,7 +1384,13 @@ pub fn process_stake_authorize(
if let Some(authorized) = authorized {
match authorization_type {
StakeAuthorize::Staker => {
check_current_authority(&authorized.staker, &authority.pubkey())?;
// first check authorized withdrawer
check_current_authority(&authorized.withdrawer, &authority.pubkey())
.or_else(|_| {
// ...then check authorized staker. If neither matches, error will
// print the stake key as `expected`
check_current_authority(&authorized.staker, &authority.pubkey())
})?;
}
StakeAuthorize::Withdrawer => {
check_current_authority(&authorized.withdrawer, &authority.pubkey())?;


@ -1,23 +1,29 @@
use {
solana_client::rpc_client::RpcClient,
solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig, pubkey::Pubkey},
solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig},
std::{thread::sleep, time::Duration},
};
pub fn check_recent_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
(0..5).for_each(|tries| {
let balance = client
.get_balance_with_commitment(pubkey, CommitmentConfig::processed())
.unwrap()
.value;
if balance == expected_balance {
return;
}
if tries == 4 {
assert_eq!(balance, expected_balance);
}
sleep(Duration::from_millis(500));
});
#[macro_export]
macro_rules! check_balance {
($expected_balance:expr, $client:expr, $pubkey:expr) => {
(0..5).for_each(|tries| {
let balance = $client
.get_balance_with_commitment($pubkey, CommitmentConfig::processed())
.unwrap()
.value;
if balance == $expected_balance {
return;
}
if tries == 4 {
assert_eq!(balance, $expected_balance);
}
std::thread::sleep(std::time::Duration::from_millis(500));
});
};
($expected_balance:expr, $client:expr, $pubkey:expr,) => {
check_balance!($expected_balance, $client, $pubkey)
};
}
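The macro keeps the retry behavior of the old helper: up to five polls at `processed` commitment, half a second apart, asserting only on the final attempt so slow confirmation does not flake a test; the trailing-comma arm simply forwards to the main one. A usage sketch (the `rpc_client` and `pubkey` bindings are assumed, and `CommitmentConfig` must be in scope at the call site):

// Illustrative only.
check_balance!(sol_to_lamports(1.0), &rpc_client, &pubkey);
check_balance!(0, &rpc_client, &unfunded_pubkey,); // trailing comma also accepted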
pub fn check_ready(rpc_client: &RpcClient) {


@ -291,8 +291,12 @@ pub fn process_set_validator_info(
// Check existence of validator-info account
let balance = rpc_client.get_balance(&info_pubkey).unwrap_or(0);
let lamports =
rpc_client.get_minimum_balance_for_rent_exemption(ValidatorInfo::max_space() as usize)?;
let keys = vec![
(validator_info::id(), false),
(config.signers[0].pubkey(), true),
];
let data_len = ValidatorInfo::max_space() + ConfigKeys::serialized_size(keys.clone());
let lamports = rpc_client.get_minimum_balance_for_rent_exemption(data_len as usize)?;
let signers = if balance == 0 {
if info_pubkey != info_keypair.pubkey() {
@ -308,10 +312,7 @@ pub fn process_set_validator_info(
};
let build_message = |lamports| {
let keys = vec![
(validator_info::id(), false),
(config.signers[0].pubkey(), true),
];
let keys = keys.clone();
if balance == 0 {
println!(
"Publishing info for Validator {:?}",

File diff suppressed because it is too large


@ -462,18 +462,27 @@ pub fn process_show_account(
let mut account_string = config.output_format.formatted_string(&cli_account);
if config.output_format == OutputFormat::Display
|| config.output_format == OutputFormat::DisplayVerbose
{
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
match config.output_format {
OutputFormat::Json | OutputFormat::JsonCompact => {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(account_string.as_bytes())?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account to {}", output_file)?;
}
}
OutputFormat::Display | OutputFormat::DisplayVerbose => {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
}
}
OutputFormat::DisplayQuiet => (),
}
Ok(account_string)


@ -1,8 +1,10 @@
#![allow(clippy::integer_arithmetic)]
use {
solana_cli::{
check_balance,
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
spend_utils::SpendAmount,
test_utils::{check_ready, check_recent_balance},
test_utils::check_ready,
},
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
solana_client::{
@ -14,6 +16,7 @@ use {
solana_sdk::{
commitment_config::CommitmentConfig,
hash::Hash,
native_token::sol_to_lamports,
pubkey::Pubkey,
signature::{keypair_from_seed, Keypair, Signer},
system_program,
@ -73,10 +76,14 @@ fn full_battery_tests(
&rpc_client,
&config_payer,
&config_payer.signers[0].pubkey(),
2000,
sol_to_lamports(2000.0),
)
.unwrap();
check_recent_balance(2000, &rpc_client, &config_payer.signers[0].pubkey());
check_balance!(
sol_to_lamports(2000.0),
&rpc_client,
&config_payer.signers[0].pubkey(),
);
let mut config_nonce = CliConfig::recent_for_tests();
config_nonce.json_rpc_url = json_rpc_url;
@ -108,12 +115,16 @@ fn full_battery_tests(
seed,
nonce_authority: optional_authority,
memo: None,
amount: SpendAmount::Some(1000),
amount: SpendAmount::Some(sol_to_lamports(1000.0)),
};
process_command(&config_payer).unwrap();
check_recent_balance(1000, &rpc_client, &config_payer.signers[0].pubkey());
check_recent_balance(1000, &rpc_client, &nonce_account);
check_balance!(
sol_to_lamports(1000.0),
&rpc_client,
&config_payer.signers[0].pubkey(),
);
check_balance!(sol_to_lamports(1000.0), &rpc_client, &nonce_account);
// Get nonce
config_payer.signers.pop();
@ -161,12 +172,16 @@ fn full_battery_tests(
nonce_authority: index,
memo: None,
destination_account_pubkey: payee_pubkey,
lamports: 100,
lamports: sol_to_lamports(100.0),
};
process_command(&config_payer).unwrap();
check_recent_balance(1000, &rpc_client, &config_payer.signers[0].pubkey());
check_recent_balance(900, &rpc_client, &nonce_account);
check_recent_balance(100, &rpc_client, &payee_pubkey);
check_balance!(
sol_to_lamports(1000.0),
&rpc_client,
&config_payer.signers[0].pubkey(),
);
check_balance!(sol_to_lamports(900.0), &rpc_client, &nonce_account);
check_balance!(sol_to_lamports(100.0), &rpc_client, &payee_pubkey);
// Show nonce account
config_payer.command = CliCommand::ShowNonceAccount {
@ -208,17 +223,22 @@ fn full_battery_tests(
nonce_authority: 1,
memo: None,
destination_account_pubkey: payee_pubkey,
lamports: 100,
lamports: sol_to_lamports(100.0),
};
process_command(&config_payer).unwrap();
check_recent_balance(1000, &rpc_client, &config_payer.signers[0].pubkey());
check_recent_balance(800, &rpc_client, &nonce_account);
check_recent_balance(200, &rpc_client, &payee_pubkey);
check_balance!(
sol_to_lamports(1000.0),
&rpc_client,
&config_payer.signers[0].pubkey(),
);
check_balance!(sol_to_lamports(800.0), &rpc_client, &nonce_account);
check_balance!(sol_to_lamports(200.0), &rpc_client, &payee_pubkey);
}
#[test]
#[allow(clippy::redundant_closure)]
fn test_create_account_with_seed() {
const ONE_SIG_FEE: f64 = 0.000005;
solana_logger::setup();
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
@ -241,19 +261,27 @@ fn test_create_account_with_seed() {
&rpc_client,
&CliConfig::recent_for_tests(),
&offline_nonce_authority_signer.pubkey(),
42,
sol_to_lamports(42.0),
)
.unwrap();
request_and_confirm_airdrop(
&rpc_client,
&CliConfig::recent_for_tests(),
&online_nonce_creator_signer.pubkey(),
4242,
sol_to_lamports(4242.0),
)
.unwrap();
check_recent_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
check_recent_balance(4242, &rpc_client, &online_nonce_creator_signer.pubkey());
check_recent_balance(0, &rpc_client, &to_address);
check_balance!(
sol_to_lamports(42.0),
&rpc_client,
&offline_nonce_authority_signer.pubkey(),
);
check_balance!(
sol_to_lamports(4242.0),
&rpc_client,
&online_nonce_creator_signer.pubkey(),
);
check_balance!(0, &rpc_client, &to_address);
check_ready(&rpc_client);
@ -263,7 +291,7 @@ fn test_create_account_with_seed() {
let seed = authority_pubkey.to_string()[0..32].to_string();
let nonce_address =
Pubkey::create_with_seed(&creator_pubkey, &seed, &system_program::id()).unwrap();
check_recent_balance(0, &rpc_client, &nonce_address);
check_balance!(0, &rpc_client, &nonce_address);
let mut creator_config = CliConfig::recent_for_tests();
creator_config.json_rpc_url = test_validator.rpc_url();
@ -273,13 +301,21 @@ fn test_create_account_with_seed() {
seed: Some(seed),
nonce_authority: Some(authority_pubkey),
memo: None,
amount: SpendAmount::Some(241),
amount: SpendAmount::Some(sol_to_lamports(241.0)),
};
process_command(&creator_config).unwrap();
check_recent_balance(241, &rpc_client, &nonce_address);
check_recent_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
check_recent_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
check_recent_balance(0, &rpc_client, &to_address);
check_balance!(sol_to_lamports(241.0), &rpc_client, &nonce_address);
check_balance!(
sol_to_lamports(42.0),
&rpc_client,
&offline_nonce_authority_signer.pubkey(),
);
check_balance!(
sol_to_lamports(4001.0 - ONE_SIG_FEE),
&rpc_client,
&online_nonce_creator_signer.pubkey(),
);
check_balance!(0, &rpc_client, &to_address);
// Fetch nonce hash
let nonce_hash = nonce_utils::get_account_with_commitment(
@ -299,7 +335,7 @@ fn test_create_account_with_seed() {
authority_config.command = CliCommand::ClusterVersion;
process_command(&authority_config).unwrap_err();
authority_config.command = CliCommand::Transfer {
amount: SpendAmount::Some(10),
amount: SpendAmount::Some(sol_to_lamports(10.0)),
to: to_address,
from: 0,
sign_only: true,
@ -325,7 +361,7 @@ fn test_create_account_with_seed() {
submit_config.json_rpc_url = test_validator.rpc_url();
submit_config.signers = vec![&authority_presigner];
submit_config.command = CliCommand::Transfer {
amount: SpendAmount::Some(10),
amount: SpendAmount::Some(sol_to_lamports(10.0)),
to: to_address,
from: 0,
sign_only: false,
@ -344,8 +380,16 @@ fn test_create_account_with_seed() {
derived_address_program_id: None,
};
process_command(&submit_config).unwrap();
check_recent_balance(241, &rpc_client, &nonce_address);
check_recent_balance(31, &rpc_client, &offline_nonce_authority_signer.pubkey());
check_recent_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
check_recent_balance(10, &rpc_client, &to_address);
check_balance!(sol_to_lamports(241.0), &rpc_client, &nonce_address);
check_balance!(
sol_to_lamports(32.0 - ONE_SIG_FEE),
&rpc_client,
&offline_nonce_authority_signer.pubkey(),
);
check_balance!(
sol_to_lamports(4001.0 - ONE_SIG_FEE),
&rpc_client,
&online_nonce_creator_signer.pubkey(),
);
check_balance!(sol_to_lamports(10.0), &rpc_client, &to_address);
}


@ -1,3 +1,4 @@
#![allow(clippy::integer_arithmetic)]
use {
serde_json::Value,
solana_cli::{
@ -61,6 +62,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@ -90,6 +92,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
process_command(&config).unwrap();
let account1 = rpc_client
@ -117,6 +120,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
process_command(&config).unwrap_err();
@ -126,6 +130,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: true,
skip_fee_check: false,
};
process_command(&config).unwrap();
let account2 = rpc_client
@ -192,6 +197,7 @@ fn test_cli_program_deploy_no_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@ -217,6 +223,7 @@ fn test_cli_program_deploy_no_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap_err();
}
@ -277,6 +284,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@ -324,6 +332,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@ -365,6 +374,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
process_command(&config).unwrap();
let program_account = rpc_client.get_account(&program_pubkey).unwrap();
@ -419,6 +429,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
let program_account = rpc_client.get_account(&program_pubkey).unwrap();
@ -493,6 +504,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap_err();
@ -508,6 +520,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@ -610,6 +623,7 @@ fn test_cli_program_close_program() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
process_command(&config).unwrap();
@ -694,6 +708,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: None,
buffer_authority_signer_index: None,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@ -728,6 +743,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: None,
max_len: Some(max_len),
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@ -789,6 +805,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@ -826,6 +843,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: None,
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@ -898,6 +916,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: None,
buffer_authority_signer_index: None,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@ -937,6 +956,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: None,
max_len: None, //Some(max_len),
skip_fee_check: false,
});
process_command(&config).unwrap();
config.signers = vec![&keypair, &buffer_keypair];
@ -950,6 +970,7 @@ fn test_cli_program_write_buffer() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let error = process_command(&config).unwrap_err();
@ -1007,6 +1028,7 @@ fn test_cli_program_set_buffer_authority() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: None,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
@ -1122,6 +1144,7 @@ fn test_cli_program_mismatch_buffer_authority() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
@ -1144,6 +1167,7 @@ fn test_cli_program_mismatch_buffer_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap_err();
@ -1159,6 +1183,7 @@ fn test_cli_program_mismatch_buffer_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
}
@ -1215,6 +1240,7 @@ fn test_cli_program_show() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
@ -1274,6 +1300,7 @@ fn test_cli_program_show() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let min_slot = rpc_client.get_slot().unwrap();
@ -1400,6 +1427,7 @@ fn test_cli_program_dump() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();


@ -1,9 +1,11 @@
#![allow(clippy::integer_arithmetic)]
use {
solana_cli::cli::{process_command, CliCommand, CliConfig},
solana_client::rpc_client::RpcClient,
solana_faucet::faucet::run_local_faucet,
solana_sdk::{
commitment_config::CommitmentConfig,
native_token::sol_to_lamports,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
@ -22,7 +24,7 @@ fn test_cli_request_airdrop() {
bob_config.json_rpc_url = test_validator.rpc_url();
bob_config.command = CliCommand::Airdrop {
pubkey: None,
lamports: 50,
lamports: sol_to_lamports(50.0),
};
let keypair = Keypair::new();
bob_config.signers = vec![&keypair];
@ -36,5 +38,5 @@ fn test_cli_request_airdrop() {
let balance = rpc_client
.get_balance(&bob_config.signers[0].pubkey())
.unwrap();
assert_eq!(balance, 50);
assert_eq!(balance, sol_to_lamports(50.0));
}


@ -1,10 +1,12 @@
#![allow(clippy::integer_arithmetic)]
#![allow(clippy::redundant_closure)]
use {
solana_cli::{
check_balance,
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
spend_utils::SpendAmount,
stake::StakeAuthorizationIndexed,
test_utils::{check_ready, check_recent_balance},
test_utils::check_ready,
},
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
solana_client::{
@ -16,6 +18,7 @@ use {
solana_sdk::{
account_utils::StateMut,
commitment_config::CommitmentConfig,
fee::FeeStructure,
nonce::State as NonceState,
pubkey::Pubkey,
signature::{keypair_from_seed, Keypair, Signer},
@ -59,7 +62,13 @@ fn test_stake_delegation_force() {
authorized_voter: None,
authorized_withdrawer,
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
@ -144,7 +153,7 @@ fn test_seed_stake_delegation_and_deactivation() {
100_000,
)
.unwrap();
check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
check_balance!(100_000, &rpc_client, &config_validator.signers[0].pubkey());
let stake_address = Pubkey::create_with_seed(
&config_validator.signers[0].pubkey(),
@ -233,7 +242,7 @@ fn test_stake_delegation_and_deactivation() {
100_000,
)
.unwrap();
check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
check_balance!(100_000, &rpc_client, &config_validator.signers[0].pubkey());
// Create stake account
config_validator.signers.push(&stake_keypair);
@ -327,7 +336,7 @@ fn test_offline_stake_delegation_and_deactivation() {
100_000,
)
.unwrap();
check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
check_balance!(100_000, &rpc_client, &config_validator.signers[0].pubkey());
request_and_confirm_airdrop(
&rpc_client,
@ -336,7 +345,7 @@ fn test_offline_stake_delegation_and_deactivation() {
100_000,
)
.unwrap();
check_recent_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey());
check_balance!(100_000, &rpc_client, &config_offline.signers[0].pubkey());
// Create stake account
config_validator.signers.push(&stake_keypair);
@ -868,14 +877,15 @@ fn test_stake_authorize() {
#[test]
fn test_stake_authorize_with_fee_payer() {
solana_logger::setup();
const SIG_FEE: u64 = 42;
let fee_one_sig = FeeStructure::default().get_max_fee(1, 0);
let fee_two_sig = FeeStructure::default().get_max_fee(2, 0);
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
SIG_FEE,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
@ -904,14 +914,14 @@ fn test_stake_authorize_with_fee_payer() {
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &config, &default_pubkey, 100_000).unwrap();
check_recent_balance(100_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &config, &default_pubkey, 5_000_000).unwrap();
check_balance!(5_000_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &config_payer, &payer_pubkey, 100_000).unwrap();
check_recent_balance(100_000, &rpc_client, &payer_pubkey);
request_and_confirm_airdrop(&rpc_client, &config_payer, &payer_pubkey, 5_000_000).unwrap();
check_balance!(5_000_000, &rpc_client, &payer_pubkey);
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 5_000_000).unwrap();
check_balance!(5_000_000, &rpc_client, &offline_pubkey);
check_ready(&rpc_client);
@ -926,7 +936,7 @@ fn test_stake_authorize_with_fee_payer() {
withdrawer: None,
withdrawer_signer: None,
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
amount: SpendAmount::Some(1_000_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
@ -937,8 +947,7 @@ fn test_stake_authorize_with_fee_payer() {
from: 0,
};
process_command(&config).unwrap();
// `config` balance should be 50,000 - 1 stake account sig - 1 fee sig
check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
check_balance!(4_000_000 - fee_two_sig, &rpc_client, &default_pubkey);
// Assign authority with separate fee payer
config.signers = vec![&default_signer, &payer_keypair];
@ -962,10 +971,10 @@ fn test_stake_authorize_with_fee_payer() {
};
process_command(&config).unwrap();
// `config` balance has not changed, despite submitting the TX
check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
check_balance!(4_000_000 - fee_two_sig, &rpc_client, &default_pubkey);
// `config_payer` however has paid `config`'s authority sig
// and `config_payer`'s fee sig
check_recent_balance(100_000 - SIG_FEE - SIG_FEE, &rpc_client, &payer_pubkey);
check_balance!(5_000_000 - fee_two_sig, &rpc_client, &payer_pubkey);
// Assign authority with offline fee payer
let blockhash = rpc_client.get_latest_blockhash().unwrap();
@ -1013,10 +1022,10 @@ fn test_stake_authorize_with_fee_payer() {
};
process_command(&config).unwrap();
// `config`'s balance again has not changed
check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
check_balance!(4_000_000 - fee_two_sig, &rpc_client, &default_pubkey);
// `config_offline` however has paid 1 sig due to being both authority
// and fee payer
check_recent_balance(100_000 - SIG_FEE, &rpc_client, &offline_pubkey);
check_balance!(5_000_000 - fee_one_sig, &rpc_client, &offline_pubkey);
}
#[test]
@ -1050,12 +1059,17 @@ fn test_stake_split() {
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000)
.unwrap();
check_recent_balance(500_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(
&rpc_client,
&config,
&config.signers[0].pubkey(),
50_000_000,
)
.unwrap();
check_balance!(50_000_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 1_000_000).unwrap();
check_balance!(1_000_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
let minimum_stake_balance = rpc_client
@ -1082,7 +1096,7 @@ fn test_stake_split() {
from: 0,
};
process_command(&config).unwrap();
check_recent_balance(
check_balance!(
10 * minimum_stake_balance,
&rpc_client,
&stake_account_pubkey,
@ -1102,7 +1116,7 @@ fn test_stake_split() {
amount: SpendAmount::Some(minimum_nonce_balance),
};
process_command(&config).unwrap();
check_recent_balance(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
check_balance!(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
// Fetch nonce hash
let nonce_hash = nonce_utils::get_account_with_commitment(
@ -1116,7 +1130,7 @@ fn test_stake_split() {
// Nonced offline split
let split_account = keypair_from_seed(&[2u8; 32]).unwrap();
check_recent_balance(0, &rpc_client, &split_account.pubkey());
check_balance!(0, &rpc_client, &split_account.pubkey());
config_offline.signers.push(&split_account);
config_offline.command = CliCommand::SplitStake {
stake_account_pubkey,
@ -1156,12 +1170,12 @@ fn test_stake_split() {
fee_payer: 0,
};
process_command(&config).unwrap();
check_recent_balance(
check_balance!(
8 * minimum_stake_balance,
&rpc_client,
&stake_account_pubkey,
);
check_recent_balance(
check_balance!(
2 * minimum_stake_balance,
&rpc_client,
&split_account.pubkey(),
@ -1199,12 +1213,12 @@ fn test_stake_set_lockup() {
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000)
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 5_000_000)
.unwrap();
check_recent_balance(500_000, &rpc_client, &config.signers[0].pubkey());
check_balance!(5_000_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 1_000_000).unwrap();
check_balance!(1_000_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
let minimum_stake_balance = rpc_client
@ -1238,7 +1252,12 @@ fn test_stake_set_lockup() {
from: 0,
};
process_command(&config).unwrap();
check_recent_balance(
check_balance!(
10 * minimum_stake_balance,
&rpc_client,
&stake_account_pubkey,
);
check_balance!(
10 * minimum_stake_balance,
&rpc_client,
&stake_account_pubkey,
@ -1371,7 +1390,7 @@ fn test_stake_set_lockup() {
amount: SpendAmount::Some(minimum_nonce_balance),
};
process_command(&config).unwrap();
check_recent_balance(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey);
check_balance!(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey);
// Fetch nonce hash
let nonce_hash = nonce_utils::get_account_with_commitment(
@ -1467,10 +1486,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 200_000)
.unwrap();
check_recent_balance(200_000, &rpc_client, &config.signers[0].pubkey());
check_balance!(200_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
check_balance!(100_000, &rpc_client, &offline_pubkey);
// Create nonce account
let minimum_nonce_balance = rpc_client
@ -1547,7 +1566,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
from: 0,
};
process_command(&config).unwrap();
check_recent_balance(50_000, &rpc_client, &stake_pubkey);
check_balance!(50_000, &rpc_client, &stake_pubkey);
// Fetch nonce hash
let nonce_hash = nonce_utils::get_account_with_commitment(
@ -1566,7 +1585,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
config_offline.command = CliCommand::WithdrawStake {
stake_account_pubkey: stake_pubkey,
destination_account_pubkey: recipient_pubkey,
amount: SpendAmount::Some(42),
amount: SpendAmount::Some(50_000),
withdraw_authority: 0,
custodian: None,
sign_only: true,
@ -1585,7 +1604,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
config.command = CliCommand::WithdrawStake {
stake_account_pubkey: stake_pubkey,
destination_account_pubkey: recipient_pubkey,
amount: SpendAmount::Some(42),
amount: SpendAmount::Some(50_000),
withdraw_authority: 0,
custodian: None,
sign_only: false,
@ -1601,7 +1620,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
fee_payer: 0,
};
process_command(&config).unwrap();
check_recent_balance(42, &rpc_client, &recipient_pubkey);
check_balance!(50_000, &rpc_client, &recipient_pubkey);
// Fetch nonce hash
let nonce_hash = nonce_utils::get_account_with_commitment(
@ -1661,7 +1680,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
process_command(&config).unwrap();
let seed_address =
Pubkey::create_with_seed(&stake_pubkey, seed, &stake::program::id()).unwrap();
check_recent_balance(50_000, &rpc_client, &seed_address);
check_balance!(50_000, &rpc_client, &seed_address);
}
#[test]


@ -1,9 +1,11 @@
#![allow(clippy::integer_arithmetic)]
#![allow(clippy::redundant_closure)]
use {
solana_cli::{
check_balance,
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
spend_utils::SpendAmount,
test_utils::{check_ready, check_recent_balance},
test_utils::check_ready,
},
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
solana_client::{
@ -14,6 +16,8 @@ use {
solana_faucet::faucet::run_local_faucet,
solana_sdk::{
commitment_config::CommitmentConfig,
fee::FeeStructure,
native_token::sol_to_lamports,
nonce::State as NonceState,
pubkey::Pubkey,
signature::{keypair_from_seed, Keypair, NullSigner, Signer},
@ -26,6 +30,8 @@ use {
#[test]
fn test_transfer() {
solana_logger::setup();
let fee_one_sig = FeeStructure::default().get_max_fee(1, 0);
let fee_two_sig = FeeStructure::default().get_max_fee(2, 0);
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
@ -49,15 +55,16 @@ fn test_transfer() {
let sender_pubkey = config.signers[0].pubkey();
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap();
check_recent_balance(50_000, &rpc_client, &sender_pubkey);
check_recent_balance(0, &rpc_client, &recipient_pubkey);
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, sol_to_lamports(5.0))
.unwrap();
check_balance!(sol_to_lamports(5.0), &rpc_client, &sender_pubkey);
check_balance!(0, &rpc_client, &recipient_pubkey);
check_ready(&rpc_client);
// Plain ole transfer
config.command = CliCommand::Transfer {
amount: SpendAmount::Some(10),
amount: SpendAmount::Some(sol_to_lamports(1.0)),
to: recipient_pubkey,
from: 0,
sign_only: false,
@ -73,12 +80,16 @@ fn test_transfer() {
derived_address_program_id: None,
};
process_command(&config).unwrap();
check_recent_balance(49_989, &rpc_client, &sender_pubkey);
check_recent_balance(10, &rpc_client, &recipient_pubkey);
check_balance!(
sol_to_lamports(4.0) - fee_one_sig,
&rpc_client,
&sender_pubkey
);
check_balance!(sol_to_lamports(1.0), &rpc_client, &recipient_pubkey);
// Plain ole transfer, failure due to InsufficientFundsForSpendAndFee
config.command = CliCommand::Transfer {
amount: SpendAmount::Some(49_989),
amount: SpendAmount::Some(sol_to_lamports(4.0)),
to: recipient_pubkey,
from: 0,
sign_only: false,
@ -94,8 +105,12 @@ fn test_transfer() {
derived_address_program_id: None,
};
assert!(process_command(&config).is_err());
check_recent_balance(49_989, &rpc_client, &sender_pubkey);
check_recent_balance(10, &rpc_client, &recipient_pubkey);
check_balance!(
sol_to_lamports(4.0) - fee_one_sig,
&rpc_client,
&sender_pubkey
);
check_balance!(sol_to_lamports(1.0), &rpc_client, &recipient_pubkey);
let mut offline = CliConfig::recent_for_tests();
offline.json_rpc_url = String::default();
@ -105,13 +120,14 @@ fn test_transfer() {
process_command(&offline).unwrap_err();
let offline_pubkey = offline.signers[0].pubkey();
request_and_confirm_airdrop(&rpc_client, &offline, &offline_pubkey, 50).unwrap();
check_recent_balance(50, &rpc_client, &offline_pubkey);
request_and_confirm_airdrop(&rpc_client, &offline, &offline_pubkey, sol_to_lamports(1.0))
.unwrap();
check_balance!(sol_to_lamports(1.0), &rpc_client, &offline_pubkey);
// Offline transfer
let blockhash = rpc_client.get_latest_blockhash().unwrap();
offline.command = CliCommand::Transfer {
amount: SpendAmount::Some(10),
amount: SpendAmount::Some(sol_to_lamports(0.5)),
to: recipient_pubkey,
from: 0,
sign_only: true,
@ -133,7 +149,7 @@ fn test_transfer() {
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
config.signers = vec![&offline_presigner];
config.command = CliCommand::Transfer {
amount: SpendAmount::Some(10),
amount: SpendAmount::Some(sol_to_lamports(0.5)),
to: recipient_pubkey,
from: 0,
sign_only: false,
@ -149,8 +165,12 @@ fn test_transfer() {
derived_address_program_id: None,
};
process_command(&config).unwrap();
check_recent_balance(39, &rpc_client, &offline_pubkey);
check_recent_balance(20, &rpc_client, &recipient_pubkey);
check_balance!(
sol_to_lamports(0.5) - fee_one_sig,
&rpc_client,
&offline_pubkey
);
check_balance!(sol_to_lamports(1.5), &rpc_client, &recipient_pubkey);
// Create nonce account
let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap();
@ -166,7 +186,11 @@ fn test_transfer() {
amount: SpendAmount::Some(minimum_nonce_balance),
};
process_command(&config).unwrap();
check_recent_balance(49_987 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
check_balance!(
sol_to_lamports(4.0) - fee_one_sig - fee_two_sig - minimum_nonce_balance,
&rpc_client,
&sender_pubkey,
);
// Fetch nonce hash
let nonce_hash = nonce_utils::get_account_with_commitment(
@ -181,7 +205,7 @@ fn test_transfer() {
// Nonced transfer
config.signers = vec![&default_signer];
config.command = CliCommand::Transfer {
amount: SpendAmount::Some(10),
amount: SpendAmount::Some(sol_to_lamports(1.0)),
to: recipient_pubkey,
from: 0,
sign_only: false,
@ -200,8 +224,12 @@ fn test_transfer() {
derived_address_program_id: None,
};
process_command(&config).unwrap();
check_recent_balance(49_976 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
check_recent_balance(30, &rpc_client, &recipient_pubkey);
check_balance!(
sol_to_lamports(3.0) - 2 * fee_one_sig - fee_two_sig - minimum_nonce_balance,
&rpc_client,
&sender_pubkey,
);
check_balance!(sol_to_lamports(2.5), &rpc_client, &recipient_pubkey);
let new_nonce_hash = nonce_utils::get_account_with_commitment(
&rpc_client,
&nonce_account.pubkey(),
@ -221,7 +249,11 @@ fn test_transfer() {
new_authority: offline_pubkey,
};
process_command(&config).unwrap();
check_recent_balance(49_975 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
check_balance!(
sol_to_lamports(3.0) - 3 * fee_one_sig - fee_two_sig - minimum_nonce_balance,
&rpc_client,
&sender_pubkey,
);
// Fetch nonce hash
let nonce_hash = nonce_utils::get_account_with_commitment(
@ -236,7 +268,7 @@ fn test_transfer() {
// Offline, nonced transfer
offline.signers = vec![&default_offline_signer];
offline.command = CliCommand::Transfer {
amount: SpendAmount::Some(10),
amount: SpendAmount::Some(sol_to_lamports(0.4)),
to: recipient_pubkey,
from: 0,
sign_only: true,
@ -257,7 +289,7 @@ fn test_transfer() {
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
config.signers = vec![&offline_presigner];
config.command = CliCommand::Transfer {
amount: SpendAmount::Some(10),
amount: SpendAmount::Some(sol_to_lamports(0.4)),
to: recipient_pubkey,
from: 0,
sign_only: false,
@ -276,13 +308,18 @@ fn test_transfer() {
derived_address_program_id: None,
};
process_command(&config).unwrap();
check_recent_balance(28, &rpc_client, &offline_pubkey);
check_recent_balance(40, &rpc_client, &recipient_pubkey);
check_balance!(
sol_to_lamports(0.1) - 2 * fee_one_sig,
&rpc_client,
&offline_pubkey
);
check_balance!(sol_to_lamports(2.9), &rpc_client, &recipient_pubkey);
}
#[test]
fn test_transfer_multisession_signing() {
solana_logger::setup();
let fee = FeeStructure::default().get_max_fee(2, 0);
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
@ -305,19 +342,27 @@ fn test_transfer_multisession_signing() {
&rpc_client,
&CliConfig::recent_for_tests(),
&offline_from_signer.pubkey(),
43,
sol_to_lamports(43.0),
)
.unwrap();
request_and_confirm_airdrop(
&rpc_client,
&CliConfig::recent_for_tests(),
&offline_fee_payer_signer.pubkey(),
3,
sol_to_lamports(1.0) + 2 * fee,
)
.unwrap();
check_recent_balance(43, &rpc_client, &offline_from_signer.pubkey());
check_recent_balance(3, &rpc_client, &offline_fee_payer_signer.pubkey());
check_recent_balance(0, &rpc_client, &to_pubkey);
check_balance!(
sol_to_lamports(43.0),
&rpc_client,
&offline_from_signer.pubkey(),
);
check_balance!(
sol_to_lamports(1.0) + 2 * fee,
&rpc_client,
&offline_fee_payer_signer.pubkey(),
);
check_balance!(0, &rpc_client, &to_pubkey);
check_ready(&rpc_client);
@ -331,7 +376,7 @@ fn test_transfer_multisession_signing() {
fee_payer_config.command = CliCommand::ClusterVersion;
process_command(&fee_payer_config).unwrap_err();
fee_payer_config.command = CliCommand::Transfer {
amount: SpendAmount::Some(42),
amount: SpendAmount::Some(sol_to_lamports(42.0)),
to: to_pubkey,
from: 1,
sign_only: true,
@ -362,7 +407,7 @@ fn test_transfer_multisession_signing() {
from_config.command = CliCommand::ClusterVersion;
process_command(&from_config).unwrap_err();
from_config.command = CliCommand::Transfer {
amount: SpendAmount::Some(42),
amount: SpendAmount::Some(sol_to_lamports(42.0)),
to: to_pubkey,
from: 1,
sign_only: true,
@ -390,7 +435,7 @@ fn test_transfer_multisession_signing() {
config.json_rpc_url = test_validator.rpc_url();
config.signers = vec![&fee_payer_presigner, &from_presigner];
config.command = CliCommand::Transfer {
amount: SpendAmount::Some(42),
amount: SpendAmount::Some(sol_to_lamports(42.0)),
to: to_pubkey,
from: 1,
sign_only: false,
@ -407,14 +452,23 @@ fn test_transfer_multisession_signing() {
};
process_command(&config).unwrap();
check_recent_balance(1, &rpc_client, &offline_from_signer.pubkey());
check_recent_balance(1, &rpc_client, &offline_fee_payer_signer.pubkey());
check_recent_balance(42, &rpc_client, &to_pubkey);
check_balance!(
sol_to_lamports(1.0),
&rpc_client,
&offline_from_signer.pubkey(),
);
check_balance!(
sol_to_lamports(1.0) + fee,
&rpc_client,
&offline_fee_payer_signer.pubkey(),
);
check_balance!(sol_to_lamports(42.0), &rpc_client, &to_pubkey);
}
#[test]
fn test_transfer_all() {
solana_logger::setup();
let fee = FeeStructure::default().get_max_fee(1, 0);
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
@ -437,9 +491,9 @@ fn test_transfer_all() {
let sender_pubkey = config.signers[0].pubkey();
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap();
check_recent_balance(50_000, &rpc_client, &sender_pubkey);
check_recent_balance(0, &rpc_client, &recipient_pubkey);
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 500_000).unwrap();
check_balance!(500_000, &rpc_client, &sender_pubkey);
check_balance!(0, &rpc_client, &recipient_pubkey);
check_ready(&rpc_client);
@ -461,8 +515,8 @@ fn test_transfer_all() {
derived_address_program_id: None,
};
process_command(&config).unwrap();
check_recent_balance(0, &rpc_client, &sender_pubkey);
check_recent_balance(49_999, &rpc_client, &recipient_pubkey);
check_balance!(0, &rpc_client, &sender_pubkey);
check_balance!(500_000 - fee, &rpc_client, &recipient_pubkey);
}
#[test]
@ -491,8 +545,8 @@ fn test_transfer_unfunded_recipient() {
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap();
check_recent_balance(50_000, &rpc_client, &sender_pubkey);
check_recent_balance(0, &rpc_client, &recipient_pubkey);
check_balance!(50_000, &rpc_client, &sender_pubkey);
check_balance!(0, &rpc_client, &recipient_pubkey);
check_ready(&rpc_client);
@ -521,6 +575,7 @@ fn test_transfer_unfunded_recipient() {
#[test]
fn test_transfer_with_seed() {
solana_logger::setup();
let fee = FeeStructure::default().get_max_fee(1, 0);
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
@ -551,17 +606,19 @@ fn test_transfer_with_seed() {
)
.unwrap();
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 1).unwrap();
request_and_confirm_airdrop(&rpc_client, &config, &derived_address, 50_000).unwrap();
check_recent_balance(1, &rpc_client, &sender_pubkey);
check_recent_balance(50_000, &rpc_client, &derived_address);
check_recent_balance(0, &rpc_client, &recipient_pubkey);
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, sol_to_lamports(1.0))
.unwrap();
request_and_confirm_airdrop(&rpc_client, &config, &derived_address, sol_to_lamports(5.0))
.unwrap();
check_balance!(sol_to_lamports(1.0), &rpc_client, &sender_pubkey);
check_balance!(sol_to_lamports(5.0), &rpc_client, &derived_address);
check_balance!(0, &rpc_client, &recipient_pubkey);
check_ready(&rpc_client);
// Transfer with seed
config.command = CliCommand::Transfer {
amount: SpendAmount::Some(50_000),
amount: SpendAmount::Some(sol_to_lamports(5.0)),
to: recipient_pubkey,
from: 0,
sign_only: false,
@ -577,7 +634,7 @@ fn test_transfer_with_seed() {
derived_address_program_id: Some(derived_address_program_id),
};
process_command(&config).unwrap();
check_recent_balance(0, &rpc_client, &sender_pubkey);
check_recent_balance(50_000, &rpc_client, &recipient_pubkey);
check_recent_balance(0, &rpc_client, &derived_address);
check_balance!(sol_to_lamports(1.0) - fee, &rpc_client, &sender_pubkey);
check_balance!(sol_to_lamports(5.0), &rpc_client, &recipient_pubkey);
check_balance!(0, &rpc_client, &derived_address);
}
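Throughout these test diffs, check_recent_balance(...) calls are replaced by the check_balance! macro exported from solana_cli, and whole-SOL amounts are now expressed in lamports via sol_to_lamports. The real macro lives in solana_cli; the snippet below is only a hypothetical polling equivalent, sketched against the public RpcClient::get_balance API, to illustrate what the assertions are checking.

// Hypothetical stand-in for check_balance!; not the solana_cli implementation.
use {
    solana_client::rpc_client::RpcClient,
    solana_sdk::{native_token::sol_to_lamports, pubkey::Pubkey},
    std::{thread::sleep, time::Duration},
};

fn assert_balance(expected: u64, rpc_client: &RpcClient, pubkey: &Pubkey) {
    let mut balance = 0;
    // Poll briefly so the assertion tolerates commitment lag, then fail loudly.
    for _ in 0..5 {
        balance = rpc_client.get_balance(pubkey).unwrap();
        if balance == expected {
            return;
        }
        sleep(Duration::from_millis(500));
    }
    assert_eq!(balance, expected);
}

fn example(rpc_client: &RpcClient, recipient: &Pubkey) {
    // Amounts are passed in lamports, so whole-SOL figures go through sol_to_lamports.
    assert_balance(sol_to_lamports(42.0), rpc_client, recipient);
}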

@ -1,9 +1,11 @@
#![allow(clippy::integer_arithmetic)]
use {
solana_cli::{
check_balance,
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
spend_utils::SpendAmount,
test_utils::check_recent_balance,
},
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
solana_client::{
blockhash_query::{self, BlockhashQuery},
rpc_client::RpcClient,
@ -12,7 +14,7 @@ use {
solana_sdk::{
account_utils::StateMut,
commitment_config::CommitmentConfig,
signature::{Keypair, Signer},
signature::{Keypair, NullSigner, Signer},
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidator,
@ -49,7 +51,13 @@ fn test_vote_authorize_and_withdraw() {
authorized_voter: None,
authorized_withdrawer: config.signers[0].pubkey(),
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
let vote_account = rpc_client
@ -62,12 +70,12 @@ fn test_vote_authorize_and_withdraw() {
.get_minimum_balance_for_rent_exemption(VoteState::size_of())
.unwrap()
.max(1);
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
// Transfer in some more SOL
config.signers = vec![&default_signer];
config.command = CliCommand::Transfer {
amount: SpendAmount::Some(1_000),
amount: SpendAmount::Some(10_000),
to: vote_account_pubkey,
from: 0,
sign_only: false,
@ -83,8 +91,8 @@ fn test_vote_authorize_and_withdraw() {
derived_address_program_id: None,
};
process_command(&config).unwrap();
let expected_balance = expected_balance + 1_000;
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
let expected_balance = expected_balance + 10_000;
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
// Authorize vote account withdrawal to another signer
let first_withdraw_authority = Keypair::new();
@ -93,7 +101,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_authorized_pubkey: first_withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
@ -112,7 +126,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 1,
new_authorized: Some(1),
};
@ -126,7 +146,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 1,
new_authorized: Some(2),
};
@ -144,14 +170,20 @@ fn test_vote_authorize_and_withdraw() {
config.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(100),
withdraw_amount: SpendAmount::Some(1_000),
destination_account_pubkey: destination_account,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
let expected_balance = expected_balance - 100;
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
check_recent_balance(100, &rpc_client, &destination_account);
let expected_balance = expected_balance - 1_000;
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
check_balance!(1_000, &rpc_client, &destination_account);
// Re-assign validator identity
let new_identity_keypair = Keypair::new();
@ -160,7 +192,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
@ -172,8 +210,281 @@ fn test_vote_authorize_and_withdraw() {
withdraw_authority: 1,
destination_account_pubkey: destination_account,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
check_recent_balance(0, &rpc_client, &vote_account_pubkey);
check_recent_balance(expected_balance, &rpc_client, &destination_account);
check_balance!(0, &rpc_client, &vote_account_pubkey);
check_balance!(expected_balance, &rpc_client, &destination_account);
}
#[test]
fn test_offline_vote_authorize_and_withdraw() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
let default_signer = Keypair::new();
let mut config_payer = CliConfig::recent_for_tests();
config_payer.json_rpc_url = test_validator.rpc_url();
config_payer.signers = vec![&default_signer];
let mut config_offline = CliConfig::recent_for_tests();
config_offline.json_rpc_url = String::default();
config_offline.command = CliCommand::ClusterVersion;
let offline_keypair = Keypair::new();
config_offline.signers = vec![&offline_keypair];
// Verify that we cannot reach the cluster
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(
&rpc_client,
&config_payer,
&config_payer.signers[0].pubkey(),
100_000,
)
.unwrap();
check_balance!(100_000, &rpc_client, &config_payer.signers[0].pubkey());
request_and_confirm_airdrop(
&rpc_client,
&config_offline,
&config_offline.signers[0].pubkey(),
100_000,
)
.unwrap();
check_balance!(100_000, &rpc_client, &config_offline.signers[0].pubkey());
// Create vote account with specific withdrawer
let vote_account_keypair = Keypair::new();
let vote_account_pubkey = vote_account_keypair.pubkey();
config_payer.signers = vec![&default_signer, &vote_account_keypair];
config_payer.command = CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 0,
authorized_voter: None,
authorized_withdrawer: offline_keypair.pubkey(),
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
let vote_account = rpc_client
.get_account(&vote_account_keypair.pubkey())
.unwrap();
let vote_state: VoteStateVersions = vote_account.state().unwrap();
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
assert_eq!(authorized_withdrawer, offline_keypair.pubkey());
let expected_balance = rpc_client
.get_minimum_balance_for_rent_exemption(VoteState::size_of())
.unwrap()
.max(1);
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
// Transfer in some more SOL
config_payer.signers = vec![&default_signer];
config_payer.command = CliCommand::Transfer {
amount: SpendAmount::Some(10_000),
to: vote_account_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
derived_address_seed: None,
derived_address_program_id: None,
};
process_command(&config_payer).unwrap();
let expected_balance = expected_balance + 10_000;
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
// Authorize vote account withdrawal to another signer, offline
let withdraw_authority = Keypair::new();
let blockhash = rpc_client.get_latest_blockhash().unwrap();
config_offline.command = CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[0].pubkey())
.unwrap();
config_payer.signers = vec![&offline_presigner];
config_payer.command = CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
process_command(&config_payer).unwrap();
let vote_account = rpc_client
.get_account(&vote_account_keypair.pubkey())
.unwrap();
let vote_state: VoteStateVersions = vote_account.state().unwrap();
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
assert_eq!(authorized_withdrawer, withdraw_authority.pubkey());
// Withdraw from vote account offline
let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
let blockhash = rpc_client.get_latest_blockhash().unwrap();
let fee_payer_null_signer = NullSigner::new(&default_signer.pubkey());
config_offline.signers = vec![&fee_payer_null_signer, &withdraw_authority];
config_offline.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(1_000),
destination_account_pubkey: destination_account,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[1].pubkey())
.unwrap();
config_payer.signers = vec![&default_signer, &offline_presigner];
config_payer.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(1_000),
destination_account_pubkey: destination_account,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
let expected_balance = expected_balance - 1_000;
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
check_balance!(1_000, &rpc_client, &destination_account);
// Re-assign validator identity offline
let blockhash = rpc_client.get_latest_blockhash().unwrap();
let new_identity_keypair = Keypair::new();
let new_identity_null_signer = NullSigner::new(&new_identity_keypair.pubkey());
config_offline.signers = vec![
&fee_payer_null_signer,
&withdraw_authority,
&new_identity_null_signer,
];
config_offline.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_offline).unwrap();
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[1].pubkey())
.unwrap();
config_payer.signers = vec![&default_signer, &offline_presigner, &new_identity_keypair];
config_payer.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
// Close vote account offline. Must use WithdrawFromVoteAccount and specify amount, since
// CloseVoteAccount requires RpcClient
let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
config_offline.signers = vec![&fee_payer_null_signer, &withdraw_authority];
config_offline.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(expected_balance),
destination_account_pubkey: destination_account,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_offline).unwrap();
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[1].pubkey())
.unwrap();
config_payer.signers = vec![&default_signer, &offline_presigner];
config_payer.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(expected_balance),
destination_account_pubkey: destination_account,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
check_balance!(0, &rpc_client, &vote_account_pubkey);
check_balance!(expected_balance, &rpc_client, &destination_account);
}
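The new test above is long; the handoff it exercises boils down to two passes over the same CliCommand. The following condensed sketch (not upstream code; the localhost URL and freshly generated keys are placeholders) shows the offline sign-only pass followed by the online submission with a presigner, using only APIs that appear in this diff.

use {
    solana_cli::cli::{process_command, CliCommand, CliConfig},
    solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
    solana_client::{
        blockhash_query::{self, BlockhashQuery},
        rpc_client::RpcClient,
    },
    solana_sdk::{
        commitment_config::CommitmentConfig,
        signature::{Keypair, Signer},
    },
    solana_vote_program::vote_state::VoteAuthorize,
};

fn main() {
    // Placeholders; the test derives these from TestValidator, airdrops, and CreateVoteAccount.
    let rpc_url = "http://localhost:8899";
    let rpc_client = RpcClient::new_with_commitment(rpc_url, CommitmentConfig::processed());
    let offline_withdrawer = Keypair::new();
    let new_withdraw_authority = Keypair::new();
    let vote_account_pubkey = Keypair::new().pubkey();

    // Pass 1: sign offline against a pinned blockhash; no cluster access is needed.
    let blockhash = rpc_client.get_latest_blockhash().unwrap();
    let mut offline = CliConfig::recent_for_tests();
    offline.json_rpc_url = String::default();
    offline.signers = vec![&offline_withdrawer];
    offline.output_format = OutputFormat::JsonCompact;
    offline.command = CliCommand::VoteAuthorize {
        vote_account_pubkey,
        new_authorized_pubkey: new_withdraw_authority.pubkey(),
        vote_authorize: VoteAuthorize::Withdrawer,
        sign_only: true,
        dump_transaction_message: false,
        blockhash_query: BlockhashQuery::None(blockhash),
        nonce_account: None,
        nonce_authority: 0,
        memo: None,
        fee_payer: 0,
        authorized: 0,
        new_authorized: None,
    };
    let sig_response = process_command(&offline).unwrap();
    let sign_only = parse_sign_only_reply_string(&sig_response);
    assert!(sign_only.has_all_signers());

    // Pass 2: replay the identical command online, substituting a presigner for the offline
    // key and pinning the same blockhash so the captured signature still matches.
    let offline_presigner = sign_only
        .presigner_of(&offline_withdrawer.pubkey())
        .unwrap();
    let mut online = CliConfig::recent_for_tests();
    online.json_rpc_url = rpc_url.to_string();
    online.signers = vec![&offline_presigner];
    online.command = CliCommand::VoteAuthorize {
        vote_account_pubkey,
        new_authorized_pubkey: new_withdraw_authority.pubkey(),
        vote_authorize: VoteAuthorize::Withdrawer,
        sign_only: false,
        dump_transaction_message: false,
        blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
        nonce_account: None,
        nonce_authority: 0,
        memo: None,
        fee_payer: 0,
        authorized: 0,
        new_authorized: None,
    };
    process_command(&online).unwrap();
}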

@ -1,6 +1,6 @@
[package]
name = "solana-client-test"
version = "1.9.0"
version = "1.9.17"
description = "Solana RPC Test"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -8,26 +8,29 @@ license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-client-test"
edition = "2021"
publish = false
[dependencies]
serde_json = "1.0.72"
serial_test = "0.5.1"
solana-client = { path = "../client", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-test-validator = { path = "../test-validator", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.17" }
solana-ledger = { path = "../ledger", version = "=1.9.17" }
solana-measure = { path = "../measure", version = "=1.9.17" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.17" }
solana-metrics = { path = "../metrics", version = "=1.9.17" }
solana-perf = { path = "../perf", version = "=1.9.17" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.17" }
solana-rpc = { path = "../rpc", version = "=1.9.17" }
solana-runtime = { path = "../runtime", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-streamer = { path = "../streamer", version = "=1.9.17" }
solana-test-validator = { path = "../test-validator", version = "=1.9.17" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.17" }
solana-version = { path = "../version", version = "=1.9.17" }
systemstat = "0.1.10"
[dev-dependencies]
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.17" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@ -4,11 +4,16 @@ use {
solana_client::{
pubsub_client::PubsubClient,
rpc_client::RpcClient,
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
rpc_config::{
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
RpcProgramAccountsConfig,
},
rpc_response::SlotInfo,
},
solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
solana_rpc::{
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
rpc::create_test_transactions_and_populate_blockstore,
rpc_pubsub_service::{PubSubConfig, PubSubService},
rpc_subscriptions::RpcSubscriptions,
},
@ -20,7 +25,7 @@ use {
},
solana_sdk::{
clock::Slot,
commitment_config::CommitmentConfig,
commitment_config::{CommitmentConfig, CommitmentLevel},
native_token::sol_to_lamports,
pubkey::Pubkey,
rpc_port,
@ -29,11 +34,14 @@ use {
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidator,
solana_transaction_status::{
ConfirmedBlockWithOptionalMetadata, TransactionDetails, UiTransactionEncoding,
},
std::{
collections::HashSet,
net::{IpAddr, SocketAddr},
sync::{
atomic::{AtomicBool, Ordering},
atomic::{AtomicBool, AtomicU64, Ordering},
Arc, RwLock,
},
thread::sleep,
@ -119,9 +127,10 @@ fn test_account_subscription() {
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bob = Keypair::new();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@ -194,6 +203,105 @@ fn test_account_subscription() {
assert_eq!(errors, [].to_vec());
}
#[test]
#[serial]
fn test_block_subscription() {
// setup BankForks
let exit = Arc::new(AtomicBool::new(false));
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
..
} = create_genesis_config(10_000);
let bank = Bank::new_for_tests(&genesis_config);
let rent_exempt_amount = bank.get_minimum_balance_for_rent_exemption(0);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
// setup Blockstore
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let blockstore = Arc::new(blockstore);
// populate ledger with test txs
let bank = bank_forks.read().unwrap().working_bank();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
bank.transfer(rent_exempt_amount, &alice, &keypair2.pubkey())
.unwrap();
let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
vec![&alice, &keypair1, &keypair2, &keypair3],
0,
bank,
blockstore.clone(),
max_complete_transaction_status_slot,
);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
// setup RpcSubscriptions && PubSubService
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests_with_blockstore(
&exit,
max_complete_transaction_status_slot,
blockstore.clone(),
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
));
let pubsub_addr = SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
rpc_port::DEFAULT_RPC_PUBSUB_PORT,
);
let pub_cfg = PubSubConfig {
enable_block_subscription: true,
..PubSubConfig::default()
};
let (trigger, pubsub_service) = PubSubService::new(pub_cfg, &subscriptions, pubsub_addr);
std::thread::sleep(Duration::from_millis(400));
// setup PubsubClient
let (mut client, receiver) = PubsubClient::block_subscribe(
&format!("ws://0.0.0.0:{}/", pubsub_addr.port()),
RpcBlockSubscribeFilter::All,
Some(RpcBlockSubscribeConfig {
commitment: Some(CommitmentConfig {
commitment: CommitmentLevel::Confirmed,
}),
encoding: Some(UiTransactionEncoding::Json),
transaction_details: Some(TransactionDetails::Signatures),
show_rewards: None,
}),
)
.unwrap();
// trigger Gossip notification
let slot = bank_forks.read().unwrap().highest_slot();
subscriptions.notify_gossip_subscribers(slot);
let maybe_actual = receiver.recv_timeout(Duration::from_millis(400));
match maybe_actual {
Ok(actual) => {
let complete_block = blockstore.get_complete_block(slot, false).unwrap();
let block = ConfirmedBlockWithOptionalMetadata::from(complete_block).configure(
UiTransactionEncoding::Json,
TransactionDetails::Signatures,
false,
);
assert_eq!(actual.value.slot, slot);
assert!(block.eq(&actual.value.block.unwrap()));
}
Err(e) => {
eprintln!("unexpected websocket receive timeout");
assert_eq!(Some(e), None);
}
}
// cleanup
exit.store(true, Ordering::Relaxed);
trigger.cancel();
client.shutdown().unwrap();
pubsub_service.close().unwrap();
}
#[test]
#[serial]
fn test_program_subscription() {
@ -215,9 +323,10 @@ fn test_program_subscription() {
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bob = Keypair::new();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@ -300,9 +409,10 @@ fn test_root_subscription() {
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@ -350,8 +460,10 @@ fn test_slot_subscription() {
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,

@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.9.0"
version = "1.9.17"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -23,15 +23,15 @@ semver = "1.0.4"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.17" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.17" }
solana-faucet = { path = "../faucet", version = "=1.9.17" }
solana-net-utils = { path = "../net-utils", version = "=1.9.17" }
solana-measure = { path = "../measure", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.17" }
solana-version = { path = "../version", version = "=1.9.17" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.17" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tungstenite = { version = "0.16.0", features = ["rustls-tls-webpki-roots"] }
@ -40,7 +40,7 @@ url = "2.2.2"
[dev-dependencies]
assert_matches = "1.5.0"
jsonrpc-http-server = "18.0.0"
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.17" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@ -358,7 +358,7 @@ mod tests {
context: RpcResponseContext { slot: 1 },
value: json!(RpcFees {
blockhash: rpc_blockhash.to_string(),
fee_calculator: rpc_fee_calc.clone(),
fee_calculator: rpc_fee_calc,
last_valid_slot: 42,
last_valid_block_height: 42,
}),
@ -366,7 +366,7 @@ mod tests {
let get_fee_calculator_for_blockhash_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!(RpcFeeCalculator {
fee_calculator: rpc_fee_calc.clone()
fee_calculator: rpc_fee_calc
}),
});
let mut mocks = HashMap::new();
@ -376,7 +376,7 @@ mod tests {
BlockhashQuery::default()
.get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default())
.unwrap(),
(rpc_blockhash, rpc_fee_calc.clone()),
(rpc_blockhash, rpc_fee_calc),
);
let mut mocks = HashMap::new();
mocks.insert(RpcRequest::GetFees, get_recent_blockhash_response.clone());
@ -410,7 +410,7 @@ mod tests {
let data = nonce::state::Data {
authority: Pubkey::new(&[3u8; 32]),
blockhash: nonce_blockhash,
fee_calculator: nonce_fee_calc.clone(),
fee_calculator: nonce_fee_calc,
};
let nonce_account = Account::new_data_with_space(
42,
@ -439,7 +439,7 @@ mod tests {
BlockhashQuery::All(Source::NonceAccount(nonce_pubkey))
.get_blockhash_and_fee_calculator(&rpc_client, CommitmentConfig::default())
.unwrap(),
(nonce_blockhash, nonce_fee_calc.clone()),
(nonce_blockhash, nonce_fee_calc),
);
let mut mocks = HashMap::new();
mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone());

@ -37,14 +37,14 @@ impl HttpSender {
///
/// The URL is an HTTP URL, usually for port 8899, as in
/// "http://localhost:8899". The sender has a default timeout of 30 seconds.
pub fn new(url: String) -> Self {
pub fn new<U: ToString>(url: U) -> Self {
Self::new_with_timeout(url, Duration::from_secs(30))
}
/// Create an HTTP RPC sender.
///
/// The URL is an HTTP URL, usually for port 8899.
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
pub fn new_with_timeout<U: ToString>(url: U, timeout: Duration) -> Self {
// `reqwest::blocking::Client` panics if run in a tokio async context. Shuttle the
// request to a different tokio thread to avoid this
let client = Arc::new(
@ -58,7 +58,7 @@ impl HttpSender {
Self {
client,
url,
url: url.to_string(),
request_id: AtomicU64::new(0),
stats: RwLock::new(RpcTransportStats::default()),
}

@ -75,13 +75,13 @@ pub struct MockSender {
/// from [`RpcRequest`] to a JSON [`Value`] response. Any entries in this map
/// override the default behavior for the given request.
impl MockSender {
pub fn new(url: String) -> Self {
pub fn new<U: ToString>(url: U) -> Self {
Self::new_with_mocks(url, Mocks::default())
}
pub fn new_with_mocks(url: String, mocks: Mocks) -> Self {
pub fn new_with_mocks<U: ToString>(url: U, mocks: Mocks) -> Self {
Self {
url,
url: url.to_string(),
mocks: RwLock::new(mocks),
}
}
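Since MockSender keys its overrides by RpcRequest (a Mocks map is just a HashMap<RpcRequest, Value>, as the blockhash_query tests later in this diff show), a unit test can stub a single RPC call and leave the canned defaults for everything else. A small sketch, assuming the mocked getBalance response shape below:

use {
    serde_json::json,
    solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest},
    std::collections::HashMap,
};

fn main() {
    // Override just getBalance; unmocked requests fall back to MockSender's defaults.
    let mut mocks = HashMap::new();
    mocks.insert(
        RpcRequest::GetBalance,
        json!({ "context": { "slot": 1 }, "value": 50 }),
    );
    let client = RpcClient::new_mock_with_mocks("succeeds", mocks);
    let balance = client
        .get_balance(&solana_sdk::pubkey::new_rand())
        .unwrap();
    assert_eq!(balance, 50);
}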

@ -1,12 +1,13 @@
use {
crate::{
rpc_config::{
RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig,
RpcTransactionLogsConfig, RpcTransactionLogsFilter,
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig,
RpcTransactionLogsFilter,
},
rpc_response::{
Response as RpcResponse, RpcKeyedAccount, RpcLogsResponse, RpcSignatureResult,
SlotInfo, SlotUpdate,
Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse,
RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate,
},
},
log::*,
@ -173,6 +174,12 @@ pub type SignatureSubscription = (
Receiver<RpcResponse<RpcSignatureResult>>,
);
pub type PubsubBlockClientSubscription = PubsubClientSubscription<RpcResponse<RpcBlockUpdate>>;
pub type BlockSubscription = (
PubsubBlockClientSubscription,
Receiver<RpcResponse<RpcBlockUpdate>>,
);
pub type PubsubProgramClientSubscription = PubsubClientSubscription<RpcResponse<RpcKeyedAccount>>;
pub type ProgramSubscription = (
PubsubProgramClientSubscription,
@ -185,6 +192,9 @@ pub type AccountSubscription = (
Receiver<RpcResponse<UiAccount>>,
);
pub type PubsubVoteClientSubscription = PubsubClientSubscription<RpcVote>;
pub type VoteSubscription = (PubsubVoteClientSubscription, Receiver<RpcVote>);
pub type PubsubRootClientSubscription = PubsubClientSubscription<Slot>;
pub type RootSubscription = (PubsubRootClientSubscription, Receiver<Slot>);
@ -266,6 +276,45 @@ impl PubsubClient {
Ok((result, receiver))
}
pub fn block_subscribe(
url: &str,
filter: RpcBlockSubscribeFilter,
config: Option<RpcBlockSubscribeConfig>,
) -> Result<BlockSubscription, PubsubClientError> {
let url = Url::parse(url)?;
let socket = connect_with_retry(url)?;
let (sender, receiver) = channel();
let socket = Arc::new(RwLock::new(socket));
let socket_clone = socket.clone();
let exit = Arc::new(AtomicBool::new(false));
let exit_clone = exit.clone();
let body = json!({
"jsonrpc":"2.0",
"id":1,
"method":"blockSubscribe",
"params":[filter, config]
})
.to_string();
let subscription_id = PubsubBlockClientSubscription::send_subscribe(&socket_clone, body)?;
let t_cleanup = std::thread::spawn(move || {
Self::cleanup_with_sender(exit_clone, &socket_clone, sender)
});
let result = PubsubClientSubscription {
message_type: PhantomData,
operation: "block",
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};
Ok((result, receiver))
}
pub fn logs_subscribe(
url: &str,
filter: RpcTransactionLogsFilter,
@ -346,6 +395,39 @@ impl PubsubClient {
Ok((result, receiver))
}
pub fn vote_subscribe(url: &str) -> Result<VoteSubscription, PubsubClientError> {
let url = Url::parse(url)?;
let socket = connect_with_retry(url)?;
let (sender, receiver) = channel();
let socket = Arc::new(RwLock::new(socket));
let socket_clone = socket.clone();
let exit = Arc::new(AtomicBool::new(false));
let exit_clone = exit.clone();
let body = json!({
"jsonrpc":"2.0",
"id":1,
"method":"voteSubscribe",
})
.to_string();
let subscription_id = PubsubVoteClientSubscription::send_subscribe(&socket_clone, body)?;
let t_cleanup = std::thread::spawn(move || {
Self::cleanup_with_sender(exit_clone, &socket_clone, sender)
});
let result = PubsubClientSubscription {
message_type: PhantomData,
operation: "vote",
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};
Ok((result, receiver))
}
pub fn root_subscribe(url: &str) -> Result<RootSubscription, PubsubClientError> {
let url = Url::parse(url)?;
let socket = connect_with_retry(url)?;
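A minimal client-side sketch of the new blockSubscribe flow added above. The websocket URL is a placeholder, and the node must have block subscriptions enabled (the test flips enable_block_subscription in PubSubConfig); the config mirrors the one used in test_block_subscription.

use {
    solana_client::{
        pubsub_client::PubsubClient,
        rpc_config::{RpcBlockSubscribeConfig, RpcBlockSubscribeFilter},
    },
    solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel},
    solana_transaction_status::{TransactionDetails, UiTransactionEncoding},
    std::time::Duration,
};

fn main() {
    let (mut client, receiver) = PubsubClient::block_subscribe(
        "ws://127.0.0.1:8900/",
        RpcBlockSubscribeFilter::All,
        Some(RpcBlockSubscribeConfig {
            commitment: Some(CommitmentConfig {
                commitment: CommitmentLevel::Confirmed,
            }),
            encoding: Some(UiTransactionEncoding::Json),
            transaction_details: Some(TransactionDetails::Signatures),
            show_rewards: None,
        }),
    )
    .unwrap();

    // Each notification is an RpcResponse<RpcBlockUpdate>: the slot plus, when available,
    // the block rendered with the requested encoding and detail level.
    if let Ok(update) = receiver.recv_timeout(Duration::from_secs(10)) {
        println!(
            "slot {} (block attached: {})",
            update.value.slot,
            update.value.block.is_some()
        );
    }

    client.shutdown().unwrap();
}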

@ -163,7 +163,7 @@ impl RpcClient {
/// `RpcSender`. Most applications should use one of the other constructors,
/// such as [`new`] and [`new_mock`], which create an `RpcClient`
/// encapsulating an [`HttpSender`] and [`MockSender`] respectively.
fn new_sender<T: RpcSender + Send + Sync + 'static>(
pub fn new_sender<T: RpcSender + Send + Sync + 'static>(
sender: T,
config: RpcClientConfig,
) -> Self {
@ -191,7 +191,7 @@ impl RpcClient {
/// let url = "http://localhost:8899".to_string();
/// let client = RpcClient::new(url);
/// ```
pub fn new(url: String) -> Self {
pub fn new<U: ToString>(url: U) -> Self {
Self::new_with_commitment(url, CommitmentConfig::default())
}
@ -214,7 +214,7 @@ impl RpcClient {
/// let commitment_config = CommitmentConfig::processed();
/// let client = RpcClient::new_with_commitment(url, commitment_config);
/// ```
pub fn new_with_commitment(url: String, commitment_config: CommitmentConfig) -> Self {
pub fn new_with_commitment<U: ToString>(url: U, commitment_config: CommitmentConfig) -> Self {
Self::new_sender(
HttpSender::new(url),
RpcClientConfig::with_commitment(commitment_config),
@ -240,7 +240,7 @@ impl RpcClient {
/// let timeout = Duration::from_secs(1);
/// let client = RpcClient::new_with_timeout(url, timeout);
/// ```
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
pub fn new_with_timeout<U: ToString>(url: U, timeout: Duration) -> Self {
Self::new_sender(
HttpSender::new_with_timeout(url, timeout),
RpcClientConfig::with_commitment(CommitmentConfig::default()),
@ -269,8 +269,8 @@ impl RpcClient {
/// commitment_config,
/// );
/// ```
pub fn new_with_timeout_and_commitment(
url: String,
pub fn new_with_timeout_and_commitment<U: ToString>(
url: U,
timeout: Duration,
commitment_config: CommitmentConfig,
) -> Self {
@ -312,8 +312,8 @@ impl RpcClient {
/// confirm_transaction_initial_timeout,
/// );
/// ```
pub fn new_with_timeouts_and_commitment(
url: String,
pub fn new_with_timeouts_and_commitment<U: ToString>(
url: U,
timeout: Duration,
commitment_config: CommitmentConfig,
confirm_transaction_initial_timeout: Duration,
@ -347,7 +347,7 @@ impl RpcClient {
/// let url = "fails".to_string();
/// let successful_client = RpcClient::new_mock(url);
/// ```
pub fn new_mock(url: String) -> Self {
pub fn new_mock<U: ToString>(url: U) -> Self {
Self::new_sender(
MockSender::new(url),
RpcClientConfig::with_commitment(CommitmentConfig::default()),
@ -381,7 +381,7 @@ impl RpcClient {
/// let url = "succeeds".to_string();
/// let client = RpcClient::new_mock_with_mocks(url, mocks);
/// ```
pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
pub fn new_mock_with_mocks<U: ToString>(url: U, mocks: Mocks) -> Self {
Self::new_sender(
MockSender::new_with_mocks(url, mocks),
RpcClientConfig::with_commitment(CommitmentConfig::default()),
@ -1329,7 +1329,7 @@ impl RpcClient {
/// # Ok::<(), ClientError>(())
/// ```
pub fn get_highest_snapshot_slot(&self) -> ClientResult<RpcSnapshotSlotInfo> {
if self.get_node_version()? < semver::Version::new(1, 8, 0) {
if self.get_node_version()? < semver::Version::new(1, 9, 0) {
#[allow(deprecated)]
self.get_snapshot_slot().map(|full| RpcSnapshotSlotInfo {
full,
@ -4747,7 +4747,7 @@ impl RpcClient {
commitment: CommitmentConfig,
) -> ClientResult<(Hash, u64)> {
let (blockhash, last_valid_block_height) =
if self.get_node_version()? < semver::Version::new(1, 8, 0) {
if self.get_node_version()? < semver::Version::new(1, 9, 0) {
let Fees {
blockhash,
last_valid_block_height,
@ -4781,7 +4781,7 @@ impl RpcClient {
blockhash: &Hash,
commitment: CommitmentConfig,
) -> ClientResult<bool> {
let result = if self.get_node_version()? < semver::Version::new(1, 8, 0) {
let result = if self.get_node_version()? < semver::Version::new(1, 9, 0) {
self.get_fee_calculator_for_blockhash_with_commitment(blockhash, commitment)?
.value
.is_some()
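With the constructors above made generic over ToString, a plain &str URL now works without an explicit .to_string(); String (or anything else implementing ToString) is still accepted. A trivial sketch:

use {
    solana_client::rpc_client::RpcClient,
    solana_sdk::commitment_config::CommitmentConfig,
    std::time::Duration,
};

fn main() {
    // &str now works directly...
    let _a = RpcClient::new("http://localhost:8899");
    let _b = RpcClient::new_with_commitment("http://localhost:8899", CommitmentConfig::confirmed());
    // ...and String continues to compile as before.
    let _c = RpcClient::new_with_timeout("http://localhost:8899".to_string(), Duration::from_secs(1));
}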

@ -182,6 +182,23 @@ pub struct RpcSignatureSubscribeConfig {
pub enable_received_notification: Option<bool>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcBlockSubscribeFilter {
All,
MentionsAccountOrProgram(String),
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockSubscribeConfig {
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub encoding: Option<UiTransactionEncoding>,
pub transaction_details: Option<TransactionDetails>,
pub show_rewards: Option<bool>,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignaturesForAddressConfig {

@ -9,9 +9,10 @@ use {
transaction::{Result, TransactionError},
},
solana_transaction_status::{
ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus,
ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock,
},
std::{collections::HashMap, fmt, net::SocketAddr},
thiserror::Error,
};
pub type RpcResult<T> = client_error::Result<Response<T>>;
@ -289,6 +290,8 @@ pub struct RpcIdentity {
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcVote {
/// Vote account address, as base-58 encoded string
pub vote_pubkey: String,
pub slots: Vec<Slot>,
pub hash: String,
pub timestamp: Option<UnixTimestamp>,
@ -424,6 +427,20 @@ pub struct RpcInflationReward {
pub commission: Option<u8>, // Vote account commission when the reward was credited
}
#[derive(Clone, Deserialize, Serialize, Debug, Error, Eq, PartialEq)]
pub enum RpcBlockUpdateError {
#[error("block store error")]
BlockStoreError,
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockUpdate {
pub slot: Slot,
pub block: Option<UiConfirmedBlock>,
pub err: Option<RpcBlockUpdateError>,
}
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
let ConfirmedTransactionStatusWithSignature {

@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.17"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"
@ -21,12 +21,12 @@ bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] }
etcd-client = { version = "0.8.1", features = ["tls"]}
etcd-client = { version = "0.9.0", features = ["tls"]}
fs_extra = "1.2.0"
histogram = "0.6.9"
itertools = "0.10.1"
log = "0.4.14"
lru = "0.7.0"
lru = "0.7.1"
rand = "0.7.0"
rand_chacha = "0.2.2"
raptorq = "1.6.4"
@ -34,30 +34,32 @@ rayon = "1.5.1"
retain_mut = "0.1.5"
serde = "1.0.130"
serde_derive = "1.0.103"
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-entry = { path = "../entry", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-ledger = { path = "../ledger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-poh = { path = "../poh", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-replica-lib = { path = "../replica-lib", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-bloom = { path = "../bloom", version = "=1.9.17" }
solana-client = { path = "../client", version = "=1.9.17" }
solana-entry = { path = "../entry", version = "=1.9.17" }
solana-geyser-plugin-manager = { path = "../geyser-plugin-manager", version = "=1.9.17" }
solana-gossip = { path = "../gossip", version = "=1.9.17" }
solana-ledger = { path = "../ledger", version = "=1.9.17" }
solana-logger = { path = "../logger", version = "=1.9.17" }
solana-measure = { path = "../measure", version = "=1.9.17" }
solana-metrics = { path = "../metrics", version = "=1.9.17" }
solana-net-utils = { path = "../net-utils", version = "=1.9.17" }
solana-perf = { path = "../perf", version = "=1.9.17" }
solana-poh = { path = "../poh", version = "=1.9.17" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.17" }
solana-rpc = { path = "../rpc", version = "=1.9.17" }
solana-replica-lib = { path = "../replica-lib", version = "=1.9.17" }
solana-runtime = { path = "../runtime", version = "=1.9.17" }
solana-sdk = { path = "../sdk", version = "=1.9.17" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.17" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.17" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.17" }
solana-streamer = { path = "../streamer", version = "=1.9.17" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.17" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.17" }
tempfile = "3.2.0"
thiserror = "1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.17" }
sys-info = "0.9.1"
tokio = { version = "1", features = ["full"] }
trees = "0.4.2"
@ -71,9 +73,9 @@ matches = "0.1.9"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde_json = "1.0.72"
serial_test = "0.5.1"
solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.17" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.17" }
solana-version = { path = "../version", version = "=1.9.17" }
static_assertions = "1.1.0"
systemstat = "0.1.10"

@ -10,6 +10,7 @@ use {
rayon::prelude::*,
solana_core::{
banking_stage::{BankingStage, BankingStageStats},
leader_slot_banking_stage_metrics::LeaderSlotMetricsTracker,
qos_service::QosService,
},
solana_entry::entry::{next_hash, Entry},
@ -20,7 +21,7 @@ use {
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
},
solana_perf::{packet::to_packets_chunked, test_tx::test_tx},
solana_perf::{packet::to_packet_batches, test_tx::test_tx},
solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry},
solana_runtime::{bank::Bank, cost_model::CostModel},
solana_sdk::{
@ -70,18 +71,18 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, _signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
create_test_recorder(&bank, &blockstore, None, None);
let recorder = poh_recorder.lock().unwrap().recorder();
let tx = test_tx();
let len = 4096;
let chunk_size = 1024;
let batches = to_packets_chunked(&vec![tx; len], chunk_size);
let mut packets = VecDeque::new();
let batches = to_packet_batches(&vec![tx; len], chunk_size);
let mut packet_batches = VecDeque::new();
for batch in batches {
let batch_len = batch.packets.len();
packets.push_back((batch, vec![0usize; batch_len], false));
packet_batches.push_back((batch, vec![0usize; batch_len], false));
}
let (s, _r) = unbounded();
// This tests the performance of buffering packets.
@ -91,13 +92,14 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
&my_pubkey,
std::u128::MAX,
&poh_recorder,
&mut packets,
&mut packet_batches,
None,
&s,
None::<Box<dyn Fn()>>,
&BankingStageStats::default(),
&recorder,
&Arc::new(QosService::new(Arc::new(RwLock::new(CostModel::default())))),
&mut LeaderSlotMetricsTracker::new(0),
);
});
@ -174,7 +176,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
// set cost tracker limits to MAX so it will not filter out TXs
bank.write_cost_tracker()
.unwrap()
.set_limits(std::u64::MAX, std::u64::MAX);
.set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);
debug!("threads: {} txs: {}", num_threads, txes);
@ -206,14 +208,14 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions, PACKETS_PER_BATCH);
let verified: Vec<_> = to_packet_batches(&transactions, PACKETS_PER_BATCH);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
create_test_recorder(&bank, &blockstore, None, None);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),

@ -100,7 +100,11 @@ fn bench_retransmitter(bencher: &mut Bencher) {
let slot = 0;
let parent = 0;
let shredder = Shredder::new(slot, parent, 0, 0).unwrap();
let mut data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
let (mut data_shreds, _) = shredder.entries_to_shreds(
&keypair, &entries, true, // is_last_in_slot
0, // next_shred_index
0, // next_code_index
);
let num_packets = data_shreds.len();

@ -40,16 +40,14 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
);
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
let data_shreds = shredder
.entries_to_data_shreds(
&Keypair::new(),
&entries,
true, // is_last_in_slot
0, // next_shred_index
0, // fec_set_offset
&mut ProcessShredsStats::default(),
)
.0;
let data_shreds = shredder.entries_to_data_shreds(
&Keypair::new(),
&entries,
true, // is_last_in_slot
0, // next_shred_index
0, // fec_set_offset
&mut ProcessShredsStats::default(),
);
assert!(data_shreds.len() >= num_shreds);
data_shreds
}
@ -76,7 +74,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
let entries = create_ticks(num_ticks, 0, Hash::default());
bencher.iter(|| {
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
shredder.entries_to_shreds(&kp, &entries, true, 0);
shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
})
}
@ -95,7 +93,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
// 1Mb
bencher.iter(|| {
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
shredder.entries_to_shreds(&kp, &entries, true, 0);
shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
})
}
@ -108,7 +106,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
let data_shreds = shredder.entries_to_shreds(&kp, &entries, true, 0).0;
let (data_shreds, _) = shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
bencher.iter(|| {
let raw = &mut Shredder::deshred(&data_shreds).unwrap();
assert_ne!(raw.len(), 0);
@ -135,6 +133,7 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
Shredder::generate_coding_shreds(
&data_shreds[..symbol_count],
true, // is_last_in_slot
0, // next_code_index
)
.len();
})
@ -147,6 +146,7 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
let coding_shreds = Shredder::generate_coding_shreds(
&data_shreds[..symbol_count],
true, // is_last_in_slot
0, // next_code_index
);
bencher.iter(|| {
Shredder::try_recovery(coding_shreds[..].to_vec()).unwrap();

Some files were not shown because too many files have changed in this diff.