Compare commits

...

92 Commits

Author SHA1 Message Date
mergify[bot]
3ac7e043a7 Update InvalidRentPayingAccount error (#23680) (#23693)
(cherry picked from commit 410463fb72)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-16 05:25:23 +00:00
mergify[bot]
db43c5d46d log hash and lamport result of calculate_accounts_hash_without_index (#23425) (#23676)
(cherry picked from commit d909b7c80b)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2022-03-15 19:23:13 +00:00
mergify[bot]
48dc4b3bb2 ledger tool halt at slot verify hash (#23424) (#23677)
(cherry picked from commit ef8b7d9c62)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2022-03-15 16:17:29 +00:00
mergify[bot]
b0543a1ae6 docs: update sysvar docs for load_instruction_at_checked (#22925) (#23673)
* docs: update sysvar docs for load_instruction_at_checked

Update the instruction introspection docs to use the updated load_instruction_at_checked function instead of deprecated load_instruction_at

* Update to load_current_index_checked

(cherry picked from commit 64e2d9dc47)

Co-authored-by: Zayyan Faizal <zayyanf@gmail.com>
2022-03-15 12:51:15 +00:00
mergify[bot]
8b0576d954 Rename AccountsDb plugins to Geyser plugins (backport #23604) (#23668)
* Rename AccountsDb plugins to Geyser plugins (#23604)

(cherry picked from commit 102dd68a03)

# Conflicts:
#	Cargo.lock
#	Cargo.toml
#	core/Cargo.toml
#	core/src/replay_stage.rs
#	core/src/tvu.rs
#	geyser-plugin-interface/Cargo.toml
#	geyser-plugin-manager/Cargo.toml
#	geyser-plugin-manager/src/geyser_plugin_service.rs
#	geyser-plugin-manager/src/slot_status_notifier.rs
#	validator/src/bin/solana-test-validator.rs
#	validator/src/main.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-15 05:04:17 +00:00
mergify[bot]
5c3e1967e6 Add space for keys in calculation for rent exempt in process_set_validator_info (#23485) (#23658)
* Fix `process_set_validator_info`

Add space for keys in calculation for rent exempt in
`process_set_validator_info`.

The space required for allocating the `(ConfigKeys, ValidatorInfo)`
tuple only considered space for `ValidatorInfo`.
But `config_instruction::create_account` also requires space for `n`
keys.

* Remove one clone call from closure

(cherry picked from commit 7eaec26a1c)

Co-authored-by: Enrique Fynn <me@enriquefynn.com>
2022-03-14 23:18:14 +00:00
Michael Vines
06ebed861e Bump publish crate timeout 2022-03-13 21:01:15 -07:00
Michael Vines
52bd1658cc Disable publish of test crates 2022-03-13 20:48:17 -07:00
Michael Vines
07a6b597d0 Update Cargo.toml 2022-03-13 17:22:01 -07:00
Michael Vines
857a541ddb Pin histogram crate version to fix cargo publish
(cherry picked from commit 01b6f97f0b)

# Conflicts:
#	ledger-tool/Cargo.toml
2022-03-13 17:22:01 -07:00
Tyera Eulberg
87fee49ed7 Add token-balance unit test 2022-03-11 20:58:00 -07:00
Will Hickey
3ed915dcc9 Bump version to 1.9.13 (#23614) 2022-03-11 10:09:26 -06:00
Trent Nelson
3875bc91ab Revert "chore: bump dashmap from 4.0.2 to 5.1.0 (#23372) (#23521)" (#23596)
This reverts commit f56b25ac29.
2022-03-11 09:12:46 +01:00
mergify[bot]
e0f5fb887b Ensure blocks do not exceed the max accounts data size during Replay Stage (backport #23422) (#23589)
* Ensure blocks do not exceed the max accounts data size during Replay Stage (#23422)

(cherry picked from commit 3c6840050c)

# Conflicts:
#	runtime/src/bank.rs

* fix conflicts

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-03-10 18:59:46 +00:00
mergify[bot]
49952e05cf Report even if slot begins and ends in process_buffered_packets() (#23549) (#23576)
(cherry picked from commit 588414a776)

Co-authored-by: carllin <carl@solana.com>
2022-03-10 07:59:26 +00:00
Will Hickey
4a100fbe3b Bump version to 1.9.12 (#23577) 2022-03-09 23:22:45 -06:00
Tao Zhu
607a98e9d0 remove persist_cost_table code 2022-03-09 21:06:00 -07:00
Tao Zhu
86a97563b5 Patch validator from loading persisted program costs 2022-03-09 21:06:00 -07:00
mergify[bot]
4e5d9885da Revert exponential moving average cost model changes (backport #23541) (#23543)
* Revert "fix tests after merge"

This reverts commit ba2d83f580.

(cherry picked from commit 0a17edcc1f)

* Revert "1. Persist to blockstore less frequently;"

This reverts commit 7aa1fb4e24.

(cherry picked from commit c878c9e2cb)

# Conflicts:
#	core/src/cost_update_service.rs
#	core/src/tvu.rs
#	runtime/src/cost_model.rs

* Revert "use EMA in place of Welford"

This reverts commit 6587dbfa47.

(cherry picked from commit 9acbfa5eb1)

* Revert "- estimate a program cost as 2 standard deviation above mean"

This reverts commit a25ac1c988.

(cherry picked from commit 5a0cd05866)

# Conflicts:
#	core/src/cost_update_service.rs
#	runtime/src/cost_model.rs

* fix merge conflicts

Co-authored-by: Carl Lin <carl@solana.com>
Co-authored-by: Tao Zhu <tao@solana.com>
2022-03-09 03:55:36 +00:00
mergify[bot]
714cf0eff2 solana-validator set-identity no longer writes a tower file unnecessarily (#23542)
(cherry picked from commit b719d6a2ad)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-03-09 01:55:14 +00:00
mergify[bot]
babba3b0ff Fix getting the golden snapshot hashes during bootstrap with Incremental Snapshots (#23518) (#23532)
(cherry picked from commit 9b80452c7c)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-03-08 23:07:26 +00:00
Michael Vines
8c59fa5a73 Update regex to v1.5.5
(cherry picked from commit 536a99705b)
2022-03-08 11:53:30 -08:00
Stephen Akridge
de694402ca Bump version to 1.9.11 2022-03-08 10:53:04 -08:00
mergify[bot]
f56b25ac29 chore: bump dashmap from 4.0.2 to 5.1.0 (#23372) (#23521)
* chore: bump dashmap from 4.0.2 to 5.1.0

Bumps [dashmap](https://github.com/xacrimon/dashmap) from 4.0.2 to 5.1.0.
- [Release notes](https://github.com/xacrimon/dashmap/releases)
- [Commits](https://github.com/xacrimon/dashmap/commits/v5.1.0)

---
updated-dependencies:
- dependency-name: dashmap
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
(cherry picked from commit 3a0271c113)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-08 14:05:58 +00:00
mergify[bot]
2137008532 Fix broken doc links in solana-runtime (#23504) (#23515)
(cherry picked from commit ddbf5c782f)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2022-03-08 01:02:27 +00:00
mergify[bot]
4a4e560299 Fix incorrect nonoverlapping test in sol_memcpy (backport #21007) (#23512)
* Fix incorrect nonoverlapping test in sol_memcpy (#21007)

Thanks!

(cherry picked from commit df2b448993)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs
#	sdk/program/src/program_stubs.rs
#	sdk/src/feature_set.rs

* resolve conflicts

Co-authored-by: Brian Anderson <andersrb@gmail.com>
Co-authored-by: Jack May <jack@solana.com>
2022-03-07 23:10:54 +00:00
Tyera Eulberg
66f85a0703 Fix backport 2022-03-03 15:12:47 -07:00
Jack May
3bee925967 Resized accounts must be rent exempt 2022-03-03 10:40:46 -08:00
mergify[bot]
44109c0cd4 docs: post merge review for #23286 (#23320)
(cherry picked from commit 6a0d2fcfa7)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-03-02 20:11:03 -07:00
mergify[bot]
83281fe3ff Adds comments related to the public RPC endpoints (#22797) (#23451)
* Adds comments related to the public RPC endpoints

* Update docs/src/cluster/rpc-endpoints.md

Co-authored-by: Michael Vines <mvines@gmail.com>

Co-authored-by: Brian Long <bl@triton.one>
Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 7dbde2247d)

Co-authored-by: Brian Long <brian.long@fmadata.com>
2022-03-02 19:40:05 +00:00
Michael Vines
2dd5b76986 Restore solana-test-validator informational output
(cherry picked from commit 9ec514f6c5)
2022-03-02 18:53:33 +01:00
mergify[bot]
0e6a849fc7 bumps up crds-shards-bits (#23220) (#23412)
The commit adjusts CRDS_SHARDS_BITS up to be in-line with mask_bits in
gossip pull requests. This will avoid redundant filtering of irrelevant
crds entries when responding to pull requests.

(cherry picked from commit 1282277126)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-03-01 18:20:26 +00:00
mergify[bot]
db9826c93f Improve UX querying rpc for blocks at or before the snapshot from boot (backport #23403) (#23405)
* Improve UX querying rpc for blocks at or before the snapshot from boot (#23403)

* Bump first-available block to first complete block

* Remove obsolete purges in tests (PrimaryIndex toggling no longer in use)

* Check first-available block in Rpc check_slot_cleaned_up

(cherry picked from commit 3b5b71ce44)

# Conflicts:
#	ledger/src/blockstore.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-01 17:13:54 +00:00
mergify[bot]
46a02b7b4a Allow sub-rent-exempt-minimum transfers to 1nc1nerator (backport #23382) (#23404)
* Allow sub-rent-exempt-minimum transfers to `1nc1nerator` (#23382)

* Add failing test

* Allow small burns to incinerator

* Use check_id method

(cherry picked from commit 19448ba078)

# Conflicts:
#	runtime/src/account_rent_state.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-01 17:12:47 +00:00
Trent Nelson
83fb17c77b client: expose creating RpcClients with custom RpcSender impls
(cherry picked from commit 6666f23c01)

# Conflicts:
#	client/src/rpc_sender.rs
2022-03-01 08:52:32 -07:00
mergify[bot]
e6f6a8a1b4 docs: resolve svgbob binary (#23399)
(cherry picked from commit 5877e38baa)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-03-01 00:58:29 +00:00
mergify[bot]
6b71570f48 ci: move all formerly-default-queue jobs to solana queue (#23388)
(cherry picked from commit f814c4a082)

# Conflicts:
#	ci/buildkite-pipeline.sh

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-28 22:28:05 +00:00
mergify[bot]
34631669a4 Updating known validators in the docs for testnet (#23379)
(cherry picked from commit 22d2a40133)

Co-authored-by: tigarcia <tim.m.garcia@gmail.com>
2022-02-28 17:23:29 +00:00
Michael Vines
a400fd4558 Align the solana validators output columns for -ud,-ut, and -um
(cherry picked from commit 87b76aeeb0)
2022-02-27 18:49:15 -08:00
mergify[bot]
f42f094dd0 Prevent new RentPaying state created by paying fees (backport #23358) (#23360)
* Prevent new RentPaying state created by paying fees (#23358)

* Add failing test

* Check fee-payer rent-state change on load

* Add more test cases

* Review comments

(cherry picked from commit 36484f4f08)

# Conflicts:
#	runtime/src/account_rent_state.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-26 21:37:06 +00:00
mergify[bot]
ef9ffffcaa followup safety checks for #23295 (#23340)
(cherry picked from commit 5e0086c1ee)

# Conflicts:
#	runtime/src/builtins.rs

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-25 08:58:25 +00:00
mergify[bot]
61fea1d2a7 Update derivation path integer sign specification (#23336) (#23339)
Previously, `ACCOUNT` and `CHANGE` were specified as being positive integers, but since both can assume a value of 0 (as in the given example), they should be specified as nonnegative integers

(cherry picked from commit d1f141484e)

Co-authored-by: alnoki <43892045+alnoki@users.noreply.github.com>
2022-02-24 20:11:12 -07:00
Dmitri Makarov
023ab1c427 Update the consumed compute units cost for hashing syscalls
This change prevents zero-cost computation of hash functions on
unbound number of zero-length slices of data.  The cost for each slice
is at least equal to the base cost of a memory operation, but could be
more for longer slices.

(cherry picked from commit 0a3a18744f)
2022-02-24 17:41:22 -08:00
mergify[bot]
f4f0b64af4 Hack fix for ICE as seen in CI (#23306)
(cherry picked from commit c81dd602c4)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-24 01:23:34 +00:00
Will Hickey
84c57dd0a8 Bump version to v1.9.10 (#23304) 2022-02-23 11:30:21 -06:00
Justin Starry
450404f800 v1.9: Enforce tx metadata upload to bigtable (#23212)
* Enforce tx metadata upload with static types (#23028)

* resolve conflicts

* fix test
2022-02-23 12:03:17 +08:00
mergify[bot]
8413700a2f docs: clarify spl token account creation handling for exchange integrations (#23288)
(cherry picked from commit 09d064c090)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-23 00:47:54 +00:00
mergify[bot]
215c708599 Fix builtin handling on epoch boundaries (backport #23256) (#23273)
* Fix builtin handling on epoch boundaries (#23256)

(cherry picked from commit bcda74f42f)

# Conflicts:
#	runtime/src/bank.rs
#	runtime/src/genesis_utils.rs

* fix conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-02-22 16:15:10 +00:00
mergify[bot]
c02c73fa5f Bump rbpf to v0.2.24 (#23263) (#23266)
(cherry picked from commit d0d256ee9a)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2022-02-22 11:46:05 +00:00
Tyera Eulberg
68d846c7a9 Revert "Add simulation detection countermeasure (backport #22880) (#23143)" (#23262)
This reverts commit 0fdbec9735.
2022-02-21 13:38:40 -07:00
Will Hickey
08d6b9850d Bump version to 1.9.9 (#23247) 2022-02-19 21:17:11 -06:00
mergify[bot]
4ebeb33602 Skip adding builtins if they will be removed (backport #23233) (#23241)
* Skip adding builtins if they will be removed (#23233)

* Add failing test for precompile transition

* Skip adding builtins if they will be removed

* cargo clean

* nits

* fix abi check

* remove workaround

Co-authored-by: Jack May <jack@solana.com>
(cherry picked from commit 1719d2349f)

# Conflicts:
#	runtime/src/bank.rs
#	runtime/src/builtins.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
Co-authored-by: Jack May <jack@solana.com>
2022-02-19 08:28:30 +00:00
mergify[bot]
1f4ad0d1e8 Precompiles owned by the native loader (#23237) (#23240)
(cherry picked from commit 970f543ef6)

Co-authored-by: Jack May <jack@solana.com>
2022-02-19 02:45:15 +00:00
mergify[bot]
b2b92d7f5c Add --locked to spl-token-cli install (#23223) (#23225)
(cherry picked from commit c696944d36)

Co-authored-by: Will Hickey <will.hickey@solana.com>
2022-02-18 05:12:51 +00:00
mergify[bot]
02f8651a9c Fix the flaky test test_restart_tower_rollback (backport #23129) (#23155)
* Fix the flaky test test_restart_tower_rollback (#23129)

* Add flag to disable voting until a slot to avoid duplicate voting

* Fix the tower rollback test and remove it from flaky.

(cherry picked from commit ab92578b02)

* Resolve conflicts

Co-authored-by: Ashwin Sekar <ashwin@solana.com>
2022-02-17 20:31:27 +00:00
mergify[bot]
0fdbec9735 Add simulation detection countermeasure (backport #22880) (#23143)
* Add simulation detection countermeasure (#22880)

* Add simulation detection countermeasures

* Add program and test using TestValidator

* Remove incinerator deposit

* Remove incinerator

* Update Cargo.lock

* Add more features to simulation bank

* Update Cargo.lock per rebase

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
(cherry picked from commit c42b80f099)

# Conflicts:
#	programs/bpf/Cargo.lock
#	programs/bpf/Cargo.toml

* Update Cargo.lock

Co-authored-by: Michael Vines <mvines@gmail.com>
Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2022-02-17 14:45:24 +00:00
mergify[bot]
f629c71849 Add slot-based timing metrics (backport #23097) (#23210)
* Add execute timings (#23097)

(cherry picked from commit 619335df1a)

# Conflicts:
#	core/src/banking_stage.rs

* resolve conflicts

Co-authored-by: carllin <carl@solana.com>
2022-02-17 10:57:50 +00:00
mergify[bot]
43e562142f docs: remove wallet ads (#23208)
(cherry picked from commit fa680a35ea)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-02-17 05:54:08 +00:00
Trent Nelson
c3098e99d1 Bump version to v1.9.8 2022-02-16 21:42:57 -07:00
mergify[bot]
421ad42b12 Fix flaky optimistic confirmation tests (backport #23178) (#23200)
* Fix flaky optimistic confirmation tests (#23178)

(cherry picked from commit bca1d51735)

# Conflicts:
#	local-cluster/tests/local_cluster.rs
#	local-cluster/tests/local_cluster_flakey.rs

* Resolve conflicts

Co-authored-by: carllin <carl@solana.com>
2022-02-17 03:18:24 +00:00
mergify[bot]
08cc140d4a accounts_index: Add SPL Token account indexing for token-2022 accounts (#23043) (#23203)
(cherry picked from commit a102453bae)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-02-17 02:20:38 +00:00
mergify[bot]
2120ef5808 Fix ed25519 builtin program handling (backport #23182) (#23195)
* Fix ed25519 builtin program handling (#23182)

* Fix ed25519 builtin program handling

* Fix tests

* Add integration tests for processing transactions with ed25519 ixs

* Fix another test

* fix formatting

(cherry picked from commit 813725dfec)

* fix tests

Co-authored-by: Justin Starry <justin@solana.com>
Co-authored-by: Jack May <jack@solana.com>
2022-02-17 00:44:44 +00:00
Lijun Wang
c08af09aaa Removed solana-accountsdb-plugin-postgres from the monorepo as it has its own (#22567) (#23202)
Removed solana-accountsdb-plugin-postgres from the monorepo as it has its own standalone repo now
2022-02-16 15:52:59 -08:00
mergify[bot]
8b12749f02 forward_buffered_packets return packet count in error path (#23167) (#23187)
(cherry picked from commit 115d71536b)

Co-authored-by: Jeff Biseda <jbiseda@gmail.com>
2022-02-16 13:01:16 -08:00
mergify[bot]
e343a17ce9 Update ping to transfer to self, with rotating amount (#22657) (#22675)
* Update ping to transfer to self, with rotating amount

* Remove balance check

(cherry picked from commit 90689585ef)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-02-16 12:52:28 -07:00
mergify[bot]
3fd78ac6ea shrink batches when over 80% of the space is wasted (backport #23066) (#23189)
* shrink batches when over 80% of the space is wasted (#23066)

* shrink batches when over 80% of the space is wasted

(cherry picked from commit 83d31c9e65)

# Conflicts:
#	core/benches/sigverify_stage.rs
#	core/src/sigverify_stage.rs
#	perf/src/sigverify.rs

* fixup!

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
2022-02-16 19:30:16 +00:00
mergify[bot]
41bbc11a46 flag end-of-slot when poh bank is gone (backport #23069) (#23174)
* flag end-of-slot when poh bank is gone

(cherry picked from commit 03bf66a51b)

# Conflicts:
#	core/src/banking_stage.rs

* merge fix

Co-authored-by: Tao Zhu <tao@solana.com>
2022-02-16 18:08:03 +00:00
mergify[bot]
68934353f2 Typo fix (#23152) (#23153)
Fixed a typo in the documentation.

(cherry picked from commit bb50259956)

Co-authored-by: Jerry <jerskisnow@protonmail.com>
2022-02-16 10:53:30 -07:00
mergify[bot]
92543a3f92 fix typo in docs (#22690) (#22691)
(cherry picked from commit 2b111cd631)

Co-authored-by: Steve James <0x2t1ff@gmail.com>
2022-02-16 10:50:45 -07:00
mergify[bot]
a514aff819 Update jsonrpc-api.md (#23190) (#23192)
(cherry picked from commit aaf657297f)

Co-authored-by: gagliardetto <gagliardetto@users.noreply.github.com>
2022-02-16 17:24:23 +00:00
mergify[bot]
8d8525e4fc Allow cli users to authorize Staker signed by Withdrawer (#23146) (#23176)
(cherry picked from commit 88b66ae3a8)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-16 10:16:55 -07:00
Trent Nelson
2c1cec4e2c validator: invert vote account sanity check arg 2022-02-16 08:31:43 +00:00
Trent Nelson
7d0a0a26bb rpc: genericize client constructors 2022-02-16 08:31:43 +00:00
Trent Nelson
73016d3ed2 rpc: make getGenesisHash part of minimal api 2022-02-16 08:31:43 +00:00
Trent Nelson
9b1cb5c1b7 test-validator: use JsonRpcConfig::default_for_test for consistency 2022-02-16 08:31:43 +00:00
Michael Vines
94f4748a34 Generate full snapshots 4x faster to keep incremental snapshots nice and small
(cherry picked from commit 577fa4ec0c)
2022-02-15 21:22:23 -08:00
Michael Vines
8963724ed6 solana-validator set-identity now supports the --require-tower flag 2022-02-15 21:09:44 -08:00
Tyera Eulberg
65df58c64a Update deprecated methods and recommend getBlocksWithLimit (#23127)
(cherry picked from commit d2a407a9a7)
2022-02-15 18:01:37 -08:00
Michael Vines
380c5da2d0 Add --skip-new-snapshot-check to exit and wait-for-restart-window subcommands
(cherry picked from commit 527f62c744)
2022-02-15 18:01:05 -08:00
mergify[bot]
7d488a6ed8 Remove references to instruction parsing in SPL Token -> Depositing section (#23161) (#23163)
(cherry picked from commit 917113914d)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-16 00:12:10 +00:00
mergify[bot]
159cfdae25 solana-validator monitor now reports identity changes (#23156)
(cherry picked from commit b44f40ee3a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-02-15 22:48:17 +00:00
Michael Vines
1c3d09ed21 solana-validator wait-for-restart-window --min-idle-time X now works 2022-02-15 08:58:53 -08:00
mergify[bot]
2c8cfdb3f3 Add fees to tx-wide caps (backport #22081) (#23095)
* Add fees to tx-wide caps (#22081)

(cherry picked from commit 3d9874b95a)

# Conflicts:
#	runtime/src/bank.rs

* resolve

Co-authored-by: Jack May <jack@solana.com>
2022-02-15 01:36:02 +00:00
mergify[bot]
85570ac207 Add error message for readlink -f failure (#23102) (#23121)
* Add error message for readlink -f failure

(cherry picked from commit 89f5145f64)

Co-authored-by: Will Hickey <will.hickey@solana.com>
2022-02-14 20:23:26 +00:00
mergify[bot]
054b95cbe1 docs: fix broken link for "transaction-id" (#22682) (#22683)
(cherry picked from commit a300e2d2dc)

Co-authored-by: Radu Pașparugă <radupasparuga25@gmail.com>
2022-02-14 22:21:29 +08:00
mergify[bot]
b67a5bb3b9 fix typo (#23107) (#23108)
(cherry picked from commit 22a2a4252a)

Co-authored-by: thepalmtrees <96289385+thepalmtrees@users.noreply.github.com>
2022-02-13 16:15:53 +00:00
mergify[bot]
3e3fb4e296 Introduce slot-specific packet metrics (backport #22906) (#23077)
* Introduce slot-specific packet metrics (#22906)

(cherry picked from commit 2f9e30a1f7)

# Conflicts:
#	core/benches/banking_stage.rs
#	core/src/banking_stage.rs
#	core/src/qos_service.rs

* Resolve conflicts

Co-authored-by: carllin <carl@solana.com>
2022-02-13 05:44:38 +00:00
Michael Vines
f66d8551e9 Update minimum port range due to addition of QUIC port 2022-02-12 08:49:48 -08:00
mergify[bot]
a5cb10666c Bump QUIC_PORT_OFFSET to 6 to avoid jostling around other ports (#23096)
(cherry picked from commit 817f47d970)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-02-12 02:36:04 +00:00
mergify[bot]
76384758d8 adds validator version to set_panic_hook (#23082) (#23088)
(cherry picked from commit 78089941ff)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-02-12 02:11:14 +00:00
mergify[bot]
4eca26ae50 Document message APIs (backport #22873) (#23091)
* Document message APIs (#22873)

* Document message APIs

* Ignore clippy

* Update sdk/program/src/message/mod.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Fix new_with_blockhash example

* Rename nonce_account_address in example

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit f7753ce85f)

# Conflicts:
#	sdk/program/src/message/mod.rs

* Fix conflict

Co-authored-by: Brian Anderson <andersrb@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-11 21:09:14 +00:00
Will Hickey
2d144afec5 Bump version to 1.9.6 (#23092) 2022-02-11 15:00:06 -06:00
298 changed files with 8680 additions and 8248 deletions

564
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,16 +1,14 @@
[workspace]
members = [
"accountsdb-plugin-interface",
"accountsdb-plugin-manager",
"accountsdb-plugin-postgres",
"accounts-cluster-bench",
"bench-streamer",
"bench-tps",
"account-decoder",
"accounts-bench",
"accounts-cluster-bench",
"banking-bench",
"banks-client",
"banks-interface",
"banks-server",
"bench-streamer",
"bench-tps",
"bucket_map",
"bloom",
"clap-utils",
@@ -27,6 +25,8 @@ members = [
"validator",
"genesis",
"genesis-utils",
"geyser-plugin-interface",
"geyser-plugin-manager",
"gossip",
"install",
"keygen",
@@ -49,6 +49,7 @@ members = [
"program-test",
"programs/address-lookup-table",
"programs/address-lookup-table-tests",
"programs/ed25519-tests",
"programs/bpf_loader",
"programs/bpf_loader/gen-syscall-list",
"programs/compute-budget",
@@ -69,7 +70,6 @@ members = [
"tokens",
"transaction-dos",
"transaction-status",
"account-decoder",
"upload-perf",
"net-utils",
"version",

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.9.6"
version = "1.9.13"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,9 @@ lazy_static = "1.4.0"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-config-program = { path = "../programs/config", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.6" }
solana-config-program = { path = "../programs/config", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.9.0"

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-bench"
version = "1.9.6"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
[dependencies]
log = "0.4.14"
rayon = "1.5.1"
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
clap = "2.33.1"
[package.metadata.docs.rs]

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-cluster-bench"
version = "1.9.6"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,25 +13,25 @@ clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.6" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.6" }
solana-client = { path = "../client", version = "=1.9.6" }
solana-core = { path = "../core", version = "=1.9.6" }
solana-faucet = { path = "../faucet", version = "=1.9.6" }
solana-gossip = { path = "../gossip", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-net-utils = { path = "../net-utils", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-streamer = { path = "../streamer", version = "=1.9.6" }
solana-test-validator = { path = "../test-validator", version = "=1.9.6" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.13" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
solana-client = { path = "../client", version = "=1.9.13" }
solana-core = { path = "../core", version = "=1.9.13" }
solana-faucet = { path = "../faucet", version = "=1.9.13" }
solana-gossip = { path = "../gossip", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-streamer = { path = "../streamer", version = "=1.9.13" }
solana-test-validator = { path = "../test-validator", version = "=1.9.13" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.9.6" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.13" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,20 +0,0 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
</a>
</p>
# Solana AccountsDb Plugin Interface
This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see the detail of the `accountsdb_plugin_interface.rs` for the interface definition.
The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
instantiates the implementation of the interface.
The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the accounts data into an
external PostgreSQL databases.
More information about Solana is available in the [Solana documentation](https://docs.solana.com/).
Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)

View File

@@ -1 +0,0 @@
pub mod accountsdb_plugin_interface;

View File

@@ -1,31 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.9.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
[dependencies]
bs58 = "0.4.0"
crossbeam-channel = "0.5"
libloading = "0.7.2"
log = "0.4.11"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-metrics = { path = "../metrics", version = "=1.9.6" }
solana-rpc = { path = "../rpc", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.6" }
thiserror = "1.0.30"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,39 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.9.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
log = "0.4.14"
postgres = { version = "0.19.2", features = ["with-chrono-0_4"] }
postgres-types = { version = "0.2.2", features = ["derive"] }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-metrics = { path = "../metrics", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.6" }
thiserror = "1.0.30"
tokio-postgres = "0.7.4"
[dev-dependencies]
solana-account-decoder = { path = "../account-decoder", version = "=1.9.6" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,5 +0,0 @@
This is an example implementation of the AccountsDb plugin for a PostgreSQL database.
Please see `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.
To create the schema objects for the database, please use `scripts/create_schema.sql`.
`scripts/drop_schema.sql` can be used to tear down the schema objects.

View File

@@ -1,201 +0,0 @@
/**
 * Schema objects (tables, composite types, and the audit function/trigger)
 * required by this PostgreSQL plugin implementation.
 */
-- The table storing accounts
CREATE TABLE account (
pubkey BYTEA PRIMARY KEY,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);
-- The table storing slot information
CREATE TABLE slot (
slot BIGINT PRIMARY KEY,
parent BIGINT,
status VARCHAR(16) NOT NULL,
updated_on TIMESTAMP NOT NULL
);
-- Types for Transactions
Create TYPE "TransactionErrorCode" AS ENUM (
'AccountInUse',
'AccountLoadedTwice',
'AccountNotFound',
'ProgramAccountNotFound',
'InsufficientFundsForFee',
'InvalidAccountForFee',
'AlreadyProcessed',
'BlockhashNotFound',
'InstructionError',
'CallChainTooDeep',
'MissingSignatureForFee',
'InvalidAccountIndex',
'SignatureFailure',
'InvalidProgramForExecution',
'SanitizeFailure',
'ClusterMaintenance',
'AccountBorrowOutstanding',
'WouldExceedMaxAccountCostLimit',
'WouldExceedMaxBlockCostLimit',
'UnsupportedVersion',
'InvalidWritableAccount',
'WouldExceedMaxAccountDataCostLimit',
'TooManyAccountLocks',
'AddressLookupTableNotFound',
'InvalidAddressLookupTableOwner',
'InvalidAddressLookupTableData',
'InvalidAddressLookupTableIndex',
'InvalidRentPayingAccount'
);
CREATE TYPE "TransactionError" AS (
error_code "TransactionErrorCode",
error_detail VARCHAR(256)
);
CREATE TYPE "CompiledInstruction" AS (
program_id_index SMALLINT,
accounts SMALLINT[],
data BYTEA
);
CREATE TYPE "InnerInstructions" AS (
index SMALLINT,
instructions "CompiledInstruction"[]
);
CREATE TYPE "TransactionTokenBalance" AS (
account_index SMALLINT,
mint VARCHAR(44),
ui_token_amount DOUBLE PRECISION,
owner VARCHAR(44)
);
Create TYPE "RewardType" AS ENUM (
'Fee',
'Rent',
'Staking',
'Voting'
);
CREATE TYPE "Reward" AS (
pubkey VARCHAR(44),
lamports BIGINT,
post_balance BIGINT,
reward_type "RewardType",
commission SMALLINT
);
CREATE TYPE "TransactionStatusMeta" AS (
error "TransactionError",
fee BIGINT,
pre_balances BIGINT[],
post_balances BIGINT[],
inner_instructions "InnerInstructions"[],
log_messages TEXT[],
pre_token_balances "TransactionTokenBalance"[],
post_token_balances "TransactionTokenBalance"[],
rewards "Reward"[]
);
CREATE TYPE "TransactionMessageHeader" AS (
num_required_signatures SMALLINT,
num_readonly_signed_accounts SMALLINT,
num_readonly_unsigned_accounts SMALLINT
);
CREATE TYPE "TransactionMessage" AS (
header "TransactionMessageHeader",
account_keys BYTEA[],
recent_blockhash BYTEA,
instructions "CompiledInstruction"[]
);
CREATE TYPE "TransactionMessageAddressTableLookup" AS (
account_key BYTEA,
writable_indexes SMALLINT[],
readonly_indexes SMALLINT[]
);
CREATE TYPE "TransactionMessageV0" AS (
header "TransactionMessageHeader",
account_keys BYTEA[],
recent_blockhash BYTEA,
instructions "CompiledInstruction"[],
address_table_lookups "TransactionMessageAddressTableLookup"[]
);
CREATE TYPE "LoadedAddresses" AS (
writable BYTEA[],
readonly BYTEA[]
);
CREATE TYPE "LoadedMessageV0" AS (
message "TransactionMessageV0",
loaded_addresses "LoadedAddresses"
);
-- The table storing transactions
CREATE TABLE transaction (
slot BIGINT NOT NULL,
signature BYTEA NOT NULL,
is_vote BOOL NOT NULL,
message_type SMALLINT, -- 0: legacy, 1: v0 message
legacy_message "TransactionMessage",
v0_loaded_message "LoadedMessageV0",
signatures BYTEA[],
message_hash BYTEA,
meta "TransactionStatusMeta",
updated_on TIMESTAMP NOT NULL,
CONSTRAINT transaction_pk PRIMARY KEY (slot, signature)
);
-- The table storing block metadata
CREATE TABLE block (
slot BIGINT PRIMARY KEY,
blockhash VARCHAR(44),
rewards "Reward"[],
block_time BIGINT,
block_height BIGINT,
updated_on TIMESTAMP NOT NULL
);
/**
* The following is for keeping historical data for accounts and is not required for plugin to work.
*/
-- The table storing historical data for accounts
CREATE TABLE account_audit (
pubkey BYTEA,
owner BYTEA,
lamports BIGINT NOT NULL,
slot BIGINT NOT NULL,
executable BOOL NOT NULL,
rent_epoch BIGINT NOT NULL,
data BYTEA,
write_version BIGINT NOT NULL,
updated_on TIMESTAMP NOT NULL
);
CREATE INDEX account_audit_account_key ON account_audit (pubkey, write_version);
CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
BEGIN
INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
RETURN NEW;
END;
$audit_account_update$ LANGUAGE plpgsql;
CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
FOR EACH ROW EXECUTE PROCEDURE audit_account_update();

View File

@@ -1,26 +0,0 @@
/**
* Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
*/
DROP TRIGGER account_update_trigger ON account;
DROP FUNCTION audit_account_update;
DROP TABLE account_audit;
DROP TABLE account;
DROP TABLE slot;
DROP TABLE transaction;
DROP TABLE block;
DROP TYPE "TransactionError" CASCADE;
DROP TYPE "TransactionErrorCode" CASCADE;
DROP TYPE "LoadedMessageV0" CASCADE;
DROP TYPE "LoadedAddresses" CASCADE;
DROP TYPE "TransactionMessageV0" CASCADE;
DROP TYPE "TransactionMessage" CASCADE;
DROP TYPE "TransactionMessageHeader" CASCADE;
DROP TYPE "TransactionMessageAddressTableLookup" CASCADE;
DROP TYPE "TransactionStatusMeta" CASCADE;
DROP TYPE "RewardType" CASCADE;
DROP TYPE "Reward" CASCADE;
DROP TYPE "TransactionTokenBalance" CASCADE;
DROP TYPE "InnerInstructions" CASCADE;
DROP TYPE "CompiledInstruction" CASCADE;

View File

@@ -1,802 +0,0 @@
# This a reference configuration file for the PostgreSQL database version 14.
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '/var/lib/postgresql/14/main' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
listen_addresses = '*'
port = 5433 # (change requires restart)
max_connections = 200 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 1GB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
wal_level = minimal # minimal, replica, or logical
# (change requires restart)
fsync = off # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
synchronous_commit = off # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
full_page_writes = off # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enable compression of full-page writes
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = -1 # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '14/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8' # locale for system error message
# strings
lc_monetary = 'C.UTF-8' # locale for monetary formatting
lc_numeric = 'C.UTF-8' # locale for number formatting
lc_time = 'C.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@@ -1,74 +0,0 @@
use {log::*, std::collections::HashSet};
/// Filter describing which accounts the plugin should stream.
///
/// An account is selected when its pubkey is in `accounts`, its owner is in
/// `owners`, or `select_all_accounts` is set (the "*" wildcard).
#[derive(Debug)]
pub(crate) struct AccountsSelector {
    /// Raw (base-58 decoded) pubkeys of individually selected accounts.
    pub accounts: HashSet<Vec<u8>>,
    /// Raw (base-58 decoded) owner pubkeys whose accounts are selected.
    pub owners: HashSet<Vec<u8>>,
    /// When true, every account is selected and the sets above are ignored.
    pub select_all_accounts: bool,
}
impl AccountsSelector {
    /// A selector with empty key sets that nevertheless matches every
    /// account (`select_all_accounts` starts out true).
    pub fn default() -> Self {
        AccountsSelector {
            accounts: HashSet::default(),
            owners: HashSet::default(),
            select_all_accounts: true,
        }
    }

    /// Build a selector from base-58 encoded account and owner keys.
    /// A literal "*" among `accounts` selects every account.
    ///
    /// Panics if a key is not valid base-58 (strict config parsing).
    pub fn new(accounts: &[String], owners: &[String]) -> Self {
        info!(
            "Creating AccountsSelector from accounts: {:?}, owners: {:?}",
            accounts, owners
        );

        if accounts.iter().any(|key| key == "*") {
            // Wildcard: the individual key sets are irrelevant.
            return AccountsSelector {
                accounts: HashSet::default(),
                owners: HashSet::default(),
                select_all_accounts: true,
            };
        }

        // Shared decoder for both key lists.
        fn decode(key: &String) -> Vec<u8> {
            bs58::decode(key).into_vec().unwrap()
        }

        AccountsSelector {
            accounts: accounts.iter().map(decode).collect(),
            owners: owners.iter().map(decode).collect(),
            select_all_accounts: false,
        }
    }

    /// True when `account` itself, or its `owner`, is covered by this
    /// selector (or the wildcard is active).
    pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
        if self.select_all_accounts {
            return true;
        }
        self.accounts.contains(account) || self.owners.contains(owner)
    }

    /// Check if any account is of interest at all.
    pub fn is_enabled(&self) -> bool {
        self.select_all_accounts || !self.accounts.is_empty() || !self.owners.is_empty()
    }
}
#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    /// Building a selector from either an account key or an owner key must
    /// succeed without panicking.
    #[test]
    fn test_create_accounts_selector() {
        let key = "9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string();
        AccountsSelector::new(&[key.clone()], &[]);
        AccountsSelector::new(&[], &[key]);
    }
}

View File

@@ -1,466 +0,0 @@
use solana_measure::measure::Measure;
/// Main entry for the PostgreSQL plugin
use {
crate::{
accounts_selector::AccountsSelector,
postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
transaction_selector::TransactionSelector,
},
bs58,
log::*,
serde_derive::{Deserialize, Serialize},
serde_json,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions,
ReplicaBlockInfoVersions, ReplicaTransactionInfoVersions, Result, SlotStatus,
},
solana_metrics::*,
std::{fs::File, io::Read},
thiserror::Error,
};
/// The PostgreSQL implementation of the AccountsDb plugin interface.
///
/// All fields start as `None`; `on_load` fills them in after parsing the
/// config file and connecting to the database.
#[derive(Default)]
pub struct AccountsDbPluginPostgres {
    /// Worker-thread based client that writes to PostgreSQL.
    client: Option<ParallelPostgresClient>,
    /// Controls which accounts are streamed; `None` means account updates
    /// are skipped (see `update_account`).
    accounts_selector: Option<AccountsSelector>,
    /// Controls which transactions are stored; `None` means transactions
    /// are skipped (see `notify_transaction`).
    transaction_selector: Option<TransactionSelector>,
}
impl std::fmt::Debug for AccountsDbPluginPostgres {
    /// Manual impl because not all fields implement `Debug` (e.g.
    /// `TransactionSelector` has no derive). Previously this wrote nothing,
    /// making `{:?}` output an empty string; emit the type name instead so
    /// log lines remain informative.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("AccountsDbPluginPostgres")
    }
}
/// Plugin configuration, deserialized from the JSON config file passed to
/// `on_load`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountsDbPluginPostgresConfig {
    /// PostgreSQL server host; ignored when `connection_str` is set.
    pub host: Option<String>,
    /// PostgreSQL user; ignored when `connection_str` is set.
    pub user: Option<String>,
    /// PostgreSQL server port; ignored when `connection_str` is set.
    pub port: Option<u16>,
    /// Full connection string; when present, overrides `host`/`user`/`port`.
    pub connection_str: Option<String>,
    /// Number of worker threads (each holding a database connection).
    pub threads: Option<usize>,
    /// Bulk-insert batch size used while restoring from a snapshot.
    pub batch_size: Option<usize>,
    /// When true, panic instead of continuing when a database write fails.
    pub panic_on_db_errors: Option<bool>,
    /// Indicates if to store historical data for accounts
    pub store_account_historical_data: Option<bool>,
}
#[derive(Error, Debug)]
pub enum AccountsDbPluginPostgresError {
#[error("Error connecting to the backend data store. Error message: ({msg})")]
DataStoreConnectionError { msg: String },
#[error("Error preparing data store schema. Error message: ({msg})")]
DataSchemaError { msg: String },
#[error("Error preparing data store schema. Error message: ({msg})")]
ConfigurationError { msg: String },
}
impl AccountsDbPlugin for AccountsDbPluginPostgres {
    /// Name reported to the plugin manager.
    fn name(&self) -> &'static str {
        "AccountsDbPluginPostgres"
    }

    /// Do initialization for the PostgreSQL plugin.
    ///
    /// # Format of the config file:
    /// * The `accounts_selector` section allows the user to control account selection.
    /// "accounts_selector" : {
    /// "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
    /// }
    /// or:
    /// "accounts_selector" = {
    /// "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
    /// }
    /// Accounts satisfying either the accounts condition or the owners condition will be selected.
    /// When only owners is specified,
    /// all accounts belonging to the owners will be streamed.
    /// The accounts field supports a wildcard to select all accounts:
    /// "accounts_selector" : {
    /// "accounts" : \["*"\],
    /// }
    /// * "host", optional, specifies the PostgreSQL server.
    /// * "user", optional, specifies the PostgreSQL user.
    /// * "port", optional, specifies the PostgreSQL server's port.
    /// * "connection_str", optional, the custom PostgreSQL connection string.
    /// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
    /// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
    /// `host` and `user` must be given.
    /// "store_account_historical_data", optional, set it to 'true', to store historical account data to account_audit
    /// table.
    /// * "threads" optional, specifies the number of worker threads for the plugin. A thread
    /// maintains a PostgreSQL connection to the server. The default is '10'.
    /// * "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created
    /// from restoring a snapshot. The default is '10'.
    /// * "panic_on_db_errors", optional, controls if to panic when there are errors replicating data to the
    /// PostgreSQL database. The default is 'false'.
    /// * "transaction_selector", optional, controls if and what transactions to store. If this field is missing,
    /// none of the transactions is stored.
    /// "transaction_selector" : {
    /// "mentions" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
    /// }
    /// The `mentions` field supports a wildcard to select all transactions or all 'vote' transactions:
    /// For example, to select all transactions:
    /// "transaction_selector" : {
    /// "mentions" : \["*"\],
    /// }
    /// To select all vote transactions:
    /// "transaction_selector" : {
    /// "mentions" : \["all_votes"\],
    /// }
    /// # Examples
    ///
    /// {
    /// "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
    /// "host": "host_foo",
    /// "user": "solana",
    /// "threads": 10,
    /// "accounts_selector" : {
    /// "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
    /// }
    /// }
    fn on_load(&mut self, config_file: &str) -> Result<()> {
        solana_logger::setup_with_default("info");
        info!(
            "Loading plugin {:?} from config_file {:?}",
            self.name(),
            config_file
        );
        let mut file = File::open(config_file)?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;

        // First pass over the config: extract the account / transaction
        // selector sections from the untyped JSON value.
        let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
        self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));
        self.transaction_selector = Some(Self::create_transaction_selector_from_config(&result));

        // Second pass: deserialize the typed plugin configuration, then
        // build the database client from it.
        let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
            serde_json::from_str(&contents);
        match result {
            Err(err) => {
                return Err(AccountsDbPluginError::ConfigFileReadError {
                    msg: format!(
                        "The config file is not in the JSON format expected: {:?}",
                        err
                    ),
                })
            }
            Ok(config) => {
                let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
                self.client = Some(client);
            }
        }

        Ok(())
    }

    fn on_unload(&mut self) {
        info!("Unloading plugin: {:?}", self.name());

        match &mut self.client {
            None => {}
            Some(client) => {
                // Block until the client's workers finish; unwrap panics if
                // a worker panicked (nothing sensible to do during unload).
                client.join().unwrap();
            }
        }
    }

    /// Stream one account update to PostgreSQL, honoring the configured
    /// account selector. Returns Ok(()) without writing when no selector is
    /// configured or the account is not selected.
    fn update_account(
        &mut self,
        account: ReplicaAccountInfoVersions,
        slot: u64,
        is_startup: bool,
    ) -> Result<()> {
        let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
        match account {
            ReplicaAccountInfoVersions::V0_0_1(account) => {
                let mut measure_select =
                    Measure::start("accountsdb-plugin-postgres-update-account-select");
                // Filter first so unselected accounts cost only the check.
                if let Some(accounts_selector) = &self.accounts_selector {
                    if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
                        return Ok(());
                    }
                } else {
                    return Ok(());
                }
                measure_select.stop();
                inc_new_counter_debug!(
                    "accountsdb-plugin-postgres-update-account-select-us",
                    measure_select.as_us() as usize,
                    100000,
                    100000
                );

                debug!(
                    "Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
                    bs58::encode(account.pubkey).into_string(),
                    bs58::encode(account.owner).into_string(),
                    slot,
                    self.accounts_selector.as_ref().unwrap()
                );

                match &mut self.client {
                    None => {
                        return Err(AccountsDbPluginError::Custom(Box::new(
                            AccountsDbPluginPostgresError::DataStoreConnectionError {
                                msg: "There is no connection to the PostgreSQL database."
                                    .to_string(),
                            },
                        )));
                    }
                    Some(client) => {
                        let mut measure_update =
                            Measure::start("accountsdb-plugin-postgres-update-account-client");
                        let result = { client.update_account(account, slot, is_startup) };
                        measure_update.stop();
                        inc_new_counter_debug!(
                            "accountsdb-plugin-postgres-update-account-client-us",
                            measure_update.as_us() as usize,
                            100000,
                            100000
                        );
                        if let Err(err) = result {
                            return Err(AccountsDbPluginError::AccountsUpdateError {
                                msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
                            });
                        }
                    }
                }
            }
        }
        measure_all.stop();
        inc_new_counter_debug!(
            "accountsdb-plugin-postgres-update-account-main-us",
            measure_all.as_us() as usize,
            100000,
            100000
        );
        Ok(())
    }

    /// Record a slot status change reported by the validator.
    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<()> {
        info!("Updating slot {:?} at with status {:?}", slot, status);

        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => {
                let result = client.update_slot_status(slot, parent, status);

                if let Err(err) = result {
                    return Err(AccountsDbPluginError::SlotStatusUpdateError{
                        msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
                    });
                }
            }
        }

        Ok(())
    }

    /// Tell the client that the startup account stream is complete.
    fn notify_end_of_startup(&mut self) -> Result<()> {
        info!("Notifying the end of startup for accounts notifications");
        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => {
                let result = client.notify_end_of_startup();

                if let Err(err) = result {
                    return Err(AccountsDbPluginError::SlotStatusUpdateError{
                        msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
                    });
                }
            }
        }
        Ok(())
    }

    /// Store one transaction, honoring the configured transaction selector.
    /// Returns Ok(()) without writing when no selector is configured or the
    /// transaction is not selected.
    fn notify_transaction(
        &mut self,
        transaction_info: ReplicaTransactionInfoVersions,
        slot: u64,
    ) -> Result<()> {
        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => match transaction_info {
                ReplicaTransactionInfoVersions::V0_0_1(transaction_info) => {
                    if let Some(transaction_selector) = &self.transaction_selector {
                        if !transaction_selector.is_transaction_selected(
                            transaction_info.is_vote,
                            transaction_info.transaction.message().account_keys_iter(),
                        ) {
                            return Ok(());
                        }
                    } else {
                        return Ok(());
                    }

                    let result = client.log_transaction_info(transaction_info, slot);

                    if let Err(err) = result {
                        return Err(AccountsDbPluginError::SlotStatusUpdateError{
                            msg: format!("Failed to persist the transaction info to the PostgreSQL database. Error: {:?}", err)
                        });
                    }
                }
            },
        }

        Ok(())
    }

    /// Store a completed block's metadata.
    fn notify_block_metadata(&mut self, block_info: ReplicaBlockInfoVersions) -> Result<()> {
        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => match block_info {
                ReplicaBlockInfoVersions::V0_0_1(block_info) => {
                    let result = client.update_block_metadata(block_info);

                    if let Err(err) = result {
                        return Err(AccountsDbPluginError::SlotStatusUpdateError{
                            msg: format!("Failed to persist the update of block metadata to the PostgreSQL database. Error: {:?}", err)
                        });
                    }
                }
            },
        }

        Ok(())
    }

    /// Check if the plugin is interested in account data.
    /// Default is true -- if the plugin is not interested in
    /// account data, please return false.
    fn account_data_notifications_enabled(&self) -> bool {
        self.accounts_selector
            .as_ref()
            .map_or_else(|| false, |selector| selector.is_enabled())
    }

    /// Check if the plugin is interested in transaction data.
    fn transaction_notifications_enabled(&self) -> bool {
        self.transaction_selector
            .as_ref()
            .map_or_else(|| false, |selector| selector.is_enabled())
    }
}
impl AccountsDbPluginPostgres {
    /// Extract a JSON array of strings from `value`; any non-array value
    /// (including `null`/missing) yields an empty vector.
    ///
    /// Panics if an array element is not a string — same strict parsing as
    /// before; previously this logic was duplicated in both config parsers.
    fn json_string_array(value: &serde_json::Value) -> Vec<String> {
        value
            .as_array()
            .map(|array| {
                array
                    .iter()
                    .map(|val| val.as_str().unwrap().to_string())
                    .collect()
            })
            .unwrap_or_default()
    }

    /// Build the account filter from the `accounts_selector` section of the
    /// config; a missing section yields the default selector, which selects
    /// all accounts.
    fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
        let accounts_selector = &config["accounts_selector"];

        if accounts_selector.is_null() {
            AccountsSelector::default()
        } else {
            let accounts = Self::json_string_array(&accounts_selector["accounts"]);
            let owners = Self::json_string_array(&accounts_selector["owners"]);
            AccountsSelector::new(&accounts, &owners)
        }
    }

    /// Build the transaction filter from the `transaction_selector` section;
    /// a missing section yields the default selector, which stores no
    /// transactions.
    fn create_transaction_selector_from_config(config: &serde_json::Value) -> TransactionSelector {
        let transaction_selector = &config["transaction_selector"];

        if transaction_selector.is_null() {
            TransactionSelector::default()
        } else {
            let mentions = Self::json_string_array(&transaction_selector["mentions"]);
            TransactionSelector::new(&mentions)
        }
    }

    pub fn new() -> Self {
        Self::default()
    }
}
#[no_mangle]
#[allow(improper_ctypes_definitions)]
/// # Safety
///
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    // Hand ownership of a boxed plugin instance to the caller as a raw
    // trait-object pointer (Box::into_raw transfers ownership).
    let boxed: Box<dyn AccountsDbPlugin> = Box::new(AccountsDbPluginPostgres::new());
    Box::into_raw(boxed)
}
#[cfg(test)]
pub(crate) mod tests {
    use {super::*, serde_json};

    /// An `accounts_selector` section with an `owners` list must parse into
    /// a selector without panicking.
    #[test]
    fn test_accounts_selector_from_config() {
        let raw = r#"{"accounts_selector" : { "owners" : ["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin"] }}"#;
        let parsed: serde_json::Value = serde_json::from_str(raw).unwrap();
        AccountsDbPluginPostgres::create_accounts_selector_from_config(&parsed);
    }
}

View File

@@ -1,4 +0,0 @@
pub mod accounts_selector;
pub mod accountsdb_plugin_postgres;
pub mod postgres_client;
pub mod transaction_selector;

File diff suppressed because it is too large Load Diff

View File

@@ -1,97 +0,0 @@
use {
crate::{
accountsdb_plugin_postgres::{
AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
},
postgres_client::{
postgres_client_transaction::DbReward, SimplePostgresClient, UpdateBlockMetadataRequest,
},
},
chrono::Utc,
log::*,
postgres::{Client, Statement},
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPluginError, ReplicaBlockInfo,
},
};
/// Database representation of a block's metadata, mirroring
/// `ReplicaBlockInfo` with types suitable for PostgreSQL columns
/// (signed integers, owned strings).
#[derive(Clone, Debug)]
pub struct DbBlockInfo {
    /// Slot number (u64 narrowed to i64 for the database).
    pub slot: i64,
    /// Blockhash rendered as a string.
    pub blockhash: String,
    /// Rewards credited in this block.
    pub rewards: Vec<DbReward>,
    /// Block time, if available.
    pub block_time: Option<i64>,
    /// Block height, if available (u64 narrowed to i64).
    pub block_height: Option<i64>,
}
impl<'a> From<&ReplicaBlockInfo<'a>> for DbBlockInfo {
fn from(block_info: &ReplicaBlockInfo) -> Self {
Self {
slot: block_info.slot as i64,
blockhash: block_info.blockhash.to_string(),
rewards: block_info.rewards.iter().map(DbReward::from).collect(),
block_time: block_info.block_time,
block_height: block_info
.block_height
.map(|block_height| block_height as i64),
}
}
}
impl SimplePostgresClient {
    /// Prepare the statement that upserts one row into the `block` table.
    ///
    /// Fix: the statement is now a true upsert. The previous plain INSERT
    /// contradicted this function's "upsert" name — re-sending metadata for
    /// a slot that already has a row (e.g. after a validator restart) would
    /// fail with a duplicate-key error instead of updating the row.
    /// NOTE(review): assumes `slot` is the table's unique key — confirm
    /// against the schema.
    pub(crate) fn build_block_metadata_upsert_statement(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt =
            "INSERT INTO block (slot, blockhash, rewards, block_time, block_height, updated_on) \
            VALUES ($1, $2, $3, $4, $5, $6) \
            ON CONFLICT (slot) DO UPDATE SET blockhash=excluded.blockhash, \
            rewards=excluded.rewards, block_time=excluded.block_time, \
            block_height=excluded.block_height, updated_on=excluded.updated_on";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the block metadata update PostgreSQL database: ({}) host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })))
            }
            Ok(stmt) => Ok(stmt),
        }
    }

    /// Write one block's metadata using the prepared upsert statement.
    ///
    /// Errors are logged and surfaced as `AccountsUpdateError`.
    pub(crate) fn update_block_metadata_impl(
        &mut self,
        block_info: UpdateBlockMetadataRequest,
    ) -> Result<(), AccountsDbPluginError> {
        let client = self.client.get_mut().unwrap();
        let statement = &client.update_block_metadata_stmt;
        let client = &mut client.client;
        let updated_on = Utc::now().naive_utc();

        let block_info = block_info.block_info;
        let result = client.query(
            statement,
            &[
                &block_info.slot,
                &block_info.blockhash,
                &block_info.rewards,
                &block_info.block_time,
                &block_info.block_height,
                &updated_on,
            ],
        );

        if let Err(err) = result {
            let msg = format!(
                "Failed to persist the update of block metadata to the PostgreSQL database. Error: {:?}",
                err);
            error!("{}", msg);
            return Err(AccountsDbPluginError::AccountsUpdateError { msg });
        }

        Ok(())
    }
}

View File

@@ -1,194 +0,0 @@
/// The transaction selector is responsible for filtering transactions
/// in the plugin framework.
use {log::*, solana_sdk::pubkey::Pubkey, std::collections::HashSet};
/// Filter describing which transactions the plugin should store.
pub(crate) struct TransactionSelector {
    /// Raw (base-58 decoded) pubkeys; a transaction mentioning any of these
    /// addresses is selected.
    pub mentioned_addresses: HashSet<Vec<u8>>,
    /// Select every transaction ("*" or "all" in the config).
    pub select_all_transactions: bool,
    /// Select every vote transaction ("all_votes"; also implied by the
    /// full wildcard).
    pub select_all_vote_transactions: bool,
}
#[allow(dead_code)]
impl TransactionSelector {
    /// A selector that matches nothing (all flags off, no addresses).
    pub fn default() -> Self {
        Self {
            mentioned_addresses: HashSet::default(),
            select_all_transactions: false,
            select_all_vote_transactions: false,
        }
    }

    /// Create a selector based on the mentioned addresses.
    /// To select all transactions use ["*"] or ["all"];
    /// to select all vote transactions use ["all_votes"];
    /// to select transactions mentioning specific addresses use
    /// ["<pubkey1>", "<pubkey2>", ...].
    pub fn new(mentioned_addresses: &[String]) -> Self {
        info!(
            "Creating TransactionSelector from addresses: {:?}",
            mentioned_addresses
        );

        let has = |wanted: &str| mentioned_addresses.iter().any(|key| key == wanted);

        if has("*") || has("all") {
            // The full wildcard also covers vote transactions.
            return Self {
                mentioned_addresses: HashSet::default(),
                select_all_transactions: true,
                select_all_vote_transactions: true,
            };
        }

        if has("all_votes") {
            return Self {
                mentioned_addresses: HashSet::default(),
                select_all_transactions: false,
                select_all_vote_transactions: true,
            };
        }

        Self {
            mentioned_addresses: mentioned_addresses
                .iter()
                .map(|key| bs58::decode(key).into_vec().unwrap())
                .collect(),
            select_all_transactions: false,
            select_all_vote_transactions: false,
        }
    }

    /// Check if a transaction is of interest.
    pub fn is_transaction_selected(
        &self,
        is_vote: bool,
        mentioned_addresses: Box<dyn Iterator<Item = &Pubkey> + '_>,
    ) -> bool {
        if !self.is_enabled() {
            return false;
        }
        if self.select_all_transactions || (self.select_all_vote_transactions && is_vote) {
            return true;
        }
        let mut addresses = mentioned_addresses;
        addresses.any(|address| self.mentioned_addresses.contains(address.as_ref()))
    }

    /// Check if any transaction is of interest at all.
    pub fn is_enabled(&self) -> bool {
        self.select_all_transactions
            || self.select_all_vote_transactions
            || !self.mentioned_addresses.is_empty()
    }
}
#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    /// Assert the selector's verdict for each (is_vote, addresses, expected)
    /// case.
    fn check(selector: &TransactionSelector, cases: &[(bool, &[Pubkey], bool)]) {
        for (is_vote, addresses, expected) in cases {
            assert_eq!(
                selector.is_transaction_selected(*is_vote, Box::new(addresses.iter())),
                *expected
            );
        }
    }

    #[test]
    fn test_select_transaction() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();
        let selector = TransactionSelector::new(&[pubkey1.to_string()]);
        assert!(selector.is_enabled());
        check(
            &selector,
            &[
                (false, &[pubkey1], true),
                (false, &[pubkey2], false),
                (false, &[pubkey1, pubkey2], true),
            ],
        );
    }

    #[test]
    fn test_select_all_transaction_using_wildcard() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();
        let selector = TransactionSelector::new(&["*".to_string()]);
        assert!(selector.is_enabled());
        check(
            &selector,
            &[
                (false, &[pubkey1], true),
                (false, &[pubkey2], true),
                (false, &[pubkey1, pubkey2], true),
            ],
        );
    }

    #[test]
    fn test_select_all_transaction_all() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();
        let selector = TransactionSelector::new(&["all".to_string()]);
        assert!(selector.is_enabled());
        check(
            &selector,
            &[
                (false, &[pubkey1], true),
                (false, &[pubkey2], true),
                (false, &[pubkey1, pubkey2], true),
            ],
        );
    }

    #[test]
    fn test_select_all_vote_transaction() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();
        let selector = TransactionSelector::new(&["all_votes".to_string()]);
        assert!(selector.is_enabled());
        check(
            &selector,
            &[
                (false, &[pubkey1], false),
                (true, &[pubkey2], true),
                (true, &[pubkey1, pubkey2], true),
            ],
        );
    }

    #[test]
    fn test_select_no_transaction() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();
        let selector = TransactionSelector::new(&[]);
        assert!(!selector.is_enabled());
        check(
            &selector,
            &[
                (false, &[pubkey1], false),
                (true, &[pubkey2], false),
                (true, &[pubkey1, pubkey2], false),
            ],
        );
    }
}

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-banking-bench"
version = "1.9.6"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-core = { path = "../core", version = "=1.9.6" }
solana-gossip = { path = "../gossip", version = "=1.9.6" }
solana-ledger = { path = "../ledger", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-perf = { path = "../perf", version = "=1.9.6" }
solana-poh = { path = "../poh", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-streamer = { path = "../streamer", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-core = { path = "../core", version = "=1.9.13" }
solana-gossip = { path = "../gossip", version = "=1.9.13" }
solana-ledger = { path = "../ledger", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-perf = { path = "../perf", version = "=1.9.13" }
solana-poh = { path = "../poh", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-streamer = { path = "../streamer", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.9.6"
version = "1.9.13"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,17 +12,17 @@ edition = "2021"
[dependencies]
borsh = "0.9.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.9.6" }
solana-program = { path = "../sdk/program", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.13" }
solana-program = { path = "../sdk/program", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
tarpc = { version = "0.27.2", features = ["full"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-banks-server = { path = "../banks-server", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-banks-server = { path = "../banks-server", version = "=1.9.13" }
[lib]
crate-type = ["lib"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.9.6"
version = "1.9.13"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,7 +11,7 @@ edition = "2021"
[dependencies]
serde = { version = "1.0.130", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
tarpc = { version = "0.27.2", features = ["full"] }
[lib]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.9.6"
version = "1.9.13"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ edition = "2021"
[dependencies]
bincode = "1.3.3"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.6" }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.13" }
tarpc = { version = "0.27.2", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-streamer"
version = "1.9.6"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.9.6" }
solana-streamer = { path = "../streamer", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-net-utils = { path = "../net-utils", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
solana-streamer = { path = "../streamer", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-tps"
version = "1.9.6"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,23 +14,23 @@ log = "0.4.14"
rayon = "1.5.1"
serde_json = "1.0.72"
serde_yaml = "0.8.21"
solana-core = { path = "../core", version = "=1.9.6" }
solana-genesis = { path = "../genesis", version = "=1.9.6" }
solana-client = { path = "../client", version = "=1.9.6" }
solana-faucet = { path = "../faucet", version = "=1.9.6" }
solana-gossip = { path = "../gossip", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-metrics = { path = "../metrics", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-net-utils = { path = "../net-utils", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-streamer = { path = "../streamer", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-core = { path = "../core", version = "=1.9.13" }
solana-genesis = { path = "../genesis", version = "=1.9.13" }
solana-client = { path = "../client", version = "=1.9.13" }
solana-faucet = { path = "../faucet", version = "=1.9.13" }
solana-gossip = { path = "../gossip", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-metrics = { path = "../metrics", version = "=1.9.13" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-streamer = { path = "../streamer", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
[dev-dependencies]
serial_test = "0.5.1"
solana-local-cluster = { path = "../local-cluster", version = "=1.9.6" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.13" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -21,7 +21,7 @@ pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
fn main() {
solana_logger::setup_with_default("solana=info");
solana_metrics::set_panic_hook("bench-tps");
solana_metrics::set_panic_hook("bench-tps", /*version:*/ None);
let matches = cli::build_args(solana_version::version!()).get_matches();
let cli_config = cli::extract_args(&matches);

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-bloom"
version = "1.9.6"
version = "1.9.13"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,9 +16,9 @@ rand = "0.7.0"
serde = { version = "1.0.133", features = ["rc"] }
rayon = "1.5.1"
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.6" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.13" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
log = "0.4.14"
[lib]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-bucket-map"
version = "1.9.6"
version = "1.9.13"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
@@ -12,11 +12,11 @@ edition = "2021"
[dependencies]
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
memmap2 = "0.5.0"
log = { version = "0.4.11" }
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.13" }
rand = "0.7.0"
fs_extra = "1.2.0"
tempfile = "3.2.0"

View File

@@ -102,6 +102,8 @@ command_step() {
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
agents:
- "queue=solana"
EOF
}
@@ -168,7 +170,7 @@ all_test_steps() {
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=default"
- "queue=solana"
EOF
else
annotate --style info \
@@ -221,6 +223,8 @@ EOF
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
agents:
- "queue=solana"
EOF
else
annotate --style info \

View File

@@ -16,7 +16,7 @@ steps:
- command: "ci/publish-crate.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 240
timeout_in_minutes: 360
name: "publish crate"
branches: "!master"
- command: "ci/publish-tarball.sh"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.9.6"
version = "1.9.13"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,9 +12,9 @@ edition = "2021"
[dependencies]
clap = "2.33.0"
rpassword = "5.0"
solana-perf = { path = "../perf", version = "=1.9.6" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-perf = { path = "../perf", version = "=1.9.13" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
uriparse = "0.6.3"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.6"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.6"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -19,12 +19,12 @@ Inflector = "0.11.4"
indicatif = "0.16.2"
serde = "1.0.130"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.6" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.6" }
solana-client = { path = "../client", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.6" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.6" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.13" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
solana-client = { path = "../client", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
[package.metadata.docs.rs]

View File

@@ -393,19 +393,19 @@ impl fmt::Display for CliValidators {
) -> fmt::Result {
fn non_zero_or_dash(v: u64, max_v: u64) -> String {
if v == 0 {
"- ".into()
" - ".into()
} else if v == max_v {
format!("{:>8} ( 0)", v)
format!("{:>9} ( 0)", v)
} else if v > max_v.saturating_sub(100) {
format!("{:>8} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
format!("{:>9} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
} else {
format!("{:>8} ", v)
format!("{:>9} ", v)
}
}
writeln!(
f,
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {}",
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {:>22} ({:.2}%)",
if validator.delinquent {
WARNING.to_string()
} else {
@@ -419,19 +419,19 @@ impl fmt::Display for CliValidators {
if let Some(skip_rate) = validator.skip_rate {
format!("{:.2}%", skip_rate)
} else {
"- ".to_string()
"- ".to_string()
},
validator.epoch_credits,
validator.version,
if validator.activated_stake > 0 {
format!(
"{} ({:.2}%)",
build_balance_message(validator.activated_stake, use_lamports_unit, true),
100. * validator.activated_stake as f64 / total_active_stake as f64,
)
} else {
"-".into()
},
build_balance_message_with_config(
validator.activated_stake,
&BuildBalanceMessageConfig {
use_lamports_unit,
trim_trailing_zeros: false,
..BuildBalanceMessageConfig::default()
}
),
100. * validator.activated_stake as f64 / total_active_stake as f64,
)
}
@@ -441,13 +441,13 @@ impl fmt::Display for CliValidators {
0
};
let header = style(format!(
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
" ",
"Identity",
"Vote Account",
"Commission",
"Last Vote ",
"Root Slot ",
"Last Vote ",
"Root Slot ",
"Skip Rate",
"Credits",
"Version",

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.6"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,29 +26,29 @@ semver = "1.0.4"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.6" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.6" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.6" }
solana-cli-config = { path = "../cli-config", version = "=1.9.6" }
solana-cli-output = { path = "../cli-output", version = "=1.9.6" }
solana-client = { path = "../client", version = "=1.9.6" }
solana-config-program = { path = "../programs/config", version = "=1.9.6" }
solana-faucet = { path = "../faucet", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.6" }
solana_rbpf = "=0.2.23"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.6" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.13" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.13" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
solana-cli-config = { path = "../cli-config", version = "=1.9.13" }
solana-cli-output = { path = "../cli-output", version = "=1.9.13" }
solana-client = { path = "../client", version = "=1.9.13" }
solana-config-program = { path = "../programs/config", version = "=1.9.13" }
solana-faucet = { path = "../faucet", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.13" }
solana_rbpf = "=0.2.24"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
[dev-dependencies]
solana-streamer = { path = "../streamer", version = "=1.9.6" }
solana-test-validator = { path = "../test-validator", version = "=1.9.6" }
solana-streamer = { path = "../streamer", version = "=1.9.13" }
solana-test-validator = { path = "../test-validator", version = "=1.9.13" }
tempfile = "3.2.0"
[[bin]]

View File

@@ -83,7 +83,6 @@ pub enum CliCommand {
filter: RpcTransactionLogsFilter,
},
Ping {
lamports: u64,
interval: Duration,
count: Option<u64>,
timeout: Duration,
@@ -973,7 +972,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::LiveSlots => process_live_slots(config),
CliCommand::Logs { filter } => process_logs(config, filter),
CliCommand::Ping {
lamports,
interval,
count,
timeout,
@@ -982,7 +980,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_ping(
&rpc_client,
config,
*lamports,
interval,
count,
timeout,

View File

@@ -43,13 +43,13 @@ use {
message::Message,
native_token::lamports_to_sol,
nonce::State as NonceState,
pubkey::{self, Pubkey},
pubkey::Pubkey,
rent::Rent,
rpc_port::DEFAULT_RPC_PORT_STR,
signature::Signature,
slot_history,
stake::{self, state::StakeState},
system_instruction, system_program,
system_instruction,
sysvar::{
self,
slot_history::SlotHistory,
@@ -259,15 +259,6 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.takes_value(false)
.help("Print timestamp (unix time + microseconds as in gettimeofday) before each line"),
)
.arg(
Arg::with_name("lamports")
.long("lamports")
.value_name("NUMBER")
.takes_value(true)
.default_value("1")
.validator(is_amount)
.help("Number of lamports to transfer for each transaction"),
)
.arg(
Arg::with_name("timeout")
.short("t")
@@ -512,7 +503,6 @@ pub fn parse_cluster_ping(
default_signer: &DefaultSigner,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let lamports = value_t_or_exit!(matches, "lamports", u64);
let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64));
let count = if matches.is_present("count") {
Some(value_t_or_exit!(matches, "count", u64))
@@ -524,7 +514,6 @@ pub fn parse_cluster_ping(
let print_timestamp = matches.is_present("print_timestamp");
Ok(CliCommandInfo {
command: CliCommand::Ping {
lamports,
interval,
count,
timeout,
@@ -1355,7 +1344,6 @@ pub fn process_get_transaction_count(rpc_client: &RpcClient, _config: &CliConfig
pub fn process_ping(
rpc_client: &RpcClient,
config: &CliConfig,
lamports: u64,
interval: &Duration,
count: &Option<u64>,
timeout: &Duration,
@@ -1375,7 +1363,7 @@ pub fn process_ping(
let mut confirmation_time: VecDeque<u64> = VecDeque::with_capacity(1024);
let mut blockhash = rpc_client.get_latest_blockhash()?;
let mut blockhash_transaction_count = 0;
let mut lamports = 0;
let mut blockhash_acquired = Instant::now();
let mut blockhash_from_cluster = false;
if let Some(fixed_blockhash) = fixed_blockhash {
@@ -1391,15 +1379,12 @@ pub fn process_ping(
// Fetch a new blockhash every minute
let new_blockhash = rpc_client.get_new_latest_blockhash(&blockhash)?;
blockhash = new_blockhash;
blockhash_transaction_count = 0;
lamports = 0;
blockhash_acquired = Instant::now();
}
let seed =
&format!("{}{}", blockhash_transaction_count, blockhash)[0..pubkey::MAX_SEED_LEN];
let to = Pubkey::create_with_seed(&config.signers[0].pubkey(), seed, &system_program::id())
.unwrap();
blockhash_transaction_count += 1;
let to = config.signers[0].pubkey();
lamports += 1;
let build_message = |lamports| {
let ix = system_instruction::transfer(&config.signers[0].pubkey(), &to, lamports);
@@ -2319,7 +2304,6 @@ mod tests {
parse_command(&test_ping, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Ping {
lamports: 1,
interval: Duration::from_secs(1),
count: Some(2),
timeout: Duration::from_secs(3),

View File

@@ -1384,7 +1384,13 @@ pub fn process_stake_authorize(
if let Some(authorized) = authorized {
match authorization_type {
StakeAuthorize::Staker => {
check_current_authority(&authorized.staker, &authority.pubkey())?;
// first check authorized withdrawer
check_current_authority(&authorized.withdrawer, &authority.pubkey())
.or_else(|_| {
// ...then check authorized staker. If neither matches, error will
// print the stake key as `expected`
check_current_authority(&authorized.staker, &authority.pubkey())
})?;
}
StakeAuthorize::Withdrawer => {
check_current_authority(&authorized.withdrawer, &authority.pubkey())?;

View File

@@ -291,8 +291,12 @@ pub fn process_set_validator_info(
// Check existence of validator-info account
let balance = rpc_client.get_balance(&info_pubkey).unwrap_or(0);
let lamports =
rpc_client.get_minimum_balance_for_rent_exemption(ValidatorInfo::max_space() as usize)?;
let keys = vec![
(validator_info::id(), false),
(config.signers[0].pubkey(), true),
];
let data_len = ValidatorInfo::max_space() + ConfigKeys::serialized_size(keys.clone());
let lamports = rpc_client.get_minimum_balance_for_rent_exemption(data_len as usize)?;
let signers = if balance == 0 {
if info_pubkey != info_keypair.pubkey() {
@@ -308,10 +312,7 @@ pub fn process_set_validator_info(
};
let build_message = |lamports| {
let keys = vec![
(validator_info::id(), false),
(config.signers[0].pubkey(), true),
];
let keys = keys.clone();
if balance == 0 {
println!(
"Publishing info for Validator {:?}",

View File

@@ -238,6 +238,7 @@ fn full_battery_tests(
#[test]
#[allow(clippy::redundant_closure)]
fn test_create_account_with_seed() {
const ONE_SIG_FEE: f64 = 0.000005;
solana_logger::setup();
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
@@ -310,7 +311,7 @@ fn test_create_account_with_seed() {
&offline_nonce_authority_signer.pubkey(),
);
check_balance!(
sol_to_lamports(4000.999999999),
sol_to_lamports(4001.0 - ONE_SIG_FEE),
&rpc_client,
&online_nonce_creator_signer.pubkey(),
);
@@ -381,12 +382,12 @@ fn test_create_account_with_seed() {
process_command(&submit_config).unwrap();
check_balance!(sol_to_lamports(241.0), &rpc_client, &nonce_address);
check_balance!(
sol_to_lamports(31.999999999),
sol_to_lamports(32.0 - ONE_SIG_FEE),
&rpc_client,
&offline_nonce_authority_signer.pubkey(),
);
check_balance!(
sol_to_lamports(4000.999999999),
sol_to_lamports(4001.0 - ONE_SIG_FEE),
&rpc_client,
&online_nonce_creator_signer.pubkey(),
);

View File

@@ -18,6 +18,7 @@ use {
solana_sdk::{
account_utils::StateMut,
commitment_config::CommitmentConfig,
fee::FeeStructure,
nonce::State as NonceState,
pubkey::Pubkey,
signature::{keypair_from_seed, Keypair, Signer},
@@ -876,14 +877,15 @@ fn test_stake_authorize() {
#[test]
fn test_stake_authorize_with_fee_payer() {
solana_logger::setup();
const SIG_FEE: u64 = 42;
let fee_one_sig = FeeStructure::default().get_max_fee(1, 0);
let fee_two_sig = FeeStructure::default().get_max_fee(2, 0);
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
SIG_FEE,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
@@ -912,14 +914,14 @@ fn test_stake_authorize_with_fee_payer() {
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &config, &default_pubkey, 100_000).unwrap();
check_balance!(100_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &config, &default_pubkey, 5_000_000).unwrap();
check_balance!(5_000_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &config_payer, &payer_pubkey, 100_000).unwrap();
check_balance!(100_000, &rpc_client, &payer_pubkey);
request_and_confirm_airdrop(&rpc_client, &config_payer, &payer_pubkey, 5_000_000).unwrap();
check_balance!(5_000_000, &rpc_client, &payer_pubkey);
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
check_balance!(100_000, &rpc_client, &offline_pubkey);
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 5_000_000).unwrap();
check_balance!(5_000_000, &rpc_client, &offline_pubkey);
check_ready(&rpc_client);
@@ -934,7 +936,7 @@ fn test_stake_authorize_with_fee_payer() {
withdrawer: None,
withdrawer_signer: None,
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
amount: SpendAmount::Some(1_000_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
@@ -945,8 +947,7 @@ fn test_stake_authorize_with_fee_payer() {
from: 0,
};
process_command(&config).unwrap();
// `config` balance should be 50,000 - 1 stake account sig - 1 fee sig
check_balance!(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
check_balance!(4_000_000 - fee_two_sig, &rpc_client, &default_pubkey);
// Assign authority with separate fee payer
config.signers = vec![&default_signer, &payer_keypair];
@@ -970,10 +971,10 @@ fn test_stake_authorize_with_fee_payer() {
};
process_command(&config).unwrap();
// `config` balance has not changed, despite submitting the TX
check_balance!(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
check_balance!(4_000_000 - fee_two_sig, &rpc_client, &default_pubkey);
// `config_payer` however has paid `config`'s authority sig
// and `config_payer`'s fee sig
check_balance!(100_000 - SIG_FEE - SIG_FEE, &rpc_client, &payer_pubkey);
check_balance!(5_000_000 - fee_two_sig, &rpc_client, &payer_pubkey);
// Assign authority with offline fee payer
let blockhash = rpc_client.get_latest_blockhash().unwrap();
@@ -1021,10 +1022,10 @@ fn test_stake_authorize_with_fee_payer() {
};
process_command(&config).unwrap();
// `config`'s balance again has not changed
check_balance!(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
check_balance!(4_000_000 - fee_two_sig, &rpc_client, &default_pubkey);
// `config_offline` however has paid 1 sig due to being both authority
// and fee payer
check_balance!(100_000 - SIG_FEE, &rpc_client, &offline_pubkey);
check_balance!(5_000_000 - fee_one_sig, &rpc_client, &offline_pubkey);
}
#[test]
@@ -1058,12 +1059,17 @@ fn test_stake_split() {
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000)
.unwrap();
check_balance!(500_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(
&rpc_client,
&config,
&config.signers[0].pubkey(),
50_000_000,
)
.unwrap();
check_balance!(50_000_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
check_balance!(100_000, &rpc_client, &offline_pubkey);
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 1_000_000).unwrap();
check_balance!(1_000_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
let minimum_stake_balance = rpc_client
@@ -1207,12 +1213,12 @@ fn test_stake_set_lockup() {
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000)
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 5_000_000)
.unwrap();
check_balance!(500_000, &rpc_client, &config.signers[0].pubkey());
check_balance!(5_000_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
check_balance!(100_000, &rpc_client, &offline_pubkey);
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 1_000_000).unwrap();
check_balance!(1_000_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
let minimum_stake_balance = rpc_client

View File

@@ -16,6 +16,7 @@ use {
solana_faucet::faucet::run_local_faucet,
solana_sdk::{
commitment_config::CommitmentConfig,
fee::FeeStructure,
native_token::sol_to_lamports,
nonce::State as NonceState,
pubkey::Pubkey,
@@ -29,6 +30,8 @@ use {
#[test]
fn test_transfer() {
solana_logger::setup();
let fee_one_sig = FeeStructure::default().get_max_fee(1, 0);
let fee_two_sig = FeeStructure::default().get_max_fee(2, 0);
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
@@ -77,7 +80,11 @@ fn test_transfer() {
derived_address_program_id: None,
};
process_command(&config).unwrap();
check_balance!(sol_to_lamports(4.0) - 1, &rpc_client, &sender_pubkey);
check_balance!(
sol_to_lamports(4.0) - fee_one_sig,
&rpc_client,
&sender_pubkey
);
check_balance!(sol_to_lamports(1.0), &rpc_client, &recipient_pubkey);
// Plain ole transfer, failure due to InsufficientFundsForSpendAndFee
@@ -98,7 +105,11 @@ fn test_transfer() {
derived_address_program_id: None,
};
assert!(process_command(&config).is_err());
check_balance!(sol_to_lamports(4.0) - 1, &rpc_client, &sender_pubkey);
check_balance!(
sol_to_lamports(4.0) - fee_one_sig,
&rpc_client,
&sender_pubkey
);
check_balance!(sol_to_lamports(1.0), &rpc_client, &recipient_pubkey);
let mut offline = CliConfig::recent_for_tests();
@@ -154,7 +165,11 @@ fn test_transfer() {
derived_address_program_id: None,
};
process_command(&config).unwrap();
check_balance!(sol_to_lamports(0.5) - 1, &rpc_client, &offline_pubkey);
check_balance!(
sol_to_lamports(0.5) - fee_one_sig,
&rpc_client,
&offline_pubkey
);
check_balance!(sol_to_lamports(1.5), &rpc_client, &recipient_pubkey);
// Create nonce account
@@ -172,7 +187,7 @@ fn test_transfer() {
};
process_command(&config).unwrap();
check_balance!(
sol_to_lamports(4.0) - 3 - minimum_nonce_balance,
sol_to_lamports(4.0) - fee_one_sig - fee_two_sig - minimum_nonce_balance,
&rpc_client,
&sender_pubkey,
);
@@ -210,7 +225,7 @@ fn test_transfer() {
};
process_command(&config).unwrap();
check_balance!(
sol_to_lamports(3.0) - 4 - minimum_nonce_balance,
sol_to_lamports(3.0) - 2 * fee_one_sig - fee_two_sig - minimum_nonce_balance,
&rpc_client,
&sender_pubkey,
);
@@ -235,7 +250,7 @@ fn test_transfer() {
};
process_command(&config).unwrap();
check_balance!(
sol_to_lamports(3.0) - 5 - minimum_nonce_balance,
sol_to_lamports(3.0) - 3 * fee_one_sig - fee_two_sig - minimum_nonce_balance,
&rpc_client,
&sender_pubkey,
);
@@ -293,13 +308,18 @@ fn test_transfer() {
derived_address_program_id: None,
};
process_command(&config).unwrap();
check_balance!(sol_to_lamports(0.1) - 2, &rpc_client, &offline_pubkey);
check_balance!(
sol_to_lamports(0.1) - 2 * fee_one_sig,
&rpc_client,
&offline_pubkey
);
check_balance!(sol_to_lamports(2.9), &rpc_client, &recipient_pubkey);
}
#[test]
fn test_transfer_multisession_signing() {
solana_logger::setup();
let fee = FeeStructure::default().get_max_fee(2, 0);
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
@@ -329,7 +349,7 @@ fn test_transfer_multisession_signing() {
&rpc_client,
&CliConfig::recent_for_tests(),
&offline_fee_payer_signer.pubkey(),
sol_to_lamports(1.0) + 3,
sol_to_lamports(1.0) + 2 * fee,
)
.unwrap();
check_balance!(
@@ -338,7 +358,7 @@ fn test_transfer_multisession_signing() {
&offline_from_signer.pubkey(),
);
check_balance!(
sol_to_lamports(1.0) + 3,
sol_to_lamports(1.0) + 2 * fee,
&rpc_client,
&offline_fee_payer_signer.pubkey(),
);
@@ -438,7 +458,7 @@ fn test_transfer_multisession_signing() {
&offline_from_signer.pubkey(),
);
check_balance!(
sol_to_lamports(1.0) + 1,
sol_to_lamports(1.0) + fee,
&rpc_client,
&offline_fee_payer_signer.pubkey(),
);
@@ -448,6 +468,7 @@ fn test_transfer_multisession_signing() {
#[test]
fn test_transfer_all() {
solana_logger::setup();
let fee = FeeStructure::default().get_max_fee(1, 0);
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
@@ -470,8 +491,8 @@ fn test_transfer_all() {
let sender_pubkey = config.signers[0].pubkey();
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap();
check_balance!(50_000, &rpc_client, &sender_pubkey);
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 500_000).unwrap();
check_balance!(500_000, &rpc_client, &sender_pubkey);
check_balance!(0, &rpc_client, &recipient_pubkey);
check_ready(&rpc_client);
@@ -495,7 +516,7 @@ fn test_transfer_all() {
};
process_command(&config).unwrap();
check_balance!(0, &rpc_client, &sender_pubkey);
check_balance!(49_999, &rpc_client, &recipient_pubkey);
check_balance!(500_000 - fee, &rpc_client, &recipient_pubkey);
}
#[test]
@@ -554,6 +575,7 @@ fn test_transfer_unfunded_recipient() {
#[test]
fn test_transfer_with_seed() {
solana_logger::setup();
let fee = FeeStructure::default().get_max_fee(1, 0);
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
@@ -612,7 +634,7 @@ fn test_transfer_with_seed() {
derived_address_program_id: Some(derived_address_program_id),
};
process_command(&config).unwrap();
check_balance!(sol_to_lamports(1.0) - 1, &rpc_client, &sender_pubkey);
check_balance!(sol_to_lamports(1.0) - fee, &rpc_client, &sender_pubkey);
check_balance!(sol_to_lamports(5.0), &rpc_client, &recipient_pubkey);
check_balance!(0, &rpc_client, &derived_address);
}

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client-test"
version = "1.9.6"
version = "1.9.13"
description = "Solana RPC Test"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -8,28 +8,29 @@ license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-client-test"
edition = "2021"
publish = false
[dependencies]
serde_json = "1.0.72"
serial_test = "0.5.1"
solana-client = { path = "../client", version = "=1.9.6" }
solana-ledger = { path = "../ledger", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.6" }
solana-metrics = { path = "../metrics", version = "=1.9.6" }
solana-perf = { path = "../perf", version = "=1.9.6" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.6" }
solana-rpc = { path = "../rpc", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-streamer = { path = "../streamer", version = "=1.9.6" }
solana-test-validator = { path = "../test-validator", version = "=1.9.6" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-client = { path = "../client", version = "=1.9.13" }
solana-ledger = { path = "../ledger", version = "=1.9.13" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.13" }
solana-metrics = { path = "../metrics", version = "=1.9.13" }
solana-perf = { path = "../perf", version = "=1.9.13" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.13" }
solana-rpc = { path = "../rpc", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-streamer = { path = "../streamer", version = "=1.9.13" }
solana-test-validator = { path = "../test-validator", version = "=1.9.13" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
systemstat = "0.1.10"
[dev-dependencies]
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.13" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -8,7 +8,7 @@ use {
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
RpcProgramAccountsConfig,
},
rpc_response::{RpcBlockUpdate, SlotInfo},
rpc_response::SlotInfo,
},
solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
solana_rpc::{
@@ -34,7 +34,9 @@ use {
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidator,
solana_transaction_status::{TransactionDetails, UiTransactionEncoding},
solana_transaction_status::{
ConfirmedBlockWithOptionalMetadata, TransactionDetails, UiTransactionEncoding,
},
std::{
collections::HashSet,
net::{IpAddr, SocketAddr},
@@ -212,6 +214,7 @@ fn test_block_subscription() {
..
} = create_genesis_config(10_000);
let bank = Bank::new_for_tests(&genesis_config);
let rent_exempt_amount = bank.get_minimum_balance_for_rent_exemption(0);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
// setup Blockstore
@@ -225,6 +228,8 @@ fn test_block_subscription() {
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
bank.transfer(rent_exempt_amount, &alice, &keypair2.pubkey())
.unwrap();
let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
vec![&alice, &keypair1, &keypair2, &keypair3],
0,
@@ -276,22 +281,12 @@ fn test_block_subscription() {
match maybe_actual {
Ok(actual) => {
let complete_block = blockstore.get_complete_block(slot, false).unwrap();
let block = complete_block.clone().configure(
let block = ConfirmedBlockWithOptionalMetadata::from(complete_block).configure(
UiTransactionEncoding::Json,
TransactionDetails::Signatures,
false,
);
let expected = RpcBlockUpdate {
slot,
block: Some(block),
err: None,
};
let block = complete_block.configure(
UiTransactionEncoding::Json,
TransactionDetails::Signatures,
false,
);
assert_eq!(actual.value.slot, expected.slot);
assert_eq!(actual.value.slot, slot);
assert!(block.eq(&actual.value.block.unwrap()));
}
Err(e) => {

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.9.6"
version = "1.9.13"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -23,15 +23,15 @@ semver = "1.0.4"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.6" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.6" }
solana-faucet = { path = "../faucet", version = "=1.9.6" }
solana-net-utils = { path = "../net-utils", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.6" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.13" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
solana-faucet = { path = "../faucet", version = "=1.9.13" }
solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tungstenite = { version = "0.16.0", features = ["rustls-tls-webpki-roots"] }
@@ -40,7 +40,7 @@ url = "2.2.2"
[dev-dependencies]
assert_matches = "1.5.0"
jsonrpc-http-server = "18.0.0"
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.13" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -37,14 +37,14 @@ impl HttpSender {
///
/// The URL is an HTTP URL, usually for port 8899, as in
/// "http://localhost:8899". The sender has a default timeout of 30 seconds.
pub fn new(url: String) -> Self {
pub fn new<U: ToString>(url: U) -> Self {
Self::new_with_timeout(url, Duration::from_secs(30))
}
/// Create an HTTP RPC sender.
///
/// The URL is an HTTP URL, usually for port 8899.
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
pub fn new_with_timeout<U: ToString>(url: U, timeout: Duration) -> Self {
// `reqwest::blocking::Client` panics if run in a tokio async context. Shuttle the
// request to a different tokio thread to avoid this
let client = Arc::new(
@@ -58,7 +58,7 @@ impl HttpSender {
Self {
client,
url,
url: url.to_string(),
request_id: AtomicU64::new(0),
stats: RwLock::new(RpcTransportStats::default()),
}

View File

@@ -75,13 +75,13 @@ pub struct MockSender {
/// from [`RpcRequest`] to a JSON [`Value`] response, Any entries in this map
/// override the default behavior for the given request.
impl MockSender {
pub fn new(url: String) -> Self {
pub fn new<U: ToString>(url: U) -> Self {
Self::new_with_mocks(url, Mocks::default())
}
pub fn new_with_mocks(url: String, mocks: Mocks) -> Self {
pub fn new_with_mocks<U: ToString>(url: U, mocks: Mocks) -> Self {
Self {
url,
url: url.to_string(),
mocks: RwLock::new(mocks),
}
}

View File

@@ -163,7 +163,7 @@ impl RpcClient {
/// `RpcSender`. Most applications should use one of the other constructors,
/// such as [`new`] and [`new_mock`], which create an `RpcClient`
/// encapsulating an [`HttpSender`] and [`MockSender`] respectively.
fn new_sender<T: RpcSender + Send + Sync + 'static>(
pub fn new_sender<T: RpcSender + Send + Sync + 'static>(
sender: T,
config: RpcClientConfig,
) -> Self {
@@ -191,7 +191,7 @@ impl RpcClient {
/// let url = "http://localhost:8899".to_string();
/// let client = RpcClient::new(url);
/// ```
pub fn new(url: String) -> Self {
pub fn new<U: ToString>(url: U) -> Self {
Self::new_with_commitment(url, CommitmentConfig::default())
}
@@ -214,7 +214,7 @@ impl RpcClient {
/// let commitment_config = CommitmentConfig::processed();
/// let client = RpcClient::new_with_commitment(url, commitment_config);
/// ```
pub fn new_with_commitment(url: String, commitment_config: CommitmentConfig) -> Self {
pub fn new_with_commitment<U: ToString>(url: U, commitment_config: CommitmentConfig) -> Self {
Self::new_sender(
HttpSender::new(url),
RpcClientConfig::with_commitment(commitment_config),
@@ -240,7 +240,7 @@ impl RpcClient {
/// let timeout = Duration::from_secs(1);
/// let client = RpcClient::new_with_timeout(url, timeout);
/// ```
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
pub fn new_with_timeout<U: ToString>(url: U, timeout: Duration) -> Self {
Self::new_sender(
HttpSender::new_with_timeout(url, timeout),
RpcClientConfig::with_commitment(CommitmentConfig::default()),
@@ -269,8 +269,8 @@ impl RpcClient {
/// commitment_config,
/// );
/// ```
pub fn new_with_timeout_and_commitment(
url: String,
pub fn new_with_timeout_and_commitment<U: ToString>(
url: U,
timeout: Duration,
commitment_config: CommitmentConfig,
) -> Self {
@@ -312,8 +312,8 @@ impl RpcClient {
/// confirm_transaction_initial_timeout,
/// );
/// ```
pub fn new_with_timeouts_and_commitment(
url: String,
pub fn new_with_timeouts_and_commitment<U: ToString>(
url: U,
timeout: Duration,
commitment_config: CommitmentConfig,
confirm_transaction_initial_timeout: Duration,
@@ -347,7 +347,7 @@ impl RpcClient {
/// let url = "fails".to_string();
/// let successful_client = RpcClient::new_mock(url);
/// ```
pub fn new_mock(url: String) -> Self {
pub fn new_mock<U: ToString>(url: U) -> Self {
Self::new_sender(
MockSender::new(url),
RpcClientConfig::with_commitment(CommitmentConfig::default()),
@@ -381,7 +381,7 @@ impl RpcClient {
/// let url = "succeeds".to_string();
/// let client = RpcClient::new_mock_with_mocks(url, mocks);
/// ```
pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
pub fn new_mock_with_mocks<U: ToString>(url: U, mocks: Mocks) -> Self {
Self::new_sender(
MockSender::new_with_mocks(url, mocks),
RpcClientConfig::with_commitment(CommitmentConfig::default()),

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.6"
version = "1.9.13"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"
@@ -34,32 +34,32 @@ rayon = "1.5.1"
retain_mut = "0.1.5"
serde = "1.0.130"
serde_derive = "1.0.103"
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.6" }
solana-bloom = { path = "../bloom", version = "=1.9.6" }
solana-client = { path = "../client", version = "=1.9.6" }
solana-entry = { path = "../entry", version = "=1.9.6" }
solana-gossip = { path = "../gossip", version = "=1.9.6" }
solana-ledger = { path = "../ledger", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-metrics = { path = "../metrics", version = "=1.9.6" }
solana-net-utils = { path = "../net-utils", version = "=1.9.6" }
solana-perf = { path = "../perf", version = "=1.9.6" }
solana-poh = { path = "../poh", version = "=1.9.6" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.6" }
solana-rpc = { path = "../rpc", version = "=1.9.6" }
solana-replica-lib = { path = "../replica-lib", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.6" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.6" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.6" }
solana-streamer = { path = "../streamer", version = "=1.9.6" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.6" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.6" }
solana-bloom = { path = "../bloom", version = "=1.9.13" }
solana-client = { path = "../client", version = "=1.9.13" }
solana-entry = { path = "../entry", version = "=1.9.13" }
solana-geyser-plugin-manager = { path = "../geyser-plugin-manager", version = "=1.9.13" }
solana-gossip = { path = "../gossip", version = "=1.9.13" }
solana-ledger = { path = "../ledger", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-metrics = { path = "../metrics", version = "=1.9.13" }
solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
solana-perf = { path = "../perf", version = "=1.9.13" }
solana-poh = { path = "../poh", version = "=1.9.13" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.13" }
solana-rpc = { path = "../rpc", version = "=1.9.13" }
solana-replica-lib = { path = "../replica-lib", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.13" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.13" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.13" }
solana-streamer = { path = "../streamer", version = "=1.9.13" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
tempfile = "3.2.0"
thiserror = "1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.6" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.13" }
sys-info = "0.9.1"
tokio = { version = "1", features = ["full"] }
trees = "0.4.2"
@@ -73,9 +73,9 @@ matches = "0.1.9"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde_json = "1.0.72"
serial_test = "0.5.1"
solana-program-runtime = { path = "../program-runtime", version = "=1.9.6" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.13" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
static_assertions = "1.1.0"
systemstat = "0.1.10"

View File

@@ -10,6 +10,7 @@ use {
rayon::prelude::*,
solana_core::{
banking_stage::{BankingStage, BankingStageStats},
leader_slot_banking_stage_metrics::LeaderSlotMetricsTracker,
qos_service::QosService,
},
solana_entry::entry::{next_hash, Entry},
@@ -98,6 +99,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
&BankingStageStats::default(),
&recorder,
&Arc::new(QosService::new(Arc::new(RwLock::new(CostModel::default())))),
&mut LeaderSlotMetricsTracker::new(0),
);
});

View File

@@ -9,7 +9,7 @@ use {
log::*,
rand::{thread_rng, Rng},
solana_core::{sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage},
solana_perf::{packet::to_packet_batches, test_tx::test_tx},
solana_perf::{packet::to_packet_batches, packet::PacketBatch, test_tx::test_tx},
solana_sdk::{
hash::Hash,
signature::{Keypair, Signer},
@@ -112,19 +112,10 @@ fn bench_packet_discard_mixed_senders(bencher: &mut Bencher) {
});
}
#[bench]
fn bench_sigverify_stage(bencher: &mut Bencher) {
solana_logger::setup();
let (packet_s, packet_r) = channel();
let (verified_s, verified_r) = unbounded();
let verifier = TransactionSigVerifier::default();
let stage = SigVerifyStage::new(packet_r, verified_s, verifier);
let now = Instant::now();
fn gen_batches(use_same_tx: bool) -> Vec<PacketBatch> {
let len = 4096;
let use_same_tx = true;
let chunk_size = 1024;
let mut batches = if use_same_tx {
if use_same_tx {
let tx = test_tx();
to_packet_batches(&vec![tx; len], chunk_size)
} else {
@@ -142,14 +133,28 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
})
.collect();
to_packet_batches(&txs, chunk_size)
};
}
}
trace!(
"starting... generation took: {} ms batches: {}",
duration_as_ms(&now.elapsed()),
batches.len()
);
#[bench]
fn bench_sigverify_stage(bencher: &mut Bencher) {
solana_logger::setup();
trace!("start");
let (packet_s, packet_r) = channel();
let (verified_s, verified_r) = unbounded();
let verifier = TransactionSigVerifier::default();
let stage = SigVerifyStage::new(packet_r, verified_s, verifier);
let use_same_tx = true;
bencher.iter(move || {
let now = Instant::now();
let mut batches = gen_batches(use_same_tx);
trace!(
"starting... generation took: {} ms batches: {}",
duration_as_ms(&now.elapsed()),
batches.len()
);
let mut sent_len = 0;
for _ in 0..batches.len() {
if let Some(batch) = batches.pop() {
@@ -165,7 +170,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
received += v.packets.len();
batches.push(v);
}
if received >= sent_len {
if use_same_tx || received >= sent_len {
break;
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -10,44 +10,31 @@ use {
solana_runtime::{bank::Bank, cost_model::CostModel},
solana_sdk::timing::timestamp,
std::{
sync::atomic::{AtomicBool, Ordering},
sync::{mpsc::Receiver, Arc, RwLock},
thread::{self, Builder, JoinHandle},
time::Duration,
},
};
// Update blockstore persistence storage when accumulated cost_table updates count exceeds the threshold
const PERSIST_THRESHOLD: u64 = 1_000;
#[derive(Default)]
pub struct CostUpdateServiceTiming {
last_print: u64,
update_cost_model_count: u64,
update_cost_model_elapsed: u64,
persist_cost_table_elapsed: u64,
}
impl CostUpdateServiceTiming {
fn update(
&mut self,
update_cost_model_count: Option<u64>,
update_cost_model_elapsed: Option<u64>,
persist_cost_table_elapsed: Option<u64>,
) {
if let Some(update_cost_model_count) = update_cost_model_count {
self.update_cost_model_count += update_cost_model_count;
}
if let Some(update_cost_model_elapsed) = update_cost_model_elapsed {
self.update_cost_model_elapsed += update_cost_model_elapsed;
}
if let Some(persist_cost_table_elapsed) = persist_cost_table_elapsed {
self.persist_cost_table_elapsed += persist_cost_table_elapsed;
}
fn update(&mut self, update_cost_model_count: u64, update_cost_model_elapsed: u64) {
self.update_cost_model_count += update_cost_model_count;
self.update_cost_model_elapsed += update_cost_model_elapsed;
let now = timestamp();
let elapsed_ms = now - self.last_print;
if elapsed_ms > 1000 {
datapoint_info!(
"cost-update-service-stats",
("total_elapsed_us", elapsed_ms * 1000, i64),
(
"update_cost_model_count",
self.update_cost_model_count as i64,
@@ -58,11 +45,6 @@ impl CostUpdateServiceTiming {
self.update_cost_model_elapsed as i64,
i64
),
(
"persist_cost_table_elapsed",
self.persist_cost_table_elapsed as i64,
i64
),
);
*self = CostUpdateServiceTiming::default();
@@ -89,6 +71,7 @@ pub struct CostUpdateService {
impl CostUpdateService {
#[allow(clippy::new_ret_no_self)]
pub fn new(
exit: Arc<AtomicBool>,
blockstore: Arc<Blockstore>,
cost_model: Arc<RwLock<CostModel>>,
cost_update_receiver: CostUpdateReceiver,
@@ -96,7 +79,7 @@ impl CostUpdateService {
let thread_hdl = Builder::new()
.name("solana-cost-update-service".to_string())
.spawn(move || {
Self::service_loop(blockstore, cost_model, cost_update_receiver);
Self::service_loop(exit, blockstore, cost_model, cost_update_receiver);
})
.unwrap();
@@ -108,99 +91,85 @@ impl CostUpdateService {
}
fn service_loop(
blockstore: Arc<Blockstore>,
exit: Arc<AtomicBool>,
_blockstore: Arc<Blockstore>,
cost_model: Arc<RwLock<CostModel>>,
cost_update_receiver: CostUpdateReceiver,
) {
let mut cost_update_service_timing = CostUpdateServiceTiming::default();
let mut update_count = 0_u64;
let mut update_count: u64;
let wait_timer = Duration::from_millis(100);
for cost_update in cost_update_receiver.iter() {
match cost_update {
CostUpdate::FrozenBank { bank } => {
bank.read_cost_tracker().unwrap().report_stats(bank.slot());
loop {
if exit.load(Ordering::Relaxed) {
break;
}
update_count = 0_u64;
let mut update_cost_model_time = Measure::start("update_cost_model_time");
for cost_update in cost_update_receiver.try_iter() {
match cost_update {
CostUpdate::FrozenBank { bank } => {
bank.read_cost_tracker().unwrap().report_stats(bank.slot());
}
CostUpdate::ExecuteTiming {
mut execute_timings,
} => {
Self::update_cost_model(&cost_model, &mut execute_timings);
update_count += 1;
}
}
CostUpdate::ExecuteTiming {
mut execute_timings,
} => {
let mut update_cost_model_time = Measure::start("update_cost_model_time");
update_count += Self::update_cost_model(&cost_model, &mut execute_timings);
update_cost_model_time.stop();
cost_update_service_timing.update(
Some(update_count),
Some(update_cost_model_time.as_us()),
None,
);
}
update_cost_model_time.stop();
if update_count > PERSIST_THRESHOLD {
let mut persist_cost_table_time = Measure::start("persist_cost_table_time");
Self::persist_cost_table(&blockstore, &cost_model);
update_count = 0_u64;
persist_cost_table_time.stop();
cost_update_service_timing.update(
None,
None,
Some(persist_cost_table_time.as_us()),
cost_update_service_timing.update(update_count, update_cost_model_time.as_us());
thread::sleep(wait_timer);
}
}
fn update_cost_model(
cost_model: &RwLock<CostModel>,
execute_timings: &mut ExecuteTimings,
) -> bool {
let mut dirty = false;
{
for (program_id, program_timings) in &mut execute_timings.details.per_program_timings {
let current_estimated_program_cost =
cost_model.read().unwrap().find_instruction_cost(program_id);
program_timings.coalesce_error_timings(current_estimated_program_cost);
if program_timings.count < 1 {
continue;
}
let units = program_timings.accumulated_units / program_timings.count as u64;
match cost_model
.write()
.unwrap()
.upsert_instruction_cost(program_id, units)
{
Ok(c) => {
debug!(
"after replayed into bank, instruction {:?} has averaged cost {}",
program_id, c
);
dirty = true;
}
Err(err) => {
debug!(
"after replayed into bank, instruction {:?} failed to update cost, err: {}",
program_id, err
);
}
}
}
}
}
// Normalize `program_timings` with current estimated cost, update instruction_cost table
// Returns number of updates applied
fn update_cost_model(
cost_model: &RwLock<CostModel>,
execute_timings: &mut ExecuteTimings,
) -> u64 {
let mut update_count = 0_u64;
for (program_id, program_timings) in &mut execute_timings.details.per_program_timings {
let current_estimated_program_cost =
cost_model.read().unwrap().find_instruction_cost(program_id);
program_timings.coalesce_error_timings(current_estimated_program_cost);
if program_timings.count < 1 {
continue;
}
let units = program_timings.accumulated_units / program_timings.count as u64;
cost_model
.write()
.unwrap()
.upsert_instruction_cost(program_id, units);
update_count += 1;
debug!(
"After replayed into bank, updated cost for instruction {:?}, update_value {}, pre_aggregated_value {}",
program_id, units, current_estimated_program_cost
);
}
update_count
}
// 1. Remove obsolete program entries from persisted table to limit its size
// 2. Update persisted program cost. This involves EMA cost calculation at
// execute_cost_table.get_cost()
fn persist_cost_table(blockstore: &Blockstore, cost_model: &RwLock<CostModel>) {
let db_records = blockstore.read_program_costs().expect("read programs");
let cost_model = cost_model.read().unwrap();
let active_program_keys = cost_model.get_program_keys();
// delete records from blockstore if they are no longer in cost_table
db_records.iter().for_each(|(pubkey, _)| {
if !active_program_keys.contains(&pubkey) {
blockstore
.delete_program_cost(pubkey)
.expect("delete old program");
}
});
active_program_keys.iter().for_each(|program_id| {
let cost = cost_model.find_instruction_cost(program_id);
blockstore
.write_program_cost(program_id, &cost)
.expect("persist program costs to blockstore");
});
debug!(
"after replayed into bank, updated cost model instruction cost table, current values: {:?}",
cost_model.read().unwrap().get_instruction_cost_table()
);
dirty
}
}
@@ -212,9 +181,15 @@ mod tests {
fn test_update_cost_model_with_empty_execute_timings() {
let cost_model = Arc::new(RwLock::new(CostModel::default()));
let mut empty_execute_timings = ExecuteTimings::default();
CostUpdateService::update_cost_model(&cost_model, &mut empty_execute_timings);
assert_eq!(
CostUpdateService::update_cost_model(&cost_model, &mut empty_execute_timings),
0
0,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
);
}
@@ -232,7 +207,7 @@ mod tests {
let accumulated_units: u64 = 100;
let total_errored_units = 0;
let count: u32 = 10;
expected_cost = accumulated_units / count as u64; // = 10
expected_cost = accumulated_units / count as u64;
execute_timings.details.per_program_timings.insert(
program_key_1,
@@ -244,15 +219,22 @@ mod tests {
total_errored_units,
},
);
let update_count =
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(1, update_count);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
expected_cost,
1,
cost_model
.read()
.unwrap()
.find_instruction_cost(&program_key_1)
.get_instruction_cost_table()
.len()
);
assert_eq!(
Some(&expected_cost),
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
);
}
@@ -261,8 +243,8 @@ mod tests {
let accumulated_us: u64 = 2000;
let accumulated_units: u64 = 200;
let count: u32 = 10;
// to expect new cost = (mean + 2 * std) of [10, 20]
expected_cost = 13;
// to expect new cost is Average(new_value, existing_value)
expected_cost = ((accumulated_units / count as u64) + expected_cost) / 2;
execute_timings.details.per_program_timings.insert(
program_key_1,
@@ -274,15 +256,22 @@ mod tests {
total_errored_units: 0,
},
);
let update_count =
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(1, update_count);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
expected_cost,
1,
cost_model
.read()
.unwrap()
.find_instruction_cost(&program_key_1)
.get_instruction_cost_table()
.len()
);
assert_eq!(
Some(&expected_cost),
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
);
}
}
@@ -306,49 +295,20 @@ mod tests {
total_errored_units: 0,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
// If both the `errored_txs_compute_consumed` is empty and `count == 0`, then
// nothing should be inserted into the cost model
assert_eq!(
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
0
);
}
// set up current instruction cost to 100
let current_program_cost = 100;
{
execute_timings.details.per_program_timings.insert(
program_key_1,
ProgramTiming {
accumulated_us: 1000,
accumulated_units: current_program_cost,
count: 1,
errored_txs_compute_consumed: vec![],
total_errored_units: 0,
},
);
let update_count =
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(1, update_count);
assert_eq!(
current_program_cost,
cost_model
.read()
.unwrap()
.find_instruction_cost(&program_key_1)
);
assert!(cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.is_empty());
}
// Test updating cost model with only erroring compute costs where the `cost_per_error` is
// greater than the current instruction cost for the program. Should update with the
// new erroring compute costs
let cost_per_error = 1000;
// expected_cost = (mean + 2*std) of data points:
// [
// 100, // original program_cost
// 1000, // cost_per_error
// ]
let expected_cost = 289u64;
{
let errored_txs_compute_consumed = vec![cost_per_error; 3];
let total_errored_units = errored_txs_compute_consumed.iter().sum();
@@ -362,23 +322,29 @@ mod tests {
total_errored_units,
},
);
let update_count =
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(1, update_count);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
expected_cost,
1,
cost_model
.read()
.unwrap()
.find_instruction_cost(&program_key_1)
.get_instruction_cost_table()
.len()
);
assert_eq!(
Some(&cost_per_error),
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
);
}
// Test updating cost model with only erroring compute costs where the error cost is
// `smaller_cost_per_error`, less than the current instruction cost for the program.
// The cost should not decrease for these new lesser errors
let smaller_cost_per_error = expected_cost - 10;
let smaller_cost_per_error = cost_per_error - 10;
{
let errored_txs_compute_consumed = vec![smaller_cost_per_error; 3];
let total_errored_units = errored_txs_compute_consumed.iter().sum();
@@ -392,23 +358,22 @@ mod tests {
total_errored_units,
},
);
let update_count =
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
// expected_cost = (mean = 2*std) of data points:
// [
// 100, // original program cost,
// 1000, // cost_per_error from above test
// 289, // the smaller_cost_per_error will be coalesced to prev cost
// ]
let expected_cost = 293u64;
assert_eq!(1, update_count);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
expected_cost,
1,
cost_model
.read()
.unwrap()
.find_instruction_cost(&program_key_1)
.get_instruction_cost_table()
.len()
);
assert_eq!(
Some(&cost_per_error),
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
);
}
}

View File

@@ -0,0 +1,871 @@
use {
crate::leader_slot_banking_stage_timing_metrics::*,
solana_poh::poh_recorder::BankStart,
solana_sdk::{clock::Slot, saturating_add_assign},
std::time::Instant,
};
/// A summary of what happened to transactions passed to the execution pipeline.
/// Transactions can
/// 1) Did not even make it to execution due to being filtered out by things like AccountInUse
/// lock conflicts or CostModel compute limits. These types of errors are retryable and
/// counted in `Self::retryable_transaction_indexes`.
/// 2) Did not execute due to some fatal error like too old, or duplicate signature. These
/// will be dropped from the transactions queue and not counted in `Self::retryable_transaction_indexes`
/// 3) Were executed and committed, captured by `committed_transactions_count` below.
/// 4) Were executed and failed commit, captured by `failed_commit_count` below.
pub(crate) struct ProcessTransactionsSummary {
// Returns true if we hit the end of the block/max PoH height for the block before
// processing all the transactions in the batch.
pub reached_max_poh_height: bool,
// Total number of transactions that were passed as candidates for execution. See description
// of struct above for possible outcomes for these transactions
pub transactions_attempted_execution_count: usize,
// Total number of transactions that made it into the block
pub committed_transactions_count: usize,
// Total number of transactions that made it into the block where the transactions
// output from execution was success/no error.
pub committed_transactions_with_successful_result_count: usize,
// All transactions that were executed but then failed record because the
// slot ended
pub failed_commit_count: usize,
// Indexes of transactions in the transactions slice that were not committed but are retryable
pub retryable_transaction_indexes: Vec<usize>,
// The number of transactions filtered out by the cost model
pub cost_model_throttled_transactions_count: usize,
// Total amount of time spent running the cost model
pub cost_model_us: u64,
// Breakdown of time spent executing and comitting transactions
pub execute_and_commit_timings: LeaderExecuteAndCommitTimings,
}
// Metrics describing packets ingested/processed in various parts of BankingStage during this
// validator's leader slot
#[derive(Debug, Default)]
struct LeaderSlotPacketCountMetrics {
// total number of live packets TPU received from verified receiver for processing.
total_new_valid_packets: u64,
// total number of packets TPU received from sigverify that failed signature verification.
newly_failed_sigverify_count: u64,
// total number of dropped packet due to the thread's buffered packets capacity being reached.
exceeded_buffer_limit_dropped_packets_count: u64,
// total number of packets that got added to the pending buffer after arriving to BankingStage
newly_buffered_packets_count: u64,
// total number of transactions in the buffer that were filtered out due to things like age and
// duplicate signature checks
retryable_packets_filtered_count: u64,
// total number of transactions that attempted execution in this slot. Should equal the sum
// of `committed_transactions_count`, `retryable_errored_transaction_count`, and
// `nonretryable_errored_transactions_count`.
transactions_attempted_execution_count: u64,
// total number of transactions that were executed and committed into the block
// on this thread
committed_transactions_count: u64,
// total number of transactions that were executed, got a successful execution output/no error,
// and were then committed into the block
committed_transactions_with_successful_result_count: u64,
// total number of transactions that were not executed or failed commit, BUT were added back to the buffered
// queue becaus they were retryable errors
retryable_errored_transaction_count: u64,
// total number of transactions that attempted execution due to some fatal error (too old, duplicate signature, etc.)
// AND were dropped from the buffered queue
nonretryable_errored_transactions_count: u64,
// total number of transactions that were executed, but failed to be committed into the Poh stream because
// the block ended. Some of these may be already counted in `nonretryable_errored_transactions_count` if they
// then hit the age limit after failing to be comitted.
executed_transactions_failed_commit_count: u64,
// total number of transactions that were excluded from the block because they were too expensive
// according to the cost model. These transactions are added back to the buffered queue and are
// already counted in `self.retrayble_errored_transaction_count`.
cost_model_throttled_transactions_count: u64,
// total number of forwardsable packets that failed forwarding
failed_forwarded_packets_count: u64,
// total number of forwardsable packets that were successfully forwarded
successful_forwarded_packets_count: u64,
// total number of attempted forwards that failed. Note this is not a count of the number of packets
// that failed, just the total number of batches of packets that failed forwarding
packet_batch_forward_failure_count: u64,
// total number of valid unprocessed packets in the buffer that were removed after being forwarded
cleared_from_buffer_after_forward_count: u64,
// total number of packets removed at the end of the slot due to being too old, duplicate, etc.
end_of_slot_filtered_invalid_count: u64,
}
impl LeaderSlotPacketCountMetrics {
fn new() -> Self {
Self { ..Self::default() }
}
fn report(&self, id: u32, slot: Slot) {
datapoint_info!(
"banking_stage-leader_slot_packet_counts",
("id", id as i64, i64),
("slot", slot as i64, i64),
(
"total_new_valid_packets",
self.total_new_valid_packets as i64,
i64
),
(
"newly_failed_sigverify_count",
self.newly_failed_sigverify_count as i64,
i64
),
(
"exceeded_buffer_limit_dropped_packets_count",
self.exceeded_buffer_limit_dropped_packets_count as i64,
i64
),
(
"newly_buffered_packets_count",
self.newly_buffered_packets_count as i64,
i64
),
(
"retryable_packets_filtered_count",
self.retryable_packets_filtered_count as i64,
i64
),
(
"transactions_attempted_execution_count",
self.transactions_attempted_execution_count as i64,
i64
),
(
"committed_transactions_count",
self.committed_transactions_count as i64,
i64
),
(
"committed_transactions_with_successful_result_count",
self.committed_transactions_with_successful_result_count as i64,
i64
),
(
"retryable_errored_transaction_count",
self.retryable_errored_transaction_count as i64,
i64
),
(
"nonretryable_errored_transactions_count",
self.nonretryable_errored_transactions_count as i64,
i64
),
(
"executed_transactions_failed_commit_count",
self.executed_transactions_failed_commit_count as i64,
i64
),
(
"cost_model_throttled_transactions_count",
self.cost_model_throttled_transactions_count as i64,
i64
),
(
"failed_forwarded_packets_count",
self.failed_forwarded_packets_count as i64,
i64
),
(
"successful_forwarded_packets_count",
self.successful_forwarded_packets_count as i64,
i64
),
(
"packet_batch_forward_failure_count",
self.packet_batch_forward_failure_count as i64,
i64
),
(
"cleared_from_buffer_after_forward_count",
self.cleared_from_buffer_after_forward_count as i64,
i64
),
(
"end_of_slot_filtered_invalid_count",
self.end_of_slot_filtered_invalid_count as i64,
i64
),
);
}
}
#[derive(Debug)]
pub(crate) struct LeaderSlotMetrics {
// banking_stage creates one QosService instance per working threads, that is uniquely
// identified by id. This field allows to categorize metrics for gossip votes, TPU votes
// and other transactions.
id: u32,
// aggregate metrics per slot
slot: Slot,
packet_count_metrics: LeaderSlotPacketCountMetrics,
timing_metrics: LeaderSlotTimingMetrics,
// Used by tests to check if the `self.report()` method was called
is_reported: bool,
}
impl LeaderSlotMetrics {
    /// Begins tracking metrics for leader slot `slot` on BankingStage thread
    /// `id`; `bank_creation_time` seeds the bank-detection-delay timing.
    pub(crate) fn new(id: u32, slot: Slot, bank_creation_time: &Instant) -> Self {
        Self {
            id,
            slot,
            is_reported: false,
            packet_count_metrics: LeaderSlotPacketCountMetrics::new(),
            timing_metrics: LeaderSlotTimingMetrics::new(bank_creation_time),
        }
    }

    /// Emits all accumulated datapoints for this slot and marks the metrics
    /// as reported (timing datapoints first, then packet counts).
    pub(crate) fn report(&mut self) {
        self.is_reported = true;
        let (id, slot) = (self.id, self.slot);
        self.timing_metrics.report(id, slot);
        self.packet_count_metrics.report(id, slot);
    }

    /// Returns `Some(self.slot)` if the metrics have been reported, otherwise returns None
    fn reported_slot(&self) -> Option<Slot> {
        self.is_reported.then(|| self.slot)
    }
}
/// Owns the `LeaderSlotMetrics` (if any) for the slot this BankingStage thread
/// is currently building a block for, and manages slot-boundary transitions.
#[derive(Debug)]
pub struct LeaderSlotMetricsTracker {
    // Only `Some` if BankingStage detects it's time to construct our leader slot,
    // otherwise `None`
    leader_slot_metrics: Option<LeaderSlotMetrics>,
    // BankingStage thread id, forwarded into each `LeaderSlotMetrics`
    id: u32,
}
impl LeaderSlotMetricsTracker {
pub fn new(id: u32) -> Self {
Self {
leader_slot_metrics: None,
id,
}
}
// Returns reported slot if metrics were reported
pub(crate) fn update_on_leader_slot_boundary(
&mut self,
bank_start: &Option<BankStart>,
) -> Option<Slot> {
match (self.leader_slot_metrics.as_mut(), bank_start) {
(None, None) => None,
(Some(leader_slot_metrics), None) => {
leader_slot_metrics.report();
// Ensure tests catch that `report()` method was called
let reported_slot = leader_slot_metrics.reported_slot();
// Slot has ended, time to report metrics
self.leader_slot_metrics = None;
reported_slot
}
(None, Some(bank_start)) => {
// Our leader slot has begain, time to create a new slot tracker
self.leader_slot_metrics = Some(LeaderSlotMetrics::new(
self.id,
bank_start.working_bank.slot(),
&bank_start.bank_creation_time,
));
self.leader_slot_metrics.as_ref().unwrap().reported_slot()
}
(Some(leader_slot_metrics), Some(bank_start)) => {
if leader_slot_metrics.slot != bank_start.working_bank.slot() {
// Last slot has ended, new slot has began
leader_slot_metrics.report();
// Ensure tests catch that `report()` method was called
let reported_slot = leader_slot_metrics.reported_slot();
self.leader_slot_metrics = Some(LeaderSlotMetrics::new(
self.id,
bank_start.working_bank.slot(),
&bank_start.bank_creation_time,
));
reported_slot
} else {
leader_slot_metrics.reported_slot()
}
}
}
}
pub(crate) fn accumulate_process_transactions_summary(
&mut self,
process_transactions_summary: &ProcessTransactionsSummary,
) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
let ProcessTransactionsSummary {
transactions_attempted_execution_count,
committed_transactions_count,
committed_transactions_with_successful_result_count,
failed_commit_count,
ref retryable_transaction_indexes,
cost_model_throttled_transactions_count,
cost_model_us,
ref execute_and_commit_timings,
..
} = process_transactions_summary;
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.transactions_attempted_execution_count,
*transactions_attempted_execution_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.committed_transactions_count,
*committed_transactions_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.committed_transactions_with_successful_result_count,
*committed_transactions_with_successful_result_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.executed_transactions_failed_commit_count,
*failed_commit_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.retryable_errored_transaction_count,
retryable_transaction_indexes.len() as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.nonretryable_errored_transactions_count,
transactions_attempted_execution_count
.saturating_sub(*committed_transactions_count)
.saturating_sub(retryable_transaction_indexes.len()) as u64
);
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.cost_model_throttled_transactions_count,
*cost_model_throttled_transactions_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.cost_model_us,
*cost_model_us as u64
);
leader_slot_metrics
.timing_metrics
.execute_and_commit_timings
.accumulate(execute_and_commit_timings);
}
}
// Packet inflow/outflow/processing metrics
pub(crate) fn increment_total_new_valid_packets(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.total_new_valid_packets,
count
);
}
}
pub(crate) fn increment_newly_failed_sigverify_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.newly_failed_sigverify_count,
count
);
}
}
pub(crate) fn increment_exceeded_buffer_limit_dropped_packets_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.exceeded_buffer_limit_dropped_packets_count,
count
);
}
}
pub(crate) fn increment_newly_buffered_packets_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.newly_buffered_packets_count,
count
);
}
}
pub(crate) fn increment_retryable_packets_filtered_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.retryable_packets_filtered_count,
count
);
}
}
pub(crate) fn increment_failed_forwarded_packets_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.failed_forwarded_packets_count,
count
);
}
}
pub(crate) fn increment_successful_forwarded_packets_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.successful_forwarded_packets_count,
count
);
}
}
pub(crate) fn increment_packet_batch_forward_failure_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.packet_batch_forward_failure_count,
count
);
}
}
pub(crate) fn increment_cleared_from_buffer_after_forward_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.cleared_from_buffer_after_forward_count,
count
);
}
}
pub(crate) fn increment_end_of_slot_filtered_invalid_count(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.packet_count_metrics
.end_of_slot_filtered_invalid_count,
count
);
}
}
// Outermost banking thread's loop timing metrics
pub(crate) fn increment_process_buffered_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.outer_loop_timings
.process_buffered_packets_us,
us
);
}
}
pub(crate) fn increment_slot_metrics_check_slot_boundary_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.outer_loop_timings
.slot_metrics_check_slot_boundary_us,
us
);
}
}
pub(crate) fn increment_receive_and_buffer_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.outer_loop_timings
.receive_and_buffer_packets_us,
us
);
}
}
// Processing buffer timing metrics
pub(crate) fn increment_make_decision_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.make_decision_us,
us
);
}
}
pub(crate) fn increment_consume_buffered_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.consume_buffered_packets_us,
us
);
}
}
pub(crate) fn increment_forward_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.forward_us,
us
);
}
}
pub(crate) fn increment_forward_and_hold_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.forward_and_hold_us,
us
);
}
}
// Consuming buffered packets timing metrics
pub(crate) fn increment_end_of_slot_filtering_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.consume_buffered_packets_timings
.end_of_slot_filtering_us,
us
);
}
}
pub(crate) fn increment_consume_buffered_packets_poh_recorder_lock_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.consume_buffered_packets_timings
.poh_recorder_lock_us,
us
);
}
}
pub(crate) fn increment_process_packets_transactions_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.consume_buffered_packets_timings
.process_packets_transactions_us,
us
);
}
}
// Processing packets timing metrics
pub(crate) fn increment_transactions_from_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.transactions_from_packets_us,
us
);
}
}
pub(crate) fn increment_process_transactions_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.process_transactions_us,
us
);
}
}
pub(crate) fn increment_filter_retryable_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.filter_retryable_packets_us,
us
);
}
}
}
#[cfg(test)]
mod tests {
    use {
        super::*,
        solana_runtime::{bank::Bank, genesis_utils::create_genesis_config},
        solana_sdk::pubkey::Pubkey,
        std::sync::Arc,
    };

    // Shared fixture: two consecutive leader banks plus a fresh tracker, used
    // to exercise every slot-boundary transition.
    struct TestSlotBoundaryComponents {
        first_bank: Arc<Bank>,
        first_poh_recorder_bank: BankStart,
        next_bank: Arc<Bank>,
        next_poh_recorder_bank: BankStart,
        leader_slot_metrics_tracker: LeaderSlotMetricsTracker,
    }

    fn setup_test_slot_boundary_banks() -> TestSlotBoundaryComponents {
        let genesis = create_genesis_config(10);
        let first_bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config));
        let first_poh_recorder_bank = BankStart {
            working_bank: first_bank.clone(),
            bank_creation_time: Arc::new(Instant::now()),
        };
        // Create a child descended from the first bank
        let next_bank = Arc::new(Bank::new_from_parent(
            &first_bank,
            &Pubkey::new_unique(),
            first_bank.slot() + 1,
        ));
        let next_poh_recorder_bank = BankStart {
            working_bank: next_bank.clone(),
            bank_creation_time: Arc::new(Instant::now()),
        };
        let banking_stage_thread_id = 0;
        let leader_slot_metrics_tracker = LeaderSlotMetricsTracker::new(banking_stage_thread_id);
        TestSlotBoundaryComponents {
            first_bank,
            first_poh_recorder_bank,
            next_bank,
            next_poh_recorder_bank,
            leader_slot_metrics_tracker,
        }
    }

    #[test]
    pub fn test_update_on_leader_slot_boundary_not_leader_to_not_leader() {
        let TestSlotBoundaryComponents {
            mut leader_slot_metrics_tracker,
            ..
        } = setup_test_slot_boundary_banks();
        // Test that with no bank being tracked, and no new bank being tracked, nothing is reported
        assert!(leader_slot_metrics_tracker
            .update_on_leader_slot_boundary(&None)
            .is_none());
        assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
    }

    #[test]
    pub fn test_update_on_leader_slot_boundary_not_leader_to_leader() {
        let TestSlotBoundaryComponents {
            first_poh_recorder_bank,
            mut leader_slot_metrics_tracker,
            ..
        } = setup_test_slot_boundary_banks();
        // Test case where the thread has not detected a leader bank, and now sees a leader bank.
        // Metrics should not be reported because leader slot has not ended
        assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
        assert!(leader_slot_metrics_tracker
            .update_on_leader_slot_boundary(&Some(first_poh_recorder_bank))
            .is_none());
        assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_some());
    }

    #[test]
    pub fn test_update_on_leader_slot_boundary_leader_to_not_leader() {
        let TestSlotBoundaryComponents {
            first_bank,
            first_poh_recorder_bank,
            mut leader_slot_metrics_tracker,
            ..
        } = setup_test_slot_boundary_banks();
        // Test case where the thread has a leader bank, and now detects there's no more leader bank,
        // implying the slot has ended. Metrics should be reported for `first_bank.slot()`,
        // because that leader slot has just ended.
        assert!(leader_slot_metrics_tracker
            .update_on_leader_slot_boundary(&Some(first_poh_recorder_bank))
            .is_none());
        assert_eq!(
            leader_slot_metrics_tracker
                .update_on_leader_slot_boundary(&None)
                .unwrap(),
            first_bank.slot()
        );
        assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
        // A second `None` update must not re-report the already-ended slot
        assert!(leader_slot_metrics_tracker
            .update_on_leader_slot_boundary(&None)
            .is_none());
    }

    #[test]
    pub fn test_update_on_leader_slot_boundary_leader_to_leader_same_slot() {
        let TestSlotBoundaryComponents {
            first_bank,
            first_poh_recorder_bank,
            mut leader_slot_metrics_tracker,
            ..
        } = setup_test_slot_boundary_banks();
        // Test case where the thread has a leader bank, and now detects the same leader bank,
        // implying the slot is still running. Metrics should not be reported
        assert!(leader_slot_metrics_tracker
            .update_on_leader_slot_boundary(&Some(first_poh_recorder_bank.clone()))
            .is_none());
        assert!(leader_slot_metrics_tracker
            .update_on_leader_slot_boundary(&Some(first_poh_recorder_bank))
            .is_none());
        assert_eq!(
            leader_slot_metrics_tracker
                .update_on_leader_slot_boundary(&None)
                .unwrap(),
            first_bank.slot()
        );
        assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
    }

    #[test]
    pub fn test_update_on_leader_slot_boundary_leader_to_leader_bigger_slot() {
        let TestSlotBoundaryComponents {
            first_bank,
            first_poh_recorder_bank,
            next_bank,
            next_poh_recorder_bank,
            mut leader_slot_metrics_tracker,
        } = setup_test_slot_boundary_banks();
        // Test case where the thread has a leader bank, and now detects there's a new leader bank
        // for a bigger slot, implying the slot has ended. Metrics should be reported for the
        // smaller slot
        assert!(leader_slot_metrics_tracker
            .update_on_leader_slot_boundary(&Some(first_poh_recorder_bank))
            .is_none());
        assert_eq!(
            leader_slot_metrics_tracker
                .update_on_leader_slot_boundary(&Some(next_poh_recorder_bank))
                .unwrap(),
            first_bank.slot()
        );
        assert_eq!(
            leader_slot_metrics_tracker
                .update_on_leader_slot_boundary(&None)
                .unwrap(),
            next_bank.slot()
        );
        assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
    }

    #[test]
    pub fn test_update_on_leader_slot_boundary_leader_to_leader_smaller_slot() {
        let TestSlotBoundaryComponents {
            first_bank,
            first_poh_recorder_bank,
            next_bank,
            next_poh_recorder_bank,
            mut leader_slot_metrics_tracker,
        } = setup_test_slot_boundary_banks();
        // Test case where the thread has a leader bank, and now detects there's a new leader bank
        // for a smaller slot, implying the slot has ended. Metrics should be reported for the
        // bigger slot
        assert!(leader_slot_metrics_tracker
            .update_on_leader_slot_boundary(&Some(next_poh_recorder_bank))
            .is_none());
        assert_eq!(
            leader_slot_metrics_tracker
                .update_on_leader_slot_boundary(&Some(first_poh_recorder_bank))
                .unwrap(),
            next_bank.slot()
        );
        assert_eq!(
            leader_slot_metrics_tracker
                .update_on_leader_slot_boundary(&None)
                .unwrap(),
            first_bank.slot()
        );
        assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none());
    }
}

View File

@@ -0,0 +1,286 @@
use {
solana_program_runtime::timings::ExecuteTimings,
solana_sdk::{clock::Slot, saturating_add_assign},
std::time::Instant,
};
/// Wallclock timings (in microseconds) of the execute-and-commit path taken
/// while this validator is leader, accumulated across batches within a slot.
#[derive(Default, Debug)]
pub struct LeaderExecuteAndCommitTimings {
    pub collect_balances_us: u64,
    pub load_execute_us: u64,
    pub freeze_lock_us: u64,
    pub record_us: u64,
    pub commit_us: u64,
    pub find_and_send_votes_us: u64,
    // breakdown of the PoH record step
    pub record_transactions_timings: RecordTransactionsTimings,
    // per-program execution timing details
    pub execute_timings: ExecuteTimings,
}
impl LeaderExecuteAndCommitTimings {
    /// Folds `other`'s timings into `self`, saturating each counter on overflow.
    pub fn accumulate(&mut self, other: &LeaderExecuteAndCommitTimings) {
        saturating_add_assign!(self.collect_balances_us, other.collect_balances_us);
        saturating_add_assign!(self.load_execute_us, other.load_execute_us);
        saturating_add_assign!(self.freeze_lock_us, other.freeze_lock_us);
        saturating_add_assign!(self.record_us, other.record_us);
        // Accumulate `commit_us` exactly once: the original body contained a
        // second, duplicated `saturating_add_assign!(self.commit_us, ...)`
        // which double-counted commit time in reported metrics.
        saturating_add_assign!(self.commit_us, other.commit_us);
        saturating_add_assign!(self.find_and_send_votes_us, other.find_and_send_votes_us);
        self.record_transactions_timings
            .accumulate(&other.record_transactions_timings);
        self.execute_timings.accumulate(&other.execute_timings);
    }

    /// Emits the accumulated execute/commit and record timings as two
    /// datapoint families for slot `slot` on BankingStage thread `id`.
    pub fn report(&self, id: u32, slot: Slot) {
        datapoint_info!(
            "banking_stage-leader_slot_execute_and_commit_timings",
            ("id", id as i64, i64),
            ("slot", slot as i64, i64),
            ("collect_balances_us", self.collect_balances_us as i64, i64),
            ("load_execute_us", self.load_execute_us as i64, i64),
            ("freeze_lock_us", self.freeze_lock_us as i64, i64),
            ("record_us", self.record_us as i64, i64),
            ("commit_us", self.commit_us as i64, i64),
            (
                "find_and_send_votes_us",
                self.find_and_send_votes_us as i64,
                i64
            ),
        );

        datapoint_info!(
            "banking_stage-leader_slot_record_timings",
            ("id", id as i64, i64),
            ("slot", slot as i64, i64),
            (
                "execution_results_to_transactions_us",
                self.record_transactions_timings
                    .execution_results_to_transactions_us as i64,
                i64
            ),
            (
                "hash_us",
                self.record_transactions_timings.hash_us as i64,
                i64
            ),
            (
                "poh_record_us",
                self.record_transactions_timings.poh_record_us as i64,
                i64
            ),
        );
    }
}
/// Microsecond timings of the sub-steps of recording transactions into PoH.
#[derive(Default, Debug)]
pub struct RecordTransactionsTimings {
    // time to convert execution results into recordable transactions
    pub execution_results_to_transactions_us: u64,
    // time spent hashing the transaction batch
    pub hash_us: u64,
    // time spent in the PoH recorder itself
    pub poh_record_us: u64,
}
impl RecordTransactionsTimings {
    /// Folds `other`'s record timings into `self`, saturating on overflow.
    pub fn accumulate(&mut self, other: &RecordTransactionsTimings) {
        saturating_add_assign!(self.poh_record_us, other.poh_record_us);
        saturating_add_assign!(self.hash_us, other.hash_us);
        saturating_add_assign!(
            self.execution_results_to_transactions_us,
            other.execution_results_to_transactions_us
        );
    }
}
/// Metrics capturing wallclock time spent in various parts of BankingStage during this
/// validator's leader slot
#[derive(Debug)]
pub(crate) struct LeaderSlotTimingMetrics {
    pub outer_loop_timings: OuterLoopTimings,
    pub process_buffered_packets_timings: ProcessBufferedPacketsTimings,
    pub consume_buffered_packets_timings: ConsumeBufferedPacketsTimings,
    pub process_packets_timings: ProcessPacketsTimings,
    pub execute_and_commit_timings: LeaderExecuteAndCommitTimings,
}
impl LeaderSlotTimingMetrics {
    /// Builds a zeroed timing set; the outer-loop timings capture the delay
    /// between `bank_creation_time` and this thread detecting the bank.
    pub(crate) fn new(bank_creation_time: &Instant) -> Self {
        Self {
            execute_and_commit_timings: LeaderExecuteAndCommitTimings::default(),
            process_packets_timings: ProcessPacketsTimings::default(),
            consume_buffered_packets_timings: ConsumeBufferedPacketsTimings::default(),
            process_buffered_packets_timings: ProcessBufferedPacketsTimings::default(),
            outer_loop_timings: OuterLoopTimings::new(bank_creation_time),
        }
    }

    /// Emits every timing datapoint family for slot `slot` of thread `id`,
    /// outermost loop first, innermost phases last.
    pub(crate) fn report(&self, id: u32, slot: Slot) {
        self.outer_loop_timings.report(id, slot);
        self.process_buffered_packets_timings.report(id, slot);
        self.consume_buffered_packets_timings.report(id, slot);
        self.process_packets_timings.report(id, slot);
        self.execute_and_commit_timings.report(id, slot);
    }
}
/// Timings of the outermost BankingStage thread loop for one leader slot.
#[derive(Debug)]
pub(crate) struct OuterLoopTimings {
    // instant at which this thread first detected the leader bank
    pub bank_detected_time: Instant,
    // Delay from when the bank was created to when this thread detected it
    pub bank_detected_delay_us: u64,
    // Time spent processing buffered packets
    pub process_buffered_packets_us: u64,
    // Time spent checking for slot boundary and reporting leader slot metrics
    pub slot_metrics_check_slot_boundary_us: u64,
    // Time spent processing new incoming packets to the banking thread
    pub receive_and_buffer_packets_us: u64,
}
impl OuterLoopTimings {
    /// Stamps the detection time now and records how long the bank existed
    /// before this thread noticed it; all loop counters start at zero.
    fn new(bank_creation_time: &Instant) -> Self {
        let bank_detected_time = Instant::now();
        let bank_detected_delay_us = bank_creation_time.elapsed().as_micros() as u64;
        Self {
            bank_detected_time,
            bank_detected_delay_us,
            process_buffered_packets_us: 0,
            slot_metrics_check_slot_boundary_us: 0,
            receive_and_buffer_packets_us: 0,
        }
    }

    /// Emits the outer-loop timing datapoints for slot `slot` of thread `id`.
    fn report(&self, id: u32, slot: Slot) {
        // Wallclock span of the slot as seen from this thread, measured both
        // from detection and from bank creation.
        let bank_detected_to_now_us = self.bank_detected_time.elapsed().as_micros() as u64;
        let bank_creation_to_now_us = bank_detected_to_now_us + self.bank_detected_delay_us;
        datapoint_info!(
            "banking_stage-leader_slot_loop_timings",
            ("id", id as i64, i64),
            ("slot", slot as i64, i64),
            (
                "bank_detected_to_slot_end_detected_us",
                bank_detected_to_now_us,
                i64
            ),
            (
                "bank_creation_to_slot_end_detected_us",
                bank_creation_to_now_us,
                i64
            ),
            ("bank_detected_delay_us", self.bank_detected_delay_us, i64),
            (
                "process_buffered_packets_us",
                self.process_buffered_packets_us,
                i64
            ),
            (
                "slot_metrics_check_slot_boundary_us",
                self.slot_metrics_check_slot_boundary_us,
                i64
            ),
            (
                "receive_and_buffer_packets_us",
                self.receive_and_buffer_packets_us,
                i64
            ),
        );
    }
}
/// Timings of the process-buffered-packets phase of the banking loop.
#[derive(Debug, Default)]
pub(crate) struct ProcessBufferedPacketsTimings {
    // time deciding whether to consume, forward, or hold
    pub make_decision_us: u64,
    // time consuming buffered packets while leader
    pub consume_buffered_packets_us: u64,
    // time forwarding packets when not leader
    pub forward_us: u64,
    // time forwarding packets while also retaining them locally
    pub forward_and_hold_us: u64,
}
impl ProcessBufferedPacketsTimings {
    /// Emits the buffered-packet-processing timing datapoints for slot `slot`
    /// of thread `id`.
    fn report(&self, id: u32, slot: Slot) {
        let &Self {
            make_decision_us,
            consume_buffered_packets_us,
            forward_us,
            forward_and_hold_us,
        } = self;
        datapoint_info!(
            "banking_stage-leader_slot_process_buffered_packets_timings",
            ("id", id as i64, i64),
            ("slot", slot as i64, i64),
            ("make_decision_us", make_decision_us as i64, i64),
            (
                "consume_buffered_packets_us",
                consume_buffered_packets_us as i64,
                i64
            ),
            ("forward_us", forward_us as i64, i64),
            ("forward_and_hold_us", forward_and_hold_us as i64, i64),
        );
    }
}
/// Timings of the consume-buffered-packets phase of the banking loop.
#[derive(Debug, Default)]
pub(crate) struct ConsumeBufferedPacketsTimings {
    // Time spent grabbing poh recorder lock
    pub poh_recorder_lock_us: u64,
    // Time spent filtering invalid packets after leader slot has ended
    pub end_of_slot_filtering_us: u64,
    // Time spent processing transactions
    pub process_packets_transactions_us: u64,
}
impl ConsumeBufferedPacketsTimings {
    /// Emits the consume-buffered-packets timing datapoints for slot `slot`
    /// of thread `id`.
    fn report(&self, id: u32, slot: Slot) {
        datapoint_info!(
            "banking_stage-leader_slot_consume_buffered_packets_timings",
            ("id", id as i64, i64),
            ("slot", slot as i64, i64),
            (
                "poh_recorder_lock_us",
                self.poh_recorder_lock_us as i64,
                i64
            ),
            (
                "end_of_slot_filtering_us",
                self.end_of_slot_filtering_us as i64,
                i64
            ),
            (
                "process_packets_transactions_us",
                self.process_packets_transactions_us as i64,
                i64
            ),
        );
    }
}
/// Timings of the innermost packet-processing phase of the banking loop.
#[derive(Debug, Default)]
pub(crate) struct ProcessPacketsTimings {
    // Time spent converting packets to transactions
    pub transactions_from_packets_us: u64,
    // Time spent processing transactions
    pub process_transactions_us: u64,
    // Time spent filtering retryable packets that were returned after transaction
    // processing
    pub filter_retryable_packets_us: u64,
    // Time spent running the cost model in processing transactions before executing
    // transactions
    pub cost_model_us: u64,
}
impl ProcessPacketsTimings {
    /// Emits the packet-processing timing datapoints for slot `slot` of
    /// thread `id`.
    // NOTE(review): unlike sibling `report` impls, the u64 fields here are
    // passed without an explicit `as i64` cast; the datapoint macro appears to
    // accept this, but confirm the macro performs the conversion itself.
    fn report(&self, id: u32, slot: Slot) {
        datapoint_info!(
            "banking_stage-leader_slot_process_packets_timings",
            ("id", id as i64, i64),
            ("slot", slot as i64, i64),
            (
                "transactions_from_packets_us",
                self.transactions_from_packets_us,
                i64
            ),
            ("process_transactions_us", self.process_transactions_us, i64),
            (
                "filter_retryable_packets_us",
                self.filter_retryable_packets_us,
                i64
            ),
            ("cost_model_us", self.cost_model_us, i64),
        );
    }
}

View File

@@ -28,6 +28,8 @@ pub mod fork_choice;
pub mod gen_keys;
pub mod heaviest_subtree_fork_choice;
pub mod latest_validator_votes_for_frozen_banks;
pub mod leader_slot_banking_stage_metrics;
pub mod leader_slot_banking_stage_timing_metrics;
pub mod ledger_cleanup_service;
pub mod optimistic_confirmation_verifier;
pub mod outstanding_requests;

View File

@@ -104,8 +104,9 @@ impl QosService {
txs_costs
}
// Given a list of transactions and their costs, this function returns a corresponding
// list of Results that indicate if a transaction is selected to be included in the current block,
/// Given a list of transactions and their costs, this function returns a corresponding
/// list of Results that indicate if a transaction is selected to be included in the current block,
/// and a count of the number of transactions that would fit in the block
pub fn select_transactions_per_cost<'a>(
&self,
transactions: impl Iterator<Item = &'a SanitizedTransaction>,

View File

@@ -26,9 +26,9 @@ use {
voting_service::VoteOp,
window_service::DuplicateSlotReceiver,
},
solana_accountsdb_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
solana_client::rpc_response::SlotUpdate,
solana_entry::entry::VerifyRecyclers,
solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
solana_gossip::cluster_info::ClusterInfo,
solana_ledger::{
block_error::BlockError,
@@ -138,6 +138,9 @@ pub struct ReplayStageConfig {
pub wait_for_vote_to_start_leader: bool,
pub ancestor_hashes_replay_update_sender: AncestorHashesReplayUpdateSender,
pub tower_storage: Arc<dyn TowerStorage>,
// Stops voting until this slot has been reached. Should be used to avoid
// duplicate voting which can lead to slashing.
pub wait_to_vote_slot: Option<Slot>,
}
#[derive(Default)]
@@ -374,6 +377,7 @@ impl ReplayStage {
wait_for_vote_to_start_leader,
ancestor_hashes_replay_update_sender,
tower_storage,
wait_to_vote_slot,
} = config;
trace!("replay stage");
@@ -595,6 +599,7 @@ impl ReplayStage {
has_new_vote_been_rooted, &mut
last_vote_refresh_time,
&voting_sender,
wait_to_vote_slot,
);
}
}
@@ -678,6 +683,7 @@ impl ReplayStage {
&voting_sender,
&mut epoch_slots_frozen_slots,
&drop_bank_sender,
wait_to_vote_slot,
);
};
voting_time.stop();
@@ -720,11 +726,16 @@ impl ReplayStage {
restored_tower.adjust_lockouts_after_replay(root_bank.slot(), &slot_history)
}).
unwrap_or_else(|err| {
// It's a fatal error if the tower is not present. This is
// necessary to prevent the validator from violating
// lockouts for its new identity
error!("Failed to load tower for {}: {}", my_pubkey, err);
std::process::exit(1);
if err.is_file_missing() {
Tower::new_from_bankforks(
&bank_forks.read().unwrap(),
&my_pubkey,
&vote_account,
)
} else {
error!("Failed to load tower for {}: {}", my_pubkey, err);
std::process::exit(1);
}
});
// Ensure the validator can land votes with the new identity before
@@ -1651,6 +1662,7 @@ impl ReplayStage {
voting_sender: &Sender<VoteOp>,
epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots,
bank_drop_sender: &Sender<Vec<Arc<Bank>>>,
wait_to_vote_slot: Option<Slot>,
) {
if bank.is_empty() {
inc_new_counter_info!("replay_stage-voted_empty_bank", 1);
@@ -1739,6 +1751,7 @@ impl ReplayStage {
*has_new_vote_been_rooted,
replay_timing,
voting_sender,
wait_to_vote_slot,
);
}
@@ -1751,10 +1764,16 @@ impl ReplayStage {
switch_fork_decision: &SwitchForkDecision,
vote_signatures: &mut Vec<Signature>,
has_new_vote_been_rooted: bool,
wait_to_vote_slot: Option<Slot>,
) -> Option<Transaction> {
if authorized_voter_keypairs.is_empty() {
return None;
}
if let Some(slot) = wait_to_vote_slot {
if bank.slot() < slot {
return None;
}
}
let vote_account = match bank.get_vote_account(vote_account_pubkey) {
None => {
warn!(
@@ -1849,6 +1868,7 @@ impl ReplayStage {
has_new_vote_been_rooted: bool,
last_vote_refresh_time: &mut LastVoteRefreshTime,
voting_sender: &Sender<VoteOp>,
wait_to_vote_slot: Option<Slot>,
) {
let last_voted_slot = tower.last_voted_slot();
if last_voted_slot.is_none() {
@@ -1891,6 +1911,7 @@ impl ReplayStage {
&SwitchForkDecision::SameFork,
vote_signatures,
has_new_vote_been_rooted,
wait_to_vote_slot,
);
if let Some(vote_tx) = vote_tx {
@@ -1928,6 +1949,7 @@ impl ReplayStage {
has_new_vote_been_rooted: bool,
replay_timing: &mut ReplayTiming,
voting_sender: &Sender<VoteOp>,
wait_to_vote_slot: Option<Slot>,
) {
let mut generate_time = Measure::start("generate_vote");
let vote_tx = Self::generate_vote_tx(
@@ -1939,6 +1961,7 @@ impl ReplayStage {
switch_fork_decision,
vote_signatures,
has_new_vote_been_rooted,
wait_to_vote_slot,
);
generate_time.stop();
replay_timing.generate_vote_us += generate_time.as_us();
@@ -3003,7 +3026,7 @@ pub mod tests {
transaction::TransactionError,
},
solana_streamer::socket::SocketAddrSpace,
solana_transaction_status::TransactionWithStatusMeta,
solana_transaction_status::TransactionWithMetadata,
solana_vote_program::{
vote_state::{VoteState, VoteStateVersions},
vote_transaction,
@@ -3853,36 +3876,35 @@ pub mod tests {
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
let slot = bank1.slot();
let signatures = create_test_transactions_and_populate_blockstore(
let mut test_signatures_iter = create_test_transactions_and_populate_blockstore(
vec![&mint_keypair, &keypair1, &keypair2, &keypair3],
bank0.slot(),
bank1,
blockstore.clone(),
Arc::new(AtomicU64::default()),
);
)
.into_iter();
let confirmed_block = blockstore.get_rooted_block(slot, false).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for TransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if transaction.signatures[0] == signatures[0] {
let meta = meta.unwrap();
assert_eq!(meta.status, Ok(()));
} else if transaction.signatures[0] == signatures[1] {
let meta = meta.unwrap();
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::Custom(1)
))
);
} else {
assert_eq!(meta, None);
}
}
let actual_tx_results: Vec<_> = confirmed_block
.transactions
.into_iter()
.map(|TransactionWithMetadata { transaction, meta }| {
(transaction.signatures[0], meta.status)
})
.collect();
let expected_tx_results = vec![
(test_signatures_iter.next().unwrap(), Ok(())),
(
test_signatures_iter.next().unwrap(),
Err(TransactionError::InstructionError(
0,
InstructionError::Custom(1),
)),
),
];
assert_eq!(actual_tx_results, expected_tx_results);
assert!(test_signatures_iter.next().is_none());
}
Blockstore::destroy(&ledger_path).unwrap();
}
@@ -5727,6 +5749,7 @@ pub mod tests {
has_new_vote_been_rooted,
&mut ReplayTiming::default(),
&voting_sender,
None,
);
let vote_info = voting_receiver
.recv_timeout(Duration::from_secs(1))
@@ -5766,6 +5789,7 @@ pub mod tests {
has_new_vote_been_rooted,
&mut last_vote_refresh_time,
&voting_sender,
None,
);
// No new votes have been submitted to gossip
@@ -5791,6 +5815,7 @@ pub mod tests {
has_new_vote_been_rooted,
&mut ReplayTiming::default(),
&voting_sender,
None,
);
let vote_info = voting_receiver
.recv_timeout(Duration::from_secs(1))
@@ -5822,6 +5847,7 @@ pub mod tests {
has_new_vote_been_rooted,
&mut last_vote_refresh_time,
&voting_sender,
None,
);
// No new votes have been submitted to gossip
@@ -5859,6 +5885,7 @@ pub mod tests {
has_new_vote_been_rooted,
&mut last_vote_refresh_time,
&voting_sender,
None,
);
let vote_info = voting_receiver
.recv_timeout(Duration::from_secs(1))
@@ -5926,6 +5953,7 @@ pub mod tests {
has_new_vote_been_rooted,
&mut last_vote_refresh_time,
&voting_sender,
None,
);
let votes = cluster_info.get_votes(&mut cursor);

View File

@@ -12,7 +12,7 @@ use {
itertools::Itertools,
solana_measure::measure::Measure,
solana_perf::packet::PacketBatch,
solana_perf::sigverify::Deduper,
solana_perf::sigverify::{count_valid_packets, shrink_batches, Deduper},
solana_sdk::timing,
solana_streamer::streamer::{self, PacketBatchReceiver, StreamerError},
std::{
@@ -59,6 +59,9 @@ struct SigVerifierStats {
total_packets: usize,
total_dedup: usize,
total_excess_fail: usize,
total_shrink_time: usize,
total_shrinks: usize,
total_valid_packets: usize,
}
impl SigVerifierStats {
@@ -167,6 +170,9 @@ impl SigVerifierStats {
("total_packets", self.total_packets, i64),
("total_dedup", self.total_dedup, i64),
("total_excess_fail", self.total_excess_fail, i64),
("total_shrink_time", self.total_shrink_time, i64),
("total_shrinks", self.total_shrinks, i64),
("total_valid_packets", self.total_valid_packets, i64),
);
}
}
@@ -242,7 +248,20 @@ impl SigVerifyStage {
discard_time.stop();
let mut verify_batch_time = Measure::start("sigverify_batch_time");
let batches = verifier.verify_batches(batches);
let mut batches = verifier.verify_batches(batches);
verify_batch_time.stop();
let mut shrink_time = Measure::start("sigverify_shrink_time");
let num_valid_packets = count_valid_packets(&batches);
let start_len = batches.len();
const MAX_EMPTY_BATCH_RATIO: usize = 4;
if num_packets > num_valid_packets.saturating_mul(MAX_EMPTY_BATCH_RATIO) {
let valid = shrink_batches(&mut batches);
batches.truncate(valid);
}
let total_shrinks = start_len.saturating_sub(batches.len());
shrink_time.stop();
sendr.send(batches)?;
verify_batch_time.stop();
@@ -276,7 +295,10 @@ impl SigVerifyStage {
stats.total_batches += batches_len;
stats.total_packets += num_packets;
stats.total_dedup += dedup_fail;
stats.total_valid_packets += num_valid_packets;
stats.total_excess_fail += excess_fail;
stats.total_shrink_time += shrink_time.as_us() as usize;
stats.total_shrinks += total_shrinks;
Ok(())
}
@@ -342,6 +364,12 @@ impl SigVerifyStage {
#[cfg(test)]
mod tests {
use crate::sigverify::TransactionSigVerifier;
use crate::sigverify_stage::timing::duration_as_ms;
use crossbeam_channel::unbounded;
use solana_perf::packet::to_packet_batches;
use solana_perf::test_tx::test_tx;
use std::sync::mpsc::channel;
use {super::*, solana_perf::packet::Packet};
fn count_non_discard(packet_batches: &[PacketBatch]) -> usize {
@@ -370,4 +398,58 @@ mod tests {
assert!(!batches[0].packets[0].meta.discard());
assert!(!batches[0].packets[3].meta.discard());
}
/// Builds a fixed test workload: 4096 test transactions packed into
/// `PacketBatch`es of 1024 packets each.
///
/// When `use_same_tx` is true, every packet carries a clone of one single
/// transaction (maximal duplicates, exercising the dedup path downstream);
/// otherwise each packet gets its own freshly generated transaction.
fn gen_batches(use_same_tx: bool) -> Vec<PacketBatch> {
    let len = 4096;
    let chunk_size = 1024;
    if use_same_tx {
        // One transaction repeated `len` times.
        let tx = test_tx();
        to_packet_batches(&vec![tx; len], chunk_size)
    } else {
        // `len` distinct transactions.
        let txs: Vec<_> = (0..len).map(|_| test_tx()).collect();
        to_packet_batches(&txs, chunk_size)
    }
}
#[test]
fn test_sigverify_stage() {
    solana_logger::setup();
    trace!("start");
    // packet_s/packet_r feed raw packet batches into the stage;
    // verified_s/verified_r receive batches after signature verification.
    let (packet_s, packet_r) = channel();
    let (verified_s, verified_r) = unbounded();
    let verifier = TransactionSigVerifier::default();
    let stage = SigVerifyStage::new(packet_r, verified_s, verifier);
    // Use duplicate transactions so the stage's dedup/discard path is exercised.
    let use_same_tx = true;
    let now = Instant::now();
    let mut batches = gen_batches(use_same_tx);
    trace!(
        "starting... generation took: {} ms batches: {}",
        duration_as_ms(&now.elapsed()),
        batches.len()
    );
    // Push every generated batch into the stage, counting packets sent.
    let mut sent_len = 0;
    for _ in 0..batches.len() {
        if let Some(batch) = batches.pop() {
            sent_len += batch.packets.len();
            packet_s.send(batch).unwrap();
        }
    }
    let mut received = 0;
    trace!("sent: {}", sent_len);
    // Drain the verified output. With `use_same_tx`, dedup may drop most
    // packets, so break after the first delivery; otherwise wait until every
    // sent packet has come back.
    loop {
        if let Ok(mut verifieds) = verified_r.recv_timeout(Duration::from_millis(10)) {
            while let Some(v) = verifieds.pop() {
                received += v.packets.len();
                batches.push(v);
            }
            if use_same_tx || received >= sent_len {
                break;
            }
        }
    }
    trace!("received: {}", received);
    // Closing the input channel lets the stage's worker thread observe
    // disconnection and exit, so join() can complete.
    drop(packet_s);
    stage.join().unwrap();
}
}

View File

@@ -26,7 +26,7 @@ use {
voting_service::VotingService,
},
crossbeam_channel::unbounded,
solana_accountsdb_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
solana_gossip::cluster_info::ClusterInfo,
solana_ledger::{
blockstore::Blockstore, blockstore_processor::TransactionStatusSender,
@@ -149,6 +149,7 @@ impl Tvu {
accounts_package_channel: (AccountsPackageSender, AccountsPackageReceiver),
last_full_snapshot_slot: Option<Slot>,
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
wait_to_vote_slot: Option<Slot>,
) -> Self {
let Sockets {
repair: repair_socket,
@@ -297,6 +298,7 @@ impl Tvu {
wait_for_vote_to_start_leader: tvu_config.wait_for_vote_to_start_leader,
ancestor_hashes_replay_update_sender,
tower_storage: tower_storage.clone(),
wait_to_vote_slot,
};
let (voting_sender, voting_receiver) = channel();
@@ -309,8 +311,12 @@ impl Tvu {
);
let (cost_update_sender, cost_update_receiver) = channel();
let cost_update_service =
CostUpdateService::new(blockstore.clone(), cost_model.clone(), cost_update_receiver);
let cost_update_service = CostUpdateService::new(
exit.clone(),
blockstore.clone(),
cost_model.clone(),
cost_update_receiver,
);
let (drop_bank_sender, drop_bank_receiver) = channel();
@@ -517,6 +523,7 @@ pub mod tests {
accounts_package_channel,
None,
None,
None,
);
exit.store(true, Ordering::Relaxed);
tvu.join().unwrap();

View File

@@ -21,8 +21,8 @@ use {
},
crossbeam_channel::{bounded, unbounded},
rand::{thread_rng, Rng},
solana_accountsdb_plugin_manager::accountsdb_plugin_service::AccountsDbPluginService,
solana_entry::poh::compute_hash_time_ns,
solana_geyser_plugin_manager::geyser_plugin_service::GeyserPluginService,
solana_gossip::{
cluster_info::{
ClusterInfo, Node, DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS,
@@ -119,7 +119,7 @@ pub struct ValidatorConfig {
pub account_shrink_paths: Option<Vec<PathBuf>>,
pub rpc_config: JsonRpcConfig,
pub accountsdb_repl_service_config: Option<AccountsDbReplServiceConfig>,
pub accountsdb_plugin_config_files: Option<Vec<PathBuf>>,
pub geyser_plugin_config_files: Option<Vec<PathBuf>>,
pub rpc_addrs: Option<(SocketAddr, SocketAddr)>, // (JsonRpc, JsonRpcPubSub)
pub pubsub_config: PubSubConfig,
pub snapshot_config: Option<SnapshotConfig>,
@@ -164,6 +164,7 @@ pub struct ValidatorConfig {
pub validator_exit: Arc<RwLock<Exit>>,
pub no_wait_for_vote_to_start_leader: bool,
pub accounts_shrink_ratio: AccountShrinkThreshold,
pub wait_to_vote_slot: Option<Slot>,
}
impl Default for ValidatorConfig {
@@ -179,7 +180,7 @@ impl Default for ValidatorConfig {
account_shrink_paths: None,
rpc_config: JsonRpcConfig::default(),
accountsdb_repl_service_config: None,
accountsdb_plugin_config_files: None,
geyser_plugin_config_files: None,
rpc_addrs: None,
pubsub_config: PubSubConfig::default(),
snapshot_config: None,
@@ -223,6 +224,7 @@ impl Default for ValidatorConfig {
no_wait_for_vote_to_start_leader: true,
accounts_shrink_ratio: AccountShrinkThreshold::default(),
accounts_db_config: None,
wait_to_vote_slot: None,
}
}
}
@@ -294,8 +296,9 @@ pub struct Validator {
tvu: Tvu,
ip_echo_server: Option<solana_net_utils::IpEchoServer>,
pub cluster_info: Arc<ClusterInfo>,
pub bank_forks: Arc<RwLock<BankForks>>,
accountsdb_repl_service: Option<AccountsDbReplService>,
accountsdb_plugin_service: Option<AccountsDbPluginService>,
geyser_plugin_service: Option<GeyserPluginService>,
}
// in the distant future, get rid of ::new()/exit() and use Result properly...
@@ -334,18 +337,16 @@ impl Validator {
let mut bank_notification_senders = Vec::new();
let accountsdb_plugin_service =
if let Some(accountsdb_plugin_config_files) = &config.accountsdb_plugin_config_files {
let geyser_plugin_service =
if let Some(geyser_plugin_config_files) = &config.geyser_plugin_config_files {
let (confirmed_bank_sender, confirmed_bank_receiver) = unbounded();
bank_notification_senders.push(confirmed_bank_sender);
let result = AccountsDbPluginService::new(
confirmed_bank_receiver,
accountsdb_plugin_config_files,
);
let result =
GeyserPluginService::new(confirmed_bank_receiver, geyser_plugin_config_files);
match result {
Ok(accountsdb_plugin_service) => Some(accountsdb_plugin_service),
Ok(geyser_plugin_service) => Some(geyser_plugin_service),
Err(err) => {
error!("Failed to load the AccountsDb plugin: {:?}", err);
error!("Failed to load the Geyser plugin: {:?}", err);
abort();
}
}
@@ -419,29 +420,20 @@ impl Validator {
let accounts_package_channel = channel();
let accounts_update_notifier =
accountsdb_plugin_service
.as_ref()
.and_then(|accountsdb_plugin_service| {
accountsdb_plugin_service.get_accounts_update_notifier()
});
let accounts_update_notifier = geyser_plugin_service
.as_ref()
.and_then(|geyser_plugin_service| geyser_plugin_service.get_accounts_update_notifier());
let transaction_notifier =
accountsdb_plugin_service
.as_ref()
.and_then(|accountsdb_plugin_service| {
accountsdb_plugin_service.get_transaction_notifier()
});
let transaction_notifier = geyser_plugin_service
.as_ref()
.and_then(|geyser_plugin_service| geyser_plugin_service.get_transaction_notifier());
let block_metadata_notifier =
accountsdb_plugin_service
.as_ref()
.and_then(|accountsdb_plugin_service| {
accountsdb_plugin_service.get_block_metadata_notifier()
});
let block_metadata_notifier = geyser_plugin_service
.as_ref()
.and_then(|geyser_plugin_service| geyser_plugin_service.get_block_metadata_notifier());
info!(
"AccountsDb plugin: accounts_update_notifier: {} transaction_notifier: {}",
"Geyser plugin: accounts_update_notifier: {} transaction_notifier: {}",
accounts_update_notifier.is_some(),
transaction_notifier.is_some()
);
@@ -802,7 +794,8 @@ impl Validator {
let vote_tracker = Arc::<VoteTracker>::default();
let mut cost_model = CostModel::default();
cost_model.initialize_cost_table(&blockstore.read_program_costs().unwrap());
// initialize cost model with built-in instruction costs only
cost_model.initialize_cost_table(&[]);
let cost_model = Arc::new(RwLock::new(cost_model));
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
@@ -892,6 +885,7 @@ impl Validator {
accounts_package_channel,
last_full_snapshot_slot,
block_metadata_notifier,
config.wait_to_vote_slot,
);
let tpu = Tpu::new(
@@ -911,7 +905,7 @@ impl Validator {
&exit,
node.info.shred_version,
vote_tracker,
bank_forks,
bank_forks.clone(),
verified_vote_sender,
gossip_verified_vote_hash_sender,
replay_vote_receiver,
@@ -947,8 +941,9 @@ impl Validator {
ip_echo_server,
validator_exit: config.validator_exit.clone(),
cluster_info,
bank_forks,
accountsdb_repl_service,
accountsdb_plugin_service,
geyser_plugin_service,
}
}
@@ -988,6 +983,7 @@ impl Validator {
}
pub fn join(self) {
drop(self.bank_forks);
drop(self.cluster_info);
self.poh_service.join().expect("poh_service");
@@ -1066,10 +1062,8 @@ impl Validator {
.expect("accountsdb_repl_service");
}
if let Some(accountsdb_plugin_service) = self.accountsdb_plugin_service {
accountsdb_plugin_service
.join()
.expect("accountsdb_plugin_service");
if let Some(geyser_plugin_service) = self.geyser_plugin_service {
geyser_plugin_service.join().expect("geyser_plugin_service");
}
}
}

View File

@@ -8,11 +8,17 @@ set -e
cd "$(dirname "$0")"
output_dir=static/img
svgbob_cli="$(command -v svgbob_cli || true)"
if [[ -z "$svgbob_cli" ]]; then
svgbob_cli="$(command -v svgbob || true)"
[[ -n "$svgbob_cli" ]] || ( echo "svgbob_cli binary not found" && exit 1 )
fi
mkdir -p "$output_dir"
while read -r bob_file; do
out_file=$(basename "${bob_file%.*}".svg)
svgbob "$bob_file" --output "$output_dir/$out_file"
"$svgbob_cli" "$bob_file" --output "$output_dir/$out_file"
done < <(find art/*.bob)
while read -r msc_file; do

View File

@@ -3,17 +3,6 @@ module.exports = {
About: ["introduction", "terminology", "history"],
Wallets: [
"wallet-guide",
"wallet-guide/apps",
{
type: "category",
label: "Web Wallets",
items: ["wallet-guide/web-wallets", "wallet-guide/solflare"],
},
{
type: "category",
label: "Hardware Wallets",
items: ["wallet-guide/ledger-live"],
},
{
type: "category",
label: "Command-line Wallets",
@@ -91,7 +80,7 @@ module.exports = {
},
"developing/test-validator",
"developing/backwards-compatibility",
"developing/plugins/accountsdb_plugin"
"developing/plugins/geyser-plugins"
],
Integrating: ["integrations/exchange"],
Validating: [

View File

@@ -36,7 +36,7 @@ public RPC endpoints currently available and recommended for each public cluster
## Mainnet Beta
#### Endpoints
#### Endpoints*
- `https://api.mainnet-beta.solana.com` - Solana-hosted api node cluster, backed by a load balancer; rate-limited
- `https://solana-api.projectserum.com` - Project Serum-hosted api node
@@ -48,3 +48,17 @@ public RPC endpoints currently available and recommended for each public cluster
- Maximum concurrent connections per IP: 40
- Maximum connection rate per 10 seconds per IP: 40
- Maximum amount of data per 30 second: 100 MB
*The public RPC endpoints are not intended for production applications. Please
use dedicated/private RPC servers when you launch your application, drop NFTs,
etc. The public services are subject to abuse and rate limits may change
without prior notice. Likewise, high-traffic websites may be blocked without
prior notice.
## Common HTTP Error Codes
- 403 -- Your IP address or website has been blocked. It is time to run your own RPC server(s) or find a private service.
- 429 -- Your IP address is exceeding the rate limits. Slow down! Use the
[Retry-After](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After)
HTTP response header to determine how long to wait before making another
request.

View File

@@ -97,8 +97,9 @@ $ solana-validator \
--identity validator-keypair.json \
--vote-account vote-account-keypair.json \
--known-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \
--known-validator 7XSY3MrYnK8vq693Rju17bbPkCN3Z7KvvfvJx4kdrsSY \
--known-validator dDzy5SR3AXdYWVqbDEkVFdvSPCtS9ihF5kJkHCtXoFs \
--known-validator Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN \
--known-validator eoKpUABi59aT4rR9HGS3LcMecfut9x7zJyodWWP43YQ \
--known-validator 9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv \
--only-known-rpc \
--ledger ledger \
@@ -116,7 +117,9 @@ The identities of the
[`--known-validator`s](running-validator/validator-start.md#known-validators) are:
- `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - Solana Labs (testnet.solana.com)
- `dDzy5SR3AXdYWVqbDEkVFdvSPCtS9ihF5kJkHCtXoFs` - MonkeDAO
- `Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN` - Certus One
- `eoKpUABi59aT4rR9HGS3LcMecfut9x7zJyodWWP43YQ` - SerGo
- `9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv` - Algo|Stake
## Mainnet Beta
@@ -126,9 +129,8 @@ A permissionless, persistent cluster for early token holders and launch partners
- Tokens that are issued on Mainnet Beta are **real** SOL
- If you have paid money to purchase/be issued tokens, such as through our
CoinList auction, these tokens will be transferred on Mainnet Beta.
- Note: If you are using a non-command-line wallet such as
[Solflare](wallet-guide/solflare.md),
the wallet will always be connecting to Mainnet Beta.
- Note: If you are using a non-command-line wallet, the wallet will always be
connecting to Mainnet Beta.
- Gossip entrypoint for Mainnet Beta: `entrypoint.mainnet-beta.solana.com:8001`
- Metrics environment variable for Mainnet Beta:

View File

@@ -76,7 +76,7 @@ Major releases:
- [`solana-program`](https://docs.rs/solana-program/) - Rust SDK for writing programs
- [`solana-client`](https://docs.rs/solana-client/) - Rust client for connecting to RPC API
- [`solana-cli-config`](https://docs.rs/solana-cli-config/) - Rust client for managing Solana CLI config files
- [`solana-accountsdb-plugin-interface`](https://docs.rs/solana-accountsdb-plugin-interface/) - Rust interface for developing Solana AccountsDb plugins.
- [`solana-geyser-plugin-interface`](https://docs.rs/solana-geyser-plugin-interface/) - Rust interface for developing Solana Geyser plugins.
Patch releases:

View File

@@ -147,7 +147,7 @@ sendAndConfirmTransaction(
);
```
The above code takes in a `TransactionInstruction` using `SystemProgram`, creates a `Transaction`, and sends it over the network. You use `Connection` in order to define with Solana network you are connecting to, namely `mainnet-beta`, `testnet`, or `devnet`.
The above code takes in a `TransactionInstruction` using `SystemProgram`, creates a `Transaction`, and sends it over the network. You use `Connection` in order to define which Solana network you are connecting to, namely `mainnet-beta`, `testnet`, or `devnet`.
### Interacting with Custom Programs

View File

@@ -98,7 +98,7 @@ Unstable methods may see breaking changes in patch releases and may not be suppo
- [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks)
- [getConfirmedBlocksWithLimit](jsonrpc-api.md#getconfirmedblockswithlimit)
- [getConfirmedSignaturesForAddress2](jsonrpc-api.md#getconfirmedsignaturesforaddress2)
- [getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransact)
- [getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransaction)
- [getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash)
- [getFeeRateGovernor](jsonrpc-api.md#getfeerategovernor)
- [getFees](jsonrpc-api.md#getfees)
@@ -2949,7 +2949,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
Result:
```json
{"jsonrpc":"2.0","result":{"solana-core": "1.9.6"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "1.9.13"},"id":1}
```
### getVoteAccounts
@@ -3191,7 +3191,7 @@ Before submitting, the following preflight checks are performed:
preflight commitment to avoid confusing behavior.
The returned signature is the first signature in the transaction, which
is used to identify the transaction ([transaction id](../../terminology.md#transanction-id)).
is used to identify the transaction ([transaction id](../../terminology.md#transaction-id)).
This identifier can be easily extracted from the transaction data before
submission.
@@ -3207,7 +3207,7 @@ submission.
#### Results:
- `<string>` - First Transaction Signature embedded in the transaction, as base-58 encoded string ([transaction id](../../terminology.md#transanction-id))
- `<string>` - First Transaction Signature embedded in the transaction, as base-58 encoded string ([transaction id](../../terminology.md#transaction-id))
#### Example:
@@ -5093,7 +5093,7 @@ Result:
### getRecentBlockhash
**DEPRECATED: Please use [getFeeForMessage](jsonrpc-api.md#getfeeformessage) instead**
**DEPRECATED: Please use [getLatestBlockhash](jsonrpc-api.md#getlatestblockhash) instead**
This method is expected to be removed in solana-core v2.0
Returns a recent block hash from the ledger, and a fee schedule that can be used to compute the cost of submitting a transaction using it.

View File

@@ -140,7 +140,7 @@ the [runtime enforcement
policy](developing/programming-model/accounts.md#policy). When an instruction
reference the same account multiple times there may be duplicate
`SolAccountInfo` entries in the array but they both point back to the original
input byte array. A program should handle these case delicately to avoid
input byte array. A program should handle these cases delicately to avoid
overlapping read/writes to the same buffer. If a program implements their own
deserialization function care should be taken to handle duplicate accounts
appropriately.

View File

@@ -1,18 +1,18 @@
---
title: Plugins
title: Geyser Plugins
---
## Overview
Validators under heavy RPC loads, such as when serving getProgramAccounts calls,
can fall behind the network. To solve this problem, the validator has been
enhanced to support a plugin mechanism through which the information about
accounts and slots can be transmitted to external data stores such as relational
databases, NoSQL databases or Kafka. RPC services then can be developed to
consume data from these external data stores with the possibility of more
flexible and targeted optimizations such as caching and indexing. This allows
the validator to focus on processing transactions without being slowed down by
busy RPC requests.
enhanced to support a plugin mechanism, called a "Geyser" plugin, through which
the information about accounts, slots, blocks, and transactions can be
transmitted to external data stores such as relational databases, NoSQL
databases or Kafka. RPC services then can be developed to consume data from
these external data stores with the possibility of more flexible and targeted
optimizations such as caching and indexing. This allows the validator to focus
on processing transactions without being slowed down by busy RPC requests.
This document describes the interfaces of the plugin and the referential plugin
implementation for the PostgreSQL database.
@@ -22,24 +22,24 @@ implementation for the PostgreSQL database.
### Important Crates:
- [`solana-accountsdb-plugin-interface`] &mdash; This crate defines the plugin
- [`solana-geyser-plugin-interface`] &mdash; This crate defines the plugin
interfaces.
- [`solana-accountsdb-plugin-postgres`] &mdash; The crate for the referential
plugin implementation for the PostgreSQL database.
[`solana-accountsdb-plugin-interface`]: https://docs.rs/solana-accountsdb-plugin-interface
[`solana-geyser-plugin-interface`]: https://docs.rs/solana-geyser-plugin-interface
[`solana-accountsdb-plugin-postgres`]: https://docs.rs/solana-accountsdb-plugin-postgres
[`solana-sdk`]: https://docs.rs/solana-sdk
[`solana-transaction-status`]: https://docs.rs/solana-transaction-status
## The Plugin Interface
The Plugin interface is declared in [`solana-accountsdb-plugin-interface`]. It
is defined by the trait `AccountsDbPlugin`. The plugin should implement the
The Plugin interface is declared in [`solana-geyser-plugin-interface`]. It
is defined by the trait `GeyserPlugin`. The plugin should implement the
trait and expose a "C" function `_create_plugin` to return the pointer to this
trait. For example, in the referential implementation, the following code
instantiates the PostgreSQL plugin `AccountsDbPluginPostgres ` and returns its
instantiates the PostgreSQL plugin `GeyserPluginPostgres ` and returns its
pointer.
```
@@ -47,10 +47,10 @@ pointer.
#[allow(improper_ctypes_definitions)]
/// # Safety
///
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
let plugin = AccountsDbPluginPostgres::new();
let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
/// This function returns the GeyserPluginPostgres pointer as trait GeyserPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn GeyserPlugin {
let plugin = GeyserPluginPostgres::new();
let plugin: Box<dyn GeyserPlugin> = Box::new(plugin);
Box::into_raw(plugin)
}
```
@@ -62,7 +62,7 @@ file in JSON format. The JSON file must have a field `libpath` that points
to the full path name of the shared library implementing the plugin, and may
have other configuration information, like connection parameters for the external
database. The plugin configuration file is specified by the validator's CLI
parameter `--accountsdb-plugin-config` and the file must be readable to the
parameter `--geyser-plugin-config` and the file must be readable to the
validator process.
Please see the [config file](#config) for the referential
@@ -164,11 +164,11 @@ please refer to [`solana-sdk`] and [`solana-transaction-status`]
The `slot` points to the slot the transaction is executed at.
For more details, please refer to the Rust documentation in
[`solana-accountsdb-plugin-interface`].
[`solana-geyser-plugin-interface`].
## Example PostgreSQL Plugin
The [`solana-accountsdb-plugin-postgres`] crate implements a plugin storing
The [`solana-accountsdb-plugin-postgres`] repository implements a plugin storing
account data to a PostgreSQL database to illustrate how a plugin can be
developed.
@@ -182,7 +182,7 @@ configuration file looks like the following:
```
{
"libpath": "/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
"libpath": "/solana/target/release/libsolana_geyser_plugin_postgres.so",
"host": "postgres-server",
"user": "solana",
"port": 5433,
@@ -366,7 +366,7 @@ Then run the script:
psql -U solana -p 5433 -h 10.138.0.9 -w -d solana -f create_schema.sql
```
After this, start the validator with the plugin by using the `--accountsdb-plugin-config`
After this, start the validator with the plugin by using the `--geyser-plugin-config`
argument mentioned above.
#### Destroy the Schema Objects

View File

@@ -48,8 +48,12 @@ The policy is as follows:
To prevent a program from abusing computation resources each instruction in a
transaction is given a compute budget. The budget consists of computation units
that are consumed as the program performs various operations and bounds that the
program may not exceed. When the program consumes its entire budget or exceeds
a bound then the runtime halts the program and returns an error.
program may not exceed. When the program consumes its entire budget or exceeds a
bound then the runtime halts the program and returns an error.
Note: The compute budget currently applies per-instruction but is moving toward
a per-transaction model. For more information see [Transaction-wide Compute
Budget](#transaction-wide-compute-budget).
The following operations incur a compute cost:
@@ -60,12 +64,12 @@ The following operations incur a compute cost:
- cross-program invocations
- ...
For cross-program invocations the programs invoked inherit the budget of their
For cross-program invocations, the programs invoked inherit the budget of their
parent. If an invoked program consumes the budget or exceeds a bound the entire
invocation chain and the parent are halted.
invocation chain is halted.
The current [compute
budget](https://github.com/solana-labs/solana/blob/d3a3a7548c857f26ec2cb10e270da72d373020ec/sdk/src/process_instruction.rs#L65)
budget](https://github.com/solana-labs/solana/blob/0224a8b127ace4c6453dd6492a38c66cb999abd2/sdk/src/compute_budget.rs#L102)
can be found in the Solana SDK.
For example, if the current budget is:
@@ -80,6 +84,7 @@ max_invoke_depth: 4,
max_call_depth: 64,
stack_frame_size: 4096,
log_pubkey_units: 100,
...
```
Then the program
@@ -90,7 +95,7 @@ Then the program
- Can not exceed a BPF call depth of 64
- Cannot exceed 4 levels of cross-program invocations.
Since the compute budget is consumed incrementally as the program executes the
Since the compute budget is consumed incrementally as the program executes, the
total budget consumption will be a combination of the various costs of the
operations it performs.
@@ -98,12 +103,38 @@ At runtime a program may log how much of the compute budget remains. See
[debugging](developing/on-chain-programs/debugging.md#monitoring-compute-budget-consumption)
for more information.
The budget values are conditional on feature enablement, take a look at the
compute budget's
[new](https://github.com/solana-labs/solana/blob/d3a3a7548c857f26ec2cb10e270da72d373020ec/sdk/src/process_instruction.rs#L97)
function to find out how the budget is constructed. An understanding of how
[features](runtime.md#features) work and what features are enabled on the
cluster being used are required to determine the current budget's values.
## Transaction-wide Compute Budget
Transactions are processed as a single entity and are the primary unit of block
scheduling. In order to facilitate better block scheduling and account for the
computational cost of each transaction, the compute budget is moving to a
transaction-wide budget rather than per-instruction.
For information on what the compute budget is and how it is applied see [Compute
Budget](#compute-budget).
With a transaction-wide compute budget the `max_units` cap is applied to the
entire transaction rather than to each instruction within the transaction. The
default number of maximum units remains at 200k which means the sum of the
compute units used by each instruction in the transaction must not exceed that
value. The number of maximum units allowed is intentionally kept small to
facilitate optimized programs and form the basis for a minimum fee level.
There are a lot of use cases that require more than 200k units
transaction-wide. To enable these use cases transactions can include a
[`ComputeBudgetInstruction`](https://github.com/solana-labs/solana/blob/0224a8b127ace4c6453dd6492a38c66cb999abd2/sdk/src/compute_budget.rs#L44)
requesting a higher compute unit cap. Higher compute caps will be charged
higher fees.
Compute Budget instructions don't require any accounts and must lie in the first
3 instructions of a transaction otherwise they will be ignored.
The `ComputeBudgetInstruction::request_units` function can be used to create
these instructions:
```rust
let instruction = ComputeBudgetInstruction::request_units(300_000);
```
## New Features

View File

@@ -150,7 +150,7 @@ found in the [Accounts](accounts.md) section.
### Instruction data
Each instruction caries a general purpose byte array that is passed to the
Each instruction carries a general purpose byte array that is passed to the
program along with the accounts. The contents of the instruction data is program
specific and typically used to convey what operations the program should
perform, and any additional information those operations may need above and

View File

@@ -16,8 +16,8 @@ and received the Message's instruction data inside, and also the index of the cu
Two helper functions to extract this data can be used:
```
fn load_current_index(instruction_data: &[u8]) -> u16;
fn load_instruction_at(instruction_index: usize, instruction_data: &[u8]) -> Result<Instruction>;
fn load_current_index_checked(instruction_data: &[u8]) -> u16;
fn load_instruction_at_checked(instruction_index: usize, instruction_sysvar_account_info: &AccountInfo) -> Result<Instruction>;
```
The runtime will recognize this special instruction, serialize the Message instruction data

View File

@@ -181,21 +181,21 @@ To track all the deposit accounts for your exchange, poll for each confirmed
block and inspect for addresses of interest, using the JSON-RPC service of your
Solana API node.
- To identify which blocks are available, send a [`getConfirmedBlocks` request](developing/clients/jsonrpc-api.md#getconfirmedblocks),
- To identify which blocks are available, send a [`getBlocks` request](developing/clients/jsonrpc-api.md#getblocks),
passing the last block you have already processed as the start-slot parameter:
```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlocks","params":[5]}' localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getBlocks","params":[5]}' localhost:8899
{"jsonrpc":"2.0","result":[5,6,8,9,11],"id":1}
```
Not every slot produces a block, so there may be gaps in the sequence of integers.
- For each block, request its contents with a [`getConfirmedBlock` request](developing/clients/jsonrpc-api.md#getconfirmedblock):
- For each block, request its contents with a [`getBlock` request](developing/clients/jsonrpc-api.md#getblock):
```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[5, "json"]}' localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getBlock","params":[5, "json"]}' localhost:8899
{
"jsonrpc": "2.0",
@@ -278,11 +278,11 @@ generally _not_ a viable method for tracking all your deposit addresses over all
slots, but may be useful for examining a few accounts for a specific period of
time.
- Send a [`getConfirmedSignaturesForAddress2`](developing/clients/jsonrpc-api.md#getconfirmedsignaturesforaddress2)
- Send a [`getSignaturesForAddress`](developing/clients/jsonrpc-api.md#getsignaturesforaddress)
request to the api node:
```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedSignaturesForAddress2","params":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC", {"limit": 3}]}' localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getSignaturesForAddress","params":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC", {"limit": 3}]}' localhost:8899
{
"jsonrpc": "2.0",
@@ -311,10 +311,10 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m
```
- For each signature returned, get the transaction details by sending a
[`getConfirmedTransaction`](developing/clients/jsonrpc-api.md#getconfirmedtransaction) request:
[`getTransaction`](developing/clients/jsonrpc-api.md#gettransaction) request:
```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["dhjhJp2V2ybQGVfELWM1aZy98guVVsxRCB5KhNiXFjCBMK5KEyzV8smhkVvs3xwkAug31KnpzJpiNPtcD5bG1t6", "json"]}' localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getTransaction","params":["dhjhJp2V2ybQGVfELWM1aZy98guVVsxRCB5KhNiXFjCBMK5KEyzV8smhkVvs3xwkAug31KnpzJpiNPtcD5bG1t6", "json"]}' localhost:8899
// Result
{
@@ -714,14 +714,16 @@ wallets using the
scheme and that _only_ deposits from ATA addresses be accepted.
Monitoring for deposit transactions should follow the [block polling](#poll-for-blocks)
method described above. Each new block should be scanned for successful transactions
issuing SPL Token [Transfer](https://github.com/solana-labs/solana-program-library/blob/fc0d6a2db79bd6499f04b9be7ead0c400283845e/token/program/src/instruction.rs#L105)
or [TransferChecked](https://github.com/solana-labs/solana-program-library/blob/fc0d6a2db79bd6499f04b9be7ead0c400283845e/token/program/src/instruction.rs#L268)
instructions referencing user accounts. It is possible that a transfer is initiated
by a smart contract via [Cross Program Invocation](/developing/programming-model/calling-between-programs#cross-program-invocations),
so [inner instructions](/terminology#inner-instruction) must be checked as well.
The `preTokenBalance` and `postTokenBalance` fields from the transaction's metadata
must then be used to determine the effective balance change.
method described above. Each new block should be scanned for successful
transactions referencing user token-account derived addresses. The
`preTokenBalance` and `postTokenBalance` fields from the transaction's metadata
must then be used to determine the effective balance change. These fields will
identify the token mint and account owner (main wallet address) of the affected
account.
Note that if a receiving account is created during the transaction, it will have no
`preTokenBalance` entry as there is no existing account state. In this
case, the initial balance can be assumed to be zero.
### Withdrawing
@@ -730,7 +732,10 @@ The withdrawal address a user provides must be the that of their SOL wallet.
Before executing a withdrawal [transfer](#token-transfers),
the exchange should check the address as
[described above](#validating-user-supplied-account-addresses-for-withdrawals).
Additionally this address must be owned by the System Program and have no account data. If the address has no SOL balance, user confirmation should be obtained before proceeding with the withdrawal. All other withdrawal addresses must be rejected.
Additionally this address must be owned by the System Program and have no
account data. If the address has no SOL balance, user confirmation should be
obtained before proceeding with the withdrawal. All other withdrawal addresses
must be rejected.
From the withdrawal address, the [Associated Token Account](https://spl.solana.com/associated-token-account)
(ATA) for the correct mint is derived and the transfer issued to that account via a

View File

@@ -100,7 +100,7 @@ any traversal issues on their own.
#### Required
- 8000-10000 TCP/UDP - P2P protocols (gossip, turbine, repair, etc). This can
be limited to any free 12 port range with `--dynamic-port-range`
be limited to any free 13 port range with `--dynamic-port-range`
#### Optional
For security purposes, it is not suggested that the following ports be open to

View File

@@ -54,28 +54,15 @@ and do the delegation.
#### Supported Wallets
Staking operations are supported by the following wallet solutions:
- Phantom.app in conjunction with a seed phrase or a Ledger Nano.
Check out Phantom's [guide to staking](https://phantom.app/blog/solana-staking-in-just-3-clicks) for details.
- SolFlare.com in conjunction with a keystore file or a Ledger Nano.
Check out our [guide to using SolFlare](wallet-guide/solflare.md) for details.
Many web and mobile wallets support Solana staking operations. Please check with
your favorite wallet's maintainers regarding status
#### Solana command line tools
- Solana command line tools can perform all stake operations in conjunction
with a CLI-generated keypair file wallet, a paper wallet, or with a connected
Ledger Nano.
[Staking commands using the Solana Command Line Tools](cli/delegate-stake.md).
- [Exodus](https://www.exodus.com/) wallet. They make the process very simple,
but you cannot choose a validator: they assign you to their partner validator.
See their [FAQ](https://support.exodus.com/article/1551-solana-staking-faq)
for details.
- [Binance](https://www.binance.com/) and [FTX](https://ftx.com/) exchanges.
Note that you cannot choose a validator with these services: they assign you
to their partner validator.
#### Create a Stake Account
Follow the wallet's instructions for creating a staking account. This account

View File

@@ -49,13 +49,9 @@ you first will need to create a wallet.**
## Supported Wallets
Solana supports several types of wallets in the Solana native
command-line app as well as wallets from third-parties.
For the majority of users, we recommend using one of the
[app wallets](wallet-guide/apps.md) or a browser-based
[web wallet](wallet-guide/web-wallets.md), which will provide a more familiar
user experience rather than needing to learn command line tools.
Several browser and mobile app based wallets support Solana. Find the right one
for you on the [Solana Ecosystem](https://solana.com/ecosystem/explore?categories=wallet)
page.
For advanced users or developers, the [command-line wallets](wallet-guide/cli.md)
may be more appropriate, as new features on the Solana blockchain will always be

View File

@@ -1,74 +0,0 @@
---
title: Mobile App Wallets
---
Solana is supported by multiple third-party apps which should provide a familiar
experience for most people who are new or experienced with using crypto wallets.
## Coin98
[Coin98](https://coin98.app/) is an app available for iOS and Android and can
be used to send and receive SOL tokens.
_Note: Coin98 does not support stake accounts or staking operations_
## Exodus
Send, receive & exchange cryptocurrency with ease on the world's leading Desktop, Mobile and Hardware crypto wallets.
Download [Exodus](https://exodus.com/) to easily and securely manage your Solana tokens.
Exodus includes live charts, a built-in exchange, and 24/7 human support.
## Solflare
[Solflare Wallet](https://solflare.com/) has mobile applications available for both
iOS and Android. These Mobile apps have support for sending Solana and SPL tokens,
staking, and NFT management in a fully-featured NFT gallery.
Security is a top priority for Solflare - the mobile wallet is non-custodial,
meaning keys are managed by the user who retains total control of their own funds.
The app supports biometric protection alongside passwords for maximum security.
## Trust Wallet
[Trust Wallet](https://trustwallet.com/) is an app available for iOS and Android
and can be used to send and receive SOL tokens.
_Note: Trust Wallet does not support stake accounts or staking operations_
### Trust Wallet Security
Tokens held in Trust Wallet are only as secure as the device on which the app is
installed. Anyone who is able to unlock your phone or tablet may be able to
use the Trust Wallet app and transfer your tokens. To improve security,
you can add a passcode to the Trust Wallet application.
To add a Trust Wallet passcode, open the app and go to
Settings -> Security -> Passcode.
If someone gains access to your Trust Wallet application, they can access your
recovery seed phrase.
Anyone who has access to your seed phrase will be able to recreate
your Trust Wallet keys on a different device. From there, they could
sign transactions from that device rather than on your own phone or tablet.
The seed phrase is displayed when a new wallet is created and it can also be
viewed at any later time in the app by following these steps:
- Go to Settings -> Wallets
- Under the Options menu for a particular wallet tap "Show Recovery Phrase"
## Zelcore
[Zelcore](https://zelcore.io) is a multi-currency wallet now supporting SOL and all Solana tokens (SPL). Each Zelcore account has 3 separate addresses for each asset.
Store, transact, connect to dapps, and (soon) DEX trade in a single app and hold SOL tokens alongside BTC, ETH, and 270+ other cryptos.
Zelcore is available for [Desktop](https://zelcore.io), [iOS](https://apps.apple.com/us/app/zelcore/id1436296839), and [Android](https://play.google.com/store/apps/details?id=com.zelcash.zelcore&hl=en_US&gl=US). One account for all your cryptos, all your devices.
Zelcore also uses a custom Sollet-based wallet adapter solution so users can connect to all their dapps with a single browser tab to keep things tidy.
__**DEVS**__: integrate Zelcore wallet into your products by implementing Sollet solution using "link.zelcore.io" as connection URL.
### Zelcore Wallet Security
Zelcore utilizes a username/password schema to derive private keys for all assets. Security layers include blockchain-based 2FA PIN, mobile device biometrics, and passphrases to allow users to set up as much or as little security as they like. All hashing functions done on-device, and no login/account info is stored nor transmitted off your device. The private keys only exist on-device while logged in, upon logging out there is no digital footprint left of your PKs.
**Treat your username, password, d2FA PIN, passphrases, etc. with utmost care, just like your private keys. Never give these to anyone!**

View File

@@ -5,10 +5,6 @@ title: Command Line Wallets
Solana supports several different types of wallets that can be used to interface
directly with the Solana command-line tools.
**If you are unfamiliar with using command line programs and just want to be able
to send and receive SOL tokens, we recommend setting up a third-party
[App Wallet](apps.md)**.
To use a Command Line Wallet, you must first [install the Solana CLI tools](../cli/install-solana-cli-tools.md)
## File System Wallet
@@ -43,11 +39,7 @@ regenerate a keypair on demand.
In terms of convenience versus security, a paper wallet sits at the opposite
side of the spectrum from an FS wallet. It is terribly inconvenient to use, but
offers excellent security. That high security is further amplified when paper
wallets are used in conjunction with
[offline signing](../offline-signing.md). Custody services such as
[Coinbase Custody](https://custody.coinbase.com/) use this combination.
Paper wallets and custody services are an excellent way to secure a large number
of tokens for a long period of time.
wallets are used in conjunction with [offline signing](../offline-signing.md).
[Paper Wallets](paper-wallet.md)

View File

@@ -37,7 +37,7 @@ usb://<MANUFACTURER>[/<WALLET_ID>][?key=<DERIVATION_PATH>]
`DERIVATION_PATH` is used to navigate to Solana keys within your hardware wallet.
The path has the form `<ACCOUNT>[/<CHANGE>]`, where each `ACCOUNT` and `CHANGE`
are positive integers.
are nonnegative integers.
For example, a fully qualified URL for a Ledger device might be:

View File

@@ -3,12 +3,11 @@ title: Ledger Nano
---
This page describes how to use a Ledger Nano S or Nano X to interact with Solana
using the command line tools. To see other solutions to interact with Solana with
your Nano, [click here](../ledger-live.md#interact-with-the-solana-network).
using the command line tools.
## Before You Begin
- [Set up a Nano with the Solana App](../ledger-live.md)
- [Set up a Nano with the Solana App](https://support.ledger.com/hc/en-us/articles/360016265659-Solana-SOL-?docs=true)
- [Install the Solana command-line tools](../../cli/install-solana-cli-tools.md)
## Use Ledger Nano with Solana CLI

View File

@@ -1,77 +0,0 @@
---
title: Ledger Nano S and Nano X
---
This document describes how to set up a
[Ledger Nano S](https://shop.ledger.com/products/ledger-nano-s) or
[Ledger Nano X](https://shop.ledger.com/pages/ledger-nano-x)
with the [Ledger Live](https://www.ledger.com/ledger-live) software.
Once the setup steps shown below are complete and the Solana app is installed
on your Nano device, users have several options of how to
[use the Nano to interact with the Solana Network](#interact-with-the-solana-network)
## Getting Started
- Order a [Nano S](https://shop.ledger.com/products/ledger-nano-s) or
[Nano X](https://shop.ledger.com/pages/ledger-nano-x) from Ledger.
- Follow the instructions for device setup included in the package,
or [Ledger's Start page](https://www.ledger.com/start/)
- Install [Ledger Live desktop software](https://www.ledger.com/ledger-live/)
- If you already have Ledger Live installed, please update to the latest
version of Ledger Live, which enables the newest firmware and app updates.
- Connect your Nano to your computer and follow the on-screen instructions.
- Update the firmware on your new Nano. This is needed to ensure you are able
to install the latest version of the Solana App.
- [Update Nano S Firmware](https://support.ledger.com/hc/en-us/articles/360002731113-Update-Ledger-Nano-S-firmware)
- [Update Nano X Firmware](https://support.ledger.com/hc/en-us/articles/360013349800)
## Install the Solana App on your Nano
- Open Ledger Live
- Click on "Manager" in the left pane on the app and search for "Solana" in the
App Catalog, then click "Install".
- Make sure your device is plugged in via USB and is unlocked with its PIN
- You may be prompted on the Nano to confirm the install of Solana App
- "Solana" should now show as "Installed" in the Ledger Live Manager
## Upgrade to the latest version of the Solana App
To make sure you have the latest functionality, if you are using an older version
of the Solana App, please upgrade to version `v1.0.1` by following these steps.
- Make sure you have Ledger Live version 2.10.0 or later.
- To check your version of Ledger Live, click on the Settings button in the
upper-right corner, then click "About". If a newer version of Ledger Live is
available, you should see a banner prompting you to upgrade when you first open
Ledger Live.
- Update the firmware on your Nano
- [Update Nano S Firmware](https://support.ledger.com/hc/en-us/articles/360002731113-Update-Ledger-Nano-S-firmware)
- [Update Nano X Firmware](https://support.ledger.com/hc/en-us/articles/360013349800)
- After a successful firmware update, the Solana app should automatically get
re-installed with the latest version of the app.
## Interact with the Solana network
Users can use any of the following options to use their Nano to interact with
Solana:
- [SolFlare.com](https://solflare.com/) is a non-custodial web wallet built
specifically for Solana and supports basic transfers and staking operations
with the Ledger device.
Check out our guide for [using a Nano with SolFlare](solflare.md).
- Developers and advanced users may
[use a Nano with the Solana command line tools](hardware-wallets/ledger.md).
New wallet features are almost always supported in the native command line tools
before being supported by third-party wallets.
## Known Issues
- Nano X sometimes cannot connect to web wallets using the Windows operating
system. This is likely to affect any browser-based wallets that use WebUSB.
The Ledger team is working to resolve this.
## Support
Check out our [Wallet Support Page](support.md) for ways to get help.

View File

@@ -1,201 +0,0 @@
---
title: SolFlare Web Wallet
---
## Introduction
[SolFlare.com](https://solflare.com/) is a community-created web wallet built
specifically for Solana.
SolFlare supports sending and receiving native SOL tokens as well as sending and
receiving SPL Tokens (Solana's ERC-20 equivalent).
SolFlare also supports staking of SOL tokens.
As a _non-custodial_ wallet, your private keys are not stored by the SolFlare
site itself, but rather they are stored in an encrypted
[Keystore File](#using-a-keystore-file) or on a
[Ledger Nano S or X hardware wallet](#using-a-ledger-nano-hardware-wallet).
This guide describes how to set up a wallet using SolFlare, how to send and
receive SOL tokens, and how to create and manage a stake account.
## Getting Started
Go to https://www.solflare.com in a supported browser. Most popular web browsers
should work when interacting with a Keystore File, but currently only
Chrome and Brave are supported when interacting with a Ledger Nano.
### Using a Keystore File
#### Create a new Keystore File
To create a wallet with a Keystore file, click on "Create a Wallet" and select
"Using Keystore File". Follow the prompts to create a password which will be
used to encrypt your Keystore file, and then to download the new file to your
computer. You will be prompted to then upload the Keystore file back to the site
to verify that the download was saved correctly.
**NOTE: If you lose your Keystore file or the password used to encrypt it, any
funds in that wallet will be lost permanently. Neither the Solana team nor the
SolFlare developers can help you recover lost keys.**
You may want to consider saving a backup copy of your Keystore file on an
external drive separate from your main computer, and storing your password in a
separate location.
#### Access your wallet with a Keystore File
To use SolFlare with a previously created Keystore file, click on
"Access a Wallet" and select "Using Keystore File". If you just created a new
Keystore file, you will be taken to the Access page directly.
You will be prompted to enter the password and upload your Keystore file,
then you will be taken to the wallet interface main page.
### Using a Ledger Nano hardware wallet
_NOTE: Please see [known issues](ledger-live.md#known-issues) for any current
limitations in using the Nano._
#### Initial Device Setup
To use a Ledger Nano with SolFlare, first ensure you have
[set up your Nano](ledger-live.md) and have [installed the latest version of
the Solana app](ledger-live.md#upgrade-to-the-latest-version-of-the-solana-app)
on your device.
#### Select a Ledger address to access
Plug in your Nano and open the Solana app so the device screen displays
"Application is Ready".
From the SolFlare home page, click "Access a Wallet" then select "Using Ledger
Nano S | Ledger Nano X". Under "Select derivation path", select the only option:
`` Solana - 44`/501`/ ``
Note: Your browser may prompt you to ask if SolFlare may communicate with your
Ledger device. Click to allow this.
Select an address to interact with from the lower drop down box then click "Access".
The Ledger device can derive a large number of private keys and associated
public addresses. This allows you to manage and interact with an arbitrary
number of different accounts from the same device.
If you deposit funds to an address derived from your Ledger device,
make sure to access the same address when using SolFlare to be able to access
those funds. If you connect to the incorrect address,
simply click Logout and re-connect with the correct address.
## Select a Network
Solana maintains [three distinct networks](../clusters), each of which has
its own purpose in supporting the Solana ecosystem. Mainnet Beta is selected by
default on SolFlare, as this is the permanent network where exchanges and other
production apps are deployed. To select a different network, click on the name
of the currently selected network at the top of the wallet dashboard, either
Mainnet, Testnet or Devnet, then click on the name of the network you wish to be
using.
## Sending and Receiving SOL Tokens
### Receiving
To receive tokens into your wallet, someone must transfer some to your wallet's
address. The address is displayed at the top-left on the screen, and you can
click the Copy icon to copy the address and provide it to whoever is sending you
tokens. If you hold tokens in a different wallet or on an exchange, you can
withdraw to this address as well. Once the transfer is made, the balance shown
on SolFlare should update within a few seconds.
### Sending
Once you have some tokens at your wallet address, you can send them to any other
wallet address or an exchange deposit address by clicking "Transfer SOL" in the
upper-right corner. Enter the recipient address and the amount of SOL to
transfer and click "Submit". You will be prompted to confirm the details of the
transaction before you [use your key to sign the transaction](#signing-a-transaction)
and then it will be submitted to the network.
## Staking SOL Tokens
SolFlare supports creating and managing stake accounts and delegations. To learn
about how staking on Solana works in general, check out our
[Staking Guide](../staking).
### Create a Stake Account
You can use some of the SOL tokens in your wallet to create a new stake account.
From the wallet main page click "Staking" at the top of the page. In the upper-
right, click "Create Account". Enter the amount of SOL you want to use to
fund your new stake account. This amount will be withdrawn from your wallet
and transferred to the stake account. Do not transfer your entire wallet balance
to a stake account, as the wallet is still used to pay any transaction fees
associated with your stake account. Consider leaving at least 1 SOL in your
wallet account.
After you submit and [sign the transaction](#signing-a-transaction) you will see
your new stake account appear in the box labeled "Your Staking Accounts".
Stake accounts created on SolFlare set your wallet address as the
[staking and withdrawing authority](../staking/stake-accounts#understanding-account-authorities)
for your new account, which gives your wallet's key the authority to sign
for any transactions related to the new stake account.
### View your Stake Accounts
On the main Wallet dashboard page or on the Staking dashboard page, your stake
accounts will be visible in the "Your Staking Accounts" box. Stake accounts
exist at a different address from your wallet.
SolFlare will locate and display all stake accounts on the
[selected network](#select-a-network)
for which your wallet address is assigned as the
[stake authority](../staking/stake-accounts#understanding-account-authorities).
Stake accounts that were created outside of SolFlare will also be displayed and
can be managed as long as the wallet you logged in with is assigned as the stake
authority.
### Delegate tokens in a Stake Account
Once you have [selected a validator](../staking#select-a-validator), you may
delegate the tokens in one of your stake accounts to them. From the Staking
dashboard, click "Delegate" at the right side of a displayed stake account.
Select the validator you wish to delegate to from the drop down list and click
Delegate.
To un-delegate your staked tokens (also called deactivating your stake), the
process is similar. On the Staking page, at the right side of a delegated stake
account, click the "Undelegate" button and follow the prompts.
### Split a Stake Account
You may split an existing stake account into two stake accounts. Click on the
address of a stake account controlled by your wallet, and under the Actions bar,
click "Split". Specify the amount of SOL tokens you want to split. This will be
the amount of tokens in your new stake account and your existing stake account
balance will be reduced by the same amount. Splitting your stake account
allows you to delegate to multiple different validators with different amounts
of tokens. You may split a stake account as many times as you want, to create
as many stake accounts as you want.
## Signing a Transaction
Any time you submit a transaction such as sending tokens to another wallet or
delegating stake, you need to use your private key to sign the transaction so
it will be accepted by the network.
### Using a Keystore File
If you accessed your wallet using a Keystore file, you will be prompted to enter
your password any time the key is needed to sign a transaction.
### Using a Ledger Nano
If you accessed your wallet with a Ledger Nano, you will be prompted to
confirm the pending transaction details on your device whenever the key is needed
to sign. On the Nano, use the left and right buttons to view and confirm all of the
transaction details. If everything looks correct, keep clicking the right button
until the screen shows "Approve". Click both buttons to approve the transaction.
If something looks incorrect, press the right button once more so the screen shows
"Reject" and press both buttons to reject the transaction. After you approve
or reject a transaction, you will see this reflected on the SolFlare page.

View File

@@ -1,52 +0,0 @@
---
title: Web Wallets
---
## BitKeep
[BitKeep](https://bitkeep.com) is a digital currency wallet and can send and receive SOL/SPL tokens.
BitKeep also support Solana DApps with BitKeep Browser and BitKeep Chrome.
## MathWallet
[MathWallet](https://mathwallet.org/) supports wallet
addresses for sending and receiving SOL and SPL Tokens through its
browser extension and web wallet interface.
_Note: The MathWallet iOS and Android apps do not yet support SOL and SPL Tokens_
## Phantom
[Phantom](https://phantom.app/) is a friendly non-custodial, browser
extension, Solana wallet that makes it safe & easy for you to store, send,
receive, collect, and swap tokens.
- Use Solana dapps
- Beautiful NFT support
- Ledger hardware wallet support
Available for Chrome, Brave, Firefox, Vivaldi, and Edge
## Solflare
[Solflare](https://solflare.com/) is a non-custodial web wallet created by the
[Solrise Finance](https://solrise.finance) team that was built specifically for Solana.
Solflare is accessible and easy to use but also has a very comprehensive set of features, including:
- The ability to connect your wallet to almost any Solana Dapp
- Transaction simulations, which show the balance changes expected from a transaction and protect against malicious dapps
- Deep staking support with the ability to create and manage all your staking accounts
- Comprehensive NFT functionality, including the ability to send, receive and preview NFTs from a Metaplex-compatible NFT gallery. Support is provided for image, video, audio, and 3d/VR NFTs.
- An in wallet swap for SPL tokens
- Compatibility with the Ledger hardware wallet
Solflare is available on web, as a browser extension, and as a mobile app for both Android and iOS.
The extension is available on Chrome, Brave, Firefox, Opera, and Edge.
Check out our [guide for using SolFlare](solflare.md).
## Sollet
[sollet.io](https://www.sollet.io/) is a non-custodial web wallet created by the
[Project Serum](https://projectserum.com/) team. sollet.io can be used to send
and receive SOL and any SPL Token.

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-dos"
version = "1.9.6"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,15 +13,15 @@ bincode = "1.3.3"
clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
solana-core = { path = "../core", version = "=1.9.6" }
solana-gossip = { path = "../gossip", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-net-utils = { path = "../net-utils", version = "=1.9.6" }
solana-perf = { path = "../perf", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-streamer = { path = "../streamer", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-client = { path = "../client", version = "=1.9.6" }
solana-core = { path = "../core", version = "=1.9.13" }
solana-gossip = { path = "../gossip", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-net-utils = { path = "../net-utils", version = "=1.9.13" }
solana-perf = { path = "../perf", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-streamer = { path = "../streamer", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
solana-client = { path = "../client", version = "=1.9.13" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
version = "1.9.6"
version = "1.9.13"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ console = "0.15.0"
indicatif = "0.16.2"
log = "0.4.14"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
[lib]
crate-type = ["lib"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-entry"
version = "1.9.6"
version = "1.9.13"
description = "Solana Entry"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,17 +16,17 @@ log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.1"
serde = "1.0.130"
solana-measure = { path = "../measure", version = "=1.9.6" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.6" }
solana-metrics = { path = "../metrics", version = "=1.9.6" }
solana-perf = { path = "../perf", version = "=1.9.6" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.13" }
solana-metrics = { path = "../metrics", version = "=1.9.13" }
solana-perf = { path = "../perf", version = "=1.9.13" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
bincode = "1.3.3"
[dev-dependencies]
matches = "0.1.9"
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.13" }
[lib]
crate-type = ["lib"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.9.6"
version = "1.9.13"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,12 +16,12 @@ clap = "2.33"
log = "0.4.14"
serde = "1.0.130"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "=1.9.6" }
solana-cli-config = { path = "../cli-config", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-metrics = { path = "../metrics", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
solana-cli-config = { path = "../cli-config", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-metrics = { path = "../metrics", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }

View File

@@ -20,7 +20,7 @@ async fn main() {
let default_keypair = solana_cli_config::Config::default().keypair_path;
solana_logger::setup_with_default("solana=info");
solana_metrics::set_panic_hook("faucet");
solana_metrics::set_panic_hook("faucet", /*version:*/ None);
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-frozen-abi"
version = "1.9.6"
version = "1.9.13"
description = "Solana Frozen ABI"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,11 +16,11 @@ log = "0.4.14"
serde = "1.0.130"
serde_derive = "1.0.103"
sha2 = "0.9.8"
solana-frozen-abi-macro = { path = "macro", version = "=1.9.6" }
solana-frozen-abi-macro = { path = "macro", version = "=1.9.13" }
thiserror = "1.0"
[target.'cfg(not(target_arch = "bpf"))'.dependencies]
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.13" }
generic-array = { version = "0.14.4", default-features = false, features = ["serde", "more_lengths"]}
memmap2 = "0.5.0"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-frozen-abi-macro"
version = "1.9.6"
version = "1.9.13"
description = "Solana Frozen ABI Macro"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-utils"
version = "1.9.6"
version = "1.9.13"
description = "Solana Genesis Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ documentation = "https://docs.rs/solana-download-utils"
edition = "2021"
[dependencies]
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-download-utils = { path = "../download-utils", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-download-utils = { path = "../download-utils", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
[lib]
crate-type = ["lib"]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.6"
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,16 +15,16 @@ clap = "2.33.1"
serde = "1.0.130"
serde_json = "1.0.72"
serde_yaml = "0.8.21"
solana-clap-utils = { path = "../clap-utils", version = "=1.9.6" }
solana-cli-config = { path = "../cli-config", version = "=1.9.6" }
solana-entry = { path = "../entry", version = "=1.9.6" }
solana-ledger = { path = "../ledger", version = "=1.9.6" }
solana-logger = { path = "../logger", version = "=1.9.6" }
solana-runtime = { path = "../runtime", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.6" }
solana-version = { path = "../version", version = "=1.9.6" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.6" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.13" }
solana-cli-config = { path = "../cli-config", version = "=1.9.13" }
solana-entry = { path = "../entry", version = "=1.9.13" }
solana-ledger = { path = "../ledger", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.13" }
solana-version = { path = "../version", version = "=1.9.13" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.13" }
tempfile = "3.2.0"
[[bin]]

View File

@@ -1,19 +1,19 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-interface"
description = "The Solana AccountsDb plugin interface."
version = "1.9.6"
name = "solana-geyser-plugin-interface"
description = "The Solana Geyser plugin interface."
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-accountsdb-plugin-interface"
documentation = "https://docs.rs/solana-geyser-plugin-interface"
[dependencies]
log = "0.4.11"
thiserror = "1.0.30"
solana-sdk = { path = "../sdk", version = "=1.9.6" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.6" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -0,0 +1,25 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
</a>
</p>
# Solana Geyser Plugin Interface
This crate enables an plugin to be added into the Solana Validator runtime to
take actions at the time of account updates or block and transaction processing;
for example, saving the account state to an external database. The plugin must
implement the `GeyserPlugin` trait. Please see the detail of the
`geyser_plugin_interface.rs` for the interface definition.
The plugin should produce a `cdylib` dynamic library, which must expose a `C`
function `_create_plugin()` that instantiates the implementation of the
interface.
The https://github.com/solana-labs/solana-accountsdb-plugin-postgres repository
provides an example of how to create a plugin which saves the accounts data into
an external PostgreSQL databases.
More information about Solana is available in the [Solana documentation](https://docs.solana.com/).
Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)

View File

@@ -1,5 +1,5 @@
/// The interface for AccountsDb plugins. A plugin must implement
/// the AccountsDbPlugin trait to work with the runtime.
/// The interface for Geyser plugins. A plugin must implement
/// the GeyserPlugin trait to work with the runtime.
/// In addition, the dynamic library must export a "C" function _create_plugin which
/// creates the implementation of the plugin.
use {
@@ -87,7 +87,7 @@ pub enum ReplicaBlockInfoVersions<'a> {
/// Errors returned by plugin calls
#[derive(Error, Debug)]
pub enum AccountsDbPluginError {
pub enum GeyserPluginError {
/// Error opening the configuration file; for example, when the file
/// is not found or when the validator process has no permission to read it.
#[error("Error opening config file. Error detail: ({0}).")]
@@ -136,12 +136,12 @@ impl SlotStatus {
}
}
pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;
pub type Result<T> = std::result::Result<T, GeyserPluginError>;
/// Defines an AccountsDb plugin, to stream data from the runtime.
/// AccountsDb plugins must describe desired behavior for load and unload,
/// Defines a Geyser plugin, to stream data from the runtime.
/// Geyser plugins must describe desired behavior for load and unload,
/// as well as how they will handle streamed data.
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
pub trait GeyserPlugin: Any + Send + Sync + std::fmt::Debug {
fn name(&self) -> &'static str;
/// The callback called when a plugin is loaded by the system,

View File

@@ -0,0 +1 @@
pub mod geyser_plugin_interface;

View File

@@ -0,0 +1,31 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-geyser-plugin-manager"
description = "The Solana Geyser plugin manager."
version = "1.9.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-geyser-plugin-manager"
[dependencies]
bs58 = "0.4.0"
crossbeam-channel = "0.5"
libloading = "0.7.2"
log = "0.4.11"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-geyser-plugin-interface = { path = "../geyser-plugin-interface", version = "=1.9.13" }
solana-logger = { path = "../logger", version = "=1.9.13" }
solana-measure = { path = "../measure", version = "=1.9.13" }
solana-metrics = { path = "../metrics", version = "=1.9.13" }
solana-rpc = { path = "../rpc", version = "=1.9.13" }
solana-runtime = { path = "../runtime", version = "=1.9.13" }
solana-sdk = { path = "../sdk", version = "=1.9.13" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.13" }
thiserror = "1.0.30"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,8 +1,8 @@
/// Module responsible for notifying plugins of account updates
use {
crate::accountsdb_plugin_manager::AccountsDbPluginManager,
crate::geyser_plugin_manager::GeyserPluginManager,
log::*,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
solana_geyser_plugin_interface::geyser_plugin_interface::{
ReplicaAccountInfo, ReplicaAccountInfoVersions,
},
solana_measure::measure::Measure,
@@ -19,7 +19,7 @@ use {
};
#[derive(Debug)]
pub(crate) struct AccountsUpdateNotifierImpl {
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
plugin_manager: Arc<RwLock<GeyserPluginManager>>,
}
impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
@@ -30,14 +30,14 @@ impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
}
fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta) {
let mut measure_all = Measure::start("accountsdb-plugin-notify-account-restore-all");
let mut measure_copy = Measure::start("accountsdb-plugin-copy-stored-account-info");
let mut measure_all = Measure::start("geyser-plugin-notify-account-restore-all");
let mut measure_copy = Measure::start("geyser-plugin-copy-stored-account-info");
let account = self.accountinfo_from_stored_account_meta(account);
measure_copy.stop();
inc_new_counter_debug!(
"accountsdb-plugin-copy-stored-account-info-us",
"geyser-plugin-copy-stored-account-info-us",
measure_copy.as_us() as usize,
100000,
100000
@@ -49,7 +49,7 @@ impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
measure_all.stop();
inc_new_counter_debug!(
"accountsdb-plugin-notify-account-restore-all-us",
"geyser-plugin-notify-account-restore-all-us",
measure_all.as_us() as usize,
100000,
100000
@@ -63,7 +63,7 @@ impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
}
for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-end-of-restore-from-snapshot");
let mut measure = Measure::start("geyser-plugin-end-of-restore-from-snapshot");
match plugin.notify_end_of_startup() {
Err(err) => {
error!(
@@ -81,7 +81,7 @@ impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-end-of-restore-from-snapshot",
"geyser-plugin-end-of-restore-from-snapshot",
measure.as_us() as usize
);
}
@@ -89,7 +89,7 @@ impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
}
impl AccountsUpdateNotifierImpl {
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
pub fn new(plugin_manager: Arc<RwLock<GeyserPluginManager>>) -> Self {
AccountsUpdateNotifierImpl { plugin_manager }
}
@@ -130,14 +130,14 @@ impl AccountsUpdateNotifierImpl {
slot: Slot,
is_startup: bool,
) {
let mut measure2 = Measure::start("accountsdb-plugin-notify_plugins_of_account_update");
let mut measure2 = Measure::start("geyser-plugin-notify_plugins_of_account_update");
let mut plugin_manager = self.plugin_manager.write().unwrap();
if plugin_manager.plugins.is_empty() {
return;
}
for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-update-account");
let mut measure = Measure::start("geyser-plugin-update-account");
match plugin.update_account(
ReplicaAccountInfoVersions::V0_0_1(&account),
slot,
@@ -163,7 +163,7 @@ impl AccountsUpdateNotifierImpl {
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-update-account-us",
"geyser-plugin-update-account-us",
measure.as_us() as usize,
100000,
100000
@@ -171,7 +171,7 @@ impl AccountsUpdateNotifierImpl {
}
measure2.stop();
inc_new_counter_debug!(
"accountsdb-plugin-notify_plugins_of_account_update-us",
"geyser-plugin-notify_plugins_of_account_update-us",
measure2.as_us() as usize,
100000,
100000

Some files were not shown because too many files have changed in this diff Show More