Compare commits

...

88 Commits

Author SHA1 Message Date
Tyera Eulberg
f58b87befe v1.9: bump tarpc from 0.26.2 to 0.27.2 and add BanksClientError (#22055)
* chore: bump tarpc from 0.26.2 to 0.27.2

Bumps [tarpc](https://github.com/google/tarpc) from 0.26.2 to 0.27.2.
- [Release notes](https://github.com/google/tarpc/releases)
- [Changelog](https://github.com/google/tarpc/blob/master/RELEASES.md)
- [Commits](https://github.com/google/tarpc/commits)

---
updated-dependencies:
- dependency-name: tarpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

* Accommodate breaking changes

* Reword incorrect error message

* Add error module

* Revert client Error type to io::Error; easy transition to BanksClientError

* Bump tracing crates in programs

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2021-12-22 03:41:16 +00:00
mergify[bot]
1a2823b875 chore: bump lru from 0.7.0 to 0.7.1 (#22018) (#22056)
Bumps [lru](https://github.com/jeromefroe/lru-rs) from 0.7.0 to 0.7.1.
- [Release notes](https://github.com/jeromefroe/lru-rs/releases)
- [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/jeromefroe/lru-rs/compare/0.7.0...0.7.1)

---
updated-dependencies:
- dependency-name: lru
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
(cherry picked from commit 69d0b08dd8)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-12-21 16:27:43 -07:00
mergify[bot]
75fe0d3ecf Fix #21986 (#22035) (#22049)
* Partial revert "Updates documentation around what needs to be passed in CPI. (#21633)"

* Enforces the program_id being passed explicitly by removing it from get_instruction_keyed_accounts().

* instruction_accounts => instructions_account

(cherry picked from commit ba8e15848e)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-21 17:54:18 +00:00
mergify[bot]
c296a6c9ed The sidebar for the plugin doc is showing the item as "Overview", corrected the styles (#22033) (#22040)
(cherry picked from commit 2347f65133)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-21 02:58:53 +00:00
mergify[bot]
57e5406476 Add deactivation cooldown before address lookup tables can be closed (#22011) (#22036)
(cherry picked from commit f5d1115468)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-21 02:10:14 +00:00
mergify[bot]
4f57c4a4fe Fix weird formatting of bullets (#22013) (#22030)
(cherry picked from commit 116517fb6d)

Co-authored-by: Kardashev <96332127+0xkardashev@users.noreply.github.com>
2021-12-20 20:41:18 +00:00
mergify[bot]
c4b3b2865d Update program close docs (#22026) (#22027)
(cherry picked from commit b8eff3456c)

Co-authored-by: Jack May <jack@solana.com>
2021-12-20 18:55:39 +00:00
mergify[bot]
f58c375b1f typo: lanaguage -> language (#22009) (#22015)
(cherry picked from commit e92a81b741)

Co-authored-by: Peter Johnson <peter@geocode.earth>
2021-12-20 07:34:31 +00:00
mergify[bot]
bf41c53f11 chore: add blockSubscribe api docs (#22002) (#22008)
Co-authored-by: Zano <segfaultdoctor@protonmail.com>
(cherry picked from commit df6a4930b9)

Co-authored-by: segfaultdoctor <seg@jito.network>
2021-12-19 16:48:36 +00:00
mergify[bot]
e3a4b98432 removes Select in favor of recv_timeout/try_iter (#21981) (#22001)
crossbeam_channel::Select::ready_timeout might return with success spuriously.

(cherry picked from commit 7476dfeec0)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-18 19:37:07 +00:00
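For readers unfamiliar with the replacement idiom, here is a minimal sketch of the recv_timeout/try_iter pattern, assuming a plain crossbeam_channel receiver; the function and message type are illustrative, not the actual core code:

```rust
use crossbeam_channel::{unbounded, Receiver, RecvTimeoutError};
use std::time::Duration;

// Block on the first message with a timeout, then opportunistically pull
// whatever else is already queued. Unlike Select::ready_timeout, a
// successful recv_timeout guarantees a message was actually received.
fn drain(receiver: &Receiver<u64>, timeout: Duration) -> Result<Vec<u64>, RecvTimeoutError> {
    let first = receiver.recv_timeout(timeout)?;
    // try_iter never blocks; it stops as soon as the channel is empty.
    Ok(std::iter::once(first).chain(receiver.try_iter()).collect())
}

fn main() {
    let (sender, receiver) = unbounded();
    for x in 0..3u64 {
        sender.send(x).unwrap();
    }
    println!("{:?}", drain(&receiver, Duration::from_millis(200)).unwrap());
}
```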
mergify[bot]
91657ba8fe new net-stats require a new table (#21996) (#22000)
(cherry picked from commit 3fe942ab30)

Co-authored-by: Jeff Biseda <jbiseda@gmail.com>
2021-12-18 10:26:16 +00:00
mergify[bot]
35ee48bec9 RPC Block Subscription (backport #21787) (#21992)
* RPC Block Subscription (#21787)

* add stuff

* compiling

* add notify block

* wip

* feat: add blockSubscribe pubsub method

* address PR comments

Co-authored-by: Lucas B <buffalu@jito.network>
Co-authored-by: Zano <segfaultdoctor@protonmail.com>
(cherry picked from commit 76098dd42a)

# Conflicts:
#	Cargo.lock
#	client-test/Cargo.toml
#	rpc/src/rpc_subscriptions.rs

* Fix conflicts

Co-authored-by: segfaultdoctor <seg@jito.network>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-12-18 01:43:37 +00:00
mergify[bot]
02cfa85214 Update to reed-solomon-erasure 5.0.1, to get simd-accel on M1 macs (#21990)
(cherry picked from commit 5f054cd51b)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-12-18 00:52:21 +00:00
mergify[bot]
02be3a6568 Check file size of snapshot_version when unarchiving snapshot (#21925) (#21983)
(cherry picked from commit 0f6e8d3385)

Co-authored-by: mooori <moritz.zielke@gmail.com>
2021-12-17 21:02:53 +00:00
mergify[bot]
b20fae5a09 simplifies ShredIndex api (#21932) (#21959)
(cherry picked from commit efd64a3862)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-17 19:50:49 +00:00
mergify[bot]
e572678176 removes next_shred_index from return value of entries to shreds api (#21961) (#21980)
next-shred-index is already readily available from returned data shreds.
The commit simplifies the API for upcoming changes to the erasure coding
schema, which will require explicit tracking of indices for coding shreds
as well as data shreds.

(cherry picked from commit 89d66c3210)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-17 17:57:57 +00:00
mergify[bot]
f4521002b9 Clean up demote program write lock feature (backport #21949) (#21969)
* Clean up demote program write lock feature (#21949)

* Clean up demote program write lock feature

* fix test

(cherry picked from commit 6ff0be6a82)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs
#	runtime/src/accounts.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-17 04:45:22 +00:00
mergify[bot]
0c5a2bcd5a Update getSignaturesForAddress and getConfirmedSignaturesForAddress2 RPC call description (#21955) (#21960)
* Update jsonrpc-api.md

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Wrap 80chars

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 3398f5a2f5)

Co-authored-by: jdcaballerov <743513+jdcaballerov@users.noreply.github.com>
2021-12-16 20:59:51 +00:00
mergify[bot]
c25d16bf0d adds ErasureSetId identifying erasure coding sets of shreds (backport #21928) (#21946)
* adds ErasureSetId identifying erasure coding sets of shreds (#21928)

(cherry picked from commit 8183f28636)

# Conflicts:
#	ledger/src/blockstore.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-16 17:49:39 +00:00
mergify[bot]
301e38044a Fixes the calculation of the "compute_meter_consumption" across process_instruction() and process_message(). (#21944) (#21945)
(cherry picked from commit 49cb161203)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-16 16:28:28 +00:00
Michael Vines
bfa6302985 Bump version to 1.9.2 2021-12-15 16:18:14 -08:00
Kirill Fomichev
b66e2ae353 add caching_enabled option to test-validator
(cherry picked from commit 5fb7da12f2)
2021-12-15 16:11:51 -08:00
Michael Vines
3967dc8685 rebase 2021-12-15 15:33:45 -08:00
Michael Vines
569c83295d Update argument name
(cherry picked from commit ed924e3bc4)
2021-12-15 15:33:45 -08:00
losman0s
a462c58594 Add option to load accounts from file
This introduces the `--clone-from-file` option for
solana-test-validator. It allows specifying any number of files
(without extension) containing account info and data, which will be
loaded at genesis. This is similar to `--bpf-program` for program
loading.

The files will be searched for in the CWD or in `tests/fixtures`.

Example: `solana-test-validator --clone-from-file SRM_token USD_token`
(cherry picked from commit 9b06d64eb8)

# Conflicts:
#	test-validator/Cargo.toml
2021-12-15 15:33:45 -08:00
losman0s
7dba8bb49f Add complete account dump to file
This commit introduces the ability to dump the complete content of an
account to a JSON file (compact or not depending on the provided format
option).

Example:

```sh
solana account -u m \
  --output json-compact \
  --output-file SRM_token.json \
  SRMuApVNdxXokk5GT7XD5cUUgXMBCoAz2LHeuAoKWRt
```

Note: Behavior remains untouched if format option `--output` is not
provided (only account data gets written to file).

(cherry picked from commit 0e9e67b65d)
2021-12-15 15:33:45 -08:00
mergify[bot]
c907d4444d add accountsdb-plugin-config to test-validator (#21918)
(cherry picked from commit c2a94a8fb0)

Co-authored-by: Kirill Fomichev <fanatid@ya.ru>
2021-12-15 09:48:12 +00:00
Michael Vines
b4c847557b Restore solana_validator::test_validator export
(cherry picked from commit e124659aca)
2021-12-15 00:29:04 -08:00
mergify[bot]
de48347078 Add json support for feature sets; also print output after feature list (#21905) (#21914)
* Add json support for feature sets; also print output after feature list

* Move stringifying into Display implementation

(cherry picked from commit dcd2854829)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-15 07:08:39 +00:00
Michael Vines
9f173d3717 Add helper crate to generate syscalls.txt 2021-12-14 21:34:36 -08:00
Michael Vines
dcd76e484f Update openssl-src package to resolve cargo audit complaint
(cherry picked from commit 7ba27e5cae)
2021-12-14 19:09:26 -08:00
mergify[bot]
2246135654 Document solana_program::instruction (#21817) (#21906)
* Document solana_program::instruction

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit dcb5849484)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-15 00:55:56 +00:00
mergify[bot]
41ea597256 Fix subtraction overflow (#21871) (#21901)
(cherry picked from commit cb395abff7)

Co-authored-by: carllin <carl@solana.com>
2021-12-14 23:22:47 +00:00
Michael Vines
fb955bd4ec Update Cargo.toml 2021-12-14 14:18:20 -08:00
Michael Vines
5c3fbb384f Futures 0.3.18 has been yanked, back off to .17
(cherry picked from commit 2a6dcb2ffd)

# Conflicts:
#	ledger/Cargo.toml
2021-12-14 14:18:20 -08:00
mergify[bot]
a056fd88cb uses Option<Slot> for SlotMeta.parent_slot (backport #21808) (#21899)
* uses Option<Slot> for SlotMeta.parent_slot (#21808)

SlotMeta.parent_slot for the head of a detached chain of slots is
unknown, and that is indicated by u64::MAX, which lacks type-safety:
https://github.com/solana-labs/solana/blob/6c108c8fc/ledger/src/blockstore_meta.rs#L203-L205

The commit changes the type to Option<Slot>. Backward compatibility is
maintained by customizing serde serialize/deserialize implementations.

(cherry picked from commit 8d980f07ba)

# Conflicts:
#	ledger-tool/src/main.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-14 21:42:57 +00:00
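A minimal sketch of the sentinel-preserving serde customization described above, assuming bincode as the wire format; the struct and helper names are illustrative, not the actual blockstore_meta implementation:

```rust
use serde::{Deserialize, Deserializer, Serialize, Serializer};

type Slot = u64;

// Keep u64::MAX on the wire as the "unknown parent" sentinel so old and
// new software agree on the byte-level format, while in-memory code gets
// a type-safe Option<Slot>.
fn serialize_parent<S: Serializer>(v: &Option<Slot>, s: S) -> Result<S::Ok, S::Error> {
    s.serialize_u64(v.unwrap_or(u64::MAX))
}

fn deserialize_parent<'de, D: Deserializer<'de>>(d: D) -> Result<Option<Slot>, D::Error> {
    let raw = u64::deserialize(d)?;
    Ok(if raw == u64::MAX { None } else { Some(raw) })
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct SlotMetaSketch {
    slot: Slot,
    #[serde(serialize_with = "serialize_parent", deserialize_with = "deserialize_parent")]
    parent_slot: Option<Slot>,
}

fn main() {
    let meta = SlotMetaSketch { slot: 5, parent_slot: None };
    let bytes = bincode::serialize(&meta).unwrap();
    assert_eq!(meta, bincode::deserialize::<SlotMetaSketch>(&bytes).unwrap());
}
```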
mergify[bot]
2f1816d1db adds ShredId uniquely identifying each shred (backport #21820) (#21897)
* adds ShredId uniquely identifying each shred (#21820)

(cherry picked from commit 4ceb2689f5)

# Conflicts:
#	ledger/src/blockstore.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-14 21:03:08 +00:00
mergify[bot]
2cd2f3ba7b Bump rbpf to v0.2.19 (#21880) (#21891)
* Bump rbpf to v0.2.19

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
(cherry picked from commit 509bcd2e74)

Co-authored-by: Jack May <jack@solana.com>
2021-12-14 20:30:31 +00:00
Michael Vines
135dfdbf1e Don't publish rbpf-cli to crates.io 2021-12-14 12:12:19 -08:00
Michael Vines
fad4bfdf2a Don't publish poh-bench to crates.io 2021-12-14 12:10:03 -08:00
mergify[bot]
a9d4728c35 Deserialize accounts before acquiring stakes cache lock (#21733) (#21889)
* Deserialize stored accounts before locking stakes cache

* fix test

(cherry picked from commit 2bbe1d875a)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-14 16:47:01 +00:00
mergify[bot]
3977bcde63 Add missing word "that" (#21878) (#21884)
(cherry picked from commit 746869fdac)

Co-authored-by: Raza <42661870+AlmostEfficient@users.noreply.github.com>
2021-12-14 14:44:48 +00:00
mergify[bot]
cf2a9de19c Add solana-cli-config link to rust-api.md (#21840) (#21874)
(cherry picked from commit 033106ed81)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-14 08:22:13 +00:00
mergify[bot]
5e2b12aee5 Restore ALL behavior; add enum variant, comments, and help text to make behavior clearer (#21854) (#21863)
(cherry picked from commit bed1b143a5)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-14 05:21:25 +00:00
mergify[bot]
6c329e2fd3 Fixup RPC docs (backport #21858) (#21864)
* Remove old notes referring to EOL versions

(cherry picked from commit eebaf89874)

* Add notes about new v1.9 rpc apis

(cherry picked from commit fd212fd2a4)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-12-14 02:46:44 +00:00
mergify[bot]
0376045c7d cli: Order displayed feature list by status (#21810) (#21830)
(cherry picked from commit 1149c1880d)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-13 14:39:47 +00:00
Michael Vines
c1f54c22ed Remove the 5 integer msg! form
(cherry picked from commit c5c699a918)
2021-12-11 12:47:43 -08:00
Lijun Wang
0576d133ad Add Accountsdb plugin documentations (#21746) (#21799)
Add the public-facing documentation about the plugin framework: explaining the interface, how to load a plugin, and the example PostgreSQL plugin implementation.
Updated the rust documentation for the plugin interfaces for accounts and slot.
These changes are targeted for v1.8. Information about transactions will be updated later.
2021-12-11 11:04:22 -08:00
mergify[bot]
9956afb2bd uses Option<u64> for SlotMeta.last_index (#21775) (#21806)
SlotMeta.last_index may be unknown, and the current code uses u64::MAX to
indicate that:
https://github.com/solana-labs/solana/blob/6c108c8fc/ledger/src/blockstore_meta.rs#L169-L174

This lacks type-safety and can introduce bugs if not always checked for.
Several instances of slot_meta.last_index + 1 are also subject to
overflow.

This commit updates the type to Option<u64>. Backward compatibility is
maintained by customizing serde serialize/deserialize implementations.

(cherry picked from commit e08139f949)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-11 17:39:05 +00:00
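A small illustration of the overflow hazard named above, contrasting the u64::MAX sentinel with the Option<u64> replacement; the function names are hypothetical:

```rust
// With the sentinel encoding, last_index + 1 wraps around (or panics in
// debug builds) when last_index is the "unknown" value u64::MAX.
fn next_index_sentinel(last_index: u64) -> u64 {
    last_index.wrapping_add(1)
}

// With Option<u64>, the unknown case short-circuits, and checked_add
// surfaces genuine overflow instead of silently wrapping.
fn next_index_typed(last_index: Option<u64>) -> Option<u64> {
    last_index.and_then(|i| i.checked_add(1))
}

fn main() {
    assert_eq!(next_index_sentinel(u64::MAX), 0); // silent wrap-around
    assert_eq!(next_index_typed(None), None);
    assert_eq!(next_index_typed(Some(7)), Some(8));
}
```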
mergify[bot]
01941cf3de Rename Packets to PacketBatch (backport #21794) (#21805)
* Rename Packets to PacketBatch (#21794)

(cherry picked from commit 254ef3e7b6)

# Conflicts:
#	core/src/verified_vote_packets.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-11 16:53:23 +00:00
Tao Zhu
4b63d51e3e Bump version to 1.9.1 (#21802) 2021-12-11 12:50:36 +00:00
mergify[bot]
5bf4445ae6 Add address lookup table program (backport #21616) (#21789)
* Add address lookup table program (#21616)

* Add address lookup table program

* feedback

(cherry picked from commit 9b41ddd9ba)

# Conflicts:
#	runtime/Cargo.toml

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-11 05:26:46 +00:00
Justin Starry
7782d34bbf Add StakesCache struct to abstract away locking (#21738) (#21796) 2021-12-10 22:38:04 -05:00
mergify[bot]
2c4765e75a Bump solana_rbpf to version v0.2.18 (#21774) (#21786)
(cherry picked from commit a5a0dabe7b)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-11 02:38:03 +00:00
mergify[bot]
e71ea19e60 adds back ErasureMeta::first_coding_index field (#21623) (#21785)
https://github.com/solana-labs/solana/pull/16646
removed first_coding_index since the field is currently redundant and
always equal to fec_set_index.
However, with upcoming changes to the erasure coding schema, this will no
longer be the same as fec_set_index and so requires a separate field to
represent it.

(cherry picked from commit 49ba09b333)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-10 23:14:10 +00:00
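A hypothetical sketch of why restoring the field pays off, assuming consumers read first_coding_index rather than fec_set_index directly; this is not the actual ledger ErasureMeta definition:

```rust
struct ErasureMetaSketch {
    fec_set_index: u64,
    first_coding_index: u64,
}

impl ErasureMetaSketch {
    fn new(fec_set_index: u64) -> Self {
        // Redundant today by construction: the two fields coincide...
        Self { fec_set_index, first_coding_index: fec_set_index }
    }

    fn coding_index_range(&self, num_coding: u64) -> std::ops::Range<u64> {
        // ...but consumers go through first_coding_index, so only the
        // constructor changes when the erasure schema diverges.
        self.first_coding_index..self.first_coding_index + num_coding
    }
}

fn main() {
    let meta = ErasureMetaSketch::new(64);
    assert_eq!(meta.fec_set_index, 64);
    assert_eq!(meta.coding_index_range(8), 64..72);
}
```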
mergify[bot]
ed0040d555 Update to Rust 1.57.0 (#21779)
(cherry picked from commit 15a9fa6f53)

Co-authored-by: Steven Czabaniuk <steven@solana.com>
2021-12-10 22:23:48 +00:00
mergify[bot]
da9e6826ac Move type alias and use it more broadly (#21763) (#21777)
(cherry picked from commit 350845c513)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-10 21:44:41 +00:00
mergify[bot]
68fc72a7f4 Add more reporting for invalid stake cache members and prune them (#21654) (#21741)
* Add more reporting for invalid stake cache members

* feedback

(cherry picked from commit 6fc329180b)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-10 18:30:16 +00:00
mergify[bot]
2a6bb2b954 Migrate from address maps to address lookup tables (#21634) (#21773)
* Migrate from address maps to address lookup tables

* update sanitize error

* cargo fmt

* update abi

(cherry picked from commit 6c108c8fc3)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-10 18:10:37 +00:00
mergify[bot]
ef51778c78 Nits in message-processor (#21755) (#21762)
* Fixup typo

* Simplify types slightly

(cherry picked from commit c1386d66e6)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-10 11:24:20 -05:00
mergify[bot]
abecf292a3 Expand docs for Pubkey::create_program_address (#21750) (#21759)
* Expand docs for Pubkey::create_program_address

* Update sdk/program/src/pubkey.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 6919c4863b)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-10 11:23:54 -05:00
Michael Vines
a31660815f rebase 2021-12-09 18:41:47 -08:00
Michael Vines
539ad4bea6 Remove libcurl to prevent wasm-pack segfault in libssl
(cherry picked from commit f32216588d)
2021-12-09 18:41:47 -08:00
Michael Vines
85f601993f Cargo.lock
(cherry picked from commit f4babb7566)

# Conflicts:
#	Cargo.lock
#	programs/bpf/Cargo.lock
2021-12-09 18:41:47 -08:00
Michael Vines
b0754cc575 Add initial wasm bindings for Instruction, SystemProgram and Transaction
(cherry picked from commit a35df1cb02)
2021-12-09 18:41:47 -08:00
Michael Vines
effd0b2547 Add wasm bindings for Hash
(cherry picked from commit 03a956e8d9)
2021-12-09 18:41:47 -08:00
Michael Vines
8836069719 Add wasm bindings for Pubkey and Keypair
(cherry picked from commit 488dc37fec)
2021-12-09 18:41:47 -08:00
mergify[bot]
2698a5c705 AcctIdx: env var to enable testing of disk buckets (#21494) (#21723)
(cherry picked from commit 54862eba0d)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2021-12-09 23:39:06 +00:00
mergify[bot]
dd157fd47f Fixed minor issues with the cluster overview docs which had confused some (#21744) (#21745)
new users.

(cherry picked from commit 6d18b6bab5)

Co-authored-by: bji <bryan@ischo.com>
2021-12-09 20:41:21 +00:00
mergify[bot]
8cacf82cb8 adds more sanity checks to shreds (#21675) (#21734)
(cherry picked from commit 8063273d09)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-09 18:44:43 +00:00
mergify[bot]
8ee5fbc5c0 simulateTransaction now returns the correct error code if accounts are provided as input (#21716)
(cherry picked from commit 824994db69)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-12-09 01:12:42 +00:00
mergify[bot]
f2a6b94e5c SDK: Add stdlib.h include to pull in abort() (#21700) (#21705)
(cherry picked from commit 923720f529)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-12-08 17:31:11 +00:00
mergify[bot]
ef970bb14a - Implicitly fixes invoke_context.return_data not being reset between instructions in process_message. (#21671) (#21684)
- Lets InvokeContext::process_cross_program_instruction() handle the first invocation depth too.
- Marks InvokeContext::verify(), InvokeContext::verify_and_update() and InvokeContext::process_executable_chain() private.
- Renames InvokeContext::process_cross_program_instruction() to InvokeContext::process_instruction().
- Removes InvokeContext::new_mock_with_sysvars().

(cherry picked from commit 1df88837c8)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-08 10:48:49 +00:00
Jarred Nicholls
cabd851904 Avoid entropy sources when constructing a solana_program::message::Message.
The solana-program crate can be used in certain embedded environments (HSMs) where
the source of entropy, whether used for cryptographic purposes or not, is tightly
controlled. In these cases, using the default OS source of entropy is not always
acceptable. Thus, using the default Rust stdlib entropy source for seeding its
default hasher is prohibited. This means any HashMap/HashSet used must be able
to be constructed and used with a custom hasher implementation.

This commit removes the use of Itertools::unique() to dedupe Instructions that are
being compiled into a new Message, which uses a default-configured HashMap
under the hood. Instead, we use a BTreeSet, which does not invoke any entropy
source in order to seed a hash implementation.

(cherry picked from commit 4da435f2a0)
2021-12-07 22:36:21 -08:00
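A minimal sketch of the entropy-free dedup idiom the commit adopts: BTreeSet orders keys by Ord and never seeds a hasher, unlike HashSet with its default RandomState. The key type here is a stand-in, not the real solana_program Instruction:

```rust
use std::collections::BTreeSet;

// Deduplicate while preserving first-occurrence order, touching no
// entropy source: BTreeSet::insert returns false for repeats.
fn unique_program_ids(program_ids: &[[u8; 32]]) -> Vec<[u8; 32]> {
    let mut seen = BTreeSet::new();
    program_ids
        .iter()
        .filter(|id| seen.insert(**id))
        .copied()
        .collect()
}

fn main() {
    let a = [1u8; 32];
    let b = [2u8; 32];
    assert_eq!(unique_program_ids(&[a, b, a]), vec![a, b]);
}
```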
mergify[bot]
2d2ef59550 Ensure we have keys to activate these features (#21669) (#21674)
(cherry picked from commit 45e56c599d)

Co-authored-by: Sean Young <sean@mess.org>
2021-12-07 23:24:11 +00:00
mergify[bot]
b7b56d5016 Docs: Solflare web/app updates (#21540) (#21668)
* Update Solflare description

* Add Solflare to mobile wallets

* Sort mobile wallets alphabetically

* Sort web wallets alphabetically

* Update docs/src/wallet-guide/apps.md

* Update docs/src/wallet-guide/apps.md

* Update docs/src/wallet-guide/web-wallets.md

* Update docs/src/wallet-guide/web-wallets.md

* Update docs/src/wallet-guide/apps.md

Co-authored-by: Justin Starry <justin.m.starry@gmail.com>
(cherry picked from commit a2477c1f32)

Co-authored-by: Boris Vujicic <turshija@gmail.com>
2021-12-07 16:44:28 +00:00
mergify[bot]
18e3a635b4 docs: Fix SOL staked formula (#21615) (#21667)
Fix the formula on the proposal page: https://docs.solana.com/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards

(cherry picked from commit b57097ef18)

Co-authored-by: Melroy van den Berg <melroy@melroy.org>
2021-12-07 16:01:12 +00:00
mergify[bot]
2b4347d502 Add option to reclaim accounts-cluster-bench accounts/lamports (backport #21656) (#21658)
* Add option to reclaim accounts-cluster-bench accounts/lamports (#21656)

* Add option to reclaim accounts-cluster-bench accounts/lamports

* lint

(cherry picked from commit 205fd95722)

# Conflicts:
#	accounts-cluster-bench/Cargo.toml

* Fix conflict

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-12-07 09:18:48 +00:00
mergify[bot]
87accd16d8 Fixup flaky tests (#21617) (#21647)
* Fixup flaky tests

* Fixup listeners

(cherry picked from commit f493a88258)

Co-authored-by: carllin <carl@solana.com>
2021-12-07 03:54:14 +00:00
mergify[bot]
0e969015fc Add offline and fee-payer utilities to CLI vote module (#21579) (#21649)
* create-vote-account: add offline, nonce, fee_payer capabilities

* vote-authorize: add offline, nonce, fee-payer

* vote-update-things: add offline, nonce, fee-payer

* withdraw-vote: add offline, nonce, fee-payer

* close-vote-acct: add fee-payer

* Allow WithdrawVoteAccount to empty account, since offline operations cannot perform account state queries as in CloseVoteAccount

* Fix lint

* Update offline-signing docs

* Add some parse unit tests

* Add offline integration test

(cherry picked from commit 873fe81bc0)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-07 01:51:02 +00:00
mergify[bot]
46935c022e Ensure that StakeDelegations and StakeHistory serde (#21640) (#21653)
Add tests to StakeDelegations and StakeHistory to ensure that the outer
types serialize and deserialize correctly to/from the inner types.

(cherry picked from commit da4015a959)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2021-12-07 01:44:49 +00:00
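A minimal sketch of the round-trip property such tests assert, assuming a #[serde(transparent)] newtype and bincode; the type is a stand-in for the real runtime types:

```rust
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

// A transparent newtype must round-trip through bytes produced from, and
// consumed by, the bare inner type.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(transparent)]
struct StakeDelegationsSketch(HashMap<String, u64>);

fn main() {
    let mut inner = HashMap::new();
    inner.insert("stake_account".to_string(), 42u64);
    let outer = StakeDelegationsSketch(inner.clone());

    // outer -> bytes -> inner
    let bytes = bincode::serialize(&outer).unwrap();
    let back_inner: HashMap<String, u64> = bincode::deserialize(&bytes).unwrap();
    assert_eq!(back_inner, inner);

    // inner -> bytes -> outer
    let bytes = bincode::serialize(&inner).unwrap();
    let back_outer: StakeDelegationsSketch = bincode::deserialize(&bytes).unwrap();
    assert_eq!(back_outer, outer);
}
```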
mergify[bot]
8a7106bc08 Remove activated feature for filtering invalid stakes from rewards (#21641) (#21651)
(cherry picked from commit a1adcb23b6)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-07 00:58:31 +00:00
mergify[bot]
89d2f34a03 Reject vote withdraws that create non-rent-exempt accounts (backport #21639) (#21645)
* Reject vote withdraws that create non-rent-exempt accounts (#21639)

* Reject vote withdraws that create non-rent-exempt accounts

* fix mocked instruction test

(cherry picked from commit e123883b26)

# Conflicts:
#	sdk/src/feature_set.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-07 00:42:01 +00:00
mergify[bot]
b3fa1e4550 Move transaction error code into new module (#21635) (#21638)
(cherry picked from commit 3dab1e711d)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-06 20:11:20 +00:00
mergify[bot]
58c755e1d4 Rework docs for Pubkey::find_program_address and friends (#21528) (#21637)
* Rework docs for Pubkey::find_program_address and friends

* Remove circular dependency

* Minor tweaks

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Sort solana-program dev-dependencies

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit d1c101cde2)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-06 19:04:35 +00:00
mergify[bot]
60085305b4 Fix spelling of 'Borsh' (#21624)
(cherry picked from commit f3c2803af9)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-06 05:31:28 +00:00
mergify[bot]
b4c8e095bd adds back position field to coding-shred-header (#21600) (#21620)
https://github.com/solana-labs/solana/pull/17004
removed the position field from coding-shred-header because, as it stands,
the field is redundant and unused.
However, with the upcoming changes to the erasure coding schema this field
will no longer be redundant and needs to be populated.

(cherry picked from commit cd17f63d81)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-05 16:40:22 +00:00
mergify[bot]
3e28ffa884 Bump RpcClient node versions (#21612) (#21613)
* Bump blockhash/fee api check versions

* Bump snapshot api check version

(cherry picked from commit 3e5a5a834f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-05 01:08:22 +00:00
330 changed files with 11956 additions and 4473 deletions

.gitignore vendored (1 change)

@@ -4,6 +4,7 @@
/solana-metrics/
/solana-metrics.tar.bz2
/target/
/test-ledger/
**/*.rs.bk
.cargo

Cargo.lock generated (434 changes); file diff suppressed because it is too large.


@@ -46,7 +46,10 @@ members = [
"poh",
"poh-bench",
"program-test",
"programs/address-lookup-table",
"programs/address-lookup-table-tests",
"programs/bpf_loader",
"programs/bpf_loader/gen-syscall-list",
"programs/compute-budget",
"programs/config",
"programs/stake",


@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.9.0"
version = "1.9.2"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,9 @@ lazy_static = "1.4.0"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-config-program = { path = "../programs/config", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-config-program = { path = "../programs/config", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.2" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.9.0"


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-bench"
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
[dependencies]
log = "0.4.14"
rayon = "1.5.1"
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
solana-measure = { path = "../measure", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
clap = "2.33.1"
[package.metadata.docs.rs]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-cluster-bench"
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,24 +13,25 @@ clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.2" }
solana-client = { path = "../client", version = "=1.9.2" }
solana-core = { path = "../core", version = "=1.9.2" }
solana-faucet = { path = "../faucet", version = "=1.9.2" }
solana-gossip = { path = "../gossip", version = "=1.9.2" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-measure = { path = "../measure", version = "=1.9.2" }
solana-net-utils = { path = "../net-utils", version = "=1.9.2" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-streamer = { path = "../streamer", version = "=1.9.2" }
solana-test-validator = { path = "../test-validator", version = "=1.9.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -23,6 +23,7 @@ use {
solana_streamer::socket::SocketAddrSpace,
solana_transaction_status::parse_token::spl_token_instruction,
std::{
cmp::min,
net::SocketAddr,
process::exit,
sync::{
@@ -156,24 +157,30 @@ fn make_create_message(
fn make_close_message(
keypair: &Keypair,
base_keypair: &Keypair,
max_closed_seed: Arc<AtomicU64>,
max_created: Arc<AtomicU64>,
max_closed: Arc<AtomicU64>,
num_instructions: usize,
balance: u64,
spl_token: bool,
) -> Message {
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.map(|_| {
.filter_map(|_| {
let program_id = if spl_token {
inline_spl_token::id()
} else {
system_program::id()
};
let seed = max_closed_seed.fetch_add(1, Ordering::Relaxed).to_string();
let max_created_seed = max_created.load(Ordering::Relaxed);
let max_closed_seed = max_closed.load(Ordering::Relaxed);
if max_closed_seed >= max_created_seed {
return None;
}
let seed = max_closed.fetch_add(1, Ordering::Relaxed).to_string();
let address =
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
if spl_token {
spl_token_instruction(
Some(spl_token_instruction(
spl_token::instruction::close_account(
&spl_token::id(),
&spl_token_pubkey(&address),
@@ -182,16 +189,16 @@ fn make_close_message(
&[],
)
.unwrap(),
)
))
} else {
system_instruction::transfer_with_seed(
Some(system_instruction::transfer_with_seed(
&address,
&base_keypair.pubkey(),
seed,
&program_id,
&keypair.pubkey(),
balance,
)
))
}
})
.collect();
@@ -211,6 +218,7 @@ fn run_accounts_bench(
maybe_lamports: Option<u64>,
num_instructions: usize,
mint: Option<Pubkey>,
reclaim_accounts: bool,
) {
assert!(num_instructions > 0);
let client =
@@ -350,6 +358,7 @@ fn run_accounts_bench(
let message = make_close_message(
payer_keypairs[0],
&base_keypair,
seed_tracker.max_created.clone(),
seed_tracker.max_closed.clone(),
1,
min_balance,
@@ -372,7 +381,7 @@ fn run_accounts_bench(
}
count += 1;
if last_log.elapsed().as_millis() > 3000 {
if last_log.elapsed().as_millis() > 3000 || count >= iterations {
info!(
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
@@ -387,6 +396,83 @@ fn run_accounts_bench(
}
}
executor.close();
if reclaim_accounts {
let executor = TransactionExecutor::new(entrypoint_addr);
loop {
let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
let max_created_seed = seed_tracker.max_created.load(Ordering::Relaxed);
if latest_blockhash.elapsed().as_millis() > 10_000 {
blockhash = client.get_latest_blockhash().expect("blockhash");
latest_blockhash = Instant::now();
}
message.recent_blockhash = blockhash;
let fee = client
.get_fee_for_message(&message)
.expect("get_fee_for_message");
let sigs_len = executor.num_outstanding();
if sigs_len < batch_size && max_closed_seed < max_created_seed {
let num_to_close = min(
batch_size - sigs_len,
(max_created_seed - max_closed_seed) as usize,
);
if num_to_close >= payer_keypairs.len() {
info!("closing {} accounts", num_to_close);
let chunk_size = num_to_close / payer_keypairs.len();
info!("{:?} chunk_size", chunk_size);
if chunk_size > 0 {
for (i, keypair) in payer_keypairs.iter().enumerate() {
let txs: Vec<_> = (0..chunk_size)
.into_par_iter()
.filter_map(|_| {
let message = make_close_message(
keypair,
&base_keypair,
seed_tracker.max_created.clone(),
seed_tracker.max_closed.clone(),
num_instructions,
min_balance,
mint.is_some(),
);
if message.instructions.is_empty() {
return None;
}
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
Some(Transaction::new(&signers, message, blockhash))
})
.collect();
balances[i] = balances[i].saturating_sub(fee * txs.len() as u64);
info!("close txs: {}", txs.len());
let new_ids = executor.push_transactions(txs);
info!("close ids: {}", new_ids.len());
tx_sent_count += new_ids.len();
total_accounts_closed += (num_instructions * new_ids.len()) as u64;
}
}
}
} else {
let _ = executor.drain_cleared();
}
count += 1;
if last_log.elapsed().as_millis() > 3000 || max_closed_seed >= max_created_seed {
info!(
"total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
total_accounts_closed, tx_sent_count, count, balances
);
last_log = Instant::now();
}
if max_closed_seed >= max_created_seed {
break;
}
if executor.num_outstanding() >= batch_size {
sleep(Duration::from_millis(500));
}
}
executor.close();
}
}
fn main() {
@@ -462,7 +548,7 @@ fn main() {
.long("iterations")
.takes_value(true)
.value_name("NUM")
.help("Number of iterations to make"),
.help("Number of iterations to make. 0 = unlimited iterations."),
)
.arg(
Arg::with_name("check_gossip")
@@ -475,6 +561,12 @@ fn main() {
.takes_value(true)
.help("Mint address to initialize account"),
)
.arg(
Arg::with_name("reclaim_accounts")
.long("reclaim-accounts")
.takes_value(false)
.help("Reclaim accounts after session ends; incompatible with --iterations 0"),
)
.get_matches();
let skip_gossip = !matches.is_present("check_gossip");
@@ -556,6 +648,7 @@ fn main() {
lamports,
num_instructions,
mint,
matches.is_present("reclaim_accounts"),
);
}
@@ -564,12 +657,18 @@ pub mod test {
use {
super::*,
solana_core::validator::ValidatorConfig,
solana_faucet::faucet::run_local_faucet,
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_measure::measure::Measure,
solana_sdk::poh_config::PohConfig,
solana_sdk::{native_token::sol_to_lamports, poh_config::PohConfig},
solana_test_validator::TestValidator,
spl_token::{
solana_program::program_pack::Pack,
state::{Account, Mint},
},
};
#[test]
@@ -605,6 +704,108 @@ pub mod test {
maybe_lamports,
num_instructions,
None,
false,
);
start.stop();
info!("{}", start);
}
#[test]
fn test_create_then_reclaim_spl_token_accounts() {
solana_logger::setup();
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
// Created funder
let funder = Keypair::new();
let latest_blockhash = rpc_client.get_latest_blockhash().unwrap();
let signature = rpc_client
.request_airdrop_with_blockhash(
&funder.pubkey(),
sol_to_lamports(1.0),
&latest_blockhash,
)
.unwrap();
rpc_client
.confirm_transaction_with_spinner(
&signature,
&latest_blockhash,
CommitmentConfig::confirmed(),
)
.unwrap();
// Create Mint
let spl_mint_keypair = Keypair::new();
let spl_mint_len = Mint::get_packed_len();
let spl_mint_rent = rpc_client
.get_minimum_balance_for_rent_exemption(spl_mint_len)
.unwrap();
let transaction = Transaction::new_signed_with_payer(
&[
system_instruction::create_account(
&funder.pubkey(),
&spl_mint_keypair.pubkey(),
spl_mint_rent,
spl_mint_len as u64,
&inline_spl_token::id(),
),
spl_token_instruction(
spl_token::instruction::initialize_mint(
&spl_token::id(),
&spl_token_pubkey(&spl_mint_keypair.pubkey()),
&spl_token_pubkey(&spl_mint_keypair.pubkey()),
None,
2,
)
.unwrap(),
),
],
Some(&funder.pubkey()),
&[&funder, &spl_mint_keypair],
latest_blockhash,
);
let _sig = rpc_client
.send_and_confirm_transaction(&transaction)
.unwrap();
let account_len = Account::get_packed_len();
let minimum_balance = rpc_client
.get_minimum_balance_for_rent_exemption(account_len)
.unwrap();
let iterations = 5;
let batch_size = 100;
let close_nth_batch = 0;
let num_instructions = 4;
let mut start = Measure::start("total accounts run");
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
run_accounts_bench(
test_validator
.rpc_url()
.replace("http://", "")
.parse()
.unwrap(),
faucet_addr,
&[&keypair0, &keypair1, &keypair2],
iterations,
Some(account_len as u64),
batch_size,
close_nth_batch,
Some(minimum_balance),
num_instructions,
Some(spl_mint_keypair.pubkey()),
true,
);
start.stop();
info!("{}", start);


@@ -3,17 +3,17 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-interface"
description = "The Solana AccountsDb plugin interface."
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
documentation = "https://docs.rs/solana-accountsdb-plugin-interface"
[dependencies]
log = "0.4.11"
thiserror = "1.0.30"
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -12,16 +12,38 @@ use {
impl Eq for ReplicaAccountInfo<'_> {}
#[derive(Clone, PartialEq, Debug)]
/// Information about an account being updated
pub struct ReplicaAccountInfo<'a> {
/// The Pubkey for the account
pub pubkey: &'a [u8],
/// The lamports for the account
pub lamports: u64,
/// The Pubkey of the owner program account
pub owner: &'a [u8],
/// This account's data contains a loaded program (and is now read-only)
pub executable: bool,
/// The epoch at which this account will next owe rent
pub rent_epoch: u64,
/// The data held in this account.
pub data: &'a [u8],
/// A global monotonically increasing atomic number, which can be used
/// to tell the order of the account update. For example, when an
/// account is updated in the same slot multiple times, the update
/// with higher write_version should supersede the one with lower
/// write_version.
pub write_version: u64,
}
/// A wrapper to future-proof ReplicaAccountInfo handling.
/// If there were a change to the structure of ReplicaAccountInfo,
/// there would be new enum entry for the newer version, forcing
/// plugin implementations to handle the change.
pub enum ReplicaAccountInfoVersions<'a> {
V0_0_1(&'a ReplicaAccountInfo<'a>),
}
@@ -38,28 +60,44 @@ pub enum ReplicaTransactionInfoVersions<'a> {
V0_0_1(&'a ReplicaTransactionInfo<'a>),
}
/// Errors returned by plugin calls
#[derive(Error, Debug)]
pub enum AccountsDbPluginError {
/// Error opening the configuration file; for example, when the file
/// is not found or when the validator process has no permission to read it.
#[error("Error opening config file. Error detail: ({0}).")]
ConfigFileOpenError(#[from] io::Error),
/// Error in reading the content of the config file or the content
/// is not in the expected format.
#[error("Error reading config file. Error message: ({msg})")]
ConfigFileReadError { msg: String },
/// Error when updating the account.
#[error("Error updating account. Error message: ({msg})")]
AccountsUpdateError { msg: String },
/// Error when updating the slot status
#[error("Error updating slot status. Error message: ({msg})")]
SlotStatusUpdateError { msg: String },
/// Any custom error defined by the plugin.
#[error("Plugin-defined custom error. Error message: ({0})")]
Custom(Box<dyn error::Error + Send + Sync>),
}
/// The current status of a slot
#[derive(Debug, Clone)]
pub enum SlotStatus {
/// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is
/// not derived from a confirmed or finalized block, but if multiple forks are present, is from
/// the fork the validator believes is most likely to finalize.
Processed,
/// The highest slot having reached max vote lockout.
Rooted,
/// The highest slot that has been voted on by supermajority of the cluster, ie. is confirmed.
Confirmed,
}
@@ -75,6 +113,9 @@ impl SlotStatus {
pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;
/// Defines an AccountsDb plugin, to stream data from the runtime.
/// AccountsDb plugins must describe desired behavior for load and unload,
/// as well as how they will handle streamed data.
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
fn name(&self) -> &'static str;
@@ -93,6 +134,9 @@ pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
fn on_unload(&mut self) {}
/// Called when an account is updated at a slot.
/// When `is_startup` is true, it indicates the account is loaded from
/// snapshots when the validator starts up. When `is_startup` is false,
/// the account is updated during transaction processing.
#[allow(unused_variables)]
fn update_account(
&mut self,


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,14 +17,14 @@ log = "0.4.11"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.2" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-measure = { path = "../measure", version = "=1.9.2" }
solana-metrics = { path = "../metrics", version = "=1.9.2" }
solana-rpc = { path = "../rpc", version = "=1.9.2" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.2" }
thiserror = "1.0.30"
[package.metadata.docs.rs]


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -22,18 +22,18 @@ postgres-types = { version = "0.2.2", features = ["derive"] }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.2" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-measure = { path = "../measure", version = "=1.9.2" }
solana-metrics = { path = "../metrics", version = "=1.9.2" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.2" }
thiserror = "1.0.30"
tokio-postgres = "0.7.4"
[dev-dependencies]
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -113,9 +113,10 @@ CREATE TYPE "TransactionMessage" AS (
instructions "CompiledInstruction"[]
);
CREATE TYPE "AddressMapIndexes" AS (
writable SMALLINT[],
readonly SMALLINT[]
CREATE TYPE "TransactionMessageAddressTableLookup" AS (
account_key: BYTEA[],
writable_indexes SMALLINT[],
readonly_indexes SMALLINT[]
);
CREATE TYPE "TransactionMessageV0" AS (
@@ -123,17 +124,17 @@ CREATE TYPE "TransactionMessageV0" AS (
account_keys BYTEA[],
recent_blockhash BYTEA,
instructions "CompiledInstruction"[],
address_map_indexes "AddressMapIndexes"[]
address_table_lookups "TransactionMessageAddressTableLookup"[]
);
CREATE TYPE "MappedAddresses" AS (
CREATE TYPE "LoadedAddresses" AS (
writable BYTEA[],
readonly BYTEA[]
);
CREATE TYPE "MappedMessage" AS (
CREATE TYPE "LoadedMessageV0" AS (
message "TransactionMessageV0",
mapped_addresses "MappedAddresses"
loaded_addresses "LoadedAddresses"
);
-- The table storing transactions
@@ -143,7 +144,7 @@ CREATE TABLE transaction (
is_vote BOOL NOT NULL,
message_type SMALLINT, -- 0: legacy, 1: v0 message
legacy_message "TransactionMessage",
v0_mapped_message "MappedMessage",
v0_loaded_message "LoadedMessageV0",
signatures BYTEA[],
message_hash BYTEA,
meta "TransactionStatusMeta",


@@ -11,12 +11,12 @@ DROP TABLE transaction;
DROP TYPE "TransactionError" CASCADE;
DROP TYPE "TransactionErrorCode" CASCADE;
DROP TYPE "MappedMessage" CASCADE;
DROP TYPE "MappedAddresses" CASCADE;
DROP TYPE "LoadedMessageV0" CASCADE;
DROP TYPE "LoadedAddresses" CASCADE;
DROP TYPE "TransactionMessageV0" CASCADE;
DROP TYPE "AddressMapIndexes" CASCADE;
DROP TYPE "TransactionMessage" CASCADE;
DROP TYPE "TransactionMessageHeader" CASCADE;
DROP TYPE "TransactionMessageAddressTableLookup" CASCADE;
DROP TYPE "TransactionStatusMeta" CASCADE;
DROP TYPE "RewardType" CASCADE;
DROP TYPE "Reward" CASCADE;


@@ -18,8 +18,8 @@ use {
solana_sdk::{
instruction::CompiledInstruction,
message::{
v0::{self, AddressMapIndexes},
MappedAddresses, MappedMessage, Message, MessageHeader, SanitizedMessage,
v0::{self, LoadedAddresses, MessageAddressTableLookup},
Message, MessageHeader, SanitizedMessage,
},
transaction::TransactionError,
},
@@ -105,10 +105,11 @@ pub struct DbTransactionMessage {
}
#[derive(Clone, Debug, ToSql)]
#[postgres(name = "AddressMapIndexes")]
pub struct DbAddressMapIndexes {
pub writable: Vec<i16>,
pub readonly: Vec<i16>,
#[postgres(name = "TransactionMessageAddressTableLookup")]
pub struct DbTransactionMessageAddressTableLookup {
pub account_key: Vec<u8>,
pub writable_indexes: Vec<i16>,
pub readonly_indexes: Vec<i16>,
}
#[derive(Clone, Debug, ToSql)]
@@ -118,21 +119,21 @@ pub struct DbTransactionMessageV0 {
pub account_keys: Vec<Vec<u8>>,
pub recent_blockhash: Vec<u8>,
pub instructions: Vec<DbCompiledInstruction>,
pub address_map_indexes: Vec<DbAddressMapIndexes>,
pub address_table_lookups: Vec<DbTransactionMessageAddressTableLookup>,
}
#[derive(Clone, Debug, ToSql)]
#[postgres(name = "MappedAddresses")]
pub struct DbMappedAddresses {
#[postgres(name = "LoadedAddresses")]
pub struct DbLoadedAddresses {
pub writable: Vec<Vec<u8>>,
pub readonly: Vec<Vec<u8>>,
}
#[derive(Clone, Debug, ToSql)]
#[postgres(name = "MappedMessage")]
pub struct DbMappedMessage {
#[postgres(name = "LoadedMessageV0")]
pub struct DbLoadedMessageV0 {
pub message: DbTransactionMessageV0,
pub mapped_addresses: DbMappedAddresses,
pub loaded_addresses: DbLoadedAddresses,
}
pub struct DbTransaction {
@@ -141,7 +142,7 @@ pub struct DbTransaction {
pub slot: i64,
pub message_type: i16,
pub legacy_message: Option<DbTransactionMessage>,
pub v0_mapped_message: Option<DbMappedMessage>,
pub v0_loaded_message: Option<DbLoadedMessageV0>,
pub message_hash: Vec<u8>,
pub meta: DbTransactionStatusMeta,
pub signatures: Vec<Vec<u8>>,
@@ -151,32 +152,33 @@ pub struct LogTransactionRequest {
pub transaction_info: DbTransaction,
}
impl From<&AddressMapIndexes> for DbAddressMapIndexes {
fn from(address_map_indexes: &AddressMapIndexes) -> Self {
impl From<&MessageAddressTableLookup> for DbTransactionMessageAddressTableLookup {
fn from(address_table_lookup: &MessageAddressTableLookup) -> Self {
Self {
writable: address_map_indexes
.writable
account_key: address_table_lookup.account_key.as_ref().to_vec(),
writable_indexes: address_table_lookup
.writable_indexes
.iter()
.map(|address_idx| *address_idx as i16)
.map(|idx| *idx as i16)
.collect(),
readonly: address_map_indexes
.readonly
readonly_indexes: address_table_lookup
.readonly_indexes
.iter()
.map(|address_idx| *address_idx as i16)
.map(|idx| *idx as i16)
.collect(),
}
}
}
impl From<&MappedAddresses> for DbMappedAddresses {
fn from(mapped_addresses: &MappedAddresses) -> Self {
impl From<&LoadedAddresses> for DbLoadedAddresses {
fn from(loaded_addresses: &LoadedAddresses) -> Self {
Self {
writable: mapped_addresses
writable: loaded_addresses
.writable
.iter()
.map(|pubkey| pubkey.as_ref().to_vec())
.collect(),
readonly: mapped_addresses
readonly: loaded_addresses
.readonly
.iter()
.map(|pubkey| pubkey.as_ref().to_vec())
@@ -243,20 +245,20 @@ impl From<&v0::Message> for DbTransactionMessageV0 {
.iter()
.map(DbCompiledInstruction::from)
.collect(),
address_map_indexes: message
.address_map_indexes
address_table_lookups: message
.address_table_lookups
.iter()
.map(DbAddressMapIndexes::from)
.map(DbTransactionMessageAddressTableLookup::from)
.collect(),
}
}
}
impl From<&MappedMessage> for DbMappedMessage {
fn from(message: &MappedMessage) -> Self {
impl From<&v0::LoadedMessage> for DbLoadedMessageV0 {
fn from(message: &v0::LoadedMessage) -> Self {
Self {
message: DbTransactionMessageV0::from(&message.message),
mapped_addresses: DbMappedAddresses::from(&message.mapped_addresses),
loaded_addresses: DbLoadedAddresses::from(&message.loaded_addresses),
}
}
}
@@ -460,8 +462,8 @@ fn build_db_transaction(slot: u64, transaction_info: &ReplicaTransactionInfo) ->
}
_ => None,
},
v0_mapped_message: match transaction_info.transaction.message() {
SanitizedMessage::V0(mapped_message) => Some(DbMappedMessage::from(mapped_message)),
v0_loaded_message: match transaction_info.transaction.message() {
SanitizedMessage::V0(loaded_message) => Some(DbLoadedMessageV0::from(loaded_message)),
_ => None,
},
signatures: transaction_info
@@ -485,7 +487,7 @@ impl SimplePostgresClient {
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO transaction AS txn (signature, is_vote, slot, message_type, legacy_message, \
v0_mapped_message, signatures, message_hash, meta, updated_on) \
v0_loaded_message, signatures, message_hash, meta, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)";
let stmt = client.prepare(stmt);
@@ -521,7 +523,7 @@ impl SimplePostgresClient {
&transaction_info.slot,
&transaction_info.message_type,
&transaction_info.legacy_message,
&transaction_info.v0_mapped_message,
&transaction_info.v0_loaded_message,
&transaction_info.signatures,
&transaction_info.message_hash,
&transaction_info.meta,
@@ -670,42 +672,44 @@ pub(crate) mod tests {
check_inner_instructions_equality(&inner_instructions, &db_inner_instructions);
}
fn check_address_map_indexes_equality(
address_map_indexes: &AddressMapIndexes,
db_address_map_indexes: &DbAddressMapIndexes,
fn check_address_table_lookups_equality(
address_table_lookups: &MessageAddressTableLookup,
db_address_table_lookups: &DbTransactionMessageAddressTableLookup,
) {
assert_eq!(
address_map_indexes.writable.len(),
db_address_map_indexes.writable.len()
address_table_lookups.writable_indexes.len(),
db_address_table_lookups.writable_indexes.len()
);
assert_eq!(
address_map_indexes.readonly.len(),
db_address_map_indexes.readonly.len()
address_table_lookups.readonly_indexes.len(),
db_address_table_lookups.readonly_indexes.len()
);
for i in 0..address_map_indexes.writable.len() {
for i in 0..address_table_lookups.writable_indexes.len() {
assert_eq!(
address_map_indexes.writable[i],
db_address_map_indexes.writable[i] as u8
address_table_lookups.writable_indexes[i],
db_address_table_lookups.writable_indexes[i] as u8
)
}
for i in 0..address_map_indexes.readonly.len() {
for i in 0..address_table_lookups.readonly_indexes.len() {
assert_eq!(
address_map_indexes.readonly[i],
db_address_map_indexes.readonly[i] as u8
address_table_lookups.readonly_indexes[i],
db_address_table_lookups.readonly_indexes[i] as u8
)
}
}
#[test]
fn test_transform_address_map_indexes() {
let address_map_indexes = AddressMapIndexes {
writable: vec![1, 2, 3],
readonly: vec![4, 5, 6],
fn test_transform_address_table_lookups() {
let address_table_lookups = MessageAddressTableLookup {
account_key: Pubkey::new_unique(),
writable_indexes: vec![1, 2, 3],
readonly_indexes: vec![4, 5, 6],
};
let db_address_map_indexes = DbAddressMapIndexes::from(&address_map_indexes);
check_address_map_indexes_equality(&address_map_indexes, &db_address_map_indexes);
let db_address_table_lookups =
DbTransactionMessageAddressTableLookup::from(&address_table_lookups);
check_address_table_lookups_equality(&address_table_lookups, &db_address_table_lookups);
}
fn check_reward_equality(reward: &Reward, db_reward: &DbReward) {
@@ -1089,7 +1093,7 @@ pub(crate) mod tests {
check_transaction_message_equality(&message, &db_message);
}
fn check_transaction_messagev0_equality(
fn check_transaction_message_v0_equality(
message: &v0::Message,
db_message: &DbTransactionMessageV0,
) {
@@ -1106,18 +1110,18 @@ pub(crate) mod tests {
);
}
assert_eq!(
message.address_map_indexes.len(),
db_message.address_map_indexes.len()
message.address_table_lookups.len(),
db_message.address_table_lookups.len()
);
for i in 0..message.address_map_indexes.len() {
check_address_map_indexes_equality(
&message.address_map_indexes[i],
&db_message.address_map_indexes[i],
for i in 0..message.address_table_lookups.len() {
check_address_table_lookups_equality(
&message.address_table_lookups[i],
&db_message.address_table_lookups[i],
);
}
}
fn build_transaction_messagev0() -> v0::Message {
fn build_transaction_message_v0() -> v0::Message {
v0::Message {
header: MessageHeader {
num_readonly_signed_accounts: 2,
@@ -1144,71 +1148,76 @@ pub(crate) mod tests {
data: vec![14, 15, 16],
},
],
address_map_indexes: vec![
AddressMapIndexes {
writable: vec![0],
readonly: vec![1, 2],
address_table_lookups: vec![
MessageAddressTableLookup {
account_key: Pubkey::new_unique(),
writable_indexes: vec![0],
readonly_indexes: vec![1, 2],
},
AddressMapIndexes {
writable: vec![1],
readonly: vec![0, 2],
MessageAddressTableLookup {
account_key: Pubkey::new_unique(),
writable_indexes: vec![1],
readonly_indexes: vec![0, 2],
},
],
}
}
#[test]
fn test_transform_transaction_messagev0() {
let message = build_transaction_messagev0();
fn test_transform_transaction_message_v0() {
let message = build_transaction_message_v0();
let db_message = DbTransactionMessageV0::from(&message);
check_transaction_messagev0_equality(&message, &db_message);
check_transaction_message_v0_equality(&message, &db_message);
}
fn check_mapped_addresses(
mapped_addresses: &MappedAddresses,
db_mapped_addresses: &DbMappedAddresses,
fn check_loaded_addresses(
loaded_addresses: &LoadedAddresses,
db_loaded_addresses: &DbLoadedAddresses,
) {
assert_eq!(
mapped_addresses.writable.len(),
db_mapped_addresses.writable.len()
loaded_addresses.writable.len(),
db_loaded_addresses.writable.len()
);
for i in 0..mapped_addresses.writable.len() {
for i in 0..loaded_addresses.writable.len() {
assert_eq!(
mapped_addresses.writable[i].as_ref(),
db_mapped_addresses.writable[i]
loaded_addresses.writable[i].as_ref(),
db_loaded_addresses.writable[i]
);
}
assert_eq!(
mapped_addresses.readonly.len(),
db_mapped_addresses.readonly.len()
loaded_addresses.readonly.len(),
db_loaded_addresses.readonly.len()
);
for i in 0..mapped_addresses.readonly.len() {
for i in 0..loaded_addresses.readonly.len() {
assert_eq!(
mapped_addresses.readonly[i].as_ref(),
db_mapped_addresses.readonly[i]
loaded_addresses.readonly[i].as_ref(),
db_loaded_addresses.readonly[i]
);
}
}
fn check_mapped_message_equality(message: &MappedMessage, db_message: &DbMappedMessage) {
check_transaction_messagev0_equality(&message.message, &db_message.message);
check_mapped_addresses(&message.mapped_addresses, &db_message.mapped_addresses);
fn check_loaded_message_v0_equality(
message: &v0::LoadedMessage,
db_message: &DbLoadedMessageV0,
) {
check_transaction_message_v0_equality(&message.message, &db_message.message);
check_loaded_addresses(&message.loaded_addresses, &db_message.loaded_addresses);
}
#[test]
fn test_transform_mapped_message() {
let message = MappedMessage {
message: build_transaction_messagev0(),
mapped_addresses: MappedAddresses {
fn test_transform_loaded_message_v0() {
let message = v0::LoadedMessage {
message: build_transaction_message_v0(),
loaded_addresses: LoadedAddresses {
writable: vec![Pubkey::new_unique(), Pubkey::new_unique()],
readonly: vec![Pubkey::new_unique(), Pubkey::new_unique()],
},
};
let db_message = DbMappedMessage::from(&message);
check_mapped_message_equality(&message, &db_message);
let db_message = DbLoadedMessageV0::from(&message);
check_loaded_message_v0_equality(&message, &db_message);
}
fn check_transaction(
@@ -1229,9 +1238,9 @@ pub(crate) mod tests {
}
SanitizedMessage::V0(message) => {
assert_eq!(db_transaction.message_type, 1);
check_mapped_message_equality(
check_loaded_message_v0_equality(
message,
db_transaction.v0_mapped_message.as_ref().unwrap(),
db_transaction.v0_loaded_message.as_ref().unwrap(),
);
}
}
@@ -1298,7 +1307,7 @@ pub(crate) mod tests {
Signature::new(&[2u8; 64]),
Signature::new(&[3u8; 64]),
],
message: VersionedMessage::V0(build_transaction_messagev0()),
message: VersionedMessage::V0(build_transaction_message_v0()),
}
}
@@ -1313,7 +1322,7 @@ pub(crate) mod tests {
let transaction =
SanitizedTransaction::try_create(transaction, message_hash, Some(true), |_message| {
Ok(MappedAddresses {
Ok(LoadedAddresses {
writable: vec![Pubkey::new_unique(), Pubkey::new_unique()],
readonly: vec![Pubkey::new_unique(), Pubkey::new_unique()],
})


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-banking-bench"
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-core = { path = "../core", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-ledger = { path = "../ledger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-poh = { path = "../poh", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.2" }
solana-gossip = { path = "../gossip", version = "=1.9.2" }
solana-ledger = { path = "../ledger", version = "=1.9.2" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-measure = { path = "../measure", version = "=1.9.2" }
solana-perf = { path = "../perf", version = "=1.9.2" }
solana-poh = { path = "../poh", version = "=1.9.2" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
solana-streamer = { path = "../streamer", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -13,7 +13,7 @@ use {
get_tmp_ledger_path,
},
solana_measure::measure::Measure,
solana_perf::packet::to_packets_chunked,
solana_perf::packet::to_packet_batches,
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
@@ -212,7 +212,7 @@ fn main() {
bank.clear_signatures();
}
let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
@@ -364,7 +364,7 @@ fn main() {
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
}
start += chunk_len;


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.9.0"
version = "1.9.2"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,16 +12,17 @@ edition = "2021"
[dependencies]
borsh = "0.9.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" }
solana-program = { path = "../sdk/program", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.2" }
solana-program = { path = "../sdk/program", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
tarpc = { version = "0.27.2", features = ["full"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-banks-server = { path = "../banks-server", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
solana-banks-server = { path = "../banks-server", version = "=1.9.2" }
[lib]
crate-type = ["lib"]

banks-client/src/error.rs (new file)

@@ -0,0 +1,62 @@
use {
solana_sdk::{transaction::TransactionError, transport::TransportError},
std::io,
tarpc::client::RpcError,
thiserror::Error,
};
/// Errors from BanksClient
#[derive(Error, Debug)]
pub enum BanksClientError {
#[error("client error: {0}")]
ClientError(&'static str),
#[error(transparent)]
Io(#[from] io::Error),
#[error(transparent)]
RpcError(#[from] RpcError),
#[error("transport transaction error: {0}")]
TransactionError(#[from] TransactionError),
}
impl BanksClientError {
pub fn unwrap(&self) -> TransactionError {
if let BanksClientError::TransactionError(err) = self {
err.clone()
} else {
panic!("unexpected transport error")
}
}
}
impl From<BanksClientError> for io::Error {
fn from(err: BanksClientError) -> Self {
match err {
BanksClientError::ClientError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
BanksClientError::Io(err) => err,
BanksClientError::RpcError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
BanksClientError::TransactionError(err) => {
Self::new(io::ErrorKind::Other, err.to_string())
}
}
}
}
impl From<BanksClientError> for TransportError {
fn from(err: BanksClientError) -> Self {
match err {
BanksClientError::ClientError(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::Io(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::RpcError(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::TransactionError(err) => Self::TransactionError(err),
}
}
}
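A rough in-crate usage sketch (not part of this diff): with the conversions above, callers can match on `BanksClientError` variants directly instead of string-matching `io::Error` messages. The variant picked below is purely illustrative.

```rust
use solana_sdk::transaction::TransactionError;

use crate::error::BanksClientError;

// Illustrative only: distinguish a failed transaction from transport trouble.
fn report(err: BanksClientError) {
    match err {
        BanksClientError::TransactionError(TransactionError::InsufficientFundsForFee) => {
            eprintln!("fee payer is short on lamports");
        }
        BanksClientError::TransactionError(tx_err) => {
            eprintln!("transaction failed: {}", tx_err);
        }
        other => eprintln!("client/transport error: {}", other),
    }
}
```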


@@ -7,8 +7,9 @@
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
use {
crate::error::BanksClientError,
borsh::BorshDeserialize,
futures::{future::join_all, Future, FutureExt},
futures::{future::join_all, Future, FutureExt, TryFutureExt},
solana_banks_interface::{BanksRequest, BanksResponse},
solana_program::{
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
@@ -22,7 +23,7 @@ use {
transaction::{self, Transaction},
transport,
},
std::io::{self, Error, ErrorKind},
std::io,
tarpc::{
client::{self, NewClient, RequestDispatch},
context::{self, Context},
@@ -33,6 +34,8 @@ use {
tokio_serde::formats::Bincode,
};
mod error;
// This exists only for backward compatibility
pub trait BanksClientExt {}
@@ -58,7 +61,10 @@ impl BanksClient {
ctx: Context,
transaction: Transaction,
) -> impl Future<Output = io::Result<()>> + '_ {
self.inner.send_transaction_with_context(ctx, transaction)
self.inner
.send_transaction_with_context(ctx, transaction)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
#[deprecated(
@@ -73,6 +79,8 @@ impl BanksClient {
#[allow(deprecated)]
self.inner
.get_fees_with_commitment_and_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn get_transaction_status_with_context(
@@ -82,6 +90,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
self.inner
.get_transaction_status_with_context(ctx, signature)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn get_slot_with_context(
@@ -89,7 +99,10 @@ impl BanksClient {
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_slot_with_context(ctx, commitment)
self.inner
.get_slot_with_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn get_block_height_with_context(
@@ -97,7 +110,10 @@ impl BanksClient {
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_block_height_with_context(ctx, commitment)
self.inner
.get_block_height_with_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn process_transaction_with_commitment_and_context(
@@ -108,6 +124,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
self.inner
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn get_account_with_commitment_and_context(
@@ -118,6 +136,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
self.inner
.get_account_with_commitment_and_context(ctx, address, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
/// Send a transaction and return immediately. The server will resend the
@@ -148,9 +168,13 @@ impl BanksClient {
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(T::id()).map(|result| {
let sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
.ok_or(BanksClientError::ClientError("Sysvar not present"))
.map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
from_account::<T, _>(&sysvar)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
.ok_or(BanksClientError::ClientError(
"Failed to deserialize sysvar",
))
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
})
}
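A hedged sketch of driving the helper above from async code; it assumes an already-connected `BanksClient` and uses the standard `Clock` sysvar type from `solana_program`. The function name is hypothetical.

```rust
use solana_banks_client::BanksClient;
use solana_program::clock::Clock;
use std::io;

// Sketch: `?` works because failures are still surfaced as io::Error for now.
async fn print_clock_slot(client: &mut BanksClient) -> io::Result<()> {
    let clock: Clock = client.get_sysvar().await?;
    println!("slot according to the Clock sysvar: {}", clock.slot);
    Ok(())
}
```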
@@ -164,7 +188,8 @@ impl BanksClient {
/// method to get both a blockhash and the blockhash's last valid slot.
#[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")]
pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
self.get_latest_blockhash()
#[allow(deprecated)]
self.get_fees().map(|result| Ok(result?.1))
}
/// Send a transaction and return after the transaction has been rejected or
@@ -178,11 +203,12 @@ impl BanksClient {
ctx.deadline += Duration::from_secs(50);
self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.map(|result| match result? {
None => {
Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into())
}
None => Err(BanksClientError::ClientError(
"invalid blockhash or fee-payer",
)),
Some(transaction_result) => Ok(transaction_result?),
})
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
}
/// Send a transaction and return until the transaction has been finalized or rejected.
@@ -255,10 +281,12 @@ impl BanksClient {
address: Pubkey,
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Account not found"))?;
let account = result?
.ok_or(BanksClientError::ClientError("Account not found"))
.map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
T::unpack_from_slice(&account.data)
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Failed to deserialize account"))
.map_err(|_| BanksClientError::ClientError("Failed to deserialize account"))
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
})
}
@@ -269,9 +297,8 @@ impl BanksClient {
address: Pubkey,
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))?;
T::try_from_slice(&account.data)
let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
T::try_from_slice(&account.data).map_err(Into::into)
})
}
@@ -330,7 +357,8 @@ impl BanksClient {
.map(|result| {
result?
.map(|x| x.0)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))
.ok_or(BanksClientError::ClientError("valid blockhash not found"))
.map_err(Into::into)
})
}
@@ -348,6 +376,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
self.inner
.get_latest_blockhash_with_commitment_and_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
pub fn get_fee_for_message_with_commitment_and_context(
@@ -358,6 +388,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<u64>>> + '_ {
self.inner
.get_fee_for_message_with_commitment_and_context(ctx, commitment, message)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
}
@@ -399,7 +431,7 @@ mod tests {
}
#[test]
fn test_banks_server_transfer_via_server() -> io::Result<()> {
fn test_banks_server_transfer_via_server() -> Result<(), BanksClientError> {
// This test shows the preferred way to interact with BanksServer.
// It creates a runtime explicitly (no globals via tokio macros) and calls
// `runtime.block_on()` just once, to run all the async code.
@@ -432,7 +464,7 @@ mod tests {
}
#[test]
fn test_banks_server_transfer_via_client() -> io::Result<()> {
fn test_banks_server_transfer_via_client() -> Result<(), BanksClientError> {
// The caller may not want to hold the connection open until the transaction
// is processed (or blockhash expires). In this test, we verify the
// server-side functionality is available to the client.


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.9.0"
version = "1.9.2"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2021"
[dependencies]
serde = { version = "1.0.130", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
tarpc = { version = "0.27.2", features = ["full"] }
[lib]
crate-type = ["lib"]


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.9.0"
version = "1.9.2"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2021"
[dependencies]
bincode = "1.3.3"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.2" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.2" }
tarpc = { version = "0.27.2", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
tokio-stream = "0.1"


@@ -35,7 +35,7 @@ use {
tarpc::{
context::Context,
serde_transport::tcp,
server::{self, Channel, Incoming},
server::{self, incoming::Incoming, Channel},
transport::{self, channel::UnboundedChannel},
ClientMessage, Response,
},


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-streamer"
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.2" }
solana-streamer = { path = "../streamer", version = "=1.9.2" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-net-utils = { path = "../net-utils", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,8 +2,8 @@
use {
clap::{crate_description, crate_name, App, Arg},
solana_streamer::{
packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketReceiver},
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketBatchReceiver},
},
std::{
cmp::max,
@@ -20,19 +20,19 @@ use {
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut msgs = Packets::default();
msgs.packets.resize(10, Packet::default());
for w in msgs.packets.iter_mut() {
let mut packet_batch = PacketBatch::default();
packet_batch.packets.resize(10, Packet::default());
for w in packet_batch.packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(addr);
}
let msgs = Arc::new(msgs);
let packet_batch = Arc::new(packet_batch);
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let mut num = 0;
for p in &msgs.packets {
for p in &packet_batch.packets {
let a = p.meta.addr();
assert!(p.meta.size <= PACKET_DATA_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
@@ -42,14 +42,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
})
}
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let timer = Duration::new(1, 0);
if let Ok(msgs) = r.recv_timeout(timer) {
rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
if let Ok(packet_batch) = r.recv_timeout(timer) {
rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
}
})
}
@@ -81,7 +81,7 @@ fn main() -> Result<()> {
let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
for _ in 0..num_sockets {
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-tps"
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,23 +14,23 @@ log = "0.4.14"
rayon = "1.5.1"
serde_json = "1.0.72"
serde_yaml = "0.8.21"
solana-core = { path = "../core", version = "=1.9.0" }
solana-genesis = { path = "../genesis", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.2" }
solana-genesis = { path = "../genesis", version = "=1.9.2" }
solana-client = { path = "../client", version = "=1.9.2" }
solana-faucet = { path = "../faucet", version = "=1.9.2" }
solana-gossip = { path = "../gossip", version = "=1.9.2" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-metrics = { path = "../metrics", version = "=1.9.2" }
solana-measure = { path = "../measure", version = "=1.9.2" }
solana-net-utils = { path = "../net-utils", version = "=1.9.2" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-streamer = { path = "../streamer", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
[dev-dependencies]
serial_test = "0.5.1"
solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
[package]
name = "solana-bucket-map"
version = "1.9.0"
version = "1.9.2"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
@@ -12,11 +12,11 @@ edition = "2021"
[dependencies]
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
memmap2 = "0.5.0"
log = { version = "0.4.11" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.2" }
rand = "0.7.0"
fs_extra = "1.2.0"
tempfile = "3.2.0"


@@ -9,5 +9,8 @@ for a in "$@"; do
fi
done
set -x
set -ex
if [[ ! -f sdk/bpf/syscalls.txt ]]; then
"$here"/cargo build --manifest-path "$here"/programs/bpf_loader/gen-syscall-list/Cargo.toml
fi
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"


@@ -226,6 +226,19 @@ EOF
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi
# Wasm support
if affects \
^ci/test-wasm.sh \
^ci/test-stable.sh \
^sdk/ \
; then
command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
else
annotate --style info \
"wasm skipped as no relevant files were modified"
fi
# Benches...
if affects \
.rs$ \


@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.56.1
FROM solanalabs/rust:1.57.0
ARG date
RUN set -x \


@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.56.1
FROM rust:1.57.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
@@ -11,6 +11,7 @@ RUN set -x \
&& apt-get install apt-transport-https \
&& echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \
&& apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \
&& curl -fsSL https://deb.nodesource.com/setup_current.x | bash - \
&& apt update \
&& apt install -y \
buildkite-agent \
@@ -19,15 +20,20 @@ RUN set -x \
lcov \
libudev-dev \
mscgen \
nodejs \
net-tools \
rsync \
sudo \
golang \
unzip \
\
&& apt remove -y libcurl4-openssl-dev \
&& rm -rf /var/lib/apt/lists/* \
&& node --version \
&& npm --version \
&& rustup component add rustfmt \
&& rustup component add clippy \
&& rustup target add wasm32-unknown-unknown \
&& cargo install cargo-audit \
&& cargo install mdbook \
&& cargo install mdbook-linkcheck \


@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.56.1
stable_version=1.57.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2021-11-30
nightly_version=2021-12-03
fi


@@ -103,6 +103,19 @@ test-local-cluster)
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-wasm)
_ node --version
_ npm --version
for dir in sdk/{program,}; do
if [[ -r "$dir"/package.json ]]; then
pushd "$dir"
_ npm install
_ npm test
popd
fi
done
exit 0
;;
*)
echo "Error: Unknown test: $testName"
;;

ci/test-wasm.sh (symbolic link)

@@ -0,0 +1 @@
test-stable.sh


@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.9.0"
version = "1.9.2"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,9 +12,9 @@ edition = "2021"
[dependencies]
clap = "2.33.0"
rpassword = "5.0"
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.2" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
uriparse = "0.6.3"


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -19,12 +19,12 @@ Inflector = "0.11.4"
indicatif = "0.16.2"
serde = "1.0.130"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.2" }
solana-client = { path = "../client", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.2" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
[package.metadata.docs.rs]


@@ -99,7 +99,7 @@ impl OutputFormat {
pub struct CliAccount {
#[serde(flatten)]
pub keyed_account: RpcKeyedAccount,
#[serde(skip_serializing)]
#[serde(skip_serializing, skip_deserializing)]
pub use_lamports_unit: bool,
}
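Note that the attribute pair above behaves like plain `#[serde(skip)]`: the field is omitted on write and reset to its `Default` on read. A minimal standalone check (hypothetical struct; `serde_json` assumed available):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Example {
    value: u64,
    // Never written out; filled with bool::default() (false) when parsing.
    #[serde(skip_serializing, skip_deserializing)]
    use_lamports_unit: bool,
}

fn main() {
    let parsed: Example = serde_json::from_str(r#"{"value": 1}"#).unwrap();
    assert!(!parsed.use_lamports_unit);
    assert_eq!(serde_json::to_string(&parsed).unwrap(), r#"{"value":1}"#);
}
```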


@@ -139,7 +139,7 @@ fn format_account_mode(message: &Message, index: usize) -> String {
} else {
"-"
},
if message.is_writable(index, /*demote_program_write_locks=*/ true) {
if message.is_writable(index) {
"w" // comment for consistent rust fmt (no joking; lol)
} else {
"-"


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,29 +26,29 @@ semver = "1.0.4"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-cli-config = { path = "../cli-config", version = "=1.9.0" }
solana-cli-output = { path = "../cli-output", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-config-program = { path = "../programs/config", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" }
solana_rbpf = "=0.2.16"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.2" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.2" }
solana-cli-config = { path = "../cli-config", version = "=1.9.2" }
solana-cli-output = { path = "../cli-output", version = "=1.9.2" }
solana-client = { path = "../client", version = "=1.9.2" }
solana-config-program = { path = "../programs/config", version = "=1.9.2" }
solana-faucet = { path = "../faucet", version = "=1.9.2" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.2" }
solana_rbpf = "=0.2.19"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.2" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
[dev-dependencies]
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-test-validator = { path = "../test-validator", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.2" }
solana-test-validator = { path = "../test-validator", version = "=1.9.2" }
tempfile = "3.2.0"
[[bin]]


@@ -298,7 +298,13 @@ pub enum CliCommand {
authorized_voter: Option<Pubkey>,
authorized_withdrawer: Pubkey,
commission: u8,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
ShowVoteAccount {
pubkey: Pubkey,
@@ -310,19 +316,32 @@ pub enum CliCommand {
destination_account_pubkey: Pubkey,
withdraw_authority: SignerIndex,
withdraw_amount: SpendAmount,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
CloseVoteAccount {
vote_account_pubkey: Pubkey,
destination_account_pubkey: Pubkey,
withdraw_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
VoteAuthorize {
vote_account_pubkey: Pubkey,
new_authorized_pubkey: Pubkey,
vote_authorize: VoteAuthorize,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
authorized: SignerIndex,
new_authorized: Option<SignerIndex>,
},
@@ -330,13 +349,25 @@ pub enum CliCommand {
vote_account_pubkey: Pubkey,
new_identity_account: SignerIndex,
withdraw_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
VoteUpdateCommission {
vote_account_pubkey: Pubkey,
commission: u8,
withdraw_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
// Wallet Commands
Address,
@@ -1384,7 +1415,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
authorized_voter,
authorized_withdrawer,
commission,
sign_only,
dump_transaction_message,
blockhash_query,
ref nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_create_vote_account(
&rpc_client,
config,
@@ -1394,7 +1431,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
authorized_voter,
*authorized_withdrawer,
*commission,
*sign_only,
*dump_transaction_message,
blockhash_query,
nonce_account.as_ref(),
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::ShowVoteAccount {
pubkey: vote_account_pubkey,
@@ -1412,7 +1455,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
withdraw_authority,
withdraw_amount,
destination_account_pubkey,
sign_only,
dump_transaction_message,
blockhash_query,
ref nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_withdraw_from_vote_account(
&rpc_client,
config,
@@ -1420,13 +1469,20 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*withdraw_authority,
*withdraw_amount,
destination_account_pubkey,
*sign_only,
*dump_transaction_message,
blockhash_query,
nonce_account.as_ref(),
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::CloseVoteAccount {
vote_account_pubkey,
withdraw_authority,
destination_account_pubkey,
memo,
fee_payer,
} => process_close_vote_account(
&rpc_client,
config,
@@ -1434,12 +1490,19 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*withdraw_authority,
destination_account_pubkey,
memo.as_ref(),
*fee_payer,
),
CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey,
vote_authorize,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
memo,
fee_payer,
authorized,
new_authorized,
} => process_vote_authorize(
@@ -1450,33 +1513,63 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*vote_authorize,
*authorized,
*new_authorized,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account,
withdraw_authority,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_vote_update_validator(
&rpc_client,
config,
vote_account_pubkey,
*new_identity_account,
*withdraw_authority,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::VoteUpdateCommission {
vote_account_pubkey,
commission,
withdraw_authority,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_vote_update_commission(
&rpc_client,
config,
vote_account_pubkey,
*commission,
*withdraw_authority,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
// Wallet Commands
@@ -1975,7 +2068,13 @@ mod tests {
authorized_voter: Some(bob_pubkey),
authorized_withdrawer: bob_pubkey,
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
let result = process_command(&config);
@@ -2006,7 +2105,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_authorized_pubkey,
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
@@ -2019,7 +2124,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
let result = process_command(&config);
assert!(result.is_ok());
@@ -2195,7 +2306,13 @@ mod tests {
authorized_voter: Some(bob_pubkey),
authorized_withdrawer: bob_pubkey,
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
assert!(process_command(&config).is_err());
@@ -2204,7 +2321,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_authorized_pubkey: bob_pubkey,
vote_authorize: VoteAuthorize::Voter,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
@@ -2214,7 +2337,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_identity_account: 1,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
assert!(process_command(&config).is_err());


@@ -5,7 +5,7 @@ use {
},
clap::{App, AppSettings, Arg, ArgMatches, SubCommand},
console::style,
serde::{Deserialize, Serialize},
serde::{Deserialize, Deserializer, Serialize, Serializer},
solana_clap_utils::{input_parsers::*, input_validators::*, keypair::*},
solana_cli_output::{QuietDisplay, VerboseDisplay},
solana_client::{client_error::ClientError, rpc_client::RpcClient},
@@ -23,6 +23,7 @@ use {
cmp::Ordering,
collections::{HashMap, HashSet},
fmt,
str::FromStr,
sync::Arc,
},
};
@@ -45,7 +46,7 @@ pub enum FeatureCliCommand {
},
}
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase", tag = "status", content = "sinceSlot")]
pub enum CliFeatureStatus {
Inactive,
@@ -53,7 +54,29 @@ pub enum CliFeatureStatus {
Active(Slot),
}
#[derive(Serialize, Deserialize)]
impl PartialOrd for CliFeatureStatus {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for CliFeatureStatus {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Self::Inactive, Self::Inactive) => Ordering::Equal,
(Self::Inactive, _) => Ordering::Greater,
(_, Self::Inactive) => Ordering::Less,
(Self::Pending, Self::Pending) => Ordering::Equal,
(Self::Pending, _) => Ordering::Greater,
(_, Self::Pending) => Ordering::Less,
(Self::Active(self_active_slot), Self::Active(other_active_slot)) => {
self_active_slot.cmp(other_active_slot)
}
}
}
}
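As a standalone sanity check of the ordering above (the enum is reproduced locally so the snippet compiles on its own): sorting ascending puts active features first, oldest activation slot leading, with pending and then inactive features at the end, which is what the later `features.sort_unstable()` call relies on.

```rust
use std::cmp::Ordering;

// Local copy of the enum so this sketch compiles in isolation.
#[derive(Debug, PartialEq, Eq)]
enum CliFeatureStatus {
    Inactive,
    Pending,
    Active(u64),
}

impl PartialOrd for CliFeatureStatus {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for CliFeatureStatus {
    fn cmp(&self, other: &Self) -> Ordering {
        match (self, other) {
            (Self::Inactive, Self::Inactive) => Ordering::Equal,
            (Self::Inactive, _) => Ordering::Greater,
            (_, Self::Inactive) => Ordering::Less,
            (Self::Pending, Self::Pending) => Ordering::Equal,
            (Self::Pending, _) => Ordering::Greater,
            (_, Self::Pending) => Ordering::Less,
            (Self::Active(a), Self::Active(b)) => a.cmp(b),
        }
    }
}

fn main() {
    let mut statuses = vec![
        CliFeatureStatus::Inactive,
        CliFeatureStatus::Active(42),
        CliFeatureStatus::Pending,
        CliFeatureStatus::Active(7),
    ];
    statuses.sort_unstable();
    // Active (oldest first), then Pending, then Inactive.
    assert_eq!(
        statuses,
        vec![
            CliFeatureStatus::Active(7),
            CliFeatureStatus::Active(42),
            CliFeatureStatus::Pending,
            CliFeatureStatus::Inactive,
        ]
    );
}
```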
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct CliFeature {
pub id: String,
@@ -62,11 +85,28 @@ pub struct CliFeature {
pub status: CliFeatureStatus,
}
impl PartialOrd for CliFeature {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for CliFeature {
fn cmp(&self, other: &Self) -> Ordering {
match self.status.cmp(&other.status) {
Ordering::Equal => self.id.cmp(&other.id),
ordering => ordering,
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeatures {
pub features: Vec<CliFeature>,
pub feature_activation_allowed: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub cluster_feature_sets: Option<CliClusterFeatureSets>,
#[serde(skip)]
pub inactive: bool,
}
@@ -93,11 +133,16 @@ impl fmt::Display for CliFeatures {
CliFeatureStatus::Inactive => style("inactive".to_string()).red(),
CliFeatureStatus::Pending => style("activation pending".to_string()).yellow(),
CliFeatureStatus::Active(activation_slot) =>
style(format!("active since slot {}", activation_slot)).green(),
style(format!("active since slot {:>9}", activation_slot)).green(),
},
feature.description,
)?;
}
if let Some(feature_sets) = &self.cluster_feature_sets {
write!(f, "{}", feature_sets)?;
}
if self.inactive && !self.feature_activation_allowed {
writeln!(
f,
@@ -114,6 +159,191 @@ impl fmt::Display for CliFeatures {
impl QuietDisplay for CliFeatures {}
impl VerboseDisplay for CliFeatures {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliClusterFeatureSets {
pub tool_feature_set: u32,
pub feature_sets: Vec<CliFeatureSet>,
#[serde(skip)]
pub stake_allowed: bool,
#[serde(skip)]
pub rpc_allowed: bool,
}
impl fmt::Display for CliClusterFeatureSets {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut tool_feature_set_matches_cluster = false;
let software_versions_title = "Software Version";
let feature_set_title = "Feature Set";
let stake_percent_title = "Stake";
let rpc_percent_title = "RPC";
let mut max_software_versions_len = software_versions_title.len();
let mut max_feature_set_len = feature_set_title.len();
let mut max_stake_percent_len = stake_percent_title.len();
let mut max_rpc_percent_len = rpc_percent_title.len();
let feature_sets: Vec<_> = self
.feature_sets
.iter()
.map(|feature_set_info| {
let me = if self.tool_feature_set == feature_set_info.feature_set {
tool_feature_set_matches_cluster = true;
true
} else {
false
};
let software_versions: Vec<_> = feature_set_info
.software_versions
.iter()
.map(ToString::to_string)
.collect();
let software_versions = software_versions.join(", ");
let feature_set = if feature_set_info.feature_set == 0 {
"unknown".to_string()
} else {
feature_set_info.feature_set.to_string()
};
let stake_percent = format!("{:.2}%", feature_set_info.stake_percent);
let rpc_percent = format!("{:.2}%", feature_set_info.rpc_percent);
max_software_versions_len = max_software_versions_len.max(software_versions.len());
max_feature_set_len = max_feature_set_len.max(feature_set.len());
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
(
software_versions,
feature_set,
stake_percent,
rpc_percent,
me,
)
})
.collect();
if !tool_feature_set_matches_cluster {
writeln!(
f,
"\n{}",
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
.bold())?;
} else {
if !self.stake_allowed {
write!(
f,
"\n{}",
style("To activate features the stake must be >= 95%")
.bold()
.red()
)?;
}
if !self.rpc_allowed {
write!(
f,
"\n{}",
style("To activate features the RPC nodes must be >= 95%")
.bold()
.red()
)?;
}
}
writeln!(
f,
"\n\n{}",
style(format!("Tool Feature Set: {}", self.tool_feature_set)).bold()
)?;
writeln!(
f,
"{}",
style(format!(
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
max_software_versions_len,
software_versions_title,
max_feature_set_len,
feature_set_title,
max_stake_percent_len,
stake_percent_title,
max_rpc_percent_len,
rpc_percent_title,
))
.bold(),
)?;
for (software_versions, feature_set, stake_percent, rpc_percent, me) in feature_sets {
writeln!(
f,
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
max_software_versions_len,
software_versions,
max_feature_set_len,
feature_set,
max_stake_percent_len,
stake_percent,
max_rpc_percent_len,
rpc_percent,
if me { "<-- me" } else { "" },
)?;
}
writeln!(f)
}
}
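For readers unfamiliar with the `{1:<0$}` specifiers in the table rows above: these are Rust's positional width arguments, where `0$` takes the field width from argument 0 and `<`/`>` pick the alignment. A tiny standalone illustration (the width and strings are made up):

```rust
fn main() {
    let width = 20usize;
    // `{1:<0$}` left-aligns argument 1 in a field `width` columns wide;
    // `{1:>0$}` right-aligns it, as the table body rows above do.
    println!("{1:<0$}|", width, "Software Version");
    println!("{1:>0$}|", width, "1.9.2");
}
```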
impl QuietDisplay for CliClusterFeatureSets {}
impl VerboseDisplay for CliClusterFeatureSets {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeatureSet {
software_versions: Vec<CliVersion>,
feature_set: u32,
stake_percent: f64,
rpc_percent: f32,
}
#[derive(Eq, PartialEq, Ord, PartialOrd)]
struct CliVersion(Option<semver::Version>);
impl fmt::Display for CliVersion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let s = match &self.0 {
None => "unknown".to_string(),
Some(version) => version.to_string(),
};
write!(f, "{}", s)
}
}
impl FromStr for CliVersion {
type Err = semver::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let version_option = if s == "unknown" {
None
} else {
Some(semver::Version::from_str(s)?)
};
Ok(CliVersion(version_option))
}
}
impl Serialize for CliVersion {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
impl<'de> Deserialize<'de> for CliVersion {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s: &str = Deserialize::deserialize(deserializer)?;
CliVersion::from_str(s).map_err(serde::de::Error::custom)
}
}
pub trait FeatureSubCommands {
fn feature_subcommands(self) -> Self;
}
@@ -330,7 +560,10 @@ fn feature_set_stats(rpc_client: &RpcClient) -> Result<FeatureSetStats, ClientEr
}
// Feature activation is only allowed when 95% of the active stake is on the current feature set
fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<bool, ClientError> {
fn feature_activation_allowed(
rpc_client: &RpcClient,
quiet: bool,
) -> Result<(bool, Option<CliClusterFeatureSets>), ClientError> {
let my_feature_set = solana_version::Version::default().feature_set;
let feature_set_stats = feature_set_stats(rpc_client)?;
@@ -346,54 +579,43 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
)
.unwrap_or((false, false));
if !quiet {
if feature_set_stats.get(&my_feature_set).is_none() {
println!(
"{}",
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
.bold());
} else {
if !stake_allowed {
print!(
"\n{}",
style("To activate features the stake must be >= 95%")
.bold()
.red()
);
}
if !rpc_allowed {
print!(
"\n{}",
style("To activate features the RPC nodes must be >= 95%")
.bold()
.red()
);
}
}
println!(
"\n\n{}",
style(format!("Tool Feature Set: {}", my_feature_set)).bold()
);
let mut feature_set_stats = feature_set_stats.into_iter().collect::<Vec<_>>();
feature_set_stats.sort_by(|l, r| {
match l.1.software_versions[0]
.cmp(&r.1.software_versions[0])
let cluster_feature_sets = if quiet {
None
} else {
let mut feature_sets = feature_set_stats
.into_iter()
.map(
|(
feature_set,
FeatureSetStatsEntry {
stake_percent,
rpc_nodes_percent: rpc_percent,
software_versions,
},
)| {
CliFeatureSet {
software_versions: software_versions.into_iter().map(CliVersion).collect(),
feature_set,
stake_percent,
rpc_percent,
}
},
)
.collect::<Vec<_>>();
feature_sets.sort_by(|l, r| {
match l.software_versions[0]
.cmp(&r.software_versions[0])
.reverse()
{
Ordering::Equal => {
match l
.1
.stake_percent
.partial_cmp(&r.1.stake_percent)
.partial_cmp(&r.stake_percent)
.unwrap()
.reverse()
{
Ordering::Equal => {
l.1.rpc_nodes_percent
.partial_cmp(&r.1.rpc_nodes_percent)
.unwrap()
.reverse()
l.rpc_percent.partial_cmp(&r.rpc_percent).unwrap().reverse()
}
o => o,
}
@@ -401,96 +623,15 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
o => o,
}
});
Some(CliClusterFeatureSets {
tool_feature_set: my_feature_set,
feature_sets,
stake_allowed,
rpc_allowed,
})
};
let software_versions_title = "Software Version";
let feature_set_title = "Feature Set";
let stake_percent_title = "Stake";
let rpc_percent_title = "RPC";
let mut stats_output = Vec::new();
let mut max_software_versions_len = software_versions_title.len();
let mut max_feature_set_len = feature_set_title.len();
let mut max_stake_percent_len = stake_percent_title.len();
let mut max_rpc_percent_len = rpc_percent_title.len();
for (
feature_set,
FeatureSetStatsEntry {
stake_percent,
rpc_nodes_percent,
software_versions,
},
) in feature_set_stats.into_iter()
{
let me = feature_set == my_feature_set;
let feature_set = if feature_set == 0 {
"unknown".to_string()
} else {
feature_set.to_string()
};
let stake_percent = format!("{:.2}%", stake_percent);
let rpc_percent = format!("{:.2}%", rpc_nodes_percent);
let mut has_unknown = false;
let mut software_versions = software_versions
.iter()
.filter_map(|v| {
if v.is_none() {
has_unknown = true;
}
v.as_ref()
})
.map(ToString::to_string)
.collect::<Vec<_>>();
if has_unknown {
software_versions.push("unknown".to_string());
}
let software_versions = software_versions.join(", ");
max_software_versions_len = max_software_versions_len.max(software_versions.len());
max_feature_set_len = max_feature_set_len.max(feature_set.len());
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
stats_output.push((
software_versions,
feature_set,
stake_percent,
rpc_percent,
me,
));
}
println!(
"{}",
style(format!(
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
max_software_versions_len,
software_versions_title,
max_feature_set_len,
feature_set_title,
max_stake_percent_len,
stake_percent_title,
max_rpc_percent_len,
rpc_percent_title,
))
.bold(),
);
for (software_versions, feature_set, stake_percent, rpc_percent, me) in stats_output {
println!(
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
max_software_versions_len,
software_versions,
max_feature_set_len,
feature_set,
max_stake_percent_len,
stake_percent,
max_rpc_percent_len,
rpc_percent,
if me { "<-- me" } else { "" },
);
}
println!();
}
Ok(stake_allowed && rpc_allowed)
Ok((stake_allowed && rpc_allowed, cluster_feature_sets))
}
fn status_from_account(account: Account) -> Option<CliFeatureStatus> {
@@ -550,10 +691,14 @@ fn process_status(
});
}
let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?;
features.sort_unstable();
let (feature_activation_allowed, cluster_feature_sets) =
feature_activation_allowed(rpc_client, features.len() <= 1)?;
let feature_set = CliFeatures {
features,
feature_activation_allowed,
cluster_feature_sets,
inactive,
};
Ok(config.output_format.formatted_string(&feature_set))
@@ -577,7 +722,7 @@ fn process_activate(
}
}
if !feature_activation_allowed(rpc_client, false)? {
if !feature_activation_allowed(rpc_client, false)?.0 {
match force {
ForceActivation::Almost =>
return Err("Add force argument once more to override the sanity check to force feature activation ".into()),


@@ -16,6 +16,7 @@ use {
pub enum SpendAmount {
All,
Some(u64),
RentExempt,
}
impl Default for SpendAmount {
@@ -90,6 +91,7 @@ where
0,
from_pubkey,
fee_pubkey,
0,
build_message,
)?;
Ok((message, spend))
@@ -97,6 +99,12 @@ where
let from_balance = rpc_client
.get_balance_with_commitment(from_pubkey, commitment)?
.value;
let from_rent_exempt_minimum = if amount == SpendAmount::RentExempt {
let data = rpc_client.get_account_data(from_pubkey)?;
rpc_client.get_minimum_balance_for_rent_exemption(data.len())?
} else {
0
};
let (message, SpendAndFee { spend, fee }) = resolve_spend_message(
rpc_client,
amount,
@@ -104,6 +112,7 @@ where
from_balance,
from_pubkey,
fee_pubkey,
from_rent_exempt_minimum,
build_message,
)?;
if from_pubkey == fee_pubkey {
@@ -140,6 +149,7 @@ fn resolve_spend_message<F>(
from_balance: u64,
from_pubkey: &Pubkey,
fee_pubkey: &Pubkey,
from_rent_exempt_minimum: u64,
build_message: F,
) -> Result<(Message, SpendAndFee), CliError>
where
@@ -176,5 +186,20 @@ where
},
))
}
SpendAmount::RentExempt => {
let mut lamports = if from_pubkey == fee_pubkey {
from_balance.saturating_sub(fee)
} else {
from_balance
};
lamports = lamports.saturating_sub(from_rent_exempt_minimum);
Ok((
build_message(lamports),
SpendAndFee {
spend: lamports,
fee,
},
))
}
}
}
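Concretely, when the sender is also the fee payer, the `RentExempt` arm above computes spend = balance - fee - rent-exempt minimum, saturating at zero. A quick standalone check with made-up lamport figures:

```rust
fn main() {
    let from_balance: u64 = 10_000_000; // hypothetical account balance
    let fee: u64 = 5_000; // hypothetical transaction fee
    let rent_exempt_minimum: u64 = 890_880; // hypothetical minimum for this account size
    let lamports = from_balance
        .saturating_sub(fee)
        .saturating_sub(rent_exempt_minimum);
    assert_eq!(lamports, 9_104_120);
    println!("spendable while staying rent-exempt: {} lamports", lamports);
}
```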

File diff suppressed because it is too large


@@ -462,18 +462,27 @@ pub fn process_show_account(
let mut account_string = config.output_format.formatted_string(&cli_account);
if config.output_format == OutputFormat::Display
|| config.output_format == OutputFormat::DisplayVerbose
{
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
match config.output_format {
OutputFormat::Json | OutputFormat::JsonCompact => {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(account_string.as_bytes())?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account to {}", output_file)?;
}
}
OutputFormat::Display | OutputFormat::DisplayVerbose => {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
}
}
OutputFormat::DisplayQuiet => (),
}
Ok(account_string)

View File

@@ -59,7 +59,13 @@ fn test_stake_delegation_force() {
authorized_voter: None,
authorized_withdrawer,
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();

View File

@@ -4,6 +4,7 @@ use {
spend_utils::SpendAmount,
test_utils::check_recent_balance,
},
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
solana_client::{
blockhash_query::{self, BlockhashQuery},
rpc_client::RpcClient,
@@ -12,7 +13,7 @@ use {
solana_sdk::{
account_utils::StateMut,
commitment_config::CommitmentConfig,
signature::{Keypair, Signer},
signature::{Keypair, NullSigner, Signer},
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidator,
@@ -49,7 +50,13 @@ fn test_vote_authorize_and_withdraw() {
authorized_voter: None,
authorized_withdrawer: config.signers[0].pubkey(),
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
let vote_account = rpc_client
@@ -93,7 +100,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_authorized_pubkey: first_withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
@@ -112,7 +125,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 1,
new_authorized: Some(1),
};
@@ -126,7 +145,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 1,
new_authorized: Some(2),
};
@@ -146,7 +171,13 @@ fn test_vote_authorize_and_withdraw() {
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(100),
destination_account_pubkey: destination_account,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
let expected_balance = expected_balance - 100;
@@ -160,7 +191,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
@@ -172,8 +209,283 @@ fn test_vote_authorize_and_withdraw() {
withdraw_authority: 1,
destination_account_pubkey: destination_account,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
check_recent_balance(0, &rpc_client, &vote_account_pubkey);
check_recent_balance(expected_balance, &rpc_client, &destination_account);
}
#[test]
fn test_offline_vote_authorize_and_withdraw() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
let default_signer = Keypair::new();
let mut config_payer = CliConfig::recent_for_tests();
config_payer.json_rpc_url = test_validator.rpc_url();
config_payer.signers = vec![&default_signer];
let mut config_offline = CliConfig::recent_for_tests();
config_offline.json_rpc_url = String::default();
config_offline.command = CliCommand::ClusterVersion;
let offline_keypair = Keypair::new();
config_offline.signers = vec![&offline_keypair];
// Verify that we cannot reach the cluster
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(
&rpc_client,
&config_payer,
&config_payer.signers[0].pubkey(),
100_000,
)
.unwrap();
check_recent_balance(100_000, &rpc_client, &config_payer.signers[0].pubkey());
request_and_confirm_airdrop(
&rpc_client,
&config_offline,
&config_offline.signers[0].pubkey(),
100_000,
)
.unwrap();
check_recent_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey());
// Create vote account with specific withdrawer
let vote_account_keypair = Keypair::new();
let vote_account_pubkey = vote_account_keypair.pubkey();
config_payer.signers = vec![&default_signer, &vote_account_keypair];
config_payer.command = CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 0,
authorized_voter: None,
authorized_withdrawer: offline_keypair.pubkey(),
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
let vote_account = rpc_client
.get_account(&vote_account_keypair.pubkey())
.unwrap();
let vote_state: VoteStateVersions = vote_account.state().unwrap();
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
assert_eq!(authorized_withdrawer, offline_keypair.pubkey());
let expected_balance = rpc_client
.get_minimum_balance_for_rent_exemption(VoteState::size_of())
.unwrap()
.max(1);
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
// Transfer in some more SOL
config_payer.signers = vec![&default_signer];
config_payer.command = CliCommand::Transfer {
amount: SpendAmount::Some(1_000),
to: vote_account_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
derived_address_seed: None,
derived_address_program_id: None,
};
process_command(&config_payer).unwrap();
let expected_balance = expected_balance + 1_000;
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
// Authorize vote account withdrawal to another signer, offline
let withdraw_authority = Keypair::new();
let blockhash = rpc_client.get_latest_blockhash().unwrap();
config_offline.command = CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[0].pubkey())
.unwrap();
config_payer.signers = vec![&offline_presigner];
config_payer.command = CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
process_command(&config_payer).unwrap();
let vote_account = rpc_client
.get_account(&vote_account_keypair.pubkey())
.unwrap();
let vote_state: VoteStateVersions = vote_account.state().unwrap();
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
assert_eq!(authorized_withdrawer, withdraw_authority.pubkey());
// Withdraw from vote account offline
let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
let blockhash = rpc_client.get_latest_blockhash().unwrap();
let fee_payer_null_signer = NullSigner::new(&default_signer.pubkey());
config_offline.signers = vec![&fee_payer_null_signer, &withdraw_authority];
config_offline.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(100),
destination_account_pubkey: destination_account,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[1].pubkey())
.unwrap();
config_payer.signers = vec![&default_signer, &offline_presigner];
config_payer.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(100),
destination_account_pubkey: destination_account,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
let expected_balance = expected_balance - 100;
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
check_recent_balance(100, &rpc_client, &destination_account);
// Re-assign validator identity offline
let blockhash = rpc_client.get_latest_blockhash().unwrap();
let new_identity_keypair = Keypair::new();
let new_identity_null_signer = NullSigner::new(&new_identity_keypair.pubkey());
config_offline.signers = vec![
&fee_payer_null_signer,
&withdraw_authority,
&new_identity_null_signer,
];
config_offline.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_offline).unwrap();
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[1].pubkey())
.unwrap();
config_payer.signers = vec![&default_signer, &offline_presigner, &new_identity_keypair];
config_payer.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
// Close vote account offline. Must use WithdrawFromVoteAccount and specify amount, since
// CloseVoteAccount requires RpcClient
let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
config_offline.signers = vec![&fee_payer_null_signer, &withdraw_authority];
config_offline.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(expected_balance),
destination_account_pubkey: destination_account,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_offline).unwrap();
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[1].pubkey())
.unwrap();
config_payer.signers = vec![&default_signer, &offline_presigner];
config_payer.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(expected_balance),
destination_account_pubkey: destination_account,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
check_recent_balance(0, &rpc_client, &vote_account_pubkey);
check_recent_balance(expected_balance, &rpc_client, &destination_account);
}
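Stripped of the vote-specific details, the offline flow this test exercises follows the pattern below (helper names invented; only the NullSigner-to-presigner handoff mirrors the test):

use solana_sdk::signature::{Keypair, NullSigner, Signer};

fn offline_signing_pattern() {
    let fee_payer = Keypair::new(); // secret key held online
    let authority = Keypair::new(); // secret key held offline

    // Offline: the fee payer is represented by a NullSigner, so the message
    // can be assembled and the authority's signature produced without the fee
    // payer's secret key (sign_only: true above).
    let fee_payer_placeholder = NullSigner::new(&fee_payer.pubkey());
    let _offline_signers: Vec<&dyn Signer> = vec![&fee_payer_placeholder, &authority];

    // Online: the authority's signature travels as a presigner recovered from
    // the sign-only reply (parse_sign_only_reply_string / presigner_of above),
    // and the real fee payer signs and submits.
    let _online_signers: Vec<&dyn Signer> = vec![&fee_payer /* plus the presigner */];
}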

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client-test"
version = "1.9.0"
version = "1.9.2"
description = "Solana RPC Test"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,22 +12,24 @@ edition = "2021"
[dependencies]
serde_json = "1.0.72"
serial_test = "0.5.1"
solana-client = { path = "../client", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-test-validator = { path = "../test-validator", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.2" }
solana-ledger = { path = "../ledger", version = "=1.9.2" }
solana-measure = { path = "../measure", version = "=1.9.2" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.2" }
solana-metrics = { path = "../metrics", version = "=1.9.2" }
solana-perf = { path = "../perf", version = "=1.9.2" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.2" }
solana-rpc = { path = "../rpc", version = "=1.9.2" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-streamer = { path = "../streamer", version = "=1.9.2" }
solana-test-validator = { path = "../test-validator", version = "=1.9.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
systemstat = "0.1.10"
[dev-dependencies]
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -4,11 +4,16 @@ use {
solana_client::{
pubsub_client::PubsubClient,
rpc_client::RpcClient,
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
rpc_response::SlotInfo,
rpc_config::{
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
RpcProgramAccountsConfig,
},
rpc_response::{RpcBlockUpdate, SlotInfo},
},
solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
solana_rpc::{
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
rpc::create_test_transactions_and_populate_blockstore,
rpc_pubsub_service::{PubSubConfig, PubSubService},
rpc_subscriptions::RpcSubscriptions,
},
@@ -20,7 +25,7 @@ use {
},
solana_sdk::{
clock::Slot,
commitment_config::CommitmentConfig,
commitment_config::{CommitmentConfig, CommitmentLevel},
native_token::sol_to_lamports,
pubkey::Pubkey,
rpc_port,
@@ -29,11 +34,12 @@ use {
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidator,
solana_transaction_status::{TransactionDetails, UiTransactionEncoding},
std::{
collections::HashSet,
net::{IpAddr, Ipv4Addr, SocketAddr},
sync::{
atomic::{AtomicBool, Ordering},
atomic::{AtomicBool, AtomicU64, Ordering},
Arc, RwLock,
},
thread::sleep,
@@ -119,9 +125,10 @@ fn test_account_subscription() {
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bob = Keypair::new();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@@ -194,6 +201,112 @@ fn test_account_subscription() {
assert_eq!(errors, [].to_vec());
}
#[test]
#[serial]
fn test_block_subscription() {
// setup BankForks
let exit = Arc::new(AtomicBool::new(false));
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
..
} = create_genesis_config(10_000);
let bank = Bank::new_for_tests(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
// setup Blockstore
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let blockstore = Arc::new(blockstore);
// populate ledger with test txs
let bank = bank_forks.read().unwrap().working_bank();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
vec![&alice, &keypair1, &keypair2, &keypair3],
0,
bank,
blockstore.clone(),
max_complete_transaction_status_slot,
);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
// setup RpcSubscriptions && PubSubService
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests_with_blockstore(
&exit,
max_complete_transaction_status_slot,
blockstore.clone(),
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
));
let pubsub_addr = SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
rpc_port::DEFAULT_RPC_PUBSUB_PORT,
);
let pub_cfg = PubSubConfig {
enable_block_subscription: true,
..PubSubConfig::default()
};
let (trigger, pubsub_service) = PubSubService::new(pub_cfg, &subscriptions, pubsub_addr);
std::thread::sleep(Duration::from_millis(400));
// setup PubsubClient
let (mut client, receiver) = PubsubClient::block_subscribe(
&format!("ws://0.0.0.0:{}/", pubsub_addr.port()),
RpcBlockSubscribeFilter::All,
Some(RpcBlockSubscribeConfig {
commitment: Some(CommitmentConfig {
commitment: CommitmentLevel::Confirmed,
}),
encoding: Some(UiTransactionEncoding::Json),
transaction_details: Some(TransactionDetails::Signatures),
show_rewards: None,
}),
)
.unwrap();
// trigger Gossip notification
let slot = bank_forks.read().unwrap().highest_slot();
subscriptions.notify_gossip_subscribers(slot);
let maybe_actual = receiver.recv_timeout(Duration::from_millis(400));
match maybe_actual {
Ok(actual) => {
let complete_block = blockstore.get_complete_block(slot, false).unwrap();
let block = complete_block.clone().configure(
UiTransactionEncoding::Json,
TransactionDetails::Signatures,
false,
);
let expected = RpcBlockUpdate {
slot,
block: Some(block),
err: None,
};
let block = complete_block.configure(
UiTransactionEncoding::Json,
TransactionDetails::Signatures,
false,
);
assert_eq!(actual.value.slot, expected.slot);
assert!(block.eq(&actual.value.block.unwrap()));
}
Err(e) => {
panic!("unexpected websocket receive timeout: {:?}", e);
}
}
// cleanup
exit.store(true, Ordering::Relaxed);
trigger.cancel();
client.shutdown().unwrap();
pubsub_service.close().unwrap();
}
#[test]
#[serial]
fn test_program_subscription() {
@@ -215,9 +328,10 @@ fn test_program_subscription() {
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bob = Keypair::new();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@@ -300,9 +414,10 @@ fn test_root_subscription() {
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@@ -350,8 +465,10 @@ fn test_slot_subscription() {
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.9.0"
version = "1.9.2"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -23,15 +23,15 @@ semver = "1.0.4"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.2" }
solana-faucet = { path = "../faucet", version = "=1.9.2" }
solana-net-utils = { path = "../net-utils", version = "=1.9.2" }
solana-measure = { path = "../measure", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.2" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tungstenite = { version = "0.16.0", features = ["rustls-tls-webpki-roots"] }
@@ -40,7 +40,7 @@ url = "2.2.2"
[dev-dependencies]
assert_matches = "1.5.0"
jsonrpc-http-server = "18.0.0"
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,12 +1,13 @@
use {
crate::{
rpc_config::{
RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig,
RpcTransactionLogsConfig, RpcTransactionLogsFilter,
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig,
RpcTransactionLogsFilter,
},
rpc_response::{
Response as RpcResponse, RpcKeyedAccount, RpcLogsResponse, RpcSignatureResult,
SlotInfo, SlotUpdate,
Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse,
RpcSignatureResult, SlotInfo, SlotUpdate,
},
},
log::*,
@@ -173,6 +174,12 @@ pub type SignatureSubscription = (
Receiver<RpcResponse<RpcSignatureResult>>,
);
pub type PubsubBlockClientSubscription = PubsubClientSubscription<RpcResponse<RpcBlockUpdate>>;
pub type BlockSubscription = (
PubsubBlockClientSubscription,
Receiver<RpcResponse<RpcBlockUpdate>>,
);
pub type PubsubProgramClientSubscription = PubsubClientSubscription<RpcResponse<RpcKeyedAccount>>;
pub type ProgramSubscription = (
PubsubProgramClientSubscription,
@@ -266,6 +273,45 @@ impl PubsubClient {
Ok((result, receiver))
}
pub fn block_subscribe(
url: &str,
filter: RpcBlockSubscribeFilter,
config: Option<RpcBlockSubscribeConfig>,
) -> Result<BlockSubscription, PubsubClientError> {
let url = Url::parse(url)?;
let socket = connect_with_retry(url)?;
let (sender, receiver) = channel();
let socket = Arc::new(RwLock::new(socket));
let socket_clone = socket.clone();
let exit = Arc::new(AtomicBool::new(false));
let exit_clone = exit.clone();
let body = json!({
"jsonrpc":"2.0",
"id":1,
"method":"blockSubscribe",
"params":[filter, config]
})
.to_string();
let subscription_id = PubsubBlockClientSubscription::send_subscribe(&socket_clone, body)?;
let t_cleanup = std::thread::spawn(move || {
Self::cleanup_with_sender(exit_clone, &socket_clone, sender)
});
let result = PubsubClientSubscription {
message_type: PhantomData,
operation: "blocks",
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};
Ok((result, receiver))
}
pub fn logs_subscribe(
url: &str,
filter: RpcTransactionLogsFilter,

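A hypothetical usage sketch for the new PubsubClient::block_subscribe (endpoint and timeout are placeholders; the config mirrors the RpcBlockSubscribeConfig defaults introduced in this change, and the server must enable block subscriptions, as the test's PubSubConfig shows):

use solana_client::{
    pubsub_client::PubsubClient,
    rpc_config::{RpcBlockSubscribeConfig, RpcBlockSubscribeFilter},
};
use std::time::Duration;

fn watch_blocks() {
    let (mut client, receiver) = PubsubClient::block_subscribe(
        "ws://localhost:8900/",
        RpcBlockSubscribeFilter::All,
        Some(RpcBlockSubscribeConfig::default()),
    )
    .expect("block_subscribe failed");
    // Each notification is an RpcResponse<RpcBlockUpdate>: a slot plus an
    // optional UiConfirmedBlock (None when the block lookup errored).
    if let Ok(update) = receiver.recv_timeout(Duration::from_secs(5)) {
        println!(
            "slot {}: block present: {}",
            update.value.slot,
            update.value.block.is_some()
        );
    }
    client.shutdown().unwrap();
}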
View File

@@ -1329,7 +1329,7 @@ impl RpcClient {
/// # Ok::<(), ClientError>(())
/// ```
pub fn get_highest_snapshot_slot(&self) -> ClientResult<RpcSnapshotSlotInfo> {
if self.get_node_version()? < semver::Version::new(1, 8, 0) {
if self.get_node_version()? < semver::Version::new(1, 9, 0) {
#[allow(deprecated)]
self.get_snapshot_slot().map(|full| RpcSnapshotSlotInfo {
full,
@@ -4747,7 +4747,7 @@ impl RpcClient {
commitment: CommitmentConfig,
) -> ClientResult<(Hash, u64)> {
let (blockhash, last_valid_block_height) =
if self.get_node_version()? < semver::Version::new(1, 8, 0) {
if self.get_node_version()? < semver::Version::new(1, 9, 0) {
let Fees {
blockhash,
last_valid_block_height,
@@ -4781,7 +4781,7 @@ impl RpcClient {
blockhash: &Hash,
commitment: CommitmentConfig,
) -> ClientResult<bool> {
let result = if self.get_node_version()? < semver::Version::new(1, 8, 0) {
let result = if self.get_node_version()? < semver::Version::new(1, 9, 0) {
self.get_fee_calculator_for_blockhash_with_commitment(blockhash, commitment)?
.value
.is_some()

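The version gate above follows one pattern across these methods; as a sketch, the fallback predicate is simply the following, with the cutoff raised from 1.8.0 to 1.9.0 by this change:

// Deprecated fallback RPCs are used only against nodes older than the cutoff.
fn needs_legacy_fallback(node_version: &semver::Version) -> bool {
    *node_version < semver::Version::new(1, 9, 0)
}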
View File

@@ -182,6 +182,23 @@ pub struct RpcSignatureSubscribeConfig {
pub enable_received_notification: Option<bool>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcBlockSubscribeFilter {
All,
MentionsAccountOrProgram(String),
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockSubscribeConfig {
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub encoding: Option<UiTransactionEncoding>,
pub transaction_details: Option<TransactionDetails>,
pub show_rewards: Option<bool>,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignaturesForAddressConfig {

View File

@@ -9,9 +9,10 @@ use {
transaction::{Result, TransactionError},
},
solana_transaction_status::{
ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus,
ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock,
},
std::{collections::HashMap, fmt, net::SocketAddr},
thiserror::Error,
};
pub type RpcResult<T> = client_error::Result<Response<T>>;
@@ -424,6 +425,20 @@ pub struct RpcInflationReward {
pub commission: Option<u8>, // Vote account commission when the reward was credited
}
#[derive(Clone, Deserialize, Serialize, Debug, Error, Eq, PartialEq)]
pub enum RpcBlockUpdateError {
#[error("block store error")]
BlockStoreError,
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockUpdate {
pub slot: Slot,
pub block: Option<UiConfirmedBlock>,
pub err: Option<RpcBlockUpdateError>,
}
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
let ConfirmedTransactionStatusWithSignature {

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.2"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"
@@ -26,7 +26,7 @@ fs_extra = "1.2.0"
histogram = "0.6.9"
itertools = "0.10.1"
log = "0.4.14"
lru = "0.7.0"
lru = "0.7.1"
rand = "0.7.0"
rand_chacha = "0.2.2"
raptorq = "1.6.4"
@@ -34,30 +34,30 @@ rayon = "1.5.1"
retain_mut = "0.1.5"
serde = "1.0.130"
serde_derive = "1.0.103"
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-entry = { path = "../entry", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-ledger = { path = "../ledger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-poh = { path = "../poh", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-replica-lib = { path = "../replica-lib", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.2" }
solana-client = { path = "../client", version = "=1.9.2" }
solana-entry = { path = "../entry", version = "=1.9.2" }
solana-gossip = { path = "../gossip", version = "=1.9.2" }
solana-ledger = { path = "../ledger", version = "=1.9.2" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-measure = { path = "../measure", version = "=1.9.2" }
solana-metrics = { path = "../metrics", version = "=1.9.2" }
solana-net-utils = { path = "../net-utils", version = "=1.9.2" }
solana-perf = { path = "../perf", version = "=1.9.2" }
solana-poh = { path = "../poh", version = "=1.9.2" }
solana-rpc = { path = "../rpc", version = "=1.9.2" }
solana-replica-lib = { path = "../replica-lib", version = "=1.9.2" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.2" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.2" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.2" }
solana-streamer = { path = "../streamer", version = "=1.9.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.2" }
tempfile = "3.2.0"
thiserror = "1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.2" }
sys-info = "0.9.1"
tokio = { version = "1", features = ["full"] }
trees = "0.4.2"
@@ -71,9 +71,9 @@ matches = "0.1.9"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde_json = "1.0.72"
serial_test = "0.5.1"
solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.2" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
static_assertions = "1.1.0"
systemstat = "0.1.10"

View File

@@ -20,7 +20,7 @@ use {
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
},
solana_perf::{packet::to_packets_chunked, test_tx::test_tx},
solana_perf::{packet::to_packet_batches, test_tx::test_tx},
solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry},
solana_runtime::{bank::Bank, cost_model::CostModel},
solana_sdk::{
@@ -77,11 +77,11 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let tx = test_tx();
let len = 4096;
let chunk_size = 1024;
let batches = to_packets_chunked(&vec![tx; len], chunk_size);
let mut packets = VecDeque::new();
let batches = to_packet_batches(&vec![tx; len], chunk_size);
let mut packet_batches = VecDeque::new();
for batch in batches {
let batch_len = batch.packets.len();
packets.push_back((batch, vec![0usize; batch_len], false));
packet_batches.push_back((batch, vec![0usize; batch_len], false));
}
let (s, _r) = unbounded();
// This tests the performance of buffering packets.
@@ -91,7 +91,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
&my_pubkey,
std::u128::MAX,
&poh_recorder,
&mut packets,
&mut packet_batches,
None,
&s,
None::<Box<dyn Fn()>>,
@@ -206,7 +206,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions, PACKETS_PER_BATCH);
let verified: Vec<_> = to_packet_batches(&transactions, PACKETS_PER_BATCH);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(

View File

@@ -40,16 +40,14 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
);
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
let data_shreds = shredder
.entries_to_data_shreds(
&Keypair::new(),
&entries,
true, // is_last_in_slot
0, // next_shred_index
0, // fec_set_offset
&mut ProcessShredsStats::default(),
)
.0;
let data_shreds = shredder.entries_to_data_shreds(
&Keypair::new(),
&entries,
true, // is_last_in_slot
0, // next_shred_index
0, // fec_set_offset
&mut ProcessShredsStats::default(),
);
assert!(data_shreds.len() >= num_shreds);
data_shreds
}

View File

@@ -8,7 +8,7 @@ use {
log::*,
rand::{thread_rng, Rng},
solana_core::{sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage},
solana_perf::{packet::to_packets_chunked, test_tx::test_tx},
solana_perf::{packet::to_packet_batches, test_tx::test_tx},
solana_sdk::{
hash::Hash,
signature::{Keypair, Signer},
@@ -28,7 +28,7 @@ fn bench_packet_discard(bencher: &mut Bencher) {
let len = 30 * 1000;
let chunk_size = 1024;
let tx = test_tx();
let mut batches = to_packets_chunked(&vec![tx; len], chunk_size);
let mut batches = to_packet_batches(&vec![tx; len], chunk_size);
let mut total = 0;
@@ -74,7 +74,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
let chunk_size = 1024;
let mut batches = if use_same_tx {
let tx = test_tx();
to_packets_chunked(&vec![tx; len], chunk_size)
to_packet_batches(&vec![tx; len], chunk_size)
} else {
let from_keypair = Keypair::new();
let to_keypair = Keypair::new();
@@ -89,7 +89,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
)
})
.collect();
to_packets_chunked(&txs, chunk_size)
to_packet_batches(&txs, chunk_size)
};
trace!(

View File

@@ -14,7 +14,7 @@ use {
solana_ledger::{blockstore::Blockstore, shred::SIZE_OF_NONCE},
solana_measure::measure::Measure,
solana_perf::{
packet::{limited_deserialize, Packet, Packets},
packet::{limited_deserialize, Packet, PacketBatch},
recycler::Recycler,
},
solana_runtime::bank::Bank,
@@ -23,7 +23,7 @@ use {
pubkey::Pubkey,
timing::timestamp,
},
solana_streamer::streamer::{self, PacketReceiver},
solana_streamer::streamer::{self, PacketBatchReceiver},
std::{
collections::HashSet,
net::UdpSocket,
@@ -197,7 +197,7 @@ impl AncestorHashesService {
/// Listen for responses to our ancestors hashes repair requests
fn run_responses_listener(
ancestor_hashes_request_statuses: Arc<DashMap<Slot, DeadSlotAncestorRequestStatus>>,
response_receiver: PacketReceiver,
response_receiver: PacketBatchReceiver,
blockstore: Arc<Blockstore>,
outstanding_requests: Arc<RwLock<OutstandingAncestorHashesRepairs>>,
exit: Arc<AtomicBool>,
@@ -240,7 +240,7 @@ impl AncestorHashesService {
/// Process messages from the network
fn process_new_packets_from_channel(
ancestor_hashes_request_statuses: &DashMap<Slot, DeadSlotAncestorRequestStatus>,
response_receiver: &PacketReceiver,
response_receiver: &PacketBatchReceiver,
blockstore: &Blockstore,
outstanding_requests: &RwLock<OutstandingAncestorHashesRepairs>,
stats: &mut AncestorHashesResponsesStats,
@@ -249,17 +249,17 @@ impl AncestorHashesService {
retryable_slots_sender: &RetryableSlotsSender,
) -> Result<()> {
let timeout = Duration::new(1, 0);
let mut responses = vec![response_receiver.recv_timeout(timeout)?];
let mut total_packets = responses[0].packets.len();
let mut packet_batches = vec![response_receiver.recv_timeout(timeout)?];
let mut total_packets = packet_batches[0].packets.len();
let mut dropped_packets = 0;
while let Ok(more) = response_receiver.try_recv() {
total_packets += more.packets.len();
while let Ok(batch) = response_receiver.try_recv() {
total_packets += batch.packets.len();
if total_packets < *max_packets {
// Drop the rest in the channel in case of DOS
responses.push(more);
packet_batches.push(batch);
} else {
dropped_packets += more.packets.len();
dropped_packets += batch.packets.len();
}
}
@@ -267,10 +267,10 @@ impl AncestorHashesService {
stats.total_packets += total_packets;
let mut time = Measure::start("ancestor_hashes::handle_packets");
for response in responses {
Self::process_single_packets(
for packet_batch in packet_batches {
Self::process_packet_batch(
ancestor_hashes_request_statuses,
response,
packet_batch,
stats,
outstanding_requests,
blockstore,
@@ -289,16 +289,16 @@ impl AncestorHashesService {
Ok(())
}
fn process_single_packets(
fn process_packet_batch(
ancestor_hashes_request_statuses: &DashMap<Slot, DeadSlotAncestorRequestStatus>,
packets: Packets,
packet_batch: PacketBatch,
stats: &mut AncestorHashesResponsesStats,
outstanding_requests: &RwLock<OutstandingAncestorHashesRepairs>,
blockstore: &Blockstore,
duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
retryable_slots_sender: &RetryableSlotsSender,
) {
packets.packets.iter().for_each(|packet| {
packet_batch.packets.iter().for_each(|packet| {
let decision = Self::verify_and_process_ancestor_response(
packet,
ancestor_hashes_request_statuses,
@@ -328,7 +328,7 @@ impl AncestorHashesService {
blockstore: &Blockstore,
) -> Option<(Slot, DuplicateAncestorDecision)> {
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size - SIZE_OF_NONCE])
limited_deserialize(&packet.data[..packet.meta.size.saturating_sub(SIZE_OF_NONCE)])
.ok()
.and_then(|ancestor_hashes_response| {
// Verify the response
@@ -871,7 +871,7 @@ mod test {
t_listen: JoinHandle<()>,
exit: Arc<AtomicBool>,
responder_info: ContactInfo,
response_receiver: PacketReceiver,
response_receiver: PacketBatchReceiver,
correct_bank_hashes: HashMap<Slot, Hash>,
}
@@ -1033,15 +1033,6 @@ mod test {
is_frozen,
);
/*{
let w_bank_forks = bank_forks.write().unwrap();
assert!(w_bank_forks.get(dead_slot).is_none());
let parent = w_bank_forks.get(dead_slot - 1).unwrap().clone();
let dead_bank = Bank::new_from_parent(&parent, &Pubkey::default(), dead_slot);
bank_forks.insert(dead_bank);
}*/
// Create slots [slot, slot + num_ancestors) with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(dead_slot, dead_slot, 5);
blockstore
@@ -1369,6 +1360,34 @@ mod test {
assert!(ancestor_hashes_request_statuses.is_empty());
}
#[test]
fn test_verify_and_process_ancestor_responses_invalid_packet() {
let bank0 = Bank::default_for_tests();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
let ManageAncestorHashesState {
ancestor_hashes_request_statuses,
outstanding_requests,
..
} = ManageAncestorHashesState::new(bank_forks);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
// Create invalid packet with fewer bytes than the size of the nonce
let mut packet = Packet::default();
packet.meta.size = 0;
assert!(AncestorHashesService::verify_and_process_ancestor_response(
&packet,
&ancestor_hashes_request_statuses,
&mut AncestorHashesResponsesStats::default(),
&outstanding_requests,
&blockstore,
)
.is_none());
}
#[test]
fn test_ancestor_hashes_service_manage_ancestor_hashes_after_replay_dump() {
let dead_slot = MAX_ANCESTOR_RESPONSES as Slot;

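Why the saturating_sub in verify_and_process_ancestor_response matters, shown in isolation (illustrative; assumes size <= data.len(), which valid packet metadata guarantees): with meta.size == 0, the old size - SIZE_OF_NONCE underflowed, while the saturating form yields an empty slice and a clean deserialization failure, which the new invalid-packet test exercises.

fn payload_without_nonce(data: &[u8], size: usize, nonce_len: usize) -> &[u8] {
    // saturating_sub clamps at zero, so a short packet produces an empty
    // payload instead of a panicking (or wrapping) subtraction.
    &data[..size.saturating_sub(nonce_len)]
}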
View File

@@ -15,7 +15,7 @@ use {
solana_perf::{
cuda_runtime::PinnedVec,
data_budget::DataBudget,
packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
packet::{limited_deserialize, Packet, PacketBatch, PACKETS_PER_BATCH},
perf_libs,
},
solana_poh::poh_recorder::{BankStart, PohRecorder, PohRecorderError, TransactionRecorder},
@@ -64,10 +64,10 @@ use {
};
/// (packets, valid_indexes, forwarded)
/// Set of packets with a list of which are valid and if this batch has been forwarded.
type PacketsAndOffsets = (Packets, Vec<usize>, bool);
/// Batch of packets with a list of which are valid and if this batch has been forwarded.
type PacketBatchAndOffsets = (PacketBatch, Vec<usize>, bool);
pub type UnprocessedPackets = VecDeque<PacketsAndOffsets>;
pub type UnprocessedPacketBatches = VecDeque<PacketBatchAndOffsets>;
/// Transaction forwarding
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 2;
@@ -255,9 +255,9 @@ impl BankingStage {
pub fn new(
cluster_info: &Arc<ClusterInfo>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
tpu_verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_receiver: CrossbeamReceiver<Vec<PacketBatch>>,
tpu_verified_vote_receiver: CrossbeamReceiver<Vec<PacketBatch>>,
verified_vote_receiver: CrossbeamReceiver<Vec<PacketBatch>>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
cost_model: Arc<RwLock<CostModel>>,
@@ -278,9 +278,9 @@ impl BankingStage {
fn new_num_threads(
cluster_info: &Arc<ClusterInfo>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
tpu_verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_receiver: CrossbeamReceiver<Vec<PacketBatch>>,
tpu_verified_vote_receiver: CrossbeamReceiver<Vec<PacketBatch>>,
verified_vote_receiver: CrossbeamReceiver<Vec<PacketBatch>>,
num_threads: u32,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
@@ -346,12 +346,12 @@ impl BankingStage {
}
fn filter_valid_packets_for_forwarding<'a>(
all_packets: impl Iterator<Item = &'a PacketsAndOffsets>,
packet_batches: impl Iterator<Item = &'a PacketBatchAndOffsets>,
) -> Vec<&'a Packet> {
all_packets
.filter(|(_p, _indexes, forwarded)| !forwarded)
.flat_map(|(p, valid_indexes, _forwarded)| {
valid_indexes.iter().map(move |x| &p.packets[*x])
packet_batches
.filter(|(_batch, _indexes, forwarded)| !forwarded)
.flat_map(|(batch, valid_indexes, _forwarded)| {
valid_indexes.iter().map(move |x| &batch.packets[*x])
})
.collect()
}
@@ -359,10 +359,10 @@ impl BankingStage {
fn forward_buffered_packets(
socket: &std::net::UdpSocket,
tpu_forwards: &std::net::SocketAddr,
unprocessed_packets: &UnprocessedPackets,
buffered_packet_batches: &UnprocessedPacketBatches,
data_budget: &DataBudget,
) -> std::io::Result<()> {
let packets = Self::filter_valid_packets_for_forwarding(unprocessed_packets.iter());
let packets = Self::filter_valid_packets_for_forwarding(buffered_packet_batches.iter());
inc_new_counter_info!("banking_stage-forwarded_packets", packets.len());
const INTERVAL_MS: u64 = 100;
const MAX_BYTES_PER_SECOND: usize = 10_000 * 1200;
@@ -385,7 +385,7 @@ impl BankingStage {
Ok(())
}
// Returns whether the given `Packets` has any more remaining unprocessed
// Returns whether the given `PacketBatch` has any more remaining unprocessed
// transactions
fn update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes: &mut Vec<usize>,
@@ -404,7 +404,7 @@ impl BankingStage {
my_pubkey: &Pubkey,
max_tx_ingestion_ns: u128,
poh_recorder: &Arc<Mutex<PohRecorder>>,
buffered_packets: &mut UnprocessedPackets,
buffered_packet_batches: &mut UnprocessedPacketBatches,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
test_fn: Option<impl Fn()>,
@@ -412,19 +412,21 @@ impl BankingStage {
recorder: &TransactionRecorder,
qos_service: &Arc<QosService>,
) {
let mut rebuffered_packets_len = 0;
let mut rebuffered_packet_count = 0;
let mut new_tx_count = 0;
let buffered_len = buffered_packets.len();
let buffered_packet_batches_len = buffered_packet_batches.len();
let mut proc_start = Measure::start("consume_buffered_process");
let mut reached_end_of_slot = None;
buffered_packets.retain_mut(|(msgs, ref mut original_unprocessed_indexes, _forwarded)| {
buffered_packet_batches.retain_mut(|buffered_packet_batch_and_offsets| {
let (packet_batch, ref mut original_unprocessed_indexes, _forwarded) =
buffered_packet_batch_and_offsets;
if let Some((next_leader, bank)) = &reached_end_of_slot {
// We've hit the end of this slot, no need to perform more processing,
// just filter the remaining packets for the invalid (e.g. too old) ones
let new_unprocessed_indexes = Self::filter_unprocessed_packets(
bank,
msgs,
packet_batch,
original_unprocessed_indexes,
my_pubkey,
*next_leader,
@@ -446,7 +448,7 @@ impl BankingStage {
&working_bank,
&bank_creation_time,
recorder,
msgs,
packet_batch,
original_unprocessed_indexes.to_owned(),
transaction_status_sender.clone(),
gossip_vote_sender,
@@ -467,7 +469,7 @@ impl BankingStage {
new_tx_count += processed;
// Out of the buffered packets just retried, collect any still unprocessed
// transactions in this batch for forwarding
rebuffered_packets_len += new_unprocessed_indexes.len();
rebuffered_packet_count += new_unprocessed_indexes.len();
let has_more_unprocessed_transactions =
Self::update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes,
@@ -478,7 +480,7 @@ impl BankingStage {
}
has_more_unprocessed_transactions
} else {
rebuffered_packets_len += original_unprocessed_indexes.len();
rebuffered_packet_count += original_unprocessed_indexes.len();
// `original_unprocessed_indexes` must have remaining packets to process
// if not yet processed.
assert!(Self::packet_has_more_unprocessed_transactions(
@@ -494,7 +496,7 @@ impl BankingStage {
debug!(
"@{:?} done processing buffered batches: {} time: {:?}ms tx count: {} tx/s: {}",
timestamp(),
buffered_len,
buffered_packet_batches_len,
proc_start.as_ms(),
new_tx_count,
(new_tx_count as f32) / (proc_start.as_s())
@@ -505,7 +507,7 @@ impl BankingStage {
.fetch_add(proc_start.as_us(), Ordering::Relaxed);
banking_stage_stats
.rebuffered_packets_count
.fetch_add(rebuffered_packets_len, Ordering::Relaxed);
.fetch_add(rebuffered_packet_count, Ordering::Relaxed);
banking_stage_stats
.consumed_buffered_packets_count
.fetch_add(new_tx_count, Ordering::Relaxed);
@@ -550,7 +552,7 @@ impl BankingStage {
socket: &std::net::UdpSocket,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &ClusterInfo,
buffered_packets: &mut UnprocessedPackets,
buffered_packet_batches: &mut UnprocessedPacketBatches,
forward_option: &ForwardOption,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
@@ -592,7 +594,7 @@ impl BankingStage {
my_pubkey,
max_tx_ingestion_ns,
poh_recorder,
buffered_packets,
buffered_packet_batches,
transaction_status_sender,
gossip_vote_sender,
None::<Box<dyn Fn()>>,
@@ -605,7 +607,7 @@ impl BankingStage {
Self::handle_forwarding(
forward_option,
cluster_info,
buffered_packets,
buffered_packet_batches,
poh_recorder,
socket,
false,
@@ -616,7 +618,7 @@ impl BankingStage {
Self::handle_forwarding(
forward_option,
cluster_info,
buffered_packets,
buffered_packet_batches,
poh_recorder,
socket,
true,
@@ -631,7 +633,7 @@ impl BankingStage {
fn handle_forwarding(
forward_option: &ForwardOption,
cluster_info: &ClusterInfo,
buffered_packets: &mut UnprocessedPackets,
buffered_packet_batches: &mut UnprocessedPacketBatches,
poh_recorder: &Arc<Mutex<PohRecorder>>,
socket: &UdpSocket,
hold: bool,
@@ -640,7 +642,7 @@ impl BankingStage {
let addr = match forward_option {
ForwardOption::NotForward => {
if !hold {
buffered_packets.clear();
buffered_packet_batches.clear();
}
return;
}
@@ -653,20 +655,20 @@ impl BankingStage {
Some(addr) => addr,
None => return,
};
let _ = Self::forward_buffered_packets(socket, &addr, buffered_packets, data_budget);
let _ = Self::forward_buffered_packets(socket, &addr, buffered_packet_batches, data_budget);
if hold {
buffered_packets.retain(|(_, index, _)| !index.is_empty());
for (_, _, forwarded) in buffered_packets.iter_mut() {
buffered_packet_batches.retain(|(_, index, _)| !index.is_empty());
for (_, _, forwarded) in buffered_packet_batches.iter_mut() {
*forwarded = true;
}
} else {
buffered_packets.clear();
buffered_packet_batches.clear();
}
}
#[allow(clippy::too_many_arguments)]
fn process_loop(
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
verified_receiver: &CrossbeamReceiver<Vec<PacketBatch>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &ClusterInfo,
recv_start: &mut Instant,
@@ -681,17 +683,17 @@ impl BankingStage {
) {
let recorder = poh_recorder.lock().unwrap().recorder();
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut buffered_packets = VecDeque::with_capacity(batch_limit);
let mut buffered_packet_batches = VecDeque::with_capacity(batch_limit);
let banking_stage_stats = BankingStageStats::new(id);
loop {
let my_pubkey = cluster_info.id();
while !buffered_packets.is_empty() {
while !buffered_packet_batches.is_empty() {
let decision = Self::process_buffered_packets(
&my_pubkey,
&socket,
poh_recorder,
cluster_info,
&mut buffered_packets,
&mut buffered_packet_batches,
&forward_option,
transaction_status_sender.clone(),
&gossip_vote_sender,
@@ -709,7 +711,7 @@ impl BankingStage {
}
}
let recv_timeout = if !buffered_packets.is_empty() {
let recv_timeout = if !buffered_packet_batches.is_empty() {
// If packets are buffered, let's wait for less time on recv from the channel.
// This helps detect the next leader faster, and processing the buffered
// packets quickly
@@ -729,7 +731,7 @@ impl BankingStage {
batch_limit,
transaction_status_sender.clone(),
&gossip_vote_sender,
&mut buffered_packets,
&mut buffered_packet_batches,
&banking_stage_stats,
duplicates,
&recorder,
@@ -933,8 +935,7 @@ impl BankingStage {
gossip_vote_sender: &ReplayVoteSender,
qos_service: &Arc<QosService>,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let tx_costs =
qos_service.compute_transaction_costs(txs.iter(), bank.demote_program_write_locks());
let tx_costs = qos_service.compute_transaction_costs(txs.iter());
let transactions_qos_results =
qos_service.select_transactions_per_cost(txs.iter(), tx_costs.iter(), bank);
@@ -1076,7 +1077,7 @@ impl BankingStage {
// with their packet indexes.
#[allow(clippy::needless_collect)]
fn transactions_from_packets(
msgs: &Packets,
packet_batch: &PacketBatch,
transaction_indexes: &[usize],
feature_set: &Arc<feature_set::FeatureSet>,
votes_only: bool,
@@ -1084,7 +1085,7 @@ impl BankingStage {
transaction_indexes
.iter()
.filter_map(|tx_index| {
let p = &msgs.packets[*tx_index];
let p = &packet_batch.packets[*tx_index];
if votes_only && !p.meta.is_simple_vote_tx {
return None;
}
@@ -1149,7 +1150,7 @@ impl BankingStage {
bank: &Arc<Bank>,
bank_creation_time: &Instant,
poh: &TransactionRecorder,
msgs: &Packets,
packet_batch: &PacketBatch,
packet_indexes: Vec<usize>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
@@ -1158,7 +1159,7 @@ impl BankingStage {
) -> (usize, usize, Vec<usize>) {
let mut packet_conversion_time = Measure::start("packet_conversion");
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
msgs,
packet_batch,
&packet_indexes,
&bank.feature_set,
bank.vote_only_bank(),
@@ -1214,7 +1215,7 @@ impl BankingStage {
fn filter_unprocessed_packets(
bank: &Arc<Bank>,
msgs: &Packets,
packet_batch: &PacketBatch,
transaction_indexes: &[usize],
my_pubkey: &Pubkey,
next_leader: Option<Pubkey>,
@@ -1232,7 +1233,7 @@ impl BankingStage {
let mut unprocessed_packet_conversion_time =
Measure::start("unprocessed_packet_conversion");
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
msgs,
packet_batch,
transaction_indexes,
&bank.feature_set,
bank.vote_only_bank(),
@@ -1282,7 +1283,7 @@ impl BankingStage {
/// Process the incoming packets
fn process_packets(
my_pubkey: &Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
verified_receiver: &CrossbeamReceiver<Vec<PacketBatch>>,
poh: &Arc<Mutex<PohRecorder>>,
recv_start: &mut Instant,
recv_timeout: Duration,
@@ -1290,41 +1291,41 @@ impl BankingStage {
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
buffered_packets: &mut UnprocessedPackets,
buffered_packet_batches: &mut UnprocessedPacketBatches,
banking_stage_stats: &BankingStageStats,
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
recorder: &TransactionRecorder,
qos_service: &Arc<QosService>,
) -> Result<(), RecvTimeoutError> {
let mut recv_time = Measure::start("process_packets_recv");
let mms = verified_receiver.recv_timeout(recv_timeout)?;
let packet_batches = verified_receiver.recv_timeout(recv_timeout)?;
recv_time.stop();
let mms_len = mms.len();
let count: usize = mms.iter().map(|x| x.packets.len()).sum();
let packet_batches_len = packet_batches.len();
let packet_count: usize = packet_batches.iter().map(|x| x.packets.len()).sum();
debug!(
"@{:?} process start stalled for: {:?}ms txs: {} id: {}",
timestamp(),
duration_as_ms(&recv_start.elapsed()),
count,
packet_count,
id,
);
inc_new_counter_debug!("banking_stage-transactions_received", count);
inc_new_counter_debug!("banking_stage-transactions_received", packet_count);
let mut proc_start = Measure::start("process_packets_transactions_process");
let mut new_tx_count = 0;
let mut mms_iter = mms.into_iter();
let mut packet_batch_iter = packet_batches.into_iter();
let mut dropped_packets_count = 0;
let mut dropped_packet_batches_count = 0;
let mut newly_buffered_packets_count = 0;
while let Some(msgs) = mms_iter.next() {
let packet_indexes = Self::generate_packet_indexes(&msgs.packets);
while let Some(packet_batch) = packet_batch_iter.next() {
let packet_indexes = Self::generate_packet_indexes(&packet_batch.packets);
let poh_recorder_bank = poh.lock().unwrap().get_poh_recorder_bank();
let working_bank_start = poh_recorder_bank.working_bank_start();
if PohRecorder::get_working_bank_if_not_expired(&working_bank_start).is_none() {
Self::push_unprocessed(
buffered_packets,
msgs,
buffered_packet_batches,
packet_batch,
packet_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
@@ -1347,7 +1348,7 @@ impl BankingStage {
working_bank,
bank_creation_time,
recorder,
&msgs,
&packet_batch,
packet_indexes,
transaction_status_sender.clone(),
gossip_vote_sender,
@@ -1359,8 +1360,8 @@ impl BankingStage {
// Collect any unprocessed transactions in this batch for forwarding
Self::push_unprocessed(
buffered_packets,
msgs,
buffered_packet_batches,
packet_batch,
unprocessed_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
@@ -1376,19 +1377,19 @@ impl BankingStage {
let next_leader = poh.lock().unwrap().next_slot_leader();
// Walk through the rest of the transactions and filter out the invalid (e.g. too old) ones
#[allow(clippy::while_let_on_iterator)]
while let Some(msgs) = mms_iter.next() {
let packet_indexes = Self::generate_packet_indexes(&msgs.packets);
while let Some(packet_batch) = packet_batch_iter.next() {
let packet_indexes = Self::generate_packet_indexes(&packet_batch.packets);
let unprocessed_indexes = Self::filter_unprocessed_packets(
working_bank,
&msgs,
&packet_batch,
&packet_indexes,
my_pubkey,
next_leader,
banking_stage_stats,
);
Self::push_unprocessed(
buffered_packets,
msgs,
buffered_packet_batches,
packet_batch,
unprocessed_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
@@ -1409,11 +1410,11 @@ impl BankingStage {
debug!(
"@{:?} done processing transaction batches: {} time: {:?}ms tx count: {} tx/s: {} total count: {} id: {}",
timestamp(),
mms_len,
packet_batches_len,
proc_start.as_ms(),
new_tx_count,
(new_tx_count as f32) / (proc_start.as_s()),
count,
packet_count,
id,
);
banking_stage_stats
@@ -1421,7 +1422,7 @@ impl BankingStage {
.fetch_add(proc_start.as_us(), Ordering::Relaxed);
banking_stage_stats
.process_packets_count
.fetch_add(count, Ordering::Relaxed);
.fetch_add(packet_count, Ordering::Relaxed);
banking_stage_stats
.new_tx_count
.fetch_add(new_tx_count, Ordering::Relaxed);
@@ -1436,9 +1437,12 @@ impl BankingStage {
.fetch_add(newly_buffered_packets_count, Ordering::Relaxed);
banking_stage_stats
.current_buffered_packet_batches_count
.swap(buffered_packets.len(), Ordering::Relaxed);
.swap(buffered_packet_batches.len(), Ordering::Relaxed);
banking_stage_stats.current_buffered_packets_count.swap(
buffered_packets.iter().map(|packets| packets.1.len()).sum(),
buffered_packet_batches
.iter()
.map(|packets| packets.1.len())
.sum(),
Ordering::Relaxed,
);
*recv_start = Instant::now();
@@ -1446,8 +1450,8 @@ impl BankingStage {
}
fn push_unprocessed(
unprocessed_packets: &mut UnprocessedPackets,
packets: Packets,
unprocessed_packet_batches: &mut UnprocessedPacketBatches,
packet_batch: PacketBatch,
mut packet_indexes: Vec<usize>,
dropped_packet_batches_count: &mut usize,
dropped_packets_count: &mut usize,
@@ -1462,7 +1466,7 @@ impl BankingStage {
let mut duplicates = duplicates.lock().unwrap();
let (cache, hasher) = duplicates.deref_mut();
packet_indexes.retain(|i| {
let packet_hash = hasher.hash_packet(&packets.packets[*i]);
let packet_hash = hasher.hash_packet(&packet_batch.packets[*i]);
match cache.get_mut(&packet_hash) {
Some(_hash) => false,
None => {
@@ -1483,14 +1487,14 @@ impl BankingStage {
);
}
if Self::packet_has_more_unprocessed_transactions(&packet_indexes) {
if unprocessed_packets.len() >= batch_limit {
if unprocessed_packet_batches.len() >= batch_limit {
*dropped_packet_batches_count += 1;
if let Some(dropped_batch) = unprocessed_packets.pop_front() {
if let Some(dropped_batch) = unprocessed_packet_batches.pop_front() {
*dropped_packets_count += dropped_batch.1.len();
}
}
*newly_buffered_packets_count += packet_indexes.len();
unprocessed_packets.push_back((packets, packet_indexes, false));
unprocessed_packet_batches.push_back((packet_batch, packet_indexes, false));
}
}
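Condensed, `push_unprocessed` hashes packets to drop duplicates, then buffers the batch, evicting the oldest entry once `batch_limit` is reached. A generic sketch of the eviction step (`buffer_batch` is a hypothetical helper; `B` stands in for `PacketBatch`, and the tuple shape matches the `(PacketBatch, Vec<usize>, bool)` entries used throughout, with `false` meaning not yet forwarded):

use std::collections::VecDeque;

// Sketch: drop the oldest buffered batch when the buffer is full, then
// append the new one; the dropped entry is returned so the caller can
// count dropped batches and dropped packets.
fn buffer_batch<B>(
    buf: &mut VecDeque<(B, Vec<usize>, bool)>,
    batch: B,
    unprocessed_indexes: Vec<usize>,
    batch_limit: usize,
) -> Option<(B, Vec<usize>, bool)> {
    let dropped = if buf.len() >= batch_limit {
        buf.pop_front()
    } else {
        None
    };
    buf.push_back((batch, unprocessed_indexes, false));
    dropped
}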
@@ -1560,7 +1564,7 @@ mod tests {
get_tmp_ledger_path,
leader_schedule_cache::LeaderScheduleCache,
},
solana_perf::packet::to_packets_chunked,
solana_perf::packet::to_packet_batches,
solana_poh::{
poh_recorder::{create_test_recorder, Record, WorkingBankEntry},
poh_service::PohService,
@@ -1697,7 +1701,9 @@ mod tests {
Blockstore::destroy(&ledger_path).unwrap();
}
pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec<u8>)>) -> Vec<Packets> {
pub fn convert_from_old_verified(
mut with_vers: Vec<(PacketBatch, Vec<u8>)>,
) -> Vec<PacketBatch> {
with_vers.iter_mut().for_each(|(b, v)| {
b.packets
.iter_mut()
@@ -1769,18 +1775,18 @@ mod tests {
let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash);
// send 'em over
let packets = to_packets_chunked(&[tx_no_ver, tx_anf, tx], 3);
let packet_batches = to_packet_batches(&[tx_no_ver, tx_anf, tx], 3);
// glad they all fit
assert_eq!(packets.len(), 1);
assert_eq!(packet_batches.len(), 1);
let packets = packets
let packet_batches = packet_batches
.into_iter()
.map(|packets| (packets, vec![0u8, 1u8, 1u8]))
.map(|batch| (batch, vec![0u8, 1u8, 1u8]))
.collect();
let packets = convert_from_old_verified(packets);
let packet_batches = convert_from_old_verified(packet_batches);
verified_sender // no_ver, anf, tx
.send(packets)
.send(packet_batches)
.unwrap();
drop(verified_sender);
@@ -1846,24 +1852,24 @@ mod tests {
let tx =
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 2, genesis_config.hash());
let packets = to_packets_chunked(&[tx], 1);
let packets = packets
let packet_batches = to_packet_batches(&[tx], 1);
let packet_batches = packet_batches
.into_iter()
.map(|packets| (packets, vec![1u8]))
.map(|batch| (batch, vec![1u8]))
.collect();
let packets = convert_from_old_verified(packets);
verified_sender.send(packets).unwrap();
let packet_batches = convert_from_old_verified(packet_batches);
verified_sender.send(packet_batches).unwrap();
// Process a second batch that uses the same `from` account, so it conflicts with the TX above
let tx =
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 1, genesis_config.hash());
let packets = to_packets_chunked(&[tx], 1);
let packets = packets
let packet_batches = to_packet_batches(&[tx], 1);
let packet_batches = packet_batches
.into_iter()
.map(|packets| (packets, vec![1u8]))
.map(|batch| (batch, vec![1u8]))
.collect();
let packets = convert_from_old_verified(packets);
verified_sender.send(packets).unwrap();
let packet_batches = convert_from_old_verified(packet_batches);
verified_sender.send(packet_batches).unwrap();
let (vote_sender, vote_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
@@ -2381,9 +2387,9 @@ mod tests {
fn test_filter_valid_packets() {
solana_logger::setup();
let mut all_packets = (0..16)
let mut packet_batches = (0..16)
.map(|packets_id| {
let packets = Packets::new(
let packet_batch = PacketBatch::new(
(0..32)
.map(|packet_id| {
let mut p = Packet::default();
@@ -2395,11 +2401,11 @@ mod tests {
let valid_indexes = (0..32)
.filter_map(|x| if x % 2 != 0 { Some(x as usize) } else { None })
.collect_vec();
(packets, valid_indexes, false)
(packet_batch, valid_indexes, false)
})
.collect_vec();
let result = BankingStage::filter_valid_packets_for_forwarding(all_packets.iter());
let result = BankingStage::filter_valid_packets_for_forwarding(packet_batches.iter());
assert_eq!(result.len(), 256);
@@ -2413,8 +2419,8 @@ mod tests {
})
.collect_vec();
all_packets[0].2 = true;
let result = BankingStage::filter_valid_packets_for_forwarding(all_packets.iter());
packet_batches[0].2 = true;
let result = BankingStage::filter_valid_packets_for_forwarding(packet_batches.iter());
assert_eq!(result.len(), 240);
}
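The expected counts follow from the setup: 16 batches × 16 odd-indexed packets each = 256 forwardable packets; marking one batch as already forwarded (`.2 = true`) excludes its 16 packets, leaving 15 × 16 = 240.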
@@ -2666,12 +2672,15 @@ mod tests {
setup_conflicting_transactions(&ledger_path);
let recorder = poh_recorder.lock().unwrap().recorder();
let num_conflicting_transactions = transactions.len();
let mut packets_vec = to_packets_chunked(&transactions, num_conflicting_transactions);
assert_eq!(packets_vec.len(), 1);
assert_eq!(packets_vec[0].packets.len(), num_conflicting_transactions);
let all_packets = packets_vec.pop().unwrap();
let mut buffered_packets: UnprocessedPackets = vec![(
all_packets,
let mut packet_batches = to_packet_batches(&transactions, num_conflicting_transactions);
assert_eq!(packet_batches.len(), 1);
assert_eq!(
packet_batches[0].packets.len(),
num_conflicting_transactions
);
let packet_batch = packet_batches.pop().unwrap();
let mut buffered_packet_batches: UnprocessedPacketBatches = vec![(
packet_batch,
(0..num_conflicting_transactions).into_iter().collect(),
false,
)]
@@ -2687,7 +2696,7 @@ mod tests {
&Pubkey::default(),
max_tx_processing_ns,
&poh_recorder,
&mut buffered_packets,
&mut buffered_packet_batches,
None,
&gossip_vote_sender,
None::<Box<dyn Fn()>>,
@@ -2695,7 +2704,10 @@ mod tests {
&recorder,
&Arc::new(QosService::new(Arc::new(RwLock::new(CostModel::default())))),
);
assert_eq!(buffered_packets[0].1.len(), num_conflicting_transactions);
assert_eq!(
buffered_packet_batches[0].1.len(),
num_conflicting_transactions
);
// When the poh recorder has a bank, it should process all non-conflicting buffered packets.
// Processes one packet per iteration of the loop
for num_expected_unprocessed in (0..num_conflicting_transactions).rev() {
@@ -2704,7 +2716,7 @@ mod tests {
&Pubkey::default(),
max_tx_processing_ns,
&poh_recorder,
&mut buffered_packets,
&mut buffered_packet_batches,
None,
&gossip_vote_sender,
None::<Box<dyn Fn()>>,
@@ -2713,9 +2725,9 @@ mod tests {
&Arc::new(QosService::new(Arc::new(RwLock::new(CostModel::default())))),
);
if num_expected_unprocessed == 0 {
assert!(buffered_packets.is_empty())
assert!(buffered_packet_batches.is_empty())
} else {
assert_eq!(buffered_packets[0].1.len(), num_expected_unprocessed);
assert_eq!(buffered_packet_batches[0].1.len(), num_expected_unprocessed);
}
}
poh_recorder
@@ -2735,12 +2747,12 @@ mod tests {
let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) =
setup_conflicting_transactions(&ledger_path);
let num_conflicting_transactions = transactions.len();
let packets_vec = to_packets_chunked(&transactions, 1);
assert_eq!(packets_vec.len(), num_conflicting_transactions);
for single_packets in &packets_vec {
assert_eq!(single_packets.packets.len(), 1);
let packet_batches = to_packet_batches(&transactions, 1);
assert_eq!(packet_batches.len(), num_conflicting_transactions);
for single_packet_batch in &packet_batches {
assert_eq!(single_packet_batch.packets.len(), 1);
}
let mut buffered_packets: UnprocessedPackets = packets_vec
let mut buffered_packet_batches: UnprocessedPacketBatches = packet_batches
.clone()
.into_iter()
.map(|single_packets| (single_packets, vec![0], false))
@@ -2754,8 +2766,8 @@ mod tests {
continue_receiver.recv().unwrap();
});
// When the poh recorder has a bank, it should process all non conflicting buffered packets.
// Because each conflicting transaction is in it's own `Packet` within `packets_vec`, then
// each iteration of this loop will process one element of `packets_vec` per iteration of the
// Because each conflicting transaction is in its own `Packet` within a `PacketBatch`, then
// each iteration of this loop will process one element of the batch per iteration of the
// loop.
let interrupted_iteration = 1;
poh_recorder.lock().unwrap().set_bank(&bank);
@@ -2770,7 +2782,7 @@ mod tests {
&Pubkey::default(),
std::u128::MAX,
&poh_recorder_,
&mut buffered_packets,
&mut buffered_packet_batches,
None,
&gossip_vote_sender,
test_fn,
@@ -2782,13 +2794,13 @@ mod tests {
// Check everything is correct. All indexes after `interrupted_iteration`
// should still be unprocessed
assert_eq!(
buffered_packets.len(),
packets_vec[interrupted_iteration + 1..].len()
buffered_packet_batches.len(),
packet_batches[interrupted_iteration + 1..].len()
);
for ((remaining_unprocessed_packet, _, _forwarded), original_packet) in
buffered_packets
buffered_packet_batches
.iter()
.zip(&packets_vec[interrupted_iteration + 1..])
.zip(&packet_batches[interrupted_iteration + 1..])
{
assert_eq!(
remaining_unprocessed_packet.packets[0],
@@ -2823,10 +2835,10 @@ mod tests {
#[test]
fn test_forwarder_budget() {
solana_logger::setup();
// Create `Packets` with 1 unprocessed element
let single_element_packets = Packets::new(vec![Packet::default()]);
let mut unprocessed_packets: UnprocessedPackets =
vec![(single_element_packets, vec![0], false)]
// Create `PacketBatch` with 1 unprocessed packet
let single_packet_batch = PacketBatch::new(vec![Packet::default()]);
let mut unprocessed_packets: UnprocessedPacketBatches =
vec![(single_packet_batch, vec![0], false)]
.into_iter()
.collect();
@@ -2872,14 +2884,16 @@ mod tests {
#[test]
fn test_push_unprocessed_batch_limit() {
solana_logger::setup();
// Create `Packets` with 2 unprocessed elements
let new_packets = Packets::new(vec![Packet::default(); 2]);
let mut unprocessed_packets: UnprocessedPackets =
vec![(new_packets, vec![0, 1], false)].into_iter().collect();
// Create `PacketBatch` with 2 unprocessed packets
let new_packet_batch = PacketBatch::new(vec![Packet::default(); 2]);
let mut unprocessed_packets: UnprocessedPacketBatches =
vec![(new_packet_batch, vec![0, 1], false)]
.into_iter()
.collect();
// Set the limit to 2
let batch_limit = 2;
// Create some new unprocessed packets
let new_packets = Packets::new(vec![Packet::default()]);
// Create new unprocessed packets and add to a batch
let new_packet_batch = PacketBatch::new(vec![Packet::default()]);
let packet_indexes = vec![];
let duplicates = Arc::new(Mutex::new((
@@ -2894,7 +2908,7 @@ mod tests {
// packets are not added to the unprocessed queue
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets.clone(),
new_packet_batch.clone(),
packet_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
@@ -2913,7 +2927,7 @@ mod tests {
let packet_indexes = vec![0];
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets,
new_packet_batch,
packet_indexes.clone(),
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
@@ -2929,7 +2943,7 @@ mod tests {
// Because we've reached the batch limit, old unprocessed packets are
// dropped and the new one is appended to the end
let new_packets = Packets::new(vec![Packet::from_data(
let new_packet_batch = PacketBatch::new(vec![Packet::from_data(
Some(&SocketAddr::from(([127, 0, 0, 1], 8001))),
42,
)
@@ -2937,7 +2951,7 @@ mod tests {
assert_eq!(unprocessed_packets.len(), batch_limit);
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets.clone(),
new_packet_batch.clone(),
packet_indexes.clone(),
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
@@ -2947,7 +2961,10 @@ mod tests {
&banking_stage_stats,
);
assert_eq!(unprocessed_packets.len(), 2);
assert_eq!(unprocessed_packets[1].0.packets[0], new_packets.packets[0]);
assert_eq!(
unprocessed_packets[1].0.packets[0],
new_packet_batch.packets[0]
);
assert_eq!(dropped_packet_batches_count, 1);
assert_eq!(dropped_packets_count, 2);
assert_eq!(newly_buffered_packets_count, 2);
@@ -2955,7 +2972,7 @@ mod tests {
// Check duplicates are dropped (newly buffered shouldn't change)
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets.clone(),
new_packet_batch.clone(),
packet_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
@@ -2965,7 +2982,10 @@ mod tests {
&banking_stage_stats,
);
assert_eq!(unprocessed_packets.len(), 2);
assert_eq!(unprocessed_packets[1].0.packets[0], new_packets.packets[0]);
assert_eq!(
unprocessed_packets[1].0.packets[0],
new_packet_batch.packets[0]
);
assert_eq!(dropped_packet_batches_count, 1);
assert_eq!(dropped_packets_count, 2);
assert_eq!(newly_buffered_packets_count, 2);
@@ -2988,19 +3008,19 @@ mod tests {
fn make_test_packets(
transactions: Vec<Transaction>,
vote_indexes: Vec<usize>,
) -> (Packets, Vec<usize>) {
) -> (PacketBatch, Vec<usize>) {
let capacity = transactions.len();
let mut packets = Packets::with_capacity(capacity);
let mut packet_batch = PacketBatch::with_capacity(capacity);
let mut packet_indexes = Vec::with_capacity(capacity);
packets.packets.resize(capacity, Packet::default());
packet_batch.packets.resize(capacity, Packet::default());
for (index, tx) in transactions.iter().enumerate() {
Packet::populate_packet(&mut packets.packets[index], None, tx).ok();
Packet::populate_packet(&mut packet_batch.packets[index], None, tx).ok();
packet_indexes.push(index);
}
for index in vote_indexes.iter() {
packets.packets[*index].meta.is_simple_vote_tx = true;
packet_batch.packets[*index].meta.is_simple_vote_tx = true;
}
(packets, packet_indexes)
(packet_batch, packet_indexes)
}
#[test]
@@ -3022,12 +3042,12 @@ mod tests {
// packets with no votes
{
let vote_indexes = vec![];
let (packets, packet_indexes) =
let (packet_batch, packet_indexes) =
make_test_packets(vec![transfer_tx.clone(), transfer_tx.clone()], vote_indexes);
let mut votes_only = false;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_batch,
&packet_indexes,
&Arc::new(FeatureSet::default()),
votes_only,
@@ -3037,7 +3057,7 @@ mod tests {
votes_only = true;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_batch,
&packet_indexes,
&Arc::new(FeatureSet::default()),
votes_only,
@@ -3049,14 +3069,14 @@ mod tests {
// packets with some votes
{
let vote_indexes = vec![0, 2];
let (packets, packet_indexes) = make_test_packets(
let (packet_batch, packet_indexes) = make_test_packets(
vec![vote_tx.clone(), transfer_tx, vote_tx.clone()],
vote_indexes,
);
let mut votes_only = false;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_batch,
&packet_indexes,
&Arc::new(FeatureSet::default()),
votes_only,
@@ -3066,7 +3086,7 @@ mod tests {
votes_only = true;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_batch,
&packet_indexes,
&Arc::new(FeatureSet::default()),
votes_only,
@@ -3078,14 +3098,14 @@ mod tests {
// packets with all votes
{
let vote_indexes = vec![0, 1, 2];
let (packets, packet_indexes) = make_test_packets(
let (packet_batch, packet_indexes) = make_test_packets(
vec![vote_tx.clone(), vote_tx.clone(), vote_tx],
vote_indexes,
);
let mut votes_only = false;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_batch,
&packet_indexes,
&Arc::new(FeatureSet::default()),
votes_only,
@@ -3095,7 +3115,7 @@ mod tests {
votes_only = true;
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
&packets,
&packet_batch,
&packet_indexes,
&Arc::new(FeatureSet::default()),
votes_only,

View File

@@ -154,7 +154,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
)
.expect("Expected to create a new shredder");
let (data_shreds, _, _) = shredder.entries_to_shreds(
let (data_shreds, _) = shredder.entries_to_shreds(
keypair,
&receive_results.entries,
last_tick_height == bank.max_tick_height() && last_entries.is_none(),
@@ -163,10 +163,10 @@ impl BroadcastRun for BroadcastDuplicatesRun {
self.next_shred_index += data_shreds.len() as u32;
let last_shreds = last_entries.map(|(original_last_entry, duplicate_extra_last_entries)| {
let (original_last_data_shred, _, _) =
let (original_last_data_shred, _) =
shredder.entries_to_shreds(keypair, &[original_last_entry], true, self.next_shred_index);
let (partition_last_data_shred, _, _) =
let (partition_last_data_shred, _) =
// Don't mark the last shred as last so that validators won't know that
// they've gotten all the shreds, and will continue trying to repair
shredder.entries_to_shreds(keypair, &duplicate_extra_last_entries, true, self.next_shred_index);

View File

@@ -52,7 +52,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
)
.expect("Expected to create a new shredder");
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
keypair,
&receive_results.entries,
last_tick_height == bank.max_tick_height(),
@@ -69,7 +69,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
.map(|_| Entry::new(&self.last_blockhash, 0, vec![]))
.collect();
let (fake_data_shreds, fake_coding_shreds, _) = shredder.entries_to_shreds(
let (fake_data_shreds, fake_coding_shreds) = shredder.entries_to_shreds(
keypair,
&fake_entries,
last_tick_height == bank.max_tick_height(),

View File

@@ -83,7 +83,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
)
.expect("Expected to create a new shredder");
let (data_shreds, _, _) = shredder.entries_to_shreds(
let (data_shreds, _) = shredder.entries_to_shreds(
keypair,
&receive_results.entries,
last_tick_height == bank.max_tick_height() && last_entries.is_none(),
@@ -92,10 +92,10 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
self.next_shred_index += data_shreds.len() as u32;
let last_shreds = last_entries.map(|(good_last_entry, bad_last_entry)| {
let (good_last_data_shred, _, _) =
let (good_last_data_shred, _) =
shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index);
let (bad_last_data_shred, _, _) =
let (bad_last_data_shred, _) =
// Don't mark the last shred as last so that validators won't know that
// they've gotten all the shreds, and will continue trying to repair
shredder.entries_to_shreds(keypair, &[bad_last_entry], false, self.next_shred_index);

View File

@@ -119,17 +119,16 @@ impl StandardBroadcastRun {
None => (0, 0),
},
};
let (data_shreds, next_shred_index) =
Shredder::new(slot, parent_slot, reference_tick, self.shred_version)
.unwrap()
.entries_to_data_shreds(
keypair,
entries,
is_slot_end,
next_shred_index,
fec_set_offset,
process_stats,
);
let data_shreds = Shredder::new(slot, parent_slot, reference_tick, self.shred_version)
.unwrap()
.entries_to_data_shreds(
keypair,
entries,
is_slot_end,
next_shred_index,
fec_set_offset,
process_stats,
);
let mut data_shreds_buffer = match &mut self.unfinished_slot {
Some(state) => {
assert_eq!(state.slot, slot);
@@ -138,6 +137,10 @@ impl StandardBroadcastRun {
None => Vec::default(),
};
data_shreds_buffer.extend(data_shreds.clone());
let next_shred_index = match data_shreds.iter().map(Shred::index).max() {
Some(index) => index + 1,
None => next_shred_index,
};
self.unfinished_slot = Some(UnfinishedSlotInfo {
next_shred_index,
slot,
@@ -596,7 +599,7 @@ mod test {
.expect("Expected a shred that signals an interrupt");
// Validate the shred
assert_eq!(shred.parent(), Some(parent));
assert_eq!(shred.parent().unwrap(), parent);
assert_eq!(shred.slot(), slot);
assert_eq!(shred.index(), next_shred_index);
assert!(shred.is_data());

View File

@@ -22,7 +22,7 @@ use {
solana_ledger::blockstore::Blockstore,
solana_measure::measure::Measure,
solana_metrics::inc_new_counter_debug,
solana_perf::packet::{self, Packets},
solana_perf::packet::{self, PacketBatch},
solana_poh::poh_recorder::PohRecorder,
solana_rpc::{
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
@@ -296,7 +296,7 @@ impl ClusterInfoVoteListener {
pub fn new(
exit: &Arc<AtomicBool>,
cluster_info: Arc<ClusterInfo>,
verified_packets_sender: CrossbeamSender<Vec<Packets>>,
verified_packets_sender: CrossbeamSender<Vec<PacketBatch>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
vote_tracker: Arc<VoteTracker>,
bank_forks: Arc<RwLock<BankForks>>,
@@ -393,14 +393,14 @@ impl ClusterInfoVoteListener {
#[allow(clippy::type_complexity)]
fn verify_votes(votes: Vec<Transaction>) -> (Vec<Transaction>, Vec<VerifiedVoteMetadata>) {
let mut msgs = packet::to_packets_chunked(&votes, 1);
let mut packet_batches = packet::to_packet_batches(&votes, 1);
// Votes should already be filtered by this point.
let reject_non_vote = false;
sigverify::ed25519_verify_cpu(&mut msgs, reject_non_vote);
sigverify::ed25519_verify_cpu(&mut packet_batches, reject_non_vote);
let (vote_txs, vote_metadata) = izip!(votes.into_iter(), msgs,)
.filter_map(|(vote_tx, packet)| {
let (vote_txs, vote_metadata) = izip!(votes.into_iter(), packet_batches)
.filter_map(|(vote_tx, packet_batch)| {
let (vote, vote_account_key) = vote_transaction::parse_vote_transaction(&vote_tx)
.and_then(|(vote_account_key, vote, _)| {
if vote.slots.is_empty() {
@@ -410,16 +410,16 @@ impl ClusterInfoVoteListener {
}
})?;
// to_packets_chunked() above split into 1 packet long chunks
assert_eq!(packet.packets.len(), 1);
if !packet.packets[0].meta.discard {
// to_packet_batches() above splits into batches of one packet each
assert_eq!(packet_batch.packets.len(), 1);
if !packet_batch.packets[0].meta.discard {
if let Some(signature) = vote_tx.signatures.first().cloned() {
return Some((
vote_tx,
VerifiedVoteMetadata {
vote_account_key,
vote,
packet,
packet_batch,
signature,
},
));
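Splitting into one-packet batches is what lets verification results be zipped back onto the votes by position. A generic, self-contained sketch of that pairing step (names here are illustrative, not from the crate):

// With one packet per batch, the i-th verification result lines up
// with the i-th input by construction.
fn keep_verified<T>(items: Vec<T>, passed: Vec<bool>) -> Vec<T> {
    items
        .into_iter()
        .zip(passed)
        .filter_map(|(item, ok)| if ok { Some(item) } else { None })
        .collect()
}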
@@ -435,7 +435,7 @@ impl ClusterInfoVoteListener {
exit: Arc<AtomicBool>,
verified_vote_label_packets_receiver: VerifiedLabelVotePacketsReceiver,
poh_recorder: Arc<Mutex<PohRecorder>>,
verified_packets_sender: &CrossbeamSender<Vec<Packets>>,
verified_packets_sender: &CrossbeamSender<Vec<PacketBatch>>,
) -> Result<()> {
let mut verified_vote_packets = VerifiedVotePackets::default();
let mut time_since_lock = Instant::now();
@@ -483,7 +483,7 @@ impl ClusterInfoVoteListener {
fn check_for_leader_bank_and_send_votes(
bank_vote_sender_state_option: &mut Option<BankVoteSenderState>,
current_working_bank: Arc<Bank>,
verified_packets_sender: &CrossbeamSender<Vec<Packets>>,
verified_packets_sender: &CrossbeamSender<Vec<PacketBatch>>,
verified_vote_packets: &VerifiedVotePackets,
) -> Result<()> {
// We will take this lock at most once every `BANK_SEND_VOTES_LOOP_SLEEP_MS`
@@ -960,7 +960,10 @@ mod tests {
signature::{Keypair, Signature, Signer},
},
solana_vote_program::vote_state::Vote,
std::{collections::BTreeSet, sync::Arc},
std::{
collections::BTreeSet,
sync::{atomic::AtomicU64, Arc},
},
};
#[test]
@@ -983,9 +986,9 @@ mod tests {
use bincode::serialized_size;
info!("max vote size {}", serialized_size(&vote_tx).unwrap());
let msgs = packet::to_packets_chunked(&[vote_tx], 1); // panics if won't fit
let packet_batches = packet::to_packet_batches(&[vote_tx], 1); // panics if won't fit
assert_eq!(msgs.len(), 1);
assert_eq!(packet_batches.len(), 1);
}
fn run_vote_contains_authorized_voter(hash: Option<Hash>) {
@@ -1646,8 +1649,10 @@ mod tests {
let vote_tracker = VoteTracker::new(&bank);
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,
@@ -1765,8 +1770,10 @@ mod tests {
let bank = bank_forks.read().unwrap().get(0).unwrap().clone();
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,
@@ -1815,7 +1822,7 @@ mod tests {
fn verify_packets_len(packets: &[VerifiedVoteMetadata], ref_value: usize) {
let num_packets: usize = packets
.iter()
.map(|vote_metadata| vote_metadata.packet.packets.len())
.map(|vote_metadata| vote_metadata.packet_batch.packets.len())
.sum();
assert_eq!(num_packets, ref_value);
}

View File

@@ -97,11 +97,8 @@ impl AggregateCommitmentService {
return Ok(());
}
let mut aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
while let Ok(new_data) = receiver.try_recv() {
aggregation_data = new_data;
}
let aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
let aggregation_data = receiver.try_iter().last().unwrap_or(aggregation_data);
let ancestors = aggregation_data.bank.status_cache_ancestors();
if ancestors.is_empty() {

View File

@@ -6,10 +6,10 @@ use {
result::{Error, Result},
},
solana_metrics::{inc_new_counter_debug, inc_new_counter_info},
solana_perf::{packet::PacketsRecycler, recycler::Recycler},
solana_perf::{packet::PacketBatchRecycler, recycler::Recycler},
solana_poh::poh_recorder::PohRecorder,
solana_sdk::clock::DEFAULT_TICKS_PER_SLOT,
solana_streamer::streamer::{self, PacketReceiver, PacketSender},
solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender},
std::{
net::UdpSocket,
sync::{
@@ -34,7 +34,7 @@ impl FetchStage {
exit: &Arc<AtomicBool>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
coalesce_ms: u64,
) -> (Self, PacketReceiver, PacketReceiver) {
) -> (Self, PacketBatchReceiver, PacketBatchReceiver) {
let (sender, receiver) = channel();
let (vote_sender, vote_receiver) = channel();
(
@@ -58,8 +58,8 @@ impl FetchStage {
tpu_forwards_sockets: Vec<UdpSocket>,
tpu_vote_sockets: Vec<UdpSocket>,
exit: &Arc<AtomicBool>,
sender: &PacketSender,
vote_sender: &PacketSender,
sender: &PacketBatchSender,
vote_sender: &PacketBatchSender,
poh_recorder: &Arc<Mutex<PohRecorder>>,
coalesce_ms: u64,
) -> Self {
@@ -79,18 +79,18 @@ impl FetchStage {
}
fn handle_forwarded_packets(
recvr: &PacketReceiver,
sendr: &PacketSender,
recvr: &PacketBatchReceiver,
sendr: &PacketBatchSender,
poh_recorder: &Arc<Mutex<PohRecorder>>,
) -> Result<()> {
let msgs = recvr.recv()?;
let mut len = msgs.packets.len();
let mut batch = vec![msgs];
while let Ok(more) = recvr.try_recv() {
len += more.packets.len();
batch.push(more);
let packet_batch = recvr.recv()?;
let mut num_packets = packet_batch.packets.len();
let mut packet_batches = vec![packet_batch];
while let Ok(packet_batch) = recvr.try_recv() {
num_packets += packet_batch.packets.len();
packet_batches.push(packet_batch);
// Read at most 1K transactions in a loop
if len > 1024 {
if num_packets > 1024 {
break;
}
}
@@ -100,15 +100,15 @@ impl FetchStage {
.unwrap()
.would_be_leader(HOLD_TRANSACTIONS_SLOT_OFFSET.saturating_mul(DEFAULT_TICKS_PER_SLOT))
{
inc_new_counter_debug!("fetch_stage-honor_forwards", len);
for packets in batch {
inc_new_counter_debug!("fetch_stage-honor_forwards", num_packets);
for packet_batch in packet_batches {
#[allow(clippy::question_mark)]
if sendr.send(packets).is_err() {
if sendr.send(packet_batch).is_err() {
return Err(Error::Send);
}
}
} else {
inc_new_counter_info!("fetch_stage-discard_forwards", len);
inc_new_counter_info!("fetch_stage-discard_forwards", num_packets);
}
Ok(())
@@ -119,12 +119,12 @@ impl FetchStage {
tpu_forwards_sockets: Vec<Arc<UdpSocket>>,
tpu_vote_sockets: Vec<Arc<UdpSocket>>,
exit: &Arc<AtomicBool>,
sender: &PacketSender,
vote_sender: &PacketSender,
sender: &PacketBatchSender,
vote_sender: &PacketBatchSender,
poh_recorder: &Arc<Mutex<PohRecorder>>,
coalesce_ms: u64,
) -> Self {
let recycler: PacketsRecycler = Recycler::warmed(1000, 1024);
let recycler: PacketBatchRecycler = Recycler::warmed(1000, 1024);
let tpu_threads = sockets.into_iter().map(|socket| {
streamer::receiver(

View File

@@ -164,12 +164,9 @@ impl LedgerCleanupService {
}
fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
Ok(root)
Ok(new_root_receiver.try_iter().last().unwrap_or(root))
}
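Both this change and the AggregateCommitmentService change above adopt the same drain-to-latest idiom: block for one value, then consume anything already queued without blocking and keep only the newest. A self-contained sketch (std::sync::mpsc shown; crossbeam's Receiver offers the same recv_timeout/try_iter API):

use std::{
    sync::mpsc::{Receiver, RecvTimeoutError},
    time::Duration,
};

// Block up to one second for the first value, then drain the queue
// without blocking and return the most recent value received.
fn recv_latest<T>(rx: &Receiver<T>) -> Result<T, RecvTimeoutError> {
    let first = rx.recv_timeout(Duration::from_secs(1))?;
    Ok(rx.try_iter().last().unwrap_or(first))
}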
pub fn cleanup_ledger(

View File

@@ -78,13 +78,12 @@ impl QosService {
pub fn compute_transaction_costs<'a>(
&self,
transactions: impl Iterator<Item = &'a SanitizedTransaction>,
demote_program_write_locks: bool,
) -> Vec<TransactionCost> {
let mut compute_cost_time = Measure::start("compute_cost_time");
let cost_model = self.cost_model.read().unwrap();
let txs_costs: Vec<_> = transactions
.map(|tx| {
let cost = cost_model.calculate_cost(tx, demote_program_write_locks);
let cost = cost_model.calculate_cost(tx);
debug!(
"transaction {:?}, cost {:?}, cost sum {}",
tx,
@@ -250,7 +249,7 @@ mod tests {
let cost_model = Arc::new(RwLock::new(CostModel::default()));
let qos_service = QosService::new(cost_model.clone());
let txs_costs = qos_service.compute_transaction_costs(txs.iter(), false);
let txs_costs = qos_service.compute_transaction_costs(txs.iter());
// verify the size of txs_costs and its contents
assert_eq!(txs_costs.len(), txs.len());
@@ -260,11 +259,7 @@ mod tests {
.map(|(index, cost)| {
assert_eq!(
cost.sum(),
cost_model
.read()
.unwrap()
.calculate_cost(&txs[index], false)
.sum()
cost_model.read().unwrap().calculate_cost(&txs[index]).sum()
);
})
.collect_vec();
@@ -295,14 +290,14 @@ mod tests {
let transfer_tx_cost = cost_model
.read()
.unwrap()
.calculate_cost(&transfer_tx, false)
.calculate_cost(&transfer_tx)
.sum();
// make a vec of txs
let txs = vec![transfer_tx.clone(), vote_tx.clone(), transfer_tx, vote_tx];
let qos_service = QosService::new(cost_model);
let txs_costs = qos_service.compute_transaction_costs(txs.iter(), false);
let txs_costs = qos_service.compute_transaction_costs(txs.iter());
// set cost tracker limit to fit 1 transfer tx, vote tx bypasses limit check
let cost_limit = transfer_tx_cost;
@@ -348,7 +343,7 @@ mod tests {
.name("test-producer-1".to_string())
.spawn(move || {
debug!("thread 1 starts with {} txs", txs_1.len());
let tx_costs = qos_service_1.compute_transaction_costs(txs_1.iter(), false);
let tx_costs = qos_service_1.compute_transaction_costs(txs_1.iter());
assert_eq!(txs_count, tx_costs.len());
debug!(
"thread 1 done, generated {} count, see service count as {}",
@@ -365,7 +360,7 @@ mod tests {
.name("test-producer-2".to_string())
.spawn(move || {
debug!("thread 2 starts with {} txs", txs_2.len());
let tx_costs = qos_service_2.compute_transaction_costs(txs_2.iter(), false);
let tx_costs = qos_service_2.compute_transaction_costs(txs_2.iter());
assert_eq!(txs_count, tx_costs.len());
debug!(
"thread 2 done, generated {} count, see service count as {}",

View File

@@ -57,7 +57,7 @@ pub fn get_unknown_last_index(
.entry(slot)
.or_insert_with(|| blockstore.meta(slot).unwrap());
if let Some(slot_meta) = slot_meta {
if slot_meta.known_last_index().is_none() {
if slot_meta.last_index.is_none() {
let shred_index = blockstore.get_index(slot).unwrap();
let num_processed_shreds = if let Some(shred_index) = shred_index {
shred_index.data().num_shreds() as u64
@@ -86,17 +86,17 @@ fn get_unrepaired_path(
) -> Vec<Slot> {
let mut path = Vec::new();
let mut slot = start_slot;
while !visited.contains(&slot) {
visited.insert(slot);
while visited.insert(slot) {
let slot_meta = slot_meta_cache
.entry(slot)
.or_insert_with(|| blockstore.meta(slot).unwrap());
if let Some(slot_meta) = slot_meta {
if slot_meta.is_full() {
break;
if !slot_meta.is_full() {
path.push(slot);
if let Some(parent_slot) = slot_meta.parent_slot {
slot = parent_slot
}
}
path.push(slot);
slot = slot_meta.parent_slot;
}
}
path.reverse();
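The rewritten loop leans on `HashSet::insert` returning `false` once the value is already present, folding the visited-check and the marking into the loop condition. A simplified, self-contained sketch (the real function additionally skips full slots and reverses the path; `parent_of` is a hypothetical lookup standing in for the slot-meta cache):

use std::collections::HashSet;

// Walk a chain of slots, stopping at the first repeat or missing parent.
fn visit_chain(start: u64, parent_of: impl Fn(u64) -> Option<u64>) -> Vec<u64> {
    let mut visited = HashSet::new();
    let mut path = Vec::new();
    let mut slot = start;
    while visited.insert(slot) {
        path.push(slot);
        match parent_of(slot) {
            Some(parent) => slot = parent,
            None => break,
        }
    }
    path
}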
@@ -123,7 +123,7 @@ pub fn get_closest_completion(
if slot_meta.is_full() {
continue;
}
if let Some(last_index) = slot_meta.known_last_index() {
if let Some(last_index) = slot_meta.last_index {
let shred_index = blockstore.get_index(slot).unwrap();
let dist = if let Some(shred_index) = shred_index {
let shred_count = shred_index.data().num_shreds() as u64;

View File

@@ -3033,8 +3033,10 @@ pub mod tests {
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(bank_forks);
let exit = Arc::new(AtomicBool::new(false));
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,
@@ -3568,8 +3570,10 @@ pub mod tests {
&replay_vote_sender,
&VerifyRecyclers::default(),
);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
block_commitment_cache,
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@@ -3636,8 +3640,10 @@ pub mod tests {
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
block_commitment_cache.clone(),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),

View File

@@ -24,10 +24,10 @@ use {
solana_ledger::{
blockstore::Blockstore,
leader_schedule_cache::LeaderScheduleCache,
shred::{Shred, ShredType},
shred::{Shred, ShredId},
},
solana_measure::measure::Measure,
solana_perf::packet::Packets,
solana_perf::packet::PacketBatch,
solana_rayon_threadlimit::get_thread_count,
solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions},
solana_runtime::{bank::Bank, bank_forks::BankForks},
@@ -145,13 +145,13 @@ impl RetransmitStats {
}
// Map of shred (slot, index, type) => list of hash values seen for that key.
type ShredFilter = LruCache<(Slot, u32, ShredType), Vec<u64>>;
type ShredFilter = LruCache<ShredId, Vec<u64>>;
type ShredFilterAndHasher = (ShredFilter, PacketHasher);
// Returns true if the shred has already been received and retransmit should be skipped.
fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool {
let key = (shred.slot(), shred.index(), shred.shred_type());
let key = shred.id();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
match cache.get_mut(&key) {
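The filter allows a bounded number of distinct versions per shred id. A self-contained sketch of the behavior the tests below exercise, with a HashMap standing in for the LruCache and a bare u64 for the packet hash (the two-version bound is inferred from the test expectations, not stated in this diff):

use std::collections::HashMap;

// Skip a shred if this exact hash was already seen for the key, or if
// two distinct versions of the key have already been recorded.
fn should_skip<K: std::hash::Hash + Eq>(
    seen: &mut HashMap<K, Vec<u64>>,
    key: K,
    shred_hash: u64,
) -> bool {
    let hashes = seen.entry(key).or_default();
    if hashes.contains(&shred_hash) || hashes.len() >= 2 {
        return true;
    }
    hashes.push(shred_hash);
    false
}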
@@ -433,7 +433,7 @@ impl RetransmitStage {
cluster_info: Arc<ClusterInfo>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
verified_receiver: Receiver<Vec<Packets>>,
verified_receiver: Receiver<Vec<PacketBatch>>,
exit: Arc<AtomicBool>,
cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
epoch_schedule: EpochSchedule,
@@ -610,10 +610,10 @@ mod tests {
let shred = Shred::new_from_data(0, 0, 0, None, true, true, 0, 0x20, 0);
// it should send this over the sockets.
retransmit_sender.send(vec![shred]).unwrap();
let mut packets = Packets::new(vec![]);
solana_streamer::packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert!(!packets.packets[0].meta.repair);
let mut packet_batch = PacketBatch::new(vec![]);
solana_streamer::packet::recv_from(&mut packet_batch, &me_retransmit, 1).unwrap();
assert_eq!(packet_batch.packets.len(), 1);
assert!(!packet_batch.packets[0].meta.repair);
}
#[test]
@@ -639,19 +639,19 @@ mod tests {
assert!(should_skip_retransmit(&shred, &shreds_received));
assert!(should_skip_retransmit(&shred, &shreds_received));
let shred = Shred::new_empty_coding(slot, index, 0, 1, 1, version);
let shred = Shred::new_empty_coding(slot, index, 0, 1, 1, 0, version);
// Coding at (1, 5) passes
assert!(!should_skip_retransmit(&shred, &shreds_received));
// then blocked
assert!(should_skip_retransmit(&shred, &shreds_received));
let shred = Shred::new_empty_coding(slot, index, 2, 1, 1, version);
let shred = Shred::new_empty_coding(slot, index, 2, 1, 1, 0, version);
// 2nd unique coding at (1, 5) passes
assert!(!should_skip_retransmit(&shred, &shreds_received));
// same again is blocked
assert!(should_skip_retransmit(&shred, &shreds_received));
let shred = Shred::new_empty_coding(slot, index, 3, 1, 1, version);
let shred = Shred::new_empty_coding(slot, index, 3, 1, 1, 0, version);
// Another unique coding at (1, 5) always blocked
assert!(should_skip_retransmit(&shred, &shreds_received));
assert!(should_skip_retransmit(&shred, &shreds_received));

View File

@@ -25,11 +25,11 @@ use {
},
solana_measure::measure::Measure,
solana_metrics::inc_new_counter_debug,
solana_perf::packet::{limited_deserialize, Packets, PacketsRecycler},
solana_perf::packet::{limited_deserialize, PacketBatch, PacketBatchRecycler},
solana_sdk::{
clock::Slot, hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms,
},
solana_streamer::streamer::{PacketReceiver, PacketSender},
solana_streamer::streamer::{PacketBatchReceiver, PacketBatchSender},
std::{
collections::HashSet,
net::SocketAddr,
@@ -229,12 +229,12 @@ impl ServeRepair {
fn handle_repair(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
request: RepairProtocol,
stats: &mut ServeRepairStats,
) -> Option<Packets> {
) -> Option<PacketBatch> {
let now = Instant::now();
let my_id = me.read().unwrap().my_id();
@@ -317,10 +317,10 @@ impl ServeRepair {
/// Process messages from the network
fn run_listen(
obj: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
blockstore: Option<&Arc<Blockstore>>,
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
requests_receiver: &PacketBatchReceiver,
response_sender: &PacketBatchSender,
stats: &mut ServeRepairStats,
max_packets: &mut usize,
) -> Result<()> {
@@ -392,12 +392,12 @@ impl ServeRepair {
pub fn listen(
me: Arc<RwLock<Self>>,
blockstore: Option<Arc<Blockstore>>,
requests_receiver: PacketReceiver,
response_sender: PacketSender,
requests_receiver: PacketBatchReceiver,
response_sender: PacketBatchSender,
exit: &Arc<AtomicBool>,
) -> JoinHandle<()> {
let exit = exit.clone();
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
Builder::new()
.name("solana-repair-listen".to_string())
.spawn(move || {
@@ -432,14 +432,14 @@ impl ServeRepair {
fn handle_packets(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
blockstore: Option<&Arc<Blockstore>>,
packets: Packets,
response_sender: &PacketSender,
packet_batch: PacketBatch,
response_sender: &PacketBatchSender,
stats: &mut ServeRepairStats,
) {
// iter over the packets
packets.packets.iter().for_each(|packet| {
packet_batch.packets.iter().for_each(|packet| {
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size])
.into_iter()
@@ -609,7 +609,7 @@ impl ServeRepair {
}
fn run_window_request(
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
from: &ContactInfo,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
@@ -617,7 +617,7 @@ impl ServeRepair {
slot: Slot,
shred_index: u64,
nonce: Nonce,
) -> Option<Packets> {
) -> Option<PacketBatch> {
if let Some(blockstore) = blockstore {
// Try to find the requested index in one of the slots
let packet = repair_response::repair_response_packet(
@@ -630,7 +630,7 @@ impl ServeRepair {
if let Some(packet) = packet {
inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
return Some(Packets::new_unpinned_with_recycler_data(
return Some(PacketBatch::new_unpinned_with_recycler_data(
recycler,
"run_window_request",
vec![packet],
@@ -651,13 +651,13 @@ impl ServeRepair {
}
fn run_highest_window_request(
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
highest_index: u64,
nonce: Nonce,
) -> Option<Packets> {
) -> Option<PacketBatch> {
let blockstore = blockstore?;
// Try to find the requested index in one of the slots
let meta = blockstore.meta(slot).ok()??;
@@ -670,7 +670,7 @@ impl ServeRepair {
from_addr,
nonce,
)?;
return Some(Packets::new_unpinned_with_recycler_data(
return Some(PacketBatch::new_unpinned_with_recycler_data(
recycler,
"run_highest_window_request",
vec![packet],
@@ -680,14 +680,14 @@ impl ServeRepair {
}
fn run_orphan(
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
mut slot: Slot,
max_responses: usize,
nonce: Nonce,
) -> Option<Packets> {
let mut res = Packets::new_unpinned_with_recycler(recycler.clone(), 64, "run_orphan");
) -> Option<PacketBatch> {
let mut res = PacketBatch::new_unpinned_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blockstore) = blockstore {
// Try to find the next "n" parent slots of the input slot
while let Ok(Some(meta)) = blockstore.meta(slot) {
@@ -706,8 +706,8 @@ impl ServeRepair {
} else {
break;
}
if meta.is_parent_set() && res.packets.len() <= max_responses {
slot = meta.parent_slot;
if meta.parent_slot.is_some() && res.packets.len() <= max_responses {
slot = meta.parent_slot.unwrap();
} else {
break;
}
@@ -720,12 +720,12 @@ impl ServeRepair {
}
fn run_ancestor_hashes(
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
nonce: Nonce,
) -> Option<Packets> {
) -> Option<PacketBatch> {
let blockstore = blockstore?;
let ancestor_slot_hashes = if blockstore.is_duplicate_confirmed(slot) {
let ancestor_iterator =
@@ -746,7 +746,7 @@ impl ServeRepair {
from_addr,
nonce,
)?;
Some(Packets::new_unpinned_with_recycler_data(
Some(PacketBatch::new_unpinned_with_recycler_data(
recycler,
"run_ancestor_hashes",
vec![packet],
@@ -778,7 +778,7 @@ mod tests {
/// test that run_highest_window_request responds with the right shred, and does not overrun
fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Nonce) {
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
@@ -848,7 +848,7 @@ mod tests {
/// test that window requests respond with the right shred, and do not overrun
fn run_window_request(slot: Slot, nonce: Nonce) {
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
@@ -1017,7 +1017,7 @@ mod tests {
fn run_orphan(slot: Slot, num_slots: u64, nonce: Nonce) {
solana_logger::setup();
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
@@ -1091,7 +1091,7 @@ mod tests {
#[test]
fn run_orphan_corrupted_shred_size() {
solana_logger::setup();
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
@@ -1152,7 +1152,7 @@ mod tests {
#[test]
fn test_run_ancestor_hashes() {
solana_logger::setup();
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let slot = 0;

View File

@@ -6,12 +6,12 @@ use {
solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats},
solana_perf::{
cuda_runtime::PinnedVec,
packet::{Packet, PacketsRecycler},
packet::{Packet, PacketBatchRecycler},
recycler::Recycler,
},
solana_runtime::bank_forks::BankForks,
solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT},
solana_streamer::streamer::{self, PacketReceiver, PacketSender},
solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender},
std::{
net::UdpSocket,
sync::{atomic::AtomicBool, mpsc::channel, Arc, RwLock},
@@ -63,8 +63,8 @@ impl ShredFetchStage {
// updates packets received on a channel and sends them on another channel
fn modify_packets<F>(
recvr: PacketReceiver,
sendr: PacketSender,
recvr: PacketBatchReceiver,
sendr: PacketBatchSender,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
modify: F,
@@ -83,7 +83,7 @@ impl ShredFetchStage {
let mut stats = ShredFetchStats::default();
let mut packet_hasher = PacketHasher::default();
while let Some(mut p) = recvr.iter().next() {
while let Some(mut packet_batch) = recvr.iter().next() {
if last_updated.elapsed().as_millis() as u64 > DEFAULT_MS_PER_SLOT {
last_updated = Instant::now();
packet_hasher.reset();
@@ -97,8 +97,8 @@ impl ShredFetchStage {
slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch());
}
}
stats.shred_count += p.packets.len();
p.packets.iter_mut().for_each(|packet| {
stats.shred_count += packet_batch.packets.len();
packet_batch.packets.iter_mut().for_each(|packet| {
Self::process_packet(
packet,
&mut shreds_received,
@@ -124,7 +124,7 @@ impl ShredFetchStage {
stats = ShredFetchStats::default();
last_stats = Instant::now();
}
if sendr.send(p).is_err() {
if sendr.send(packet_batch).is_err() {
break;
}
}
@@ -133,7 +133,7 @@ impl ShredFetchStage {
fn packet_modifier<F>(
sockets: Vec<Arc<UdpSocket>>,
exit: &Arc<AtomicBool>,
sender: PacketSender,
sender: PacketBatchSender,
recycler: Recycler<PinnedVec<Packet>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
@@ -169,11 +169,11 @@ impl ShredFetchStage {
sockets: Vec<Arc<UdpSocket>>,
forward_sockets: Vec<Arc<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
sender: &PacketSender,
sender: &PacketBatchSender,
bank_forks: Option<Arc<RwLock<BankForks>>>,
exit: &Arc<AtomicBool>,
) -> Self {
let recycler: PacketsRecycler = Recycler::warmed(100, 1024);
let recycler: PacketBatchRecycler = Recycler::warmed(100, 1024);
let (mut tvu_threads, tvu_filter) = Self::packet_modifier(
sockets,

View File

@@ -5,11 +5,11 @@
//!
pub use solana_perf::sigverify::{
batch_size, ed25519_verify_cpu, ed25519_verify_disabled, init, TxOffset,
count_packets_in_batches, ed25519_verify_cpu, ed25519_verify_disabled, init, TxOffset,
};
use {
crate::sigverify_stage::SigVerifier,
solana_perf::{cuda_runtime::PinnedVec, packet::Packets, recycler::Recycler, sigverify},
solana_perf::{cuda_runtime::PinnedVec, packet::PacketBatch, recycler::Recycler, sigverify},
};
#[derive(Clone)]
@@ -40,13 +40,13 @@ impl Default for TransactionSigVerifier {
}
impl SigVerifier for TransactionSigVerifier {
fn verify_batch(&self, mut batch: Vec<Packets>) -> Vec<Packets> {
fn verify_batches(&self, mut batches: Vec<PacketBatch>) -> Vec<PacketBatch> {
sigverify::ed25519_verify(
&mut batch,
&mut batches,
&self.recycler,
&self.recycler_out,
self.reject_non_vote,
);
batch
batches
}
}

View File

@@ -5,7 +5,7 @@ use {
leader_schedule_cache::LeaderScheduleCache, shred::Shred,
sigverify_shreds::verify_shreds_gpu,
},
solana_perf::{self, packet::Packets, recycler_cache::RecyclerCache},
solana_perf::{self, packet::PacketBatch, recycler_cache::RecyclerCache},
solana_runtime::bank_forks::BankForks,
std::{
collections::{HashMap, HashSet},
@@ -32,7 +32,7 @@ impl ShredSigVerifier {
recycler_cache: RecyclerCache::warmed(),
}
}
fn read_slots(batches: &[Packets]) -> HashSet<u64> {
fn read_slots(batches: &[PacketBatch]) -> HashSet<u64> {
batches
.iter()
.flat_map(|batch| batch.packets.iter().filter_map(Shred::get_slot_from_packet))
@@ -41,7 +41,7 @@ impl ShredSigVerifier {
}
impl SigVerifier for ShredSigVerifier {
fn verify_batch(&self, mut batches: Vec<Packets>) -> Vec<Packets> {
fn verify_batches(&self, mut batches: Vec<PacketBatch>) -> Vec<PacketBatch> {
let r_bank = self.bank_forks.read().unwrap().working_bank();
let slots: HashSet<u64> = Self::read_slots(&batches);
let mut leader_slots: HashMap<u64, [u8; 32]> = slots
@@ -88,13 +88,13 @@ pub mod tests {
0,
0xc0de,
);
let mut batch = [Packets::default(), Packets::default()];
let mut batches = [PacketBatch::default(), PacketBatch::default()];
let keypair = Keypair::new();
Shredder::sign_shred(&keypair, &mut shred);
batch[0].packets.resize(1, Packet::default());
batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batch[0].packets[0].meta.size = shred.payload.len();
batches[0].packets.resize(1, Packet::default());
batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batches[0].packets[0].meta.size = shred.payload.len();
let mut shred = Shred::new_from_data(
0xc0de_dead,
@@ -108,16 +108,16 @@ pub mod tests {
0xc0de,
);
Shredder::sign_shred(&keypair, &mut shred);
batch[1].packets.resize(1, Packet::default());
batch[1].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batch[1].packets[0].meta.size = shred.payload.len();
batches[1].packets.resize(1, Packet::default());
batches[1].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batches[1].packets[0].meta.size = shred.payload.len();
let expected: HashSet<u64> = [0xc0de_dead, 0xdead_c0de].iter().cloned().collect();
assert_eq!(ShredSigVerifier::read_slots(&batch), expected);
assert_eq!(ShredSigVerifier::read_slots(&batches), expected);
}
#[test]
fn test_sigverify_shreds_verify_batch() {
fn test_sigverify_shreds_verify_batches() {
let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey();
let bank = Bank::new_for_tests(
@@ -127,8 +127,8 @@ pub mod tests {
let bf = Arc::new(RwLock::new(BankForks::new(bank)));
let verifier = ShredSigVerifier::new(bf, cache);
let mut batch = vec![Packets::default()];
batch[0].packets.resize(2, Packet::default());
let mut batches = vec![PacketBatch::default()];
batches[0].packets.resize(2, Packet::default());
let mut shred = Shred::new_from_data(
0,
@@ -142,8 +142,8 @@ pub mod tests {
0xc0de,
);
Shredder::sign_shred(&leader_keypair, &mut shred);
batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batch[0].packets[0].meta.size = shred.payload.len();
batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batches[0].packets[0].meta.size = shred.payload.len();
let mut shred = Shred::new_from_data(
0,
@@ -158,10 +158,10 @@ pub mod tests {
);
let wrong_keypair = Keypair::new();
Shredder::sign_shred(&wrong_keypair, &mut shred);
batch[0].packets[1].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batch[0].packets[1].meta.size = shred.payload.len();
batches[0].packets[1].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
batches[0].packets[1].meta.size = shred.payload.len();
let rv = verifier.verify_batch(batch);
let rv = verifier.verify_batches(batches);
assert!(!rv[0].packets[0].meta.discard);
assert!(rv[0].packets[1].meta.discard);
}

View File

@@ -9,9 +9,9 @@ use {
crate::sigverify,
crossbeam_channel::{SendError, Sender as CrossbeamSender},
solana_measure::measure::Measure,
solana_perf::packet::Packets,
solana_perf::packet::PacketBatch,
solana_sdk::timing,
solana_streamer::streamer::{self, PacketReceiver, StreamerError},
solana_streamer::streamer::{self, PacketBatchReceiver, StreamerError},
std::{
collections::HashMap,
sync::mpsc::{Receiver, RecvTimeoutError},
@@ -26,7 +26,7 @@ const MAX_SIGVERIFY_BATCH: usize = 10_000;
#[derive(Error, Debug)]
pub enum SigVerifyServiceError {
#[error("send packets batch error")]
Send(#[from] SendError<Vec<Packets>>),
Send(#[from] SendError<Vec<PacketBatch>>),
#[error("streamer error")]
Streamer(#[from] StreamerError),
@@ -39,7 +39,7 @@ pub struct SigVerifyStage {
}
pub trait SigVerifier {
fn verify_batch(&self, batch: Vec<Packets>) -> Vec<Packets>;
fn verify_batches(&self, batches: Vec<PacketBatch>) -> Vec<PacketBatch>;
}
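Implementors only need to track the rename. A minimal sketch, assuming `PacketBatch` from solana_perf::packet is in scope (`NoopVerifier` is hypothetical; a real verifier marks failing packets via `meta.discard` rather than passing everything through):

// Pass-through implementor of the renamed trait; Clone is derived so it
// satisfies the `SigVerifier + 'static + Send + Clone` bound on the stage.
#[derive(Clone)]
struct NoopVerifier;

impl SigVerifier for NoopVerifier {
    fn verify_batches(&self, batches: Vec<PacketBatch>) -> Vec<PacketBatch> {
        batches // accept every packet unmodified
    }
}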
#[derive(Default, Clone)]
@@ -49,7 +49,7 @@ pub struct DisabledSigVerifier {}
struct SigVerifierStats {
recv_batches_us_hist: histogram::Histogram, // time to call recv_batch
verify_batches_pp_us_hist: histogram::Histogram, // per-packet time to call verify_batches
batches_hist: histogram::Histogram, // number of Packets structures per verify call
batches_hist: histogram::Histogram, // number of packet batches per verify call
packets_hist: histogram::Histogram, // number of packets per verify call
total_batches: usize,
total_packets: usize,
@@ -122,24 +122,24 @@ impl SigVerifierStats {
}
impl SigVerifier for DisabledSigVerifier {
fn verify_batch(&self, mut batch: Vec<Packets>) -> Vec<Packets> {
sigverify::ed25519_verify_disabled(&mut batch);
batch
fn verify_batches(&self, mut batches: Vec<PacketBatch>) -> Vec<PacketBatch> {
sigverify::ed25519_verify_disabled(&mut batches);
batches
}
}
impl SigVerifyStage {
#[allow(clippy::new_ret_no_self)]
pub fn new<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: Receiver<Packets>,
verified_sender: CrossbeamSender<Vec<Packets>>,
packet_receiver: Receiver<PacketBatch>,
verified_sender: CrossbeamSender<Vec<PacketBatch>>,
verifier: T,
) -> Self {
let thread_hdl = Self::verifier_services(packet_receiver, verified_sender, verifier);
Self { thread_hdl }
}
pub fn discard_excess_packets(batches: &mut Vec<Packets>, max_packets: usize) {
pub fn discard_excess_packets(batches: &mut Vec<PacketBatch>, max_packets: usize) {
let mut received_ips = HashMap::new();
for (batch_index, batch) in batches.iter().enumerate() {
for (packet_index, packets) in batch.packets.iter().enumerate() {
@@ -169,12 +169,12 @@ impl SigVerifyStage {
}
fn verifier<T: SigVerifier>(
recvr: &PacketReceiver,
sendr: &CrossbeamSender<Vec<Packets>>,
recvr: &PacketBatchReceiver,
sendr: &CrossbeamSender<Vec<PacketBatch>>,
verifier: &T,
stats: &mut SigVerifierStats,
) -> Result<()> {
let (mut batches, num_packets, recv_duration) = streamer::recv_batch(recvr)?;
let (mut batches, num_packets, recv_duration) = streamer::recv_packet_batches(recvr)?;
let batches_len = batches.len();
debug!(
@@ -187,7 +187,7 @@ impl SigVerifyStage {
}
let mut verify_batch_time = Measure::start("sigverify_batch_time");
sendr.send(verifier.verify_batch(batches))?;
sendr.send(verifier.verify_batches(batches))?;
verify_batch_time.stop();
debug!(
@@ -216,8 +216,8 @@ impl SigVerifyStage {
}
fn verifier_service<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: PacketReceiver,
verified_sender: CrossbeamSender<Vec<Packets>>,
packet_receiver: PacketBatchReceiver,
verified_sender: CrossbeamSender<Vec<PacketBatch>>,
verifier: &T,
) -> JoinHandle<()> {
let verifier = verifier.clone();
@@ -252,8 +252,8 @@ impl SigVerifyStage {
}
fn verifier_services<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: PacketReceiver,
verified_sender: CrossbeamSender<Vec<Packets>>,
packet_receiver: PacketBatchReceiver,
verified_sender: CrossbeamSender<Vec<PacketBatch>>,
verifier: T,
) -> JoinHandle<()> {
Self::verifier_service(packet_receiver, verified_sender, &verifier)
@@ -268,11 +268,12 @@ impl SigVerifyStage {
mod tests {
use {super::*, solana_perf::packet::Packet};
fn count_non_discard(packets: &[Packets]) -> usize {
packets
fn count_non_discard(packet_batches: &[PacketBatch]) -> usize {
packet_batches
.iter()
.map(|pp| {
pp.packets
.map(|batch| {
batch
.packets
.iter()
.map(|p| if p.meta.discard { 0 } else { 1 })
.sum::<usize>()
@@ -283,14 +284,14 @@ mod tests {
#[test]
fn test_packet_discard() {
solana_logger::setup();
let mut p = Packets::default();
p.packets.resize(10, Packet::default());
p.packets[3].meta.addr = [1u16; 8];
let mut packets = vec![p];
let mut batch = PacketBatch::default();
batch.packets.resize(10, Packet::default());
batch.packets[3].meta.addr = [1u16; 8];
let mut batches = vec![batch];
let max = 3;
SigVerifyStage::discard_excess_packets(&mut packets, max);
assert_eq!(count_non_discard(&packets), max);
assert!(!packets[0].packets[0].meta.discard);
assert!(!packets[0].packets[3].meta.discard);
SigVerifyStage::discard_excess_packets(&mut batches, max);
assert_eq!(count_non_discard(&batches), max);
assert!(!batches[0].packets[0].meta.discard);
assert!(!batches[0].packets[3].meta.discard);
}
}
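
Taken together, this file's diff is a mechanical rename: `Packets` becomes `PacketBatch` and the trait method `verify_batch` becomes `verify_batches`. A minimal sketch of the new shape, using toy stand-ins for the `solana_perf` types rather than the real definitions:

```rust
// Toy stand-ins for the solana_perf types; the real Packet and PacketBatch
// carry packet data and metadata, not just a discard flag.
#[derive(Clone, Default)]
struct Packet {
    discard: bool,
}

#[derive(Default)]
struct PacketBatch {
    packets: Vec<Packet>,
}

// The trait now speaks in whole batches: verify_batches replaces verify_batch.
trait SigVerifier {
    fn verify_batches(&self, batches: Vec<PacketBatch>) -> Vec<PacketBatch>;
}

// Analogous to DisabledSigVerifier: pass every batch through unchanged
// instead of performing ed25519 verification.
struct NoopVerifier;

impl SigVerifier for NoopVerifier {
    fn verify_batches(&self, batches: Vec<PacketBatch>) -> Vec<PacketBatch> {
        batches
    }
}

fn main() {
    let batch = PacketBatch {
        packets: vec![Packet::default(); 4],
    };
    let verified = NoopVerifier.verify_batches(vec![batch]);
    assert_eq!(verified[0].packets.len(), 4);
    assert!(!verified[0].packets[0].discard);
}
```

The real `DisabledSigVerifier` calls `sigverify::ed25519_verify_disabled` to mark packets rather than passing them through; the no-op here only keeps the sketch self-contained.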

View File

@@ -15,7 +15,7 @@ use {
};
const MS_PER_S: u64 = 1_000;
const SAMPLE_INTERVAL_UDP_MS: u64 = 60 * MS_PER_S;
const SAMPLE_INTERVAL_UDP_MS: u64 = 2 * MS_PER_S;
const SAMPLE_INTERVAL_MEM_MS: u64 = MS_PER_S;
const SLEEP_INTERVAL: Duration = Duration::from_millis(500);
@@ -130,7 +130,7 @@ impl SystemMonitorService {
#[cfg(target_os = "linux")]
fn report_udp_stats(old_stats: &UdpStats, new_stats: &UdpStats) {
datapoint_info!(
"net-stats",
"net-stats-validator",
(
"in_datagrams_delta",
new_stats.in_datagrams - old_stats.in_datagrams,
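
The hunk shows only the constants and the datapoint rename; a toy loop like the following (our reconstruction, not the actual `SystemMonitorService` code) illustrates how a short `SLEEP_INTERVAL` combines with the new two-second `SAMPLE_INTERVAL_UDP_MS`:

```rust
use std::{
    thread,
    time::{Duration, Instant},
};

const MS_PER_S: u64 = 1_000;
const SAMPLE_INTERVAL_UDP_MS: u64 = 2 * MS_PER_S; // the new, tighter interval
const SLEEP_INTERVAL: Duration = Duration::from_millis(500);

fn main() {
    let udp_interval = Duration::from_millis(SAMPLE_INTERVAL_UDP_MS);
    let mut last_udp_sample = Instant::now();
    for _ in 0..8 {
        // Wake up frequently, but only report UDP stats once the
        // sample interval has elapsed.
        if last_udp_sample.elapsed() >= udp_interval {
            println!("report net-stats-validator datapoint");
            last_udp_sample = Instant::now();
        }
        thread::sleep(SLEEP_INTERVAL);
    }
}
```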

View File

@@ -399,6 +399,7 @@ pub mod tests {
solana_runtime::bank::Bank,
solana_sdk::signature::{Keypair, Signer},
solana_streamer::socket::SocketAddrSpace,
std::sync::atomic::AtomicU64,
std::sync::atomic::Ordering,
};
@@ -448,6 +449,7 @@ pub mod tests {
let bank_forks = Arc::new(RwLock::new(bank_forks));
let tower = Tower::default();
let accounts_package_channel = channel();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let tvu = Tvu::new(
&vote_keypair.pubkey(),
Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])),
@@ -465,6 +467,7 @@ pub mod tests {
ledger_signal_receiver,
&Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
block_commitment_cache.clone(),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),

View File

@@ -538,6 +538,8 @@ impl Validator {
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_with_config(
&exit,
max_complete_transaction_status_slot.clone(),
blockstore.clone(),
bank_forks.clone(),
block_commitment_cache.clone(),
optimistically_confirmed_bank.clone(),

View File

@@ -1,7 +1,6 @@
use {
crate::{cluster_info_vote_listener::VerifiedLabelVotePacketsReceiver, result::Result},
crossbeam_channel::Select,
solana_perf::packet::Packets,
solana_perf::packet::PacketBatch,
solana_runtime::bank::Bank,
solana_sdk::{
account::from_account, clock::Slot, hash::Hash, pubkey::Pubkey, signature::Signature,
@@ -20,7 +19,7 @@ const MAX_VOTES_PER_VALIDATOR: usize = 1000;
pub struct VerifiedVoteMetadata {
pub vote_account_key: Pubkey,
pub vote: Vote,
pub packet: Packets,
pub packet_batch: PacketBatch,
pub signature: Signature,
}
@@ -70,7 +69,7 @@ impl<'a> ValidatorGossipVotesIterator<'a> {
///
/// Iterator is done after iterating through all vote accounts
impl<'a> Iterator for ValidatorGossipVotesIterator<'a> {
type Item = Vec<Packets>;
type Item = Vec<PacketBatch>;
fn next(&mut self) -> Option<Self::Item> {
// TODO: Maybe prioritize by stake weight
@@ -116,7 +115,7 @@ impl<'a> Iterator for ValidatorGossipVotesIterator<'a> {
None
}
})
.collect::<Vec<Packets>>()
.collect::<Vec<PacketBatch>>()
})
})
});
@@ -130,7 +129,7 @@ impl<'a> Iterator for ValidatorGossipVotesIterator<'a> {
}
}
pub type SingleValidatorVotes = BTreeMap<(Slot, Hash), (Packets, Signature)>;
pub type SingleValidatorVotes = BTreeMap<(Slot, Hash), (PacketBatch, Signature)>;
#[derive(Default)]
pub struct VerifiedVotePackets(HashMap<Pubkey, SingleValidatorVotes>);
@@ -141,16 +140,16 @@ impl VerifiedVotePackets {
vote_packets_receiver: &VerifiedLabelVotePacketsReceiver,
would_be_leader: bool,
) -> Result<()> {
let mut sel = Select::new();
sel.recv(vote_packets_receiver);
let _ = sel.ready_timeout(Duration::from_millis(200))?;
for gossip_votes in vote_packets_receiver.try_iter() {
const RECV_TIMEOUT: Duration = Duration::from_millis(200);
let vote_packets = vote_packets_receiver.recv_timeout(RECV_TIMEOUT)?;
let vote_packets = std::iter::once(vote_packets).chain(vote_packets_receiver.try_iter());
for gossip_votes in vote_packets {
if would_be_leader {
for verified_vote_metadata in gossip_votes {
let VerifiedVoteMetadata {
vote_account_key,
vote,
packet,
packet_batch,
signature,
} = verified_vote_metadata;
if vote.slots.is_empty() {
@@ -161,7 +160,7 @@ impl VerifiedVotePackets {
let hash = vote.hash;
let validator_votes = self.0.entry(vote_account_key).or_default();
validator_votes.insert((*slot, hash), (packet, signature));
validator_votes.insert((*slot, hash), (packet_batch, signature));
if validator_votes.len() > MAX_VOTES_PER_VALIDATOR {
let smallest_key = validator_votes.keys().next().cloned().unwrap();
@@ -199,7 +198,7 @@ mod tests {
s.send(vec![VerifiedVoteMetadata {
vote_account_key,
vote: vote.clone(),
packet: Packets::default(),
packet_batch: PacketBatch::default(),
signature: Signature::new(&[1u8; 64]),
}])
.unwrap();
@@ -219,7 +218,7 @@ mod tests {
s.send(vec![VerifiedVoteMetadata {
vote_account_key,
vote,
packet: Packets::default(),
packet_batch: PacketBatch::default(),
signature: Signature::new(&[1u8; 64]),
}])
.unwrap();
@@ -241,7 +240,7 @@ mod tests {
s.send(vec![VerifiedVoteMetadata {
vote_account_key,
vote,
packet: Packets::default(),
packet_batch: PacketBatch::default(),
signature: Signature::new(&[1u8; 64]),
}])
.unwrap();
@@ -264,7 +263,7 @@ mod tests {
s.send(vec![VerifiedVoteMetadata {
vote_account_key,
vote,
packet: Packets::default(),
packet_batch: PacketBatch::default(),
signature: Signature::new(&[2u8; 64]),
}])
.unwrap();
@@ -283,7 +282,7 @@ mod tests {
// No new messages, should time out
assert_matches!(
verified_vote_packets.receive_and_process_vote_packets(&r, true),
Err(Error::ReadyTimeout)
Err(Error::CrossbeamRecvTimeout(_))
);
}
@@ -303,7 +302,7 @@ mod tests {
s.send(vec![VerifiedVoteMetadata {
vote_account_key,
vote,
packet: Packets::default(),
packet_batch: PacketBatch::default(),
signature: Signature::new(&[1u8; 64]),
}])
.unwrap();
@@ -340,7 +339,7 @@ mod tests {
s.send(vec![VerifiedVoteMetadata {
vote_account_key,
vote,
packet: Packets::default(),
packet_batch: PacketBatch::default(),
signature: Signature::new_unique(),
}])
.unwrap();
@@ -394,7 +393,7 @@ mod tests {
s.send(vec![VerifiedVoteMetadata {
vote_account_key,
vote,
packet: Packets::new(vec![Packet::default(); num_packets]),
packet_batch: PacketBatch::new(vec![Packet::default(); num_packets]),
signature: Signature::new_unique(),
}])
.unwrap();
@@ -427,12 +426,12 @@ mod tests {
// Get and verify batches
let num_expected_batches = 2;
for _ in 0..num_expected_batches {
let validator_batch: Vec<Packets> = gossip_votes_iterator.next().unwrap();
let validator_batch: Vec<PacketBatch> = gossip_votes_iterator.next().unwrap();
assert_eq!(validator_batch.len(), slot_hashes.slot_hashes().len());
let expected_len = validator_batch[0].packets.len();
assert!(validator_batch
.iter()
.all(|p| p.packets.len() == expected_len));
.all(|batch| batch.packets.len() == expected_len));
}
// Should be empty now
@@ -461,7 +460,7 @@ mod tests {
s.send(vec![VerifiedVoteMetadata {
vote_account_key,
vote,
packet: Packets::default(),
packet_batch: PacketBatch::default(),
signature: Signature::new_unique(),
}])
.unwrap();
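
The `MAX_VOTES_PER_VALIDATOR` bound visible in the hunk above caps each validator's `BTreeMap` by evicting the smallest `(slot, hash)` key once the cap is exceeded. A standalone sketch of that eviction pattern, with toy key and value types in place of `(Slot, Hash)` and `(PacketBatch, Signature)`:

```rust
use std::collections::BTreeMap;

const MAX_VOTES_PER_VALIDATOR: usize = 1000;

// Toy key/value types in place of (Slot, Hash) and (PacketBatch, Signature).
type VoteKey = (u64, u64);

fn insert_bounded(votes: &mut BTreeMap<VoteKey, &'static str>, key: VoteKey, value: &'static str) {
    votes.insert(key, value);
    if votes.len() > MAX_VOTES_PER_VALIDATOR {
        // keys() iterates in ascending order, so next() yields the smallest
        // (i.e. oldest-slot) entry.
        let smallest_key = votes.keys().next().cloned().unwrap();
        votes.remove(&smallest_key);
    }
}

fn main() {
    let mut votes = BTreeMap::new();
    for slot in 0..(MAX_VOTES_PER_VALIDATOR as u64 + 5) {
        insert_bounded(&mut votes, (slot, 0), "packet-batch-placeholder");
    }
    assert_eq!(votes.len(), MAX_VOTES_PER_VALIDATOR);
    assert!(!votes.contains_key(&(0, 0))); // the five lowest slots were evicted
}
```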

View File

@@ -22,7 +22,7 @@ use {
},
solana_measure::measure::Measure,
solana_metrics::{inc_new_counter_debug, inc_new_counter_error},
solana_perf::packet::{Packet, Packets},
solana_perf::packet::{Packet, PacketBatch},
solana_rayon_threadlimit::get_thread_count,
solana_runtime::{bank::Bank, bank_forks::BankForks},
solana_sdk::{clock::Slot, packet::PACKET_DATA_SIZE, pubkey::Pubkey},
@@ -164,8 +164,8 @@ fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
match shred.shred_type() {
// Only data shreds have parent information
ShredType::Data => match shred.parent() {
Some(parent) => blockstore::verify_shred_slots(shred.slot(), parent, root),
None => false,
Ok(parent) => blockstore::verify_shred_slots(shred.slot(), parent, root),
Err(_) => false,
},
// Filter out outdated coding shreds
ShredType::Code => shred.slot() >= root,
@@ -217,12 +217,9 @@ fn run_check_duplicate(
let check_duplicate = |shred: Shred| -> Result<()> {
let shred_slot = shred.slot();
if !blockstore.has_duplicate_shreds_in_slot(shred_slot) {
if let Some(existing_shred_payload) = blockstore.is_shred_duplicate(
shred_slot,
shred.index(),
shred.payload.clone(),
shred.shred_type(),
) {
if let Some(existing_shred_payload) =
blockstore.is_shred_duplicate(shred.id(), shred.payload.clone())
{
cluster_info.push_duplicate_shred(&shred, &existing_shred_payload)?;
blockstore.store_duplicate_slot(
shred_slot,
@@ -236,14 +233,10 @@ fn run_check_duplicate(
Ok(())
};
let timer = Duration::from_millis(200);
let shred = shred_receiver.recv_timeout(timer)?;
check_duplicate(shred)?;
while let Ok(shred) = shred_receiver.try_recv() {
check_duplicate(shred)?;
}
Ok(())
const RECV_TIMEOUT: Duration = Duration::from_millis(200);
std::iter::once(shred_receiver.recv_timeout(RECV_TIMEOUT)?)
.chain(shred_receiver.try_iter())
.try_for_each(check_duplicate)
}
fn verify_repair(
@@ -353,7 +346,7 @@ fn recv_window<F>(
blockstore: &Blockstore,
bank_forks: &RwLock<BankForks>,
insert_shred_sender: &CrossbeamSender<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
verified_receiver: &CrossbeamReceiver<Vec<PacketBatch>>,
retransmit_sender: &Sender<Vec<Shred>>,
shred_filter: F,
thread_pool: &ThreadPool,
@@ -458,7 +451,7 @@ impl WindowService {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new<F>(
blockstore: Arc<Blockstore>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_receiver: CrossbeamReceiver<Vec<PacketBatch>>,
retransmit_sender: Sender<Vec<Shred>>,
repair_socket: Arc<UdpSocket>,
exit: Arc<AtomicBool>,
@@ -629,7 +622,7 @@ impl WindowService {
exit: Arc<AtomicBool>,
blockstore: Arc<Blockstore>,
insert_sender: CrossbeamSender<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_receiver: CrossbeamReceiver<Vec<PacketBatch>>,
shred_filter: F,
bank_forks: Arc<RwLock<BankForks>>,
retransmit_sender: Sender<Vec<Shred>>,
@@ -878,7 +871,15 @@ mod test {
));
// coding shreds don't contain parent slot information, test that slot >= root
let (common, coding) = Shredder::new_coding_shred_header(5, 5, 5, 6, 6, 0);
let (common, coding) = Shredder::new_coding_shred_header(
5, // slot
5, // index
5, // fec_set_index
6, // num_data_shreds
6, // num_coding_shreds
3, // position
0, // version
);
let mut coding_shred =
Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
Shredder::sign_shred(&leader_keypair, &mut coding_shred);
@@ -954,7 +955,15 @@ mod test {
std::net::{IpAddr, Ipv4Addr},
};
solana_logger::setup();
let (common, coding) = Shredder::new_coding_shred_header(5, 5, 5, 6, 6, 0);
let (common, coding) = Shredder::new_coding_shred_header(
5, // slot
5, // index
5, // fec_set_index
6, // num_data_shreds
6, // num_coding_shreds
4, // position
0, // version
);
let shred = Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
let mut shreds = vec![shred.clone(), shred.clone(), shred];
let _from_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
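
The `run_check_duplicate` rewrite above replaces an explicit recv-then-loop with a single iterator chain: block for the first item, drain the rest without blocking, and propagate the first error. A standalone sketch of the idiom against a plain `crossbeam_channel`, with a placeholder in place of the real duplicate check:

```rust
use crossbeam_channel::{unbounded, RecvTimeoutError};
use std::time::Duration;

fn main() -> Result<(), RecvTimeoutError> {
    let (sender, receiver) = unbounded();
    for shred_id in 0..3u64 {
        sender.send(shred_id).unwrap();
    }

    // Placeholder for the real duplicate-shred check.
    let check_duplicate = |shred_id: u64| -> Result<(), RecvTimeoutError> {
        println!("checking shred {}", shred_id);
        Ok(())
    };

    // Block on the first item (propagating a timeout error), then drain
    // whatever is already queued without blocking again; try_for_each stops
    // at the first error returned by the closure.
    const RECV_TIMEOUT: Duration = Duration::from_millis(200);
    std::iter::once(receiver.recv_timeout(RECV_TIMEOUT)?)
        .chain(receiver.try_iter())
        .try_for_each(check_duplicate)
}
```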

View File

@@ -91,6 +91,7 @@ module.exports = {
},
"developing/test-validator",
"developing/backwards-compatibility",
"developing/plugins/accountsdb_plugin"
],
Integrating: ["integrations/exchange"],
Validating: [

View File

@@ -152,42 +152,47 @@ Then issue a new `deploy` command and specify the buffer:
solana program deploy --buffer <KEYPAIR_PATH> <PROGRAM_FILEPATH>
```
### Closing buffer accounts and reclaiming their lamports
### Closing program and buffer accounts, and reclaiming their lamports
Both program and buffer accounts can be closed and their lamport balances
transferred to a recipient's account.
If deployment fails, there will be a left-over buffer account that holds
lamports. The buffer account can either be used to [resume a
deploy](#resuming-a-failed-deploy) or closed. When closed, the full balance of
the buffer account will be transferred to the recipient's account.
deploy](#resuming-a-failed-deploy) or closed.
The buffer account's authority must be present to close a buffer account, to
list all the open buffer accounts that match the default authority:
The program or buffer account's authority must be present to close an account.
To list all the open program or buffer accounts that match the default
authority:
```bash
solana program show --programs
solana program show --buffers
```
To specify a different authority:
```bash
solana program show --programs --buffer-authority <AUTHORITY_ADDRESS>
solana program show --buffers --buffer-authority <AUTHORITY_ADDRESS>
```
To close a single account:
```bash
solana program close <BUFFER_ADDRESS>
solana program close <ADDRESS>
```
To close a single account and specify a different authority than the default:
```bash
solana program close <BUFFER_ADDRESS> --buffer-authority <KEYPAIR_FILEPATH>
solana program close <ADDRESS> --buffer-authority <KEYPAIR_FILEPATH>
```
To close a single account and specify a different recipient than the default:
```bash
solana program close <BUFFER_ADDRESS> --recipient <RECIPIENT_ADDRESS>
solana program close <ADDRESS> --recipient <RECIPIENT_ADDRESS>
```
To close all the buffer accounts associated with the current authority:

View File

@@ -20,18 +20,15 @@ Clients send transactions to any validator's Transaction Processing Unit \(TPU\)
## Confirming Transactions
A Solana cluster is capable of subsecond _confirmation_ for up to 150 nodes with plans to scale up to hundreds of thousands of nodes. Once fully implemented, confirmation times are expected to increase only with the logarithm of the number of validators, where the logarithm's base is very high. If the base is one thousand, for example, it means that for the first thousand nodes, confirmation will be the duration of three network hops plus the time it takes the slowest validator of a supermajority to vote. For the next million nodes, confirmation increases by only one network hop.
A Solana cluster is capable of subsecond _confirmation_ for thousands of nodes with plans to scale up to hundreds of thousands of nodes. Confirmation times are expected to increase only with the logarithm of the number of validators, where the logarithm's base is very high. If the base is one thousand, for example, it means that for the first thousand nodes, confirmation will be the duration of three network hops plus the time it takes the slowest validator of a supermajority to vote. For the next million nodes, confirmation increases by only one network hop.
Solana defines confirmation as the duration of time from when the leader timestamps a new entry to the moment when it recognizes a supermajority of ledger votes.
A gossip network is much too slow to achieve subsecond confirmation once the network grows beyond a certain size. The time it takes to send messages to all nodes is proportional to the square of the number of nodes. If a blockchain wants to achieve low confirmation times and attempts to do so using a gossip network, it will be forced to centralize to just a handful of nodes.
Scalable confirmation can be achieved using the following combination of techniques:
1. Timestamp transactions with a VDF sample and sign the timestamp.
2. Split the transactions into batches, send each to separate nodes and have
each node share its batch with its peers.
2. Split the transactions into batches, send each to separate nodes and have each node share its batch with its peers.
3. Repeat the previous step recursively until all nodes have all batches.
@@ -39,4 +36,4 @@ Solana rotates leaders at fixed intervals, called _slots_. Each leader may only
Next, transactions are broken into batches so that a node can send transactions to multiple parties without making multiple copies. If, for example, the leader needed to send 60 transactions to 6 nodes, it would break that collection of 60 into batches of 10 transactions and send one to each node. This allows the leader to put 60 transactions on the wire, not 60 transactions for each node. Each node then shares its batch with its peers. Once the node has collected all 6 batches, it reconstructs the original set of 60 transactions.
A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing, the approach is scaling well up to about 150 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique [_Turbine Block Propagation_](turbine-block-propagation.md).
A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing (December 2021), the approach is scaling well up to about 1,250 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique [_Turbine Block Propagation_](turbine-block-propagation.md).
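
One way to make the logarithmic-scaling claim concrete (the notation below is ours; the document itself only states the numeric example):

$$
T_{\text{confirm}}(N) \approx T_{\text{vote}} + \left(2 + \lceil \log_{k} N \rceil\right) t_{\text{hop}}, \qquad k \approx 1000
$$

With fanout k = 1000, the ceiling term is 1 for the first thousand nodes (three hops total) and 2 for the first million (four hops total), matching the one-extra-hop claim above.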

View File

@@ -428,7 +428,7 @@ await web3.sendAndConfirmTransaction(connection, transaction, [fromPublicKey])
[SourceDocumentation](https://solana-labs.github.io/solana-web3.js/classes/Struct.html)
The struct class is used to create Rust compatible structs in javascript. This class is only compatible with Borsch encoded Rust structs.
The struct class is used to create Rust compatible structs in javascript. This class is only compatible with Borsh encoded Rust structs.
#### Example Usage
@@ -455,7 +455,7 @@ export class Fee extends Struct {
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Enum.html)
The Enum class is used to represent a Rust compatible Enum in javascript. The enum will just be a string representation if logged but can be properly encoded/decoded when used in conjunction with [Struct](javascript-api.md#Struct). This class is only compatible with Borsch encoded Rust enumerations.
The Enum class is used to represent a Rust compatible Enum in javascript. The enum will just be a string representation if logged but can be properly encoded/decoded when used in conjunction with [Struct](javascript-api.md#Struct). This class is only compatible with Borsh encoded Rust enumerations.
#### Example Usage

View File

@@ -85,6 +85,8 @@ gives a convenient interface for the RPC methods.
Unstable methods may see breaking changes in patch releases and may not be supported in perpetuity.
- [blockSubscribe](jsonrpc-api.md#blocksubscribe---unstable-disabled-by-default)
- [blockUnsubscribe](jsonrpc-api.md#blockunsubscribe)
- [slotsUpdatesSubscribe](jsonrpc-api.md#slotsupdatessubscribe---unstable)
- [slotsUpdatesUnsubscribe](jsonrpc-api.md#slotsupdatesunsubscribe)
- [voteSubscribe](jsonrpc-api.md#votesubscribe---unstable-disabled-by-default)
@@ -363,9 +365,6 @@ Result:
### getBlock
**NEW: This method is only available in solana-core v1.7 or newer. Please use
[getConfirmedBlock](jsonrpc-api.md#getconfirmedblock) for solana-core v1.6**
Returns identity and transaction information about a confirmed block in the ledger
#### Parameters:
@@ -768,9 +767,6 @@ Result:
### getBlocks
**NEW: This method is only available in solana-core v1.7 or newer. Please use
[getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks) for solana-core v1.6**
Returns a list of confirmed blocks between two slots
#### Parameters:
@@ -802,9 +798,6 @@ Result:
### getBlocksWithLimit
**NEW: This method is only available in solana-core v1.7 or newer. Please use
[getConfirmedBlocksWithLimit](jsonrpc-api.md#getconfirmedblockswithlimit) for solana-core v1.6**
Returns a list of confirmed blocks starting at the given slot
#### Parameters:
@@ -999,7 +992,7 @@ Result:
### getFeeForMessage
**NEW: This method is only available in solana-core v1.9 or newer. Please use
[getFees](jsonrpc-api.md#getfees) for solana-core v1.7/v1.8**
[getFees](jsonrpc-api.md#getfees) for solana-core v1.8**
Get the fee the network will charge for a particular Message
@@ -1156,7 +1149,7 @@ Unhealthy Result (if additional information is available)
### getHighestSnapshotSlot
**NEW: This method is only available in solana-core v1.9 or newer. Please use
[getSnapshotSlot](jsonrpc-api.md#getsnapshotslot) for solana-core v1.7/v1.8**
[getSnapshotSlot](jsonrpc-api.md#getsnapshotslot) for solana-core v1.8**
Returns the highest slot information that the node has snapshots for.
@@ -1464,6 +1457,9 @@ Result:
### getLatestBlockhash
**NEW: This method is only available in solana-core v1.9 or newer. Please use
[getRecentBlockhash](jsonrpc-api.md#getrecentblockhash) for solana-core v1.8**
Returns the latest blockhash
#### Parameters:
@@ -1976,12 +1972,10 @@ Result:
### getSignaturesForAddress
**NEW: This method is only available in solana-core v1.7 or newer. Please use
[getConfirmedSignaturesForAddress2](jsonrpc-api.md#getconfirmedsignaturesforaddress2) for solana-core v1.6**
Returns confirmed signatures for transactions involving an
address backwards in time from the provided signature or most recent confirmed block
Returns signatures for confirmed transactions that include the given address in
their `accountKeys` list. Returns signatures backwards in time from the
provided signature or most recent confirmed block
#### Parameters:
* `<string>` - account address as base-58 encoded string
@@ -2727,9 +2721,6 @@ Result:
### getTransaction
**NEW: This method is only available in solana-core v1.7 or newer. Please use
[getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransaction) for solana-core v1.6**
Returns transaction details for a confirmed transaction
#### Parameters:
@@ -2958,7 +2949,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
Result:
```json
{"jsonrpc":"2.0","result":{"solana-core": "1.9.0"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "1.9.2"},"id":1}
```
### getVoteAccounts
@@ -3074,6 +3065,9 @@ Result:
### isBlockhashValid
**NEW: This method is only available in solana-core v1.9 or newer. Please use
[getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash) for solana-core v1.8**
Returns whether a blockhash is still valid or not
#### Parameters:
@@ -3451,6 +3445,339 @@ Result:
{"jsonrpc": "2.0","result": true,"id": 1}
```
### blockSubscribe - Unstable, disabled by default
**This subscription is unstable and only available if the validator was started
with the `--rpc-pubsub-enable-block-subscription` flag. The format of this
subscription may change in the future**
Subscribe to receive a notification whenever a new block is Confirmed or Finalized.
#### Parameters:
- `filter: <string>|<object>` - filter criteria for the blocks to receive results by account type; currently supported:
- "all" - include all transactions in block
- `{ "mentionsAccountOrProgram": <string> }` - return only transactions that mention the provided public key (as base-58 encoded string). If no mentions in a given block, then no notification will be sent.
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
- (optional) `encoding: <string>` - encoding for Account data, either "base58" (*slow*), "base64", "base64+zstd" or "jsonParsed".
"jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to base64 encoding, detectable when the `data` field is type `<string>`. Default is "base64".
- (optional) `transactionDetails: <string>` - level of transaction detail to return, either "full", "signatures", or "none". If parameter not provided, the default detail level is "full".
- (optional) `showRewards: bool` - whether to populate the `rewards` array. If parameter not provided, the default includes rewards.
#### Results:
- `integer` - subscription id \(needed to unsubscribe\)
#### Example:
Request:
```json
{"jsonrpc": "2.0", "id": "1", "method": "blockSubscribe", "params": ["all"]}
```
```json
{
"jsonrpc": "2.0",
"id": "1",
"method": "blockSubscribe",
"params": [
{"mentionsAccountOrProgram": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op"},
{
"commitment": "confirmed",
"encoding": "base64",
"showRewards": true,
"transactionDetails": "full"
}
]
}
```
Result:
```json
{"jsonrpc": "2.0","result": 0,"id": 1}
```
#### Notification Format:
The notification will be an object with the following fields:
- `slot: <u64>` - The corresponding slot.
- `err: <object | null>` - Error if something went wrong publishing the notification, otherwise null.
- `block: <object | null>` - A block object as seen in the [getBlock](jsonrpc-api.md#getblock) RPC HTTP method.
```json
{
"jsonrpc": "2.0",
"method": "blockNotification",
"params": {
"result": {
"context": {
"slot": 112301554
},
"value": {
"slot": 112301554,
"block": {
"previousBlockhash": "GJp125YAN4ufCSUvZJVdCyWQJ7RPWMmwxoyUQySydZA",
"blockhash": "6ojMHjctdqfB55JDpEpqfHnP96fiaHEcvzEQ2NNcxzHP",
"parentSlot": 112301553,
"transactions": [
{
"transaction": [
"OpltwoUvWxYi1P2U8vbIdE/aPntjYo5Aa0VQ2JJyeJE2g9Vvxk8dDGgFMruYfDu8/IfUWb0REppTe7IpAuuLRgIBAAkWnj4KHRpEWWW7gvO1c0BHy06wZi2g7/DLqpEtkRsThAXIdBbhXCLvltw50ZnjDx2hzw74NVn49kmpYj2VZHQJoeJoYJqaKcvuxCi/2i4yywedcVNDWkM84Iuw+cEn9/ROCrXY4qBFI9dveEERQ1c4kdU46xjxj9Vi+QXkb2Kx45QFVkG4Y7HHsoS6WNUiw2m4ffnMNnOVdF9tJht7oeuEfDMuUEaO7l9JeUxppCvrGk3CP45saO51gkwVYEgKzhpKjCx3rgsYxNR81fY4hnUQXSbbc2Y55FkwgRBpVvQK7/+clR4Gjhd3L4y+OtPl7QF93Akg1LaU9wRMs5nvfDFlggqI9PqJl+IvVWrNRdBbPS8LIIhcwbRTkSbqlJQWxYg3Bo2CTVbw7rt1ZubuHWWp0mD/UJpLXGm2JprWTePNULzHu67sfqaWF99LwmwjTyYEkqkRt1T0Je5VzHgJs0N5jY4iIU9K3lMqvrKOIn/2zEMZ+ol2gdgjshx+sphIyhw65F3J/Dbzk04LLkK+CULmN571Y+hFlXF2ke0BIuUG6AUF+4214Cu7FXnqo3rkxEHDZAk0lRrAJ8X/Z+iwuwI5cgbd9uHXZaGT2cvhRs7reawctIXtX1s3kTqM9YV+/wCpDLAp8axcEkaQkLDKRoWxqp8XLNZSKial7Rk+ELAVVKWoWLRXRZ+OIggu0OzMExvVLE5VHqy71FNHq4gGitkiKYNFWSLIE4qGfdFLZXy/6hwS+wq9ewjikCpd//C9BcCL7Wl0iQdUslxNVCBZHnCoPYih9JXvGefOb9WWnjGy14sG9j70+RSVx6BlkFELWwFvIlWR/tHn3EhHAuL0inS2pwX7ZQTAU6gDVaoqbR2EiJ47cKoPycBNvHLoKxoY9AZaBjPl6q8SKQJSFyFd9n44opAgI6zMTjYF/8Ok4VpXEESp3QaoUyTI9sOJ6oFP6f4dwnvQelgXS+AEfAsHsKXxGAIUDQENAgMEBQAGBwgIDg8IBJCER3QXl1AVDBADCQoOAAQLERITDAjb7ugh3gOuTy==",
"base64"
],
"meta": {
"err": null,
"status": {
"Ok": null
},
"fee": 5000,
"preBalances": [
1758510880,
2067120,
1566000,
1461600,
2039280,
2039280,
1900080,
1865280,
0,
3680844220,
2039280
],
"postBalances": [
1758505880,
2067120,
1566000,
1461600,
2039280,
2039280,
1900080,
1865280,
0,
3680844220,
2039280
],
"innerInstructions": [
{
"index": 0,
"instructions": [
{
"programIdIndex": 13,
"accounts": [
1,
15,
3,
4,
2,
14
],
"data": "21TeLgZXNbtHXVBzCaiRmH"
},
{
"programIdIndex": 14,
"accounts": [
3,
4,
1
],
"data": "6qfC8ic7Aq99"
},
{
"programIdIndex": 13,
"accounts": [
1,
15,
3,
5,
2,
14
],
"data": "21TeLgZXNbsn4QEpaSEr3q"
},
{
"programIdIndex": 14,
"accounts": [
3,
5,
1
],
"data": "6LC7BYyxhFRh"
}
]
},
{
"index": 1,
"instructions": [
{
"programIdIndex": 14,
"accounts": [
4,
3,
0
],
"data": "7aUiLHFjSVdZ"
},
{
"programIdIndex": 19,
"accounts": [
17,
18,
16,
9,
11,
12,
14
],
"data": "8kvZyjATKQWYxaKR1qD53V"
},
{
"programIdIndex": 14,
"accounts": [
9,
11,
18
],
"data": "6qfC8ic7Aq99"
}
]
}
],
"logMessages": [
"Program QMNeHCGYnLVDn1icRAfQZpjPLBNkfGbSKRB83G5d8KB invoke [1]",
"Program QMWoBmAyJLAsA1Lh9ugMTw2gciTihncciphzdNzdZYV invoke [2]"
],
"preTokenBalances": [
{
"accountIndex": 4,
"mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u",
"uiTokenAmount": {
"uiAmount": null,
"decimals": 6,
"amount": "0",
"uiAmountString": "0"
},
"owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op"
},
{
"accountIndex": 5,
"mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u",
"uiTokenAmount": {
"uiAmount": 11513.0679,
"decimals": 6,
"amount": "11513067900",
"uiAmountString": "11513.0679"
},
"owner": "rXhAofQCT7NN9TUqigyEAUzV1uLL4boeD8CRkNBSkYk"
},
{
"accountIndex": 10,
"mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1",
"uiTokenAmount": {
"uiAmount": null,
"decimals": 6,
"amount": "0",
"uiAmountString": "0"
},
"owner": "CL9wkGFT3SZRRNa9dgaovuRV7jrVVigBUZ6DjcgySsCU"
},
{
"accountIndex": 11,
"mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1",
"uiTokenAmount": {
"uiAmount": 15138.514093,
"decimals": 6,
"amount": "15138514093",
"uiAmountString": "15138.514093"
},
"owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op"
}
],
"postTokenBalances": [
{
"accountIndex": 4,
"mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u",
"uiTokenAmount": {
"uiAmount": null,
"decimals": 6,
"amount": "0",
"uiAmountString": "0"
},
"owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op"
},
{
"accountIndex": 5,
"mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u",
"uiTokenAmount": {
"uiAmount": 11513.103028,
"decimals": 6,
"amount": "11513103028",
"uiAmountString": "11513.103028"
},
"owner": "rXhAofQCT7NN9TUqigyEAUzV1uLL4boeD8CRkNBSkYk"
},
{
"accountIndex": 10,
"mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1",
"uiTokenAmount": {
"uiAmount": null,
"decimals": 6,
"amount": "0",
"uiAmountString": "0"
},
"owner": "CL9wkGFT3SZRRNa9dgaovuRV7jrVVigBUZ6DjcgySsCU"
},
{
"accountIndex": 11,
"mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1",
"uiTokenAmount": {
"uiAmount": 15489.767829,
"decimals": 6,
"amount": "15489767829",
"uiAmountString": "15489.767829"
},
"owner": "BeiHVPRE8XeX3Y2xVNrSsTpAScH94nYySBVQ4HqgN9at"
}
],
"rewards": []
}
}
],
"blockTime": 1639926816,
"blockHeight": 101210751
},
"err": null
}
},
"subscription": 14
}
}
```
### blockUnsubscribe
Unsubscribe from block notifications
#### Parameters:
- `<integer>` - subscription id to cancel
#### Results:
- `<bool>` - unsubscribe success message
#### Example:
Request:
```json
{"jsonrpc":"2.0", "id":1, "method":"blockUnsubscribe", "params":[0]}
```
Response:
```json
{"jsonrpc": "2.0","result": true,"id": 1}
```
### logsSubscribe
Subscribe to transaction logging
@@ -4376,8 +4703,10 @@ Result:
**DEPRECATED: Please use [getSignaturesForAddress](jsonrpc-api.md#getsignaturesforaddress) instead**
This method is expected to be removed in solana-core v2.0
Returns confirmed signatures for transactions involving an
address backwards in time from the provided signature or most recent confirmed block
Returns signatures for confirmed transactions that include the given address in
their `accountKeys` list. Returns signatures backwards in time from the
provided signature or most recent confirmed block
#### Parameters:
* `<string>` - account address as base-58 encoded string

View File

@@ -21,11 +21,16 @@ Some important crates:
- [`solana-client`] &mdash; For interacting with a Solana node via the
[JSON RPC API](jsonrpc-api).
- [`solana-cli-config`] &mdash; Loading and saving the Solana CLI configuration
file.
- [`solana-clap-utils`] &mdash; Routines for setting up a CLI, using [`clap`],
as used by the main Solana CLI.
as used by the main Solana CLI. Includes functions for loading all types of
signers supported by the CLI.
[`solana-program`]: https://docs.rs/solana-program
[`solana-sdk`]: https://docs.rs/solana-sdk
[`solana-client`]: https://docs.rs/solana-client
[`solana-cli-config`]: https://docs.rs/solana-cli-config
[`solana-clap-utils`]: https://docs.rs/solana-clap-utils
[`clap`]: https://docs.rs/clap
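
As a quick orientation to these crates, here is a minimal sketch using `solana-client`'s blocking `RpcClient`. The devnet URL and the throwaway keypair are stand-ins; a real tool would load a signer via `solana-clap-utils` or a path from `solana-cli-config`:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::signature::{Keypair, Signer};

fn main() {
    // Any RPC endpoint works here; devnet is used as an example.
    let rpc = RpcClient::new("https://api.devnet.solana.com".to_string());

    // A throwaway keypair stands in for a real wallet signer.
    let wallet = Keypair::new();
    let address = wallet.pubkey();

    match rpc.get_balance(&address) {
        Ok(lamports) => println!("{} owns {} lamports", address, lamports),
        Err(err) => eprintln!("RPC request failed: {}", err),
    }
}
```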

View File

@@ -0,0 +1,360 @@
---
title: Plugins
---
## Overview
Validators under heavy RPC loads, such as when serving `getProgramAccounts` calls,
can fall behind the network. To solve this problem, the validator has been
enhanced to support a plugin mechanism through which the information about
accounts and slots can be transmitted to external data stores such as relational
databases, NoSQL databases, or Kafka. RPC services can then be developed to
consume data from these external data stores with the possibility of more
flexible and targeted optimizations such as caching and indexing. This allows
the validator to focus on processing transactions without being slowed down by
busy RPC requests.
This document describes the interfaces of the plugin and the reference plugin
implementation for the PostgreSQL database.
[crates.io]: https://crates.io/search?q=solana-
[docs.rs]: https://docs.rs/releases/search?query=solana-
### Important Crates:
- [`solana-accountsdb-plugin-interface`] &mdash; This crate defines the plugin
interfaces.
- [`solana-accountsdb-plugin-postgres`] &mdash; The crate for the reference
plugin implementation for the PostgreSQL database.
[`solana-accountsdb-plugin-interface`]: https://docs.rs/solana-accountsdb-plugin-interface
[`solana-accountsdb-plugin-postgres`]: https://docs.rs/solana-accountsdb-plugin-postgres
## The Plugin Interface
The Plugin interface is declared in [`solana-accountsdb-plugin-interface`]. It
is defined by the trait `AccountsDbPlugin`. The plugin should implement the
trait and expose a "C" function `_create_plugin` to return the pointer to this
trait. For example, in the reference implementation, the following code
instantiates the PostgreSQL plugin `AccountsDbPluginPostgres` and returns its
pointer.
```
#[no_mangle]
#[allow(improper_ctypes_definitions)]
/// # Safety
///
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
let plugin = AccountsDbPluginPostgres::new();
let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
Box::into_raw(plugin)
}
```
A plugin implementation can implement the `on_load` method to initialize itself.
This function is invoked after a plugin is dynamically loaded into the validator
when it starts. The configuration of the plugin is controlled by a configuration
file in JSON format. The JSON file must have a field `libpath` that points
to the full path name of the shared library implementing the plugin, and may
have other configuration information, like connection parameters for the external
database. The plugin configuration file is specified by the validator's CLI
parameter `--accountsdb-plugin-config` and the file must be readable by the
validator process.
Please see the [config file](#config) for the reference
PostgreSQL plugin below for an example.
The plugin can implement the `on_unload` method to do any cleanup before the
plugin is unloaded when the validator is gracefully shut down.
The following method is used for notifying on an account update:
```
fn update_account(
&mut self,
account: ReplicaAccountInfoVersions,
slot: u64,
is_startup: bool,
) -> Result<()>
```
The `ReplicaAccountInfoVersions` struct contains the metadata and data of the
streamed account. The `slot` points to the slot the account is being updated at. When
`is_startup` is true, it indicates the account is loaded from snapshots when
the validator starts up. When `is_startup` is false, the account is updated
when processing a transaction.
The following method is called once all accounts have been notified after the
validator restores the AccountsDb from snapshots at startup.
```
fn notify_end_of_startup(&mut self) -> Result<()>
```
When `update_account` is called during transaction processing, the plugin
should process the notification as quickly as possible, because any delay may
cause the validator to fall behind the network. Persistence to the external
data store is best done asynchronously.
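
A minimal sketch of one way to do that handoff; the payload struct, the channel, the writer thread, and the `AsyncPlugin` type are all illustrative, not part of the actual interface:

```rust
use std::sync::mpsc::{channel, Sender};
use std::thread;

// Simplified, owned stand-in for the account update payload; a real plugin
// would copy what it needs out of `ReplicaAccountInfoVersions`.
struct AccountUpdate {
    pubkey: [u8; 32],
    slot: u64,
}

struct AsyncPlugin {
    sender: Sender<AccountUpdate>,
}

impl AsyncPlugin {
    fn new() -> Self {
        let (sender, receiver) = channel::<AccountUpdate>();
        // A background writer owns the (hypothetical) database connection,
        // so the validator-side callback only pays for a channel send.
        thread::spawn(move || {
            for update in receiver {
                // Replace with a real INSERT/UPSERT against the data store.
                let _ = (update.pubkey, update.slot);
            }
        });
        Self { sender }
    }

    // Analogous to `update_account`: enqueue and return immediately.
    fn update_account(&mut self, update: AccountUpdate) -> Result<(), String> {
        self.sender.send(update).map_err(|e| e.to_string())
    }
}

fn main() {
    let mut plugin = AsyncPlugin::new();
    plugin
        .update_account(AccountUpdate { pubkey: [0; 32], slot: 42 })
        .unwrap();
}
```

A bounded channel plus a `panic_on_db_errors`-style policy in the writer thread would mirror the reference plugin's consistency choice more closely.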
The following method is used for notifying slot status changes:
```
fn update_slot_status(
&mut self,
slot: u64,
parent: Option<u64>,
status: SlotStatus,
) -> Result<()>
```
To ensure data consistency, the plugin implementation can choose to abort
the validator in case of errors persisting to external stores. When the
validator restarts, the account data will be re-transmitted.
For more details, please refer to the Rust documentation in
[`solana-accountsdb-plugin-interface`].
## Example PostgreSQL Plugin
The [`solana-accountsdb-plugin-postgres`] crate implements a plugin storing
account data to a PostgreSQL database to illustrate how a plugin can be
developed.
<a name="config">
### Configuration File Format
</a>
The plugin is configured using the input configuration file. An example
configuration file looks like the following:
```
{
"libpath": "/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
"host": "postgres-server",
"user": "solana",
"port": 5433,
"threads": 20,
"batch_size": 20,
"panic_on_db_errors": true,
"accounts_selector" : {
"accounts" : ["*"]
}
}
```
The `host`, `user`, and `port` control the PostgreSQL configuration
information. For more advanced connection options, please use the
`connection_str` field. Please see [Rust postgres configuration](https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html).
To improve the throughput to the database, the plugin supports connection pooling
using multiple threads, each maintaining a connection to the PostgreSQL database.
The count of the threads is controlled by the `threads` field. A higher thread
count usually offers better performance.
To further improve performance when saving large numbers of accounts at
startup, the plugin uses bulk inserts. The batch size is controlled by the
`batch_size` parameter. This can help reduce the round trips to the database.
The `panic_on_db_errors` can be used to panic the validator in case of database
errors to ensure data consistency.
### Account Selection
The `accounts_selector` can be used to filter the accounts that should be persisted.
For example, one can use the following to persist only the accounts with particular
Base58-encoded Pubkeys,
```
"accounts_selector" : {
"accounts" : ["pubkey-1", "pubkey-2", ..., "pubkey-n"],
}
```
Or use the following to select accounts with certain program owners:
```
"accounts_selector" : {
"owners" : ["pubkey-owner-1", "pubkey-owner-2", ..., "pubkey-owner-m"],
}
```
To select all accounts, use the wildcard character (*):
```
"accounts_selector" : {
"accounts" : ["*"],
}
```
### Database Setup
#### Install PostgreSQL Server
Please follow the instructions at [PostgreSQL Ubuntu Installation](https://www.postgresql.org/download/linux/ubuntu/)
to install the PostgreSQL database server. For example, to
install postgresql-14:
```
sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install postgresql-14
```
#### Control the Database Access
Modify pg_hba.conf as necessary to grant the plugin access to the database.
For example, in /etc/postgresql/14/main/pg_hba.conf, the following entry allows
nodes with IPs in the CIDR 10.138.0.0/24 to access all databases. The validator
runs on a node with an IP in the specified range.
```
host all all 10.138.0.0/24 trust
```
It is recommended to run the database server on a separate node from the validator for
better performance.
#### Configure the Database Performance Parameters
Please refer to the [PostgreSQL Server Configuration](https://www.postgresql.org/docs/14/runtime-config.html)
for configuration details. The reference implementation uses the following
settings in /etc/postgresql/14/main/postgresql.conf, which differ from the
default postgresql-14 installation, for better database performance.
```
max_connections = 200 # (change requires restart)
shared_buffers = 1GB # min 128kB
effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching
wal_level = minimal # minimal, replica, or logical
fsync = off # flush data to disk for crash safety
synchronous_commit = off # synchronization level;
full_page_writes = off # recover from partial page writes
max_wal_senders = 0 # max number of walsender processes
```
The sample [postgresql.conf](https://github.com/solana-labs/solana/blob/7ac43b16d2c766df61ae0a06d7aaf14ba61996ac/accountsdb-plugin-postgres/scripts/postgresql.conf)
can be used for reference.
#### Create the Database Instance and the Role
Start the server:
```
sudo systemctl start postgresql@14-main
```
Create the database. For example, the following creates a database named 'solana':
```
sudo -u postgres createdb solana -p 5433
```
Create the database user. For example, the following creates a regular user named 'solana':
```
sudo -u postgres createuser -p 5433 solana
```
Verify the database is working using psql. For example, assuming the node running
PostgreSQL has the IP 10.138.0.9, the following command will land in a shell where
SQL commands can be entered:
```
psql -U solana -p 5433 -h 10.138.0.9 -w -d solana
```
#### Create the Schema Objects
Use the [create_schema.sql](https://github.com/solana-labs/solana/blob/7ac43b16d2c766df61ae0a06d7aaf14ba61996ac/accountsdb-plugin-postgres/scripts/create_schema.sql)
to create the objects for storing accounts and slots.
Download the script from GitHub:
```
wget https://raw.githubusercontent.com/solana-labs/solana/7ac43b16d2c766df61ae0a06d7aaf14ba61996ac/accountsdb-plugin-postgres/scripts/create_schema.sql
```
Then run the script:
```
psql -U solana -p 5433 -h 10.138.0.9 -w -d solana -f create_schema.sql
```
After this, start the validator with the plugin by using the `--accountsdb-plugin-config`
argument mentioned above.
#### Destroy the Schema Objects
To destroy the database objects created by `create_schema.sql`, use
[drop_schema.sql](https://github.com/solana-labs/solana/blob/7ac43b16d2c766df61ae0a06d7aaf14ba61996ac/accountsdb-plugin-postgres/scripts/drop_schema.sql).
For example,
```
psql -U solana -p 5433 -h 10.138.0.9 -w -d solana -f drop_schema.sql
```
### Capture Historical Account Data
Historical account data is captured using a database trigger, as shown in
`create_schema.sql`:
```
CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
BEGIN
INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
RETURN NEW;
END;
$audit_account_update$ LANGUAGE plpgsql;
CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
FOR EACH ROW EXECUTE PROCEDURE audit_account_update();
```
The historical data is stored in the `account_audit` table.
The trigger can be dropped to disable this feature, for example:
```
DROP TRIGGER account_update_trigger ON account;
```
Over time, the `account_audit` table can accumulate a large amount of data. You may
choose to limit that by deleting older historical data.
For example, the following SQL statement can be used to keep up to the 1,000 most
recent records for an account:
```
delete from account_audit a2 where (pubkey, write_version) in
(select pubkey, write_version from
(select a.pubkey, a.updated_on, a.slot, a.write_version, a.lamports,
rank() OVER ( partition by pubkey order by write_version desc) as rnk
from account_audit a) ranked
where ranked.rnk > 1000)
```
### Performance Considerations
When a validator lacks sufficient compute power, the overhead of saving the
account data can cause it to fall behind the network, especially when all
accounts or a large number of accounts are selected. The node hosting the
PostgreSQL database needs to be powerful enough to handle the database load
as well. It has been found that using the GCP n2-standard-64 machine type for the
validator and n2-highmem-32 for the PostgreSQL node is adequate for
transmitting all accounts while keeping up with the network. In addition, it is
best to keep the validator and the PostgreSQL server on the same local network to
reduce latency. You may need to size the validator and database nodes
differently if serving other loads.

View File

@@ -14,6 +14,7 @@ starts a full-featured, single-node cluster on the developer's workstation.
- Direct [on-chain program](on-chain-programs/overview) deployment
(`--bpf-program ...`)
- Clone accounts from a public cluster, including programs (`--clone ...`)
- Load accounts from files
- Configurable transaction history retention (`--limit-ledger-size ...`)
- Configurable epoch length (`--slots-per-epoch ...`)
- Jump to an arbitrary slot (`--warp-slot ...`)

View File

@@ -39,7 +39,9 @@ From these simulated _Inflation Schedules_, we can also project ranges for token
Finally we can estimate the _Staked Yield_ on staked SOL, if we introduce an additional parameter, previously discussed, _% of Staked SOL_:
%~\text{SOL Staked} = \frac{\text{Total SOL Staked}}{\text{Total Current Supply}}
$$
\%~\text{SOL Staked} = \frac{\text{Total SOL Staked}}{\text{Total Current Supply}}
$$
In this case, because _% of Staked SOL_ is a parameter that must be estimated (unlike the _Inflation Schedule_ parameters), it is easier to use specific _Inflation Schedule_ parameters and explore a range of _% of Staked SOL_. For the below example, we've chosen the middle of the parameter ranges explored above:

View File

@@ -4,17 +4,11 @@ title: Staking Rewards
A Proof of Stake \(PoS\), \(i.e. using the in-protocol asset, SOL, to provide secure consensus\) design is outlined here. Solana implements a proof of stake reward/security scheme for validator nodes in the cluster. The purpose is threefold:
- Align validator incentives with that of the greater cluster through
- Align validator incentives with that of the greater cluster through skin-in-the-game deposits at risk
skin-in-the-game deposits at risk
- Avoid 'nothing at stake' fork voting issues by implementing slashing rules aimed at promoting fork convergence
- Avoid 'nothing at stake' fork voting issues by implementing slashing rules
aimed at promoting fork convergence
- Provide an avenue for validator rewards provided as a function of validator
participation in the cluster.
- Provide an avenue for validator rewards provided as a function of validator participation in the cluster.
While many of the details of the specific implementation are currently under consideration and are expected to come into focus through specific modeling studies and parameter exploration on the Solana testnet, we outline here our current thinking on the main components of the PoS system. Much of this thinking is based on the current status of Casper FFG, with optimizations and specific attributes to be modified as is allowed by Solana's Proof of History \(PoH\) blockchain data structure.
@@ -24,29 +18,11 @@ Solana's ledger validation design is based on a rotating, stake-weighted selecte
To become a Solana validator, one must deposit/lock-up some amount of SOL in a contract. This SOL will not be accessible for a specific time period. The precise duration of the staking lockup period has not been determined. However we can consider three phases of this time for which specific parameters will be necessary:
- _Warm-up period_: which SOL is deposited and inaccessible to the node,
- _Warm-up period_: which SOL is deposited and inaccessible to the node, however PoH transaction validation has not begun. Most likely on the order of days to weeks
however PoH transaction validation has not begun. Most likely on the order of
- _Validation period_: a minimum duration for which the deposited SOL will be inaccessible, at risk of slashing \(see slashing rules below\) and earning rewards for the validator participation. Likely duration of months to a year.
days to weeks
- _Validation period_: a minimum duration for which the deposited SOL will be
inaccessible, at risk of slashing \(see slashing rules below\) and earning
rewards for the validator participation. Likely duration of months to a
year.
- _Cool-down period_: a duration of time following the submission of a
'withdrawal' transaction. During this period validation responsibilities have
been removed and the funds continue to be inaccessible. Accumulated rewards
should be delivered at the end of this period, along with the return of the
initial deposit.
- _Cool-down period_: a duration of time following the submission of a 'withdrawal' transaction. During this period validation responsibilities have been removed and the funds continue to be inaccessible. Accumulated rewards should be delivered at the end of this period, along with the return of the initial deposit.
Solana's trustless sense of time and ordering provided by its PoH data structure, along with its [turbine](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast and transmission design, should provide sub-second transaction confirmation times that scale with the log of the number of nodes in the cluster. This means we shouldn't have to restrict the number of validating nodes with prohibitive 'minimum deposits' and expect nodes to be able to become validators with nominal amounts of SOL staked. At the same time, Solana's focus on high throughput should create an incentive for validation clients to provide performant and reliable hardware. Combined with a potential minimum network speed threshold to join as a validation-client, we expect a healthy validation delegation market to emerge.

View File

@@ -19,14 +19,26 @@ transaction.
At present, the following commands support offline signing:
- [`create-stake-account`](cli/usage.md#solana-create-stake-account)
- [`create-stake-account-checked`](cli/usage.md#solana-create-stake-account-checked)
- [`deactivate-stake`](cli/usage.md#solana-deactivate-stake)
- [`delegate-stake`](cli/usage.md#solana-delegate-stake)
- [`split-stake`](cli/usage.md#solana-split-stake)
- [`stake-authorize`](cli/usage.md#solana-stake-authorize)
- [`stake-authorize-checked`](cli/usage.md#solana-stake-authorize-checked)
- [`stake-set-lockup`](cli/usage.md#solana-stake-set-lockup)
- [`stake-set-lockup-checked`](cli/usage.md#solana-stake-set-lockup-checked)
- [`transfer`](cli/usage.md#solana-transfer)
- [`withdraw-stake`](cli/usage.md#solana-withdraw-stake)
- [`create-vote-account`](cli/usage.md#solana-create-vote-account)
- [`vote-authorize-voter`](cli/usage.md#solana-vote-authorize-voter)
- [`vote-authorize-voter-checked`](cli/usage.md#solana-vote-authorize-voter-checked)
- [`vote-authorize-withdrawer`](cli/usage.md#solana-vote-authorize-withdrawer)
- [`vote-authorize-withdrawer-checked`](cli/usage.md#solana-vote-authorize-withdrawer-checked)
- [`vote-update-commission`](cli/usage.md#solana-vote-update-commission)
- [`vote-update-validator`](cli/usage.md#solana-vote-update-validator)
- [`withdraw-from-vote-account`](cli/usage.md#solana-withdraw-from-vote-account)
## Signing Transactions Offline
To sign a transaction offline, pass the following arguments on the command line

View File

@@ -108,7 +108,7 @@ The time, i.e. number of [slots](#slot), for which a [leader schedule](#leader-s
## fee account
The fee account in the transaction is the account pays for the cost of including the transaction in the ledger. This is the first account in the transaction. This account must be declared as Read-Write (writable) in the transaction since paying for the transaction reduces the account balance.
The fee account in the transaction is the account that pays for the cost of including the transaction in the ledger. This is the first account in the transaction. This account must be declared as Read-Write (writable) in the transaction since paying for the transaction reduces the account balance.
## finality

View File

@@ -5,6 +5,13 @@ title: Mobile App Wallets
Solana is supported by multiple third-party apps which should provide a familiar
experience for most people who are new or experienced with using crypto wallets.
## Coin98
[Coin98](https://coin98.app/) is an app available for iOS and Android and can
be used to send and receive SOL tokens.
_Note: Coin98 does not support stake accounts or staking operations_
## Exodus
Send, receive & exchange cryptocurrency with ease on the world's leading Desktop, Mobile and Hardware crypto wallets.
@@ -13,6 +20,16 @@ Download [Exodus](https://exodus.com/) to easily and securely manage your Solana
Exodus includes live charts, a built-in exchange, and 24/7 human support.
## Solflare
[Solflare Wallet](https://solflare.com/) has mobile applications available for both
iOS and Android. These Mobile apps have support for sending Solana and SPL tokens,
staking, and NFT management in a fully-featured NFT gallery.
Security is a top priority for Solflare - the mobile wallet is non-custodial,
meaning keys are managed by the user who retains total control of their own funds.
The app supports biometric protection alongside passwords for maximum security.
## Trust Wallet
[Trust Wallet](https://trustwallet.com/) is an app available for iOS and Android
@@ -40,13 +57,6 @@ viewed at any later time in the app by following these steps:
- Go to Setting -> Wallets
- Under the Options menu for a particular wallet tap "Show Recovery Phrase"
## Coin98
[Coin98](https://coin98.app/) is an app available for iOS and Android and can
be used to send and receive SOL tokens.
_Note: Coin98 does not support stake accounts or staking operations_
## Zelcore
[Zelcore](https://zelcore.io) is a multi-currency wallet now supporting SOL and all Solana tokens (SPL). Each Zelcore account has 3 separate addresses for each asset.

View File

@@ -2,6 +2,19 @@
title: Web Wallets
---
## BitKeep
[BitKeep](https://bitkeep.com) is a digital currency wallet that can send and receive SOL/SPL tokens.
BitKeep also supports Solana DApps with BitKeep Browser and BitKeep Chrome.
## MathWallet
[MathWallet](https://mathwallet.org/) supports wallet
addresses for sending and receiving SOL and SPL Tokens through its
browser extension and web wallet interface.
_Note: The MathWallet iOS and Android apps do not yet support SOL and SPL Tokens_
## Phantom
[Phantom](https://phantom.app/) is a friendly non-custodial, browser
@@ -14,12 +27,21 @@ receive, collect, and swap tokens.
Available for Chrome, Brave, Firefox, Vivaldi, and Edge
## SolFlare
## Solflare
[SolFlare.com](https://solflare.com/) is a community-created non-custodial
web wallet that was built specifically for Solana. SolFlare supports the creation
and management of stake accounts, and gives users the ability to send and receive
any SPL Token.
[Solflare](https://solflare.com/) is a non-custodial web wallet created by the
[Solrise Finance](https://solrise.finance) team that was built specifically for Solana.
Solflare is accessible and easy to use but also has a very comprehensive set of features, including:
- The ability to connect your wallet to almost any Solana Dapp
- Transaction simulations, which show the balance changes expected from a transaction and protect against malicious dapps
- Deep staking support with the ability to create and manage all your staking accounts
- Comprehensive NFT functionality, including the ability to send, receive and preview NFTs from a Metaplex-compatible NFT gallery. Support is provided for image, video, audio, and 3D/VR NFTs.
- An in-wallet swap for SPL tokens
- Compatibility with the Ledger hardware wallet
Solflare is available on web, as a browser extension, and as a mobile app for both Android and iOS.
The extension is available on Chrome, Brave, Firefox, Opera, and Edge.
Check out our [guide for using SolFlare](solflare.md).
@@ -28,15 +50,3 @@ Check out our [guide for using SolFlare](solflare.md).
[sollet.io](https://www.sollet.io/) is a non-custodial web wallet created by the
[Project Serum](https://projectserum.com/) team. sollet.io can be used to send
and receive SOL and any SPL Token.
## MathWallet
[MathWallet](https://mathwallet.org/) supports wallet
addresses for sending and receiving SOL and SPL Tokens through its
browser extension and web wallet interface.
_Note: The MathWallet iOS and Android app do not yet support SOL and SPL Tokens_
## BitKeep
[BitKeep](https://bitkeep.com) is a digital currency wallet that can send and receive SOL/SPL tokens.
BitKeep also supports Solana DApps via BitKeep Browser and BitKeep Chrome.


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-dos"
version = "1.9.0"
version = "1.9.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,15 +13,15 @@ bincode = "1.3.3"
clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
solana-core = { path = "../core", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.2" }
solana-gossip = { path = "../gossip", version = "=1.9.2" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-net-utils = { path = "../net-utils", version = "=1.9.2" }
solana-perf = { path = "../perf", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-streamer = { path = "../streamer", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
solana-client = { path = "../client", version = "=1.9.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
version = "1.9.0"
version = "1.9.2"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ console = "0.15.0"
indicatif = "0.16.2"
log = "0.4.14"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-runtime = { path = "../runtime", version = "=1.9.2" }
[lib]
crate-type = ["lib"]


@@ -1,6 +1,6 @@
[package]
name = "solana-entry"
version = "1.9.0"
version = "1.9.2"
description = "Solana Entry"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,17 +16,17 @@ log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.1"
serde = "1.0.130"
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.2" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.2" }
solana-metrics = { path = "../metrics", version = "=1.9.2" }
solana-perf = { path = "../perf", version = "=1.9.2" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
bincode = "1.3.3"
[dev-dependencies]
matches = "0.1.9"
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.2" }
[lib]
crate-type = ["lib"]


@@ -15,7 +15,7 @@ use {
solana_metrics::*,
solana_perf::{
cuda_runtime::PinnedVec,
packet::{Packet, Packets, PacketsRecycler, PACKETS_PER_BATCH},
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKETS_PER_BATCH},
perf_libs,
recycler::Recycler,
sigverify,
@@ -308,7 +308,7 @@ impl<'a> EntrySigVerificationState {
pub struct VerifyRecyclers {
hash_recycler: Recycler<PinnedVec<Hash>>,
tick_count_recycler: Recycler<PinnedVec<u64>>,
packet_recycler: PacketsRecycler,
packet_recycler: PacketBatchRecycler,
out_recycler: Recycler<PinnedVec<u8>>,
tx_offset_recycler: Recycler<sigverify::TxOffset>,
}
@@ -499,12 +499,12 @@ pub fn start_verify_transactions(
})
.flatten()
.collect::<Vec<_>>();
let mut packets_vec = entry_txs
let mut packet_batches = entry_txs
.par_iter()
.chunks(PACKETS_PER_BATCH)
.map(|slice| {
let vec_size = slice.len();
let mut packets = Packets::new_with_recycler(
let mut packet_batch = PacketBatch::new_with_recycler(
verify_recyclers.packet_recycler.clone(),
vec_size,
"entry-sig-verify",
@@ -515,13 +515,13 @@ pub fn start_verify_transactions(
// uninitialized anyway, so the initialization would simply write junk into
// the vector anyway.
unsafe {
packets.packets.set_len(vec_size);
packet_batch.packets.set_len(vec_size);
}
let entry_tx_iter = slice
.into_par_iter()
.map(|tx| tx.to_versioned_transaction());
let res = packets
let res = packet_batch
.packets
.par_iter_mut()
.zip(entry_tx_iter)
@@ -530,7 +530,7 @@ pub fn start_verify_transactions(
Packet::populate_packet(pair.0, None, &pair.1).is_ok()
});
if res {
Ok(packets)
Ok(packet_batch)
} else {
Err(TransactionError::SanitizeFailure)
}
@@ -542,14 +542,14 @@ pub fn start_verify_transactions(
let gpu_verify_thread = thread::spawn(move || {
let mut verify_time = Measure::start("sigverify");
sigverify::ed25519_verify(
&mut packets_vec,
&mut packet_batches,
&tx_offset_recycler,
&out_recycler,
false,
);
let verified = packets_vec
let verified = packet_batches
.iter()
.all(|packets| packets.packets.iter().all(|p| !p.meta.discard));
.all(|batch| batch.packets.iter().all(|p| !p.meta.discard));
verify_time.stop();
(verified, verify_time.as_us())
});
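
The hunks above are mostly a mechanical rename (`Packets` -> `PacketBatch`, `PacketsRecycler` -> `PacketBatchRecycler`), but the construction-and-discard pattern is worth seeing in isolation. A minimal sketch of it, assuming the solana-perf 1.9.2 API exactly as it appears in this diff, and using a safe `resize` in place of the hot path's unsafe `set_len`:

```rust
// Minimal sketch of the PacketBatch pattern used above. Assumptions:
// solana-perf 1.9.2, whose identifiers (PacketBatch::new_with_recycler,
// the `packets` field, `meta.discard`) appear in the diff itself.
use solana_perf::packet::{Packet, PacketBatch, PacketBatchRecycler, PACKETS_PER_BATCH};

fn main() {
    let recycler = PacketBatchRecycler::default();

    // Allocate one batch sized to a full chunk; the label is used only
    // for recycler statistics.
    let mut batch = PacketBatch::new_with_recycler(recycler, PACKETS_PER_BATCH, "example");

    // Fill with default packets rather than the unsafe set_len() used on
    // the hot path, where every slot is overwritten immediately anyway.
    batch.packets.resize(PACKETS_PER_BATCH, Packet::default());

    // Sigverify flags failing packets instead of removing them; a batch
    // passes only if no packet is marked for discard.
    let all_valid = batch.packets.iter().all(|p| !p.meta.discard);
    println!("packets: {}, all valid: {}", batch.packets.len(), all_valid);
}
```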


@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.9.0"
version = "1.9.2"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,12 +16,12 @@ clap = "2.33"
log = "0.4.14"
serde = "1.0.130"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-cli-config = { path = "../cli-config", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.2" }
solana-cli-config = { path = "../cli-config", version = "=1.9.2" }
solana-logger = { path = "../logger", version = "=1.9.2" }
solana-metrics = { path = "../metrics", version = "=1.9.2" }
solana-sdk = { path = "../sdk", version = "=1.9.2" }
solana-version = { path = "../version", version = "=1.9.2" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }

Some files were not shown because too many files have changed in this diff.