Compare commits


170 Commits

Author SHA1 Message Date
mergify[bot]
8ce65878da improve multi executor cache addition (#22382)
Co-authored-by: Jack May <jack@solana.com>
2022-01-08 13:03:46 +00:00
Trent Nelson
a4ca18a54d add executor creation trace timings 2022-01-08 05:25:37 -07:00
mergify[bot]
7cb147fdcd Executor cache count primer (backport #22333) (#22375)
* bank: prime new executor cache entry use-counts

(cherry picked from commit 4ce48307bb)

* --amend

(cherry picked from commit ad3cb0bc93)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-01-08 11:01:34 +00:00
mergify[bot]
2d693be9fa remove per program timings from blockstore processor ledger replay (#22370) (#22372)
(cherry picked from commit 813006b33b)

Co-authored-by: carllin <carl@solana.com>
2022-01-08 08:43:48 +00:00
mergify[bot]
50e716fc80 bank: Add executors cache metrics (#22368)
(cherry picked from commit 6d76db1de5)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-01-08 01:34:53 +00:00
Justin Starry
1f00926874 Add runtime support for address table lookups (backport #22223) (#22354) 2022-01-08 07:57:04 +08:00
mergify[bot]
662c6be51e removes CowCachedExecutors (#22343) (#22363)
Copy-on-write semantics for cached executors can be implemented by a
simple Arc<CachedExecutors> as opposed to CowCachedExecutors:
https://github.com/solana-labs/solana/blob/f1e2598ba/runtime/src/bank.rs#L244-L247

This will also avoid the need for double locking as in:
https://github.com/solana-labs/solana/blob/f1e2598ba/runtime/src/bank.rs#L3490-L3491
https://github.com/solana-labs/solana/blob/f1e2598ba/runtime/src/bank.rs#L3525-L3526
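
For illustration, a minimal sketch of the Arc-based copy-on-write pattern described above (stand-in types and method names, not the actual bank code):

```rust
use std::sync::{Arc, RwLock};

#[derive(Clone, Default)]
struct CachedExecutors; // stand-in for the real cache type

struct Bank {
    // A plain Arc replaces the CowCachedExecutors wrapper.
    cached_executors: RwLock<Arc<CachedExecutors>>,
}

impl Bank {
    fn update_executors(&self) {
        // Single lock acquisition; Arc::make_mut clones the cache only
        // if another Arc still references it (copy-on-write).
        let mut guard = self.cached_executors.write().unwrap();
        let _executors: &mut CachedExecutors = Arc::make_mut(&mut *guard);
        // ... mutate the cache in place ...
    }
}
```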

(cherry picked from commit c2389fc209)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-07 16:04:13 +00:00
mergify[bot]
9761f5b67f Add aarch64-apple-darwin publish tarball step (#22356)
(cherry picked from commit e2aa932e97)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-01-07 10:17:11 +00:00
mergify[bot]
7b1da62763 Add execute metrics (backport #22296) (#22335)
* move `ExecuteTimings` from `runtime::bank` to `program_runtime::timings`

(cherry picked from commit 7d32909e17)

# Conflicts:
#	core/Cargo.toml
#	ledger/Cargo.toml
#	programs/bpf/Cargo.lock

* Add execute metrics

(cherry picked from commit b25e4a200b)

* Add metrics for executor creation

(cherry picked from commit 848b6dfbdd)

* Add helper macro for `AddAssign`ing with saturating arithmetic

(cherry picked from commit deb9344e49)

* Use saturating_add_assign macro

(cherry picked from commit 72fc6096a0)

* Consolidate process instruction execution timings to own struct

(cherry picked from commit 390ef0fbcd)

Co-authored-by: Trent Nelson <trent@solana.com>
Co-authored-by: Carl Lin <carl@solana.com>
2022-01-07 09:11:18 +00:00
mergify[bot]
2f97fee71a Cleanup ledger-tool analyze-storage command (#22310) (#22352)
* Make ledger-tool analyze-storage use Blockstore::open()

Opening a large ledger may require setting a larger open file descriptor
limit. Blockstore::open() does this whereas the underlying Database
object that analyze-storage was opening does not.

* Move key_size call lookup to take advantage of traits

* Fix typo where analyze worked on wrong column

* Make analyze-storage analyze all columns

(cherry picked from commit 9f1f64e384)

Co-authored-by: steviez <steven@solana.com>
2022-01-07 07:47:27 +00:00
Justin Starry
3ae674dd28 Increase timeout of local-cluster-slow CI step 2022-01-07 15:31:10 +08:00
mergify[bot]
8214bc9db4 Retain executor cache counts (#22322) (#22341)
(cherry picked from commit f1e2598baa)

Co-authored-by: Jack May <jack@solana.com>
2022-01-06 19:00:29 +00:00
mergify[bot]
1132def37c Split up local cluster tests into separate CI steps (backport #22295) (#22303)
* Split up local cluster tests into separate CI steps (#22295)

* Split up local cluster tests into separate CI steps

* Update buildkite-pipeline.sh

(cherry picked from commit 0e1afcbb26)

# Conflicts:
#	local-cluster/tests/local_cluster.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-06 17:02:45 +00:00
mergify[bot]
7267ebaaf2 Consume from AccountsDataMeter (backport #21994) (#22323)
* Consume from AccountsDataMeter (#21994)

(cherry picked from commit 1460f00e0f)

# Conflicts:
#	program-runtime/src/invoke_context.rs

* fixup! conflicts

* fix tests for v1.9

* fixup! clippy

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-06 17:01:02 +00:00
mergify[bot]
4be6e52a4f cache executors on failed transactions (backport #22308) (#22328)
* cache executors on failed transactions (#22308)

(cherry picked from commit 12e160269e)

# Conflicts:
#	program-runtime/src/invoke_context.rs
#	runtime/src/bank.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2022-01-06 09:14:48 +00:00
mergify[bot]
e7348243b4 [ledger-tool]compare_blocks (#22229) (#22330)
* 1. Made load_credentials accept the credential path as a parameter. 2. Partially implemented the bigtable comparison function

* finding missing blocks in bigtables in a specified range

* refactor compare-blocks, add unit test for missing_blocks and fmt

* compare-block fix last block bug

* refactor compare-block and improve wording

* Update ledger-tool/src/bigtable.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* update compare-block command-line description

* style: improve wording/naming/code style

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit d9220652ad)

Co-authored-by: pieceofr <komimi.p@gmail.com>
2022-01-06 08:55:26 +00:00
mergify[bot]
fc0c74d722 Only sum accounts data len from non-zero lamport accounts (#22309) (#22317)
(cherry picked from commit ab13e39518)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-06 02:43:54 +00:00
mergify[bot]
687cd4779e Add AccountsDataMeter to InvokeContext (#21813) (#22299)
(cherry picked from commit 800472ddf5)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-06 01:31:11 +00:00
mergify[bot]
b28d7050ab Update default --dynamic-port-range values to include some room for additional ports that may be added in the future (#22321)
(cherry picked from commit 37ebd9bd9e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-01-06 01:29:06 +00:00
Michael Vines
6d72acfd6d --dynamic-port-range now requires at least 12 ports 2022-01-05 16:12:28 -08:00
Brooks Prumo
840ec0686e Fix broken build from bpf/tests/programs.rs (#22312)
These tests were broken due to PR #22289
2022-01-05 15:06:15 -06:00
Will Hickey
ba0188a36d Bump version to 1.9.4 (#22304) 2022-01-05 12:02:36 -06:00
mergify[bot]
05b9a2f203 fix(rpc): recreate dead and uncleaned subscriptions (#22281) (#22294)
(cherry picked from commit c1995c647b)

Co-authored-by: Nikita <bananaelecitrus@gmail.com>
2022-01-05 17:16:12 +00:00
mergify[bot]
8578429c4d Refactor: Improve type safety and readability of transaction execution (backport #22215) (#22289)
* Refactor: Improve type safety and readability of transaction execution (#22215)

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-05 23:01:15 +08:00
mergify[bot]
87f4a1f4b6 Bank gets accounts data len delta from MessageProcessor::process_message() (#22288)
(cherry picked from commit 635337d2ff)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-05 05:50:03 +00:00
mergify[bot]
17411f9b4c Add accounts_data_len to Bank (#21781) (#22285)
(cherry picked from commit eeb97fe7ce)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-05 02:24:52 +00:00
mergify[bot]
fb0e5adc7e patches bug in recv_mmsg when npkts != nrecv (backport #22276) (#22280)
* removes total-size from return value of recv_mmsg

(cherry picked from commit 4b24499916)

* patches bug in recv_mmsg when npkts != nrecv

If recv_mmsg receives 2 packets where the first one is filtered out,
then it returns npkts == 1:
https://github.com/solana-labs/solana/blob/01a096adc/streamer/src/recvmmsg.rs#L104-L115

But then streamer::packet::recv_from will erroneously keep the 1st
packet and drop the 2nd one:
https://github.com/solana-labs/solana/blob/01a096adc/streamer/src/packet.rs#L34-L49

To avoid this bug, this commit updates recv_mmsg to always return the
total number of received packets. If the socket address cannot be
correctly obtained, it is left as the default value, which is UNSPECIFIED:
https://github.com/solana-labs/solana/blob/01a096adc/sdk/src/packet.rs#L145
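
A self-contained sketch of the corrected counting behavior (toy types; the real implementation lives in streamer/src/recvmmsg.rs):

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

#[derive(Clone, Copy)]
struct Meta {
    addr: SocketAddr, // defaults to UNSPECIFIED, as in sdk/src/packet.rs
}

// Return the total number of received packets even when some source
// addresses cannot be recovered; those packets keep the UNSPECIFIED
// default instead of shrinking the count and misaligning the batch.
fn recv_mmsg(metas: &mut [Meta], recovered: &[Option<SocketAddr>]) -> usize {
    for (meta, addr) in metas.iter_mut().zip(recovered) {
        if let Some(addr) = addr {
            meta.addr = *addr;
        }
    }
    recovered.len()
}

fn main() {
    let unspecified = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);
    let mut metas = [Meta { addr: unspecified }; 2];
    let peer = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8001);
    // First packet's address is unrecoverable; npkts must still be 2.
    assert_eq!(recv_mmsg(&mut metas, &[None, Some(peer)]), 2);
}
```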

(cherry picked from commit 379feecae5)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-04 23:42:52 +00:00
mergify[bot]
f4ded6fb6b Updates to the address lookup table proposal (#22269) (#22270)
Co-authored-by: Justin Starry <justin@solana.com>
2022-01-04 23:38:51 +00:00
mergify[bot]
f89bf7b939 Compute accounts data len during generate_index() (#21757) (#22277)
(cherry picked from commit ec7e17787e)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-04 22:44:51 +00:00
mergify[bot]
c99aed4abf Update address map proposal to improve dev experience (#21576) (#22283)
* Update address map proposal to improve dev experience

* another revision to match implementation

(cherry picked from commit 0224a8b127)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-04 22:11:59 +00:00
mergify[bot]
edfd8c1717 Fix program log filtering (#22133) (#22151)
(cherry picked from commit c7b0917e1a)

Co-authored-by: Jack May <jack@solana.com>
2022-01-04 21:56:49 +00:00
mergify[bot]
09dbf069e8 Add test to enforce that program id account info for CPI is optional (#22069) (#22103)
* Update tests to demonstrate that program id account info for CPI is optional

* Clean up comments that say that program id account info is required

(cherry picked from commit ec7536faf6)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-04 21:47:48 +00:00
mergify[bot]
9764d4349b Add return types to generate_index() (#21735) (#22275)
(cherry picked from commit 1528f85112)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-01-04 18:27:40 +00:00
mergify[bot]
d84b994451 shrinks size of Packet.Meta (backport #22224) (#22273)
* removes seed and slot fields from Packet.Meta

507367e6ac
updated window-service to send shreds (as opposed to packets) to
retransmit-stage, so the seed and slot fields in Packet.Meta are unused:
https://github.com/solana-labs/solana/blob/d6ec103be/sdk/src/packet.rs#L27-L28

(cherry picked from commit aa9f7ed7e8)

* uses std::net::IpAddr type for Packet.Meta.addr

(cherry picked from commit 73a7741c49)

# Conflicts:
#	streamer/src/streamer.rs

* adds bitflags to Packet.Meta

Instead of a separate bool field for each flag, all the flags can be
encoded type-safely as bitflags in a single u8:
https://github.com/solana-labs/solana/blob/d6ec103be/sdk/src/packet.rs#L19-L31
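
A hedged sketch of that single-u8 encoding with the bitflags crate (the flag names here are illustrative):

```rust
use bitflags::bitflags;

bitflags! {
    // All of Packet.Meta's boolean flags packed type-safely into one u8.
    pub struct PacketFlags: u8 {
        const DISCARD        = 0b0000_0001;
        const FORWARDED      = 0b0000_0010;
        const REPAIR         = 0b0000_0100;
        const SIMPLE_VOTE_TX = 0b0000_1000;
    }
}

fn main() {
    let mut flags = PacketFlags::empty();
    flags.set(PacketFlags::REPAIR, true);
    assert!(flags.contains(PacketFlags::REPAIR));
    assert_eq!(flags.bits(), 0b0000_0100);
}
```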

(cherry picked from commit 01a096adc8)

# Conflicts:
#	sdk/Cargo.toml

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-04 17:38:29 +00:00
mergify[bot]
185f52b712 Lower vote-only-mode to 400 (#22210) (#22272)
(cherry picked from commit 2486e21ffe)

Co-authored-by: sakridge <sakridge@gmail.com>
2022-01-04 15:15:16 +00:00
mergify[bot]
3b59f67562 Limit number of accounts that a transaction can lock (backport #22201) (#22263)
* Limit number of accounts that a transaction can lock (#22201)

(cherry picked from commit 2b5e00d36d)

# Conflicts:
#	accountsdb-plugin-postgres/src/postgres_client/postgres_client_transaction.rs
#	runtime/src/accounts.rs
#	runtime/src/bank.rs
#	sdk/src/feature_set.rs
#	sdk/src/transaction/error.rs
#	storage-proto/proto/transaction_by_addr.proto
#	storage-proto/src/convert.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-04 11:34:34 +00:00
mergify[bot]
7d2589e2ac Documentation typos (#22262) (#22268)
* Fix typo markdown link

* Add missing punctuation full stop

(cherry picked from commit 9665da9d0b)

Co-authored-by: glihm <dev@glihm.net>
2022-01-04 11:15:06 +00:00
mergify[bot]
77558c315d Fixed issue #22124 -- missing historical data if slot updated later. (#22193) (#22259)
* Fixed issue #22124 -- missing historical data if slot updated later.

* Fixed a couple of comments

(cherry picked from commit 5b6027bef0)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2022-01-04 07:18:58 +00:00
mergify[bot]
464d533da3 Flip iter operations to keep associated address/header/packets together (#22245) (#22257)
Flip iter operations to keep associated address/header/packets together

Before this change, if cast_socket_addr() returned None for any
address/header pair, the subsequent zip() would misalign the
address/header pairs and packets. So, this change zips all three
together, then does filter_map() to keep things aligned.

Additionally, compute total_size inline to avoid running through packets
a second time.
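
A minimal sketch of the realignment (toy element types; the real code works over addresses, msghdrs, and packets):

```rust
// Zip all three sequences first, then filter: a None address now drops
// only its own packet instead of shifting every later pairing.
fn align<A: Copy, H: Copy, P: Copy>(
    addrs: &[Option<A>],
    headers: &[H],
    packets: &[P],
) -> Vec<(A, H, P)> {
    addrs
        .iter()
        .zip(headers)
        .zip(packets)
        .filter_map(|((addr, hdr), pkt)| addr.map(|a| (a, *hdr, *pkt)))
        .collect()
}

fn main() {
    let kept = align(&[Some(1), None, Some(3)], &[10, 20, 30], &["a", "b", "c"]);
    assert_eq!(kept, vec![(1, 10, "a"), (3, 30, "c")]);
}
```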

(cherry picked from commit 20b61e28b6)

Co-authored-by: steviez <steven@solana.com>
2022-01-04 07:06:06 +00:00
mergify[bot]
f8bf478fde Fix bug, add error specific timings (#22225) (#22252)
(cherry picked from commit 005592998d)

Co-authored-by: carllin <carl@solana.com>
2022-01-04 02:53:59 +00:00
mergify[bot]
35fb47d1ce removes epoch_authorized_voters from VoteTracker (backport #22192) (#22248)
* removes epoch_authorized_voters from VoteTracker (#22192)

https://github.com/solana-labs/solana/pull/22169
verifies the authorized voter early in the vote-listener pipeline, so
VoteTracker no longer needs to maintain and check for epoch authorized
voters.

(cherry picked from commit 69d71f8f86)

# Conflicts:
#	core/src/cluster_info_vote_listener.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2022-01-04 01:51:24 +00:00
Michael Vines
5bd27dd175 Correctly set CI_COMMIT when Buildkite provides HEAD instead of a real commit 2022-01-03 17:39:49 -08:00
Michael Vines
794f28d9ab Switch from arm64-apple-darwin to aarch64-apple-darwin to align with Rust's target names 2022-01-03 17:16:49 -08:00
Michael Vines
d7a673f7f5 Add support for arm64-apple-darwin release/channel artifacts 2022-01-03 17:16:34 -08:00
Michael Vines
b3fa1288aa Use experimental docker virtualization framework for arm64
(cherry picked from commit ed0b47c6f8)
2022-01-03 16:54:37 -08:00
mergify[bot]
3e4e2e9113 Prevent lookup tables from being closed during deactivation slot (#22221) (#22247)
(cherry picked from commit bbe5b66324)

Co-authored-by: Justin Starry <justin@solana.com>
2022-01-03 23:32:03 +00:00
Michael Vines
fd4754e5a9 Correctly set CI_OS_NAME for macOs buildkite agents 2022-01-03 12:54:57 -08:00
mergify[bot]
0a9460ed8b re-calibrate limit based on mainnet data (backport #21995) (#22212)
* re-calibrate limit based on mainnet data, see issue #21917

(cherry picked from commit d743c2917c)

# Conflicts:
#	runtime/src/block_cost_limits.rs

* set secp256k1 cost similar to sigverify

(cherry picked from commit a2a7e91ad6)

* removes backport merge conflicts

Co-authored-by: Tao Zhu <tao@solana.com>
2022-01-03 19:22:10 +00:00
mergify[bot]
478c641cb5 Fix token-balance owner type in docs (#22240) (#22242)
(cherry picked from commit 9029b46570)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-01-03 18:28:54 +00:00
mergify[bot]
735f000952 Remove Xargo.toml reference (#22239)
(cherry picked from commit 56fd32bda2)

Co-authored-by: Michael Vines <mvines@gmail.com>
2022-01-03 17:39:19 +00:00
Alexander Meißner
264bb903a3 Bump rbpf to v0.2.21 (#22216) (#22217)
(cherry picked from commit 9139be89b7)
2022-01-01 20:42:53 +00:00
mergify[bot]
7c5d3e5874 Exit early on BigTable error (#22200) (#22209)
(cherry picked from commit 0b1b36f088)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-01-01 00:45:18 +00:00
mergify[bot]
70d5b6aeaf Bump solana_rbpf to version v0.2.20 (#22164) (#22207)
(cherry picked from commit 8a43e2d889)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-31 21:57:20 +00:00
mergify[bot]
ca451ea23e checks for authorized voter early on in the vote-listener pipeline (backport #22169) (#22206)
* checks for authorized voter early on in the vote-listener pipeline (#22169)

Before votes are verified as signed by the authorized voter, they might
be dropped in the verified-vote-packets code. If there are enough spam
votes from unauthorized voters, this may drop valid votes but keep the
false ones.
https://github.com/solana-labs/solana/blob/57986f982/core/src/verified_vote_packets.rs#L165-L168

(cherry picked from commit c0c6038654)

# Conflicts:
#	core/src/cluster_info_vote_listener.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-31 21:46:12 +00:00
mergify[bot]
113d261a2c Count compute units even when transaction errors (backport #22182) (#22199)
* Count compute units even when transaction errors (#22182)

(cherry picked from commit d06e6c7425)

# Conflicts:
#	program-runtime/src/invoke_context.rs
#	runtime/src/cost_model.rs
#	runtime/src/message_processor.rs

* Resolve conflicts

Co-authored-by: carllin <carl@solana.com>
2021-12-31 21:14:00 +00:00
mergify[bot]
c6ab915668 chore: update transaction error links in docs (#22189) (#22197)
(cherry picked from commit 4e4577afbe)

Co-authored-by: Jacob Creech <82475023+jacobcreech@users.noreply.github.com>
2021-12-30 22:32:10 +00:00
Pierre
d5c0ffc11f Update install/src/command.rs
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit 29edb130cc)
2021-12-30 07:30:17 -08:00
Arrowana
6a2b62de62 Add connect timeout and change overall timeout to None
(cherry picked from commit 3c1416091e)
2021-12-30 07:30:17 -08:00
Arrowana
4645be3e52 fix: Installer increase download req timeout from 30 seconds to 6 minutes
(cherry picked from commit a1912f8400)
2021-12-30 07:30:17 -08:00
carllin
7efd0391e9 Revert "Count compute units even when transaction errors (backport #22059) (#22154)" (#22175)
This reverts commit 401c542d2a.
2021-12-30 02:39:25 -05:00
mergify[bot]
6a556c5adb Stream additional block metadata via plugin (#22023) (#22179)
* Stream additional block metadata through plugin
blockhash, block_height, block_time, rewards are streamed

(cherry picked from commit f14928a970)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-30 05:44:12 +00:00
mergify[bot]
0cd45400ca Add docs for notifying transactions via plugin (#22097) (#22178)
* Added documentation for streaming transactions via plugin

* Updated comments for transaction info

* Updated doc on transaction format

* Removed a white space

* Apply suggestions from code review from Tyera

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 135af08b8b)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-30 05:10:32 +00:00
mergify[bot]
531f36c571 Don't forward packets received from TPU forwards port (#22078) (#22171)
* Don't forward packets received from TPU forwards port

* Add banking stage test

(cherry picked from commit b1d9a2e60e)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-30 10:42:28 +08:00
mergify[bot]
9c9d3e8b6b discards serialized gossip crds votes if cannot parse tx (backport #22129) (#22172)
* discards serialized gossip crds votes if cannot parse tx (#22129)

(cherry picked from commit c9c78622a8)

# Conflicts:
#	gossip/src/crds_value.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-29 22:38:12 +00:00
mergify[bot]
74b98c2dd4 get_signatures_for_address does not correctly account for result sets that span local and Bigtable sources (#22115) (#22168)
* get_signatures_for_address does not correctly account for result sets that span Blockstore and Bigtable.

This causes Bigtable to return `RowNotFound` until the new tx is uploaded.

Check that `before` exists in Bigtable, and if not, set it to `None` to return the full data set.

References #21442
Closes #22110

* Differentiate between before sig not found and no newer signatures

* Dedupe bigtable results to account for potential upload race

Co-authored-by: Tyera Eulberg <tyera@solana.com>
(cherry picked from commit bac6821e19)

Co-authored-by: Omar Kilani <omar.kilani@gmail.com>
2021-12-29 19:52:36 +00:00
mergify[bot]
9fb67f9b07 Prevent log spam (#22148) (#22152)
(cherry picked from commit f061059e45)

Co-authored-by: carllin <carl@solana.com>
2021-12-29 08:28:48 +00:00
mergify[bot]
401c542d2a Count compute units even when transaction errors (backport #22059) (#22154)
* Count compute units even when transaction errors (#22059)

(cherry picked from commit eaa8c67bde)

# Conflicts:
#	program-runtime/src/invoke_context.rs
#	runtime/src/bank.rs
#	runtime/src/message_processor.rs

* Fix merge conflicts

Co-authored-by: carllin <carl@solana.com>
2021-12-29 08:04:12 +00:00
mergify[bot]
14ed446923 cargo-build-bpf: Add Windows support (#20276) (#22155)
* cargo-build-bpf: Add Windows support

* Update error message

(cherry picked from commit 57986f982a)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-12-29 03:07:39 +00:00
mergify[bot]
adc584ee22 Add (preflight) simulation to BanksClient (#22084) (#22149)
* Add more-legitimate conversion from legacy Transaction to SanitizedTransaction

* Add Banks method with preflight checks

* Expose BanksClient method with preflight checks

* Unwrap simulation err

* Add Bank simulation method that works on unfrozen Banks

* Add simpler api

* Better name: BanksTransactionResultWithSimulation

(cherry picked from commit 422a095647)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-28 22:04:27 +00:00
mergify[bot]
810ca36eae skip reporting all-zero stats (#22054)
(cherry picked from commit 9c5d82557a)

Co-authored-by: Tao Zhu <tao@solana.com>
2021-12-28 07:03:13 +00:00
mergify[bot]
16f821ea8c Ensure AncestorHashesService selects an open port (#21919) (#21997)
(cherry picked from commit 7f6fb6937a)

Co-authored-by: carllin <wumu727@gmail.com>
2021-12-28 06:46:59 +00:00
mergify[bot]
584e9bfbe7 docs: fix typo (#22116) (#22118)
(cherry picked from commit f643a8b425)

Co-authored-by: Samuel Oloruntoba <git@kayandra.co>
2021-12-26 04:36:49 +00:00
mergify[bot]
3ad4c3306c Add PubsubClient::vote_subscribe (#22114)
(cherry picked from commit 0a0fc85282)

Co-authored-by: Kirill Fomichev <fanatid@ya.ru>
2021-12-25 23:19:46 +00:00
mergify[bot]
be0bcd85ed tracks erasure coding shreds' indices explicitly (#21822) (#22094)
The indices for erasure coding shreds are tied to data shreds:
https://github.com/solana-labs/solana/blob/90f41fd9b/ledger/src/shred.rs#L921

However, with the upcoming changes to the erasure schema, there will be
more erasure coding shreds than data shreds and we can no longer infer
coding shred indices from data shreds.

The commit adds constructs to track coding shred indices explicitly.

(cherry picked from commit 65d59f4ef0)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-23 19:38:50 +00:00
Ryan Laursen
8708186760 Update checks.rs
(cherry picked from commit d06c04d02c)
2021-12-23 07:00:26 -08:00
Ryan Laursen
8f3e37c174 Remove msg spam from deploying
(cherry picked from commit 52c1eb0160)
2021-12-23 07:00:26 -08:00
mergify[bot]
7d61935bf1 Bump bpf-tools to v1.21 (#22083)
(cherry picked from commit 7cc6262b5a)

Co-authored-by: Dmitri Makarov <dmakarov@alumni.stanford.edu>
2021-12-23 03:38:04 +00:00
mergify[bot]
a70eb098f4 Fix transaction pk violation (#22057) (#22076)
* Handle PK violation issue for transaction notification. The transaction might be replayed due to
a validator restart.

(cherry picked from commit d6de4a2f4e)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-23 00:33:35 +00:00
mergify[bot]
f31593bfbe Update jsonrpc-api.md to document 'owner' property (#22074) (#22077)
* Update jsonrpc-api.md to document 'owner' property

Documents 'owner' property on the token balances struct.

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 67c8034fe5)

Co-authored-by: splintred <91386295+splintred@users.noreply.github.com>
2021-12-23 00:13:30 +00:00
mergify[bot]
8f26c71964 Fixed a typo in the SQL statement (#21872) (#22075)
* Fixed a typo in the SQL statement

* Fixed two additional errors in the postgres database objects

(cherry picked from commit b610e5503e)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-22 22:55:58 +00:00
mergify[bot]
9fbaaa5102 Increment execution timings on errors as well (#22053) (#22072)
(cherry picked from commit 37f6777ceb)

Co-authored-by: carllin <carl@solana.com>
2021-12-22 22:50:19 +00:00
Will Hickey
78e7913352 Bump version to 1.9.3 (#22065) 2021-12-22 11:41:03 -06:00
Tyera Eulberg
f58b87befe v1.9: bump tarpc from 0.26.2 to 0.27.2 and add BanksClientError (#22055)
* chore: bump tarpc from 0.26.2 to 0.27.2

Bumps [tarpc](https://github.com/google/tarpc) from 0.26.2 to 0.27.2.
- [Release notes](https://github.com/google/tarpc/releases)
- [Changelog](https://github.com/google/tarpc/blob/master/RELEASES.md)
- [Commits](https://github.com/google/tarpc/commits)

---
updated-dependencies:
- dependency-name: tarpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

* Accommodate breaking changes

* Reword incorrect error message

* Add error module

* Revert client Error type to io::Error; easy transition to BanksClientError

* Bump tracing crates in programs

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2021-12-22 03:41:16 +00:00
mergify[bot]
1a2823b875 chore: bump lru from 0.7.0 to 0.7.1 (#22018) (#22056)
Bumps [lru](https://github.com/jeromefroe/lru-rs) from 0.7.0 to 0.7.1.
- [Release notes](https://github.com/jeromefroe/lru-rs/releases)
- [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/jeromefroe/lru-rs/compare/0.7.0...0.7.1)

---
updated-dependencies:
- dependency-name: lru
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
(cherry picked from commit 69d0b08dd8)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-12-21 16:27:43 -07:00
mergify[bot]
75fe0d3ecf Fix #21986 (#22035) (#22049)
* Partial revert "Updates documentation around what needs to be passed in CPI. (#21633)"

* Enforces the program_id being passed explicitly by removing it from get_instruction_keyed_accounts().

* instruction_accounts => instructions_account

(cherry picked from commit ba8e15848e)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-21 17:54:18 +00:00
mergify[bot]
c296a6c9ed The sidebar for the plugin doc was showing the item as "Overview"; corrected the styles (#22033) (#22040)
(cherry picked from commit 2347f65133)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2021-12-21 02:58:53 +00:00
mergify[bot]
57e5406476 Add deactivation cooldown before address lookup tables can be closed (#22011) (#22036)
(cherry picked from commit f5d1115468)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-21 02:10:14 +00:00
mergify[bot]
4f57c4a4fe Fix weird formatting of bullets (#22013) (#22030)
(cherry picked from commit 116517fb6d)

Co-authored-by: Kardashev <96332127+0xkardashev@users.noreply.github.com>
2021-12-20 20:41:18 +00:00
mergify[bot]
c4b3b2865d Update program close docs (#22026) (#22027)
(cherry picked from commit b8eff3456c)

Co-authored-by: Jack May <jack@solana.com>
2021-12-20 18:55:39 +00:00
mergify[bot]
f58c375b1f typo: lanaguage -> language (#22009) (#22015)
(cherry picked from commit e92a81b741)

Co-authored-by: Peter Johnson <peter@geocode.earth>
2021-12-20 07:34:31 +00:00
mergify[bot]
bf41c53f11 chore: add blockSubscribe api docs (#22002) (#22008)
Co-authored-by: Zano <segfaultdoctor@protonmail.com>
(cherry picked from commit df6a4930b9)

Co-authored-by: segfaultdoctor <seg@jito.network>
2021-12-19 16:48:36 +00:00
mergify[bot]
e3a4b98432 removes Select in favor of recv_timeout/try_iter (#21981) (#22001)
crossbeam_channel::Select::ready_timeout might return with success spuriously.
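
Roughly, the replacement pattern looks like this (a sketch, not the actual stage code):

```rust
use crossbeam_channel::{unbounded, Receiver, RecvTimeoutError};
use std::time::Duration;

// Block for one item, then drain whatever else is already queued. Unlike
// Select::ready_timeout, recv_timeout cannot report readiness without
// actually yielding a value.
fn recv_batch<T>(receiver: &Receiver<T>) -> Result<Vec<T>, RecvTimeoutError> {
    let first = receiver.recv_timeout(Duration::from_millis(200))?;
    Ok(std::iter::once(first).chain(receiver.try_iter()).collect())
}

fn main() {
    let (sender, receiver) = unbounded();
    sender.send(1).unwrap();
    sender.send(2).unwrap();
    assert_eq!(recv_batch(&receiver).unwrap(), vec![1, 2]);
}
```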

(cherry picked from commit 7476dfeec0)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-18 19:37:07 +00:00
mergify[bot]
91657ba8fe new net-stats require a new table (#21996) (#22000)
(cherry picked from commit 3fe942ab30)

Co-authored-by: Jeff Biseda <jbiseda@gmail.com>
2021-12-18 10:26:16 +00:00
mergify[bot]
35ee48bec9 RPC Block Subscription (backport #21787) (#21992)
* RPC Block Subscription (#21787)

* add stuff

* compiling

* add notify block

* wip

* feat: add blockSubscribe pubsub method

* address PR comments

Co-authored-by: Lucas B <buffalu@jito.network>
Co-authored-by: Zano <segfaultdoctor@protonmail.com>
(cherry picked from commit 76098dd42a)

# Conflicts:
#	Cargo.lock
#	client-test/Cargo.toml
#	rpc/src/rpc_subscriptions.rs

* Fix conflicts

Co-authored-by: segfaultdoctor <seg@jito.network>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-12-18 01:43:37 +00:00
mergify[bot]
02cfa85214 Update to reed-solomon-erasure 5.0.1, to get simd-accel on M1 macs (#21990)
(cherry picked from commit 5f054cd51b)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-12-18 00:52:21 +00:00
mergify[bot]
02be3a6568 Check file size of snapshot_version when unarchiving snapshot (#21925) (#21983)
(cherry picked from commit 0f6e8d3385)

Co-authored-by: mooori <moritz.zielke@gmail.com>
2021-12-17 21:02:53 +00:00
mergify[bot]
b20fae5a09 simplifies ShredIndex api (#21932) (#21959)
(cherry picked from commit efd64a3862)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-17 19:50:49 +00:00
mergify[bot]
e572678176 removes next_shred_index from return value of entries to shreds api (#21961) (#21980)
next-shred-index is already readily available from the returned data
shreds. The commit simplifies the API ahead of upcoming changes to the
erasure coding schema, which will require explicit tracking of indices
for coding shreds as well as data shreds.
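
In other words, the caller-side derivation is trivial (an illustrative helper, not the exact API):

```rust
// The next shred index falls out of the last returned data shred, so the
// entries-to-shreds API no longer needs to return it separately.
fn next_shred_index(data_shred_indices: &[u32]) -> u32 {
    data_shred_indices.last().map_or(0, |last| last + 1)
}
```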

(cherry picked from commit 89d66c3210)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-17 17:57:57 +00:00
mergify[bot]
f4521002b9 Clean up demote program write lock feature (backport #21949) (#21969)
* Clean up demote program write lock feature (#21949)

* Clean up demote program write lock feature

* fix test

(cherry picked from commit 6ff0be6a82)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs
#	runtime/src/accounts.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-17 04:45:22 +00:00
mergify[bot]
0c5a2bcd5a Update getSignaturesForAddress and getConfirmedSignaturesForAddress2 RPC call description (#21955) (#21960)
* Update jsonrpc-api.md

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Wrap 80chars

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 3398f5a2f5)

Co-authored-by: jdcaballerov <743513+jdcaballerov@users.noreply.github.com>
2021-12-16 20:59:51 +00:00
mergify[bot]
c25d16bf0d adds ErasureSetId identifying erasure coding sets of shreds (backport #21928) (#21946)
* adds ErasureSetId identifying erasure coding sets of shreds (#21928)

(cherry picked from commit 8183f28636)

# Conflicts:
#	ledger/src/blockstore.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-16 17:49:39 +00:00
mergify[bot]
301e38044a Fixes the calculation of the "compute_meter_consumption" across process_instruction() and process_message(). (#21944) (#21945)
(cherry picked from commit 49cb161203)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-16 16:28:28 +00:00
Michael Vines
bfa6302985 Bump version to 1.9.2 2021-12-15 16:18:14 -08:00
Kirill Fomichev
b66e2ae353 add caching_enabled option to test-validator
(cherry picked from commit 5fb7da12f2)
2021-12-15 16:11:51 -08:00
Michael Vines
3967dc8685 rebase 2021-12-15 15:33:45 -08:00
Michael Vines
569c83295d Update argument name
(cherry picked from commit ed924e3bc4)
2021-12-15 15:33:45 -08:00
losman0s
a462c58594 Add option to load accounts from file
This introduces the `--clone-from-file` option for
solana-test-validator. It allows specifying any number of files
(without extension) containing account info and data, which will be
loaded at genesis. This is similar to `--bpf-program` for program
loading.

The files will be searched for in the CWD or in `tests/fixtures`.

Example: `solana-test-validator --clone-from-file SRM_token USD_token`
(cherry picked from commit 9b06d64eb8)

# Conflicts:
#	test-validator/Cargo.toml
2021-12-15 15:33:45 -08:00
losman0s
7dba8bb49f Add complete account dump to file
This commit introduces the ability to dump the complete content of an
account to a JSON file (compact or not depending on the provided format
option).

Example:

```sh
solana account -u m \
  --output json-compact \
  --output-file SRM_token.json \
  SRMuApVNdxXokk5GT7XD5cUUgXMBCoAz2LHeuAoKWRt
```

Note: Behavior remains untouched if format option `--output` is not
provided (only account data gets written to file).

(cherry picked from commit 0e9e67b65d)
2021-12-15 15:33:45 -08:00
mergify[bot]
c907d4444d add accountsdb-plugin-config to test-validator (#21918)
(cherry picked from commit c2a94a8fb0)

Co-authored-by: Kirill Fomichev <fanatid@ya.ru>
2021-12-15 09:48:12 +00:00
Michael Vines
b4c847557b Restore solana_validator::test_validator export
(cherry picked from commit e124659aca)
2021-12-15 00:29:04 -08:00
mergify[bot]
de48347078 Add json support for feature sets; also print output after feature list (#21905) (#21914)
* Add json support for feature sets; also print output after feature list

* Move stringifying into Display implementation

(cherry picked from commit dcd2854829)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-15 07:08:39 +00:00
Michael Vines
9f173d3717 Add helper crate to generate syscalls.txt 2021-12-14 21:34:36 -08:00
Michael Vines
dcd76e484f Update openssl-src package to resolve cargo audit complaint
(cherry picked from commit 7ba27e5cae)
2021-12-14 19:09:26 -08:00
mergify[bot]
2246135654 Document solana_program::instruction (#21817) (#21906)
* Document solana_program::instruction

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit dcb5849484)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-15 00:55:56 +00:00
mergify[bot]
41ea597256 Fix subtraction overflow (#21871) (#21901)
(cherry picked from commit cb395abff7)

Co-authored-by: carllin <carl@solana.com>
2021-12-14 23:22:47 +00:00
Michael Vines
fb955bd4ec Update Cargo.toml 2021-12-14 14:18:20 -08:00
Michael Vines
5c3fbb384f Futures 0.3.18 has been yanked, back off to .17
(cherry picked from commit 2a6dcb2ffd)

# Conflicts:
#	ledger/Cargo.toml
2021-12-14 14:18:20 -08:00
mergify[bot]
a056fd88cb uses Option<Slot> for SlotMeta.parent_slot (backport #21808) (#21899)
* uses Option<Slot> for SlotMeta.parent_slot (#21808)

SlotMeta.parent_slot for the head of a detached chain of slots is
unknown, and that is indicated by u64::MAX, which lacks type safety:
https://github.com/solana-labs/solana/blob/6c108c8fc/ledger/src/blockstore_meta.rs#L203-L205

The commit changes the type to Option<Slot>. Backward compatibility is
maintained by customizing serde serialize/deserialize implementations.
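
A sketch of how such a backward-compatible serde customization can look (helper names are illustrative, not the actual blockstore_meta code):

```rust
use serde::{Deserialize, Deserializer, Serialize, Serializer};

type Slot = u64;

// On the wire the field stays a plain u64, with u64::MAX standing in for
// "unknown", so old and new binaries remain compatible.
fn serialize_parent<S: Serializer>(v: &Option<Slot>, s: S) -> Result<S::Ok, S::Error> {
    v.unwrap_or(Slot::MAX).serialize(s)
}

fn deserialize_parent<'de, D: Deserializer<'de>>(d: D) -> Result<Option<Slot>, D::Error> {
    let slot = Slot::deserialize(d)?;
    Ok((slot != Slot::MAX).then(|| slot))
}

#[derive(Serialize, Deserialize)]
struct SlotMeta {
    #[serde(
        serialize_with = "serialize_parent",
        deserialize_with = "deserialize_parent"
    )]
    parent_slot: Option<Slot>,
    // ... other fields elided ...
}
```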

(cherry picked from commit 8d980f07ba)

# Conflicts:
#	ledger-tool/src/main.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-14 21:42:57 +00:00
mergify[bot]
2f1816d1db adds ShredId uniquely identifying each shred (backport #21820) (#21897)
* adds ShredId uniquely identifying each shred (#21820)

(cherry picked from commit 4ceb2689f5)

# Conflicts:
#	ledger/src/blockstore.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-14 21:03:08 +00:00
mergify[bot]
2cd2f3ba7b Bump rbpf to v0.2.19 (#21880) (#21891)
* Bump rbpf to v0.2.19

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
(cherry picked from commit 509bcd2e74)

Co-authored-by: Jack May <jack@solana.com>
2021-12-14 20:30:31 +00:00
Michael Vines
135dfdbf1e Don't publish rbpf-cli to crates.io 2021-12-14 12:12:19 -08:00
Michael Vines
fad4bfdf2a Don't publish poh-bench to crates.io 2021-12-14 12:10:03 -08:00
mergify[bot]
a9d4728c35 Deserialize accounts before acquiring stakes cache lock (#21733) (#21889)
* Deserialize stored accounts before locking stakes cache

* fix test

(cherry picked from commit 2bbe1d875a)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-14 16:47:01 +00:00
mergify[bot]
3977bcde63 Add missing word "that" (#21878) (#21884)
(cherry picked from commit 746869fdac)

Co-authored-by: Raza <42661870+AlmostEfficient@users.noreply.github.com>
2021-12-14 14:44:48 +00:00
mergify[bot]
cf2a9de19c Add solana-cli-config link to rust-api.md (#21840) (#21874)
(cherry picked from commit 033106ed81)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-14 08:22:13 +00:00
mergify[bot]
5e2b12aee5 Restore ALL behavior; add enum variant, comments, and help text to make behavior clearer (#21854) (#21863)
(cherry picked from commit bed1b143a5)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-14 05:21:25 +00:00
mergify[bot]
6c329e2fd3 Fixup RPC docs (backport #21858) (#21864)
* Remove old notes referring to EOL versions

(cherry picked from commit eebaf89874)

* Add notes about new v1.9 rpc apis

(cherry picked from commit fd212fd2a4)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-12-14 02:46:44 +00:00
mergify[bot]
0376045c7d cli: Order displayed feature list by status (#21810) (#21830)
(cherry picked from commit 1149c1880d)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-13 14:39:47 +00:00
Michael Vines
c1f54c22ed Remove the 5 integer msg! form
(cherry picked from commit c5c699a918)
2021-12-11 12:47:43 -08:00
Lijun Wang
0576d133ad Add Accountsdb plugin documentations (#21746) (#21799)
Add the public-facing documentation about the plugin framework: explaining the interface, how to load a plugin, and the example PostgreSQL plugin implementation.
Updated the Rust documentation for the plugin interfaces for accounts and slot.
These changes are targeted for v1.8. Information about transactions will be updated later.
2021-12-11 11:04:22 -08:00
mergify[bot]
9956afb2bd uses Option<u64> for SlotMeta.last_index (#21775) (#21806)
SlotMeta.last_index may be unknown, and the current code uses u64::MAX to
indicate that:
https://github.com/solana-labs/solana/blob/6c108c8fc/ledger/src/blockstore_meta.rs#L169-L174

This lacks type safety and can introduce bugs if not always checked for.
Several instances of slot_meta.last_index + 1 are also subject to
overflow.

This commit updates the type to Option<u64>. Backward compatibility is
maintained by customizing serde serialize/deserialize implementations.

(cherry picked from commit e08139f949)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-11 17:39:05 +00:00
mergify[bot]
01941cf3de Rename Packets to PacketBatch (backport #21794) (#21805)
* Rename Packets to PacketBatch (#21794)

(cherry picked from commit 254ef3e7b6)

# Conflicts:
#	core/src/verified_vote_packets.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-11 16:53:23 +00:00
Tao Zhu
4b63d51e3e Bump version to 1.9.1 (#21802) 2021-12-11 12:50:36 +00:00
mergify[bot]
5bf4445ae6 Add address lookup table program (backport #21616) (#21789)
* Add address lookup table program (#21616)

* Add address lookup table program

* feedback

(cherry picked from commit 9b41ddd9ba)

# Conflicts:
#	runtime/Cargo.toml

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-11 05:26:46 +00:00
Justin Starry
7782d34bbf Add StakesCache struct to abstract away locking (#21738) (#21796) 2021-12-10 22:38:04 -05:00
mergify[bot]
2c4765e75a Bump solana_rbpf to version v0.2.18 (#21774) (#21786)
(cherry picked from commit a5a0dabe7b)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-11 02:38:03 +00:00
mergify[bot]
e71ea19e60 adds back ErasureMeta::first_coding_index field (#21623) (#21785)
https://github.com/solana-labs/solana/pull/16646
removed first_coding_index since the field is currently redundant and
always equal to fec_set_index.
However, with upcoming changes to the erasure coding schema, this will
no longer be the same as fec_set_index and so requires a separate field
to represent it.

(cherry picked from commit 49ba09b333)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-10 23:14:10 +00:00
mergify[bot]
ed0040d555 Update to Rust 1.57.0 (#21779)
(cherry picked from commit 15a9fa6f53)

Co-authored-by: Steven Czabaniuk <steven@solana.com>
2021-12-10 22:23:48 +00:00
mergify[bot]
da9e6826ac Move type alias and use it more broadly (#21763) (#21777)
(cherry picked from commit 350845c513)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-10 21:44:41 +00:00
mergify[bot]
68fc72a7f4 Add more reporting for invalid stake cache members and prune them (#21654) (#21741)
* Add more reporting for invalid stake cache members

* feedback

(cherry picked from commit 6fc329180b)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-10 18:30:16 +00:00
mergify[bot]
2a6bb2b954 Migrate from address maps to address lookup tables (#21634) (#21773)
* Migrate from address maps to address lookup tables

* update sanitize error

* cargo fmt

* update abi

(cherry picked from commit 6c108c8fc3)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-10 18:10:37 +00:00
mergify[bot]
ef51778c78 Nits in message-processor (#21755) (#21762)
* Fixup typo

* Simplify types slightly

(cherry picked from commit c1386d66e6)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-10 11:24:20 -05:00
mergify[bot]
abecf292a3 Expand docs for Pubkey::create_program_address (#21750) (#21759)
* Expand docs for Pubkey::create_program_address

* Update sdk/program/src/pubkey.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 6919c4863b)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-10 11:23:54 -05:00
Michael Vines
a31660815f rebase 2021-12-09 18:41:47 -08:00
Michael Vines
539ad4bea6 Remove libcurl to prevent wasm-pack segfault in libssl
(cherry picked from commit f32216588d)
2021-12-09 18:41:47 -08:00
Michael Vines
85f601993f Cargo.lock
(cherry picked from commit f4babb7566)

# Conflicts:
#	Cargo.lock
#	programs/bpf/Cargo.lock
2021-12-09 18:41:47 -08:00
Michael Vines
b0754cc575 Add initial wasm bindings for Instruction, SystemProgram and Transaction
(cherry picked from commit a35df1cb02)
2021-12-09 18:41:47 -08:00
Michael Vines
effd0b2547 Add wasm bindings for Hash
(cherry picked from commit 03a956e8d9)
2021-12-09 18:41:47 -08:00
Michael Vines
8836069719 Add wasm bindings for Pubkey and Keypair
(cherry picked from commit 488dc37fec)
2021-12-09 18:41:47 -08:00
mergify[bot]
2698a5c705 AcctIdx: env var to enable testing of disk buckets (#21494) (#21723)
(cherry picked from commit 54862eba0d)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2021-12-09 23:39:06 +00:00
mergify[bot]
dd157fd47f Fixed minor issues with the cluster overview docs which had confused some new users (#21744) (#21745)

(cherry picked from commit 6d18b6bab5)

Co-authored-by: bji <bryan@ischo.com>
2021-12-09 20:41:21 +00:00
mergify[bot]
8cacf82cb8 adds more sanity checks to shreds (#21675) (#21734)
(cherry picked from commit 8063273d09)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-09 18:44:43 +00:00
mergify[bot]
8ee5fbc5c0 simulateTransaction now returns the correct error code if accounts are provided as input (#21716)
(cherry picked from commit 824994db69)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-12-09 01:12:42 +00:00
mergify[bot]
f2a6b94e5c SDK: Add stdlib.h include to pull in abort() (#21700) (#21705)
(cherry picked from commit 923720f529)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-12-08 17:31:11 +00:00
mergify[bot]
ef970bb14a - Implicitly fixes invoke_context.return_data not being reset between instructions in process_message. (#21671) (#21684)
- Lets InvokeContext::process_cross_program_instruction() handle the first invocation depth too.
- Marks InvokeContext::verify(), InvokeContext::verify_and_update() and InvokeContext::process_executable_chain() private.
- Renames InvokeContext::process_cross_program_instruction() to InvokeContext::process_instruction().
- Removes InvokeContext::new_mock_with_sysvars().

(cherry picked from commit 1df88837c8)

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
2021-12-08 10:48:49 +00:00
Jarred Nicholls
cabd851904 Avoid entropy sources when constructing a solana_program::message::Message.
The solana-program crate can be used in certain embedded environments (HSMs) where
the source of entropy, whether used for cryptographic purposes or not, is tightly
controlled. In these cases, using the default OS source of entropy is not always
acceptable. Thus, using the default Rust stdlib entropy source for seeding its
default hasher is prohibited. This means any use of HashMap/HashSet must be able
to be constructed and used with a custom hasher implementation.

This commit removes the use of Itertools::unique() to dedupe Instructions that are
being compiled into a new Message, since it uses a default-configured HashMap
under the hood. Instead, we use a BTreeSet, which does not invoke any entropy
source to seed a hash implementation.
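
A minimal sketch of the entropy-free dedupe (stand-in item type):

```rust
use std::collections::BTreeSet;

// BTreeSet orders by Ord and never seeds a hasher, so deduplication works
// even where OS entropy is unavailable or tightly controlled.
fn dedupe<T: Ord + Copy>(items: &[T]) -> Vec<T> {
    let mut seen = BTreeSet::new();
    items.iter().filter(|item| seen.insert(**item)).copied().collect()
}

fn main() {
    // First occurrences are kept in their original order.
    assert_eq!(dedupe(&[3, 1, 3, 2, 1]), vec![3, 1, 2]);
}
```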

(cherry picked from commit 4da435f2a0)
2021-12-07 22:36:21 -08:00
mergify[bot]
2d2ef59550 Ensure we have keys to activate these features (#21669) (#21674)
(cherry picked from commit 45e56c599d)

Co-authored-by: Sean Young <sean@mess.org>
2021-12-07 23:24:11 +00:00
mergify[bot]
b7b56d5016 Docs: Solflare web/app updates (#21540) (#21668)
* Update Solflare description

* Add Solflare to mobile wallets

* Sort mobile wallets alphabetically

* Sort web wallets alphabetically

* Update docs/src/wallet-guide/apps.md

* Update docs/src/wallet-guide/apps.md

* Update docs/src/wallet-guide/web-wallets.md

* Update docs/src/wallet-guide/web-wallets.md

* Update docs/src/wallet-guide/apps.md

Co-authored-by: Justin Starry <justin.m.starry@gmail.com>
(cherry picked from commit a2477c1f32)

Co-authored-by: Boris Vujicic <turshija@gmail.com>
2021-12-07 16:44:28 +00:00
mergify[bot]
18e3a635b4 docs: Fix SOL staked formula (#21615) (#21667)
Fix the formula on the proposal page: https://docs.solana.com/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards

(cherry picked from commit b57097ef18)

Co-authored-by: Melroy van den Berg <melroy@melroy.org>
2021-12-07 16:01:12 +00:00
mergify[bot]
2b4347d502 Add option to reclaim accounts-cluster-bench accounts/lamports (backport #21656) (#21658)
* Add option to reclaim accounts-cluster-bench accounts/lamports (#21656)

* Add option to reclaim accounts-cluster-bench accounts/lamports

* lint

(cherry picked from commit 205fd95722)

# Conflicts:
#	accounts-cluster-bench/Cargo.toml

* Fix conflict

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-12-07 09:18:48 +00:00
mergify[bot]
87accd16d8 Fixup flaky tests (#21617) (#21647)
* Fixup flaky tests

* Fixup listeners

(cherry picked from commit f493a88258)

Co-authored-by: carllin <carl@solana.com>
2021-12-07 03:54:14 +00:00
mergify[bot]
0e969015fc Add offline and fee-payer utilities to CLI vote module (#21579) (#21649)
* create-vote-account: add offline, nonce, fee_payer capabilities

* vote-authorize: add offline, nonce, fee-payer

* vote-update-things: add offline, nonce, fee-payer

* withdraw-vote: add offline, nonce, fee-payer

* close-vote-acct: add fee-payer

* Allow WithdrawVoteAccount to empty the account, since offline operations cannot perform account state queries as in CloseVoteAccount

* Fix lint

* Update offline-signing docs

* Add some parse unit tests

* Add offline integration test

(cherry picked from commit 873fe81bc0)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-07 01:51:02 +00:00
mergify[bot]
46935c022e Ensure that StakeDelegations and StakeHistory serde (#21640) (#21653)
Add tests to StakeDelegations and StakeHistory to ensure that the outer
types serialize and deserialize correctly to/from the inner types.
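
The shape of such a round-trip test, sketched with a stand-in wrapper type (bincode assumed as the serializer):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(transparent)]
struct StakeHistoryLike(Vec<u64>); // stand-in for the real wrapper type

#[test]
fn outer_type_serde_matches_inner() {
    let outer = StakeHistoryLike(vec![1, 2, 3]);
    let bytes = bincode::serialize(&outer).unwrap();
    // The wrapper must be byte-for-byte compatible with the inner type...
    assert_eq!(bytes, bincode::serialize(&outer.0).unwrap());
    // ...and must deserialize back to an equal value.
    assert_eq!(bincode::deserialize::<StakeHistoryLike>(&bytes).unwrap(), outer);
}
```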

(cherry picked from commit da4015a959)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2021-12-07 01:44:49 +00:00
mergify[bot]
8a7106bc08 Remove activated feature for filtering invalid stakes from rewards (#21641) (#21651)
(cherry picked from commit a1adcb23b6)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-07 00:58:31 +00:00
mergify[bot]
89d2f34a03 Reject vote withdraws that create non-rent-exempt accounts (backport #21639) (#21645)
* Reject vote withdraws that create non-rent-exempt accounts (#21639)

* Reject vote withdraws that create non-rent-exempt accounts

* fix mocked instruction test

(cherry picked from commit e123883b26)

# Conflicts:
#	sdk/src/feature_set.rs

* resolve conflicts

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-07 00:42:01 +00:00
mergify[bot]
b3fa1e4550 Move transaction error code into new module (#21635) (#21638)
(cherry picked from commit 3dab1e711d)

Co-authored-by: Justin Starry <justin@solana.com>
2021-12-06 20:11:20 +00:00
mergify[bot]
58c755e1d4 Rework docs for Pubkey::find_program_address and friends (#21528) (#21637)
* Rework docs for Pubkey::find_program_address and friends

* Remove circular dependency

* Minor tweaks

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Sort solana-program dev-dependencies

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit d1c101cde2)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-06 19:04:35 +00:00
mergify[bot]
60085305b4 Fix spelling of 'Borsh' (#21624)
(cherry picked from commit f3c2803af9)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2021-12-06 05:31:28 +00:00
mergify[bot]
b4c8e095bd adds back position field to coding-shred-header (#21600) (#21620)
https://github.com/solana-labs/solana/pull/17004
removed the position field from coding-shred-header because, as it
stands, the field is redundant and unused.
However, with the upcoming changes to the erasure coding schema, this
field will no longer be redundant and needs to be populated.

(cherry picked from commit cd17f63d81)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-12-05 16:40:22 +00:00
mergify[bot]
3e28ffa884 Bump RpcClient node versions (#21612) (#21613)
* Bump blockhash/fee api check versions

* Bump snapshot api check version

(cherry picked from commit 3e5a5a834f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-12-05 01:08:22 +00:00
406 changed files with 19535 additions and 8188 deletions

.gitignore vendored: 1 line changed

@@ -4,6 +4,7 @@
 /solana-metrics/
 /solana-metrics.tar.bz2
 /target/
+/test-ledger/
 **/*.rs.bk
 .cargo

Cargo.lock generated: 438 lines changed (diff suppressed because it is too large)

Cargo.toml

@@ -46,7 +46,10 @@ members = [
     "poh",
     "poh-bench",
     "program-test",
+    "programs/address-lookup-table",
+    "programs/address-lookup-table-tests",
     "programs/bpf_loader",
+    "programs/bpf_loader/gen-syscall-list",
     "programs/compute-budget",
     "programs/config",
     "programs/stake",

account-decoder/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "solana-account-decoder"
-version = "1.9.0"
+version = "1.9.4"
 description = "Solana account decoder"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,9 @@ lazy_static = "1.4.0"
 serde = "1.0.130"
 serde_derive = "1.0.103"
 serde_json = "1.0.72"
-solana-config-program = { path = "../programs/config", version = "=1.9.0" }
-solana-sdk = { path = "../sdk", version = "=1.9.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
+solana-config-program = { path = "../programs/config", version = "=1.9.4" }
+solana-sdk = { path = "../sdk", version = "=1.9.4" }
+solana-vote-program = { path = "../programs/vote", version = "=1.9.4" }
 spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
 thiserror = "1.0"
 zstd = "0.9.0"

accounts-bench/Cargo.toml

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-accounts-bench"
-version = "1.9.0"
+version = "1.9.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
 [dependencies]
 log = "0.4.14"
 rayon = "1.5.1"
-solana-logger = { path = "../logger", version = "=1.9.0" }
-solana-runtime = { path = "../runtime", version = "=1.9.0" }
-solana-measure = { path = "../measure", version = "=1.9.0" }
-solana-sdk = { path = "../sdk", version = "=1.9.0" }
-solana-version = { path = "../version", version = "=1.9.0" }
+solana-logger = { path = "../logger", version = "=1.9.4" }
+solana-runtime = { path = "../runtime", version = "=1.9.4" }
+solana-measure = { path = "../measure", version = "=1.9.4" }
+solana-sdk = { path = "../sdk", version = "=1.9.4" }
+solana-version = { path = "../version", version = "=1.9.4" }
 clap = "2.33.1"
 [package.metadata.docs.rs]

accounts-cluster-bench/Cargo.toml

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-accounts-cluster-bench"
-version = "1.9.0"
+version = "1.9.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,24 +13,25 @@ clap = "2.33.1"
 log = "0.4.14"
 rand = "0.7.0"
 rayon = "1.5.1"
-solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
-solana-client = { path = "../client", version = "=1.9.0" }
-solana-core = { path = "../core", version = "=1.9.0" }
-solana-faucet = { path = "../faucet", version = "=1.9.0" }
-solana-gossip = { path = "../gossip", version = "=1.9.0" }
-solana-logger = { path = "../logger", version = "=1.9.0" }
-solana-measure = { path = "../measure", version = "=1.9.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
-solana-runtime = { path = "../runtime", version = "=1.9.0" }
-solana-sdk = { path = "../sdk", version = "=1.9.0" }
-solana-streamer = { path = "../streamer", version = "=1.9.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
-solana-version = { path = "../version", version = "=1.9.0" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" }
+solana-client = { path = "../client", version = "=1.9.4" }
+solana-core = { path = "../core", version = "=1.9.4" }
+solana-faucet = { path = "../faucet", version = "=1.9.4" }
+solana-gossip = { path = "../gossip", version = "=1.9.4" }
+solana-logger = { path = "../logger", version = "=1.9.4" }
+solana-measure = { path = "../measure", version = "=1.9.4" }
+solana-net-utils = { path = "../net-utils", version = "=1.9.4" }
+solana-runtime = { path = "../runtime", version = "=1.9.4" }
+solana-sdk = { path = "../sdk", version = "=1.9.4" }
+solana-streamer = { path = "../streamer", version = "=1.9.4" }
+solana-test-validator = { path = "../test-validator", version = "=1.9.4" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
+solana-version = { path = "../version", version = "=1.9.4" }
 spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
 [dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" }
+solana-local-cluster = { path = "../local-cluster", version = "=1.9.4" }
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]

accounts-cluster-bench/src/main.rs

@@ -23,6 +23,7 @@ use {
     solana_streamer::socket::SocketAddrSpace,
     solana_transaction_status::parse_token::spl_token_instruction,
     std::{
+        cmp::min,
         net::SocketAddr,
         process::exit,
         sync::{
@@ -156,24 +157,30 @@ fn make_create_message(
 fn make_close_message(
     keypair: &Keypair,
     base_keypair: &Keypair,
-    max_closed_seed: Arc<AtomicU64>,
+    max_created: Arc<AtomicU64>,
+    max_closed: Arc<AtomicU64>,
     num_instructions: usize,
     balance: u64,
     spl_token: bool,
 ) -> Message {
     let instructions: Vec<_> = (0..num_instructions)
         .into_iter()
-        .map(|_| {
+        .filter_map(|_| {
             let program_id = if spl_token {
                 inline_spl_token::id()
             } else {
                 system_program::id()
             };
-            let seed = max_closed_seed.fetch_add(1, Ordering::Relaxed).to_string();
+            let max_created_seed = max_created.load(Ordering::Relaxed);
+            let max_closed_seed = max_closed.load(Ordering::Relaxed);
+            if max_closed_seed >= max_created_seed {
+                return None;
+            }
+            let seed = max_closed.fetch_add(1, Ordering::Relaxed).to_string();
             let address =
                 Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
             if spl_token {
-                spl_token_instruction(
+                Some(spl_token_instruction(
                     spl_token::instruction::close_account(
                         &spl_token::id(),
                         &spl_token_pubkey(&address),
@@ -182,16 +189,16 @@ fn make_close_message(
                         &[],
                     )
                     .unwrap(),
-                )
+                ))
             } else {
-                system_instruction::transfer_with_seed(
+                Some(system_instruction::transfer_with_seed(
                     &address,
                     &base_keypair.pubkey(),
                     seed,
                     &program_id,
                     &keypair.pubkey(),
                     balance,
-                )
+                ))
             }
         })
         .collect();
@@ -211,6 +218,7 @@ fn run_accounts_bench(
     maybe_lamports: Option<u64>,
     num_instructions: usize,
     mint: Option<Pubkey>,
+    reclaim_accounts: bool,
 ) {
     assert!(num_instructions > 0);
     let client =
@@ -350,6 +358,7 @@ fn run_accounts_bench(
                 let message = make_close_message(
                     payer_keypairs[0],
                     &base_keypair,
+                    seed_tracker.max_created.clone(),
                     seed_tracker.max_closed.clone(),
                     1,
                     min_balance,
@@ -372,7 +381,7 @@
         }
         count += 1;
-        if last_log.elapsed().as_millis() > 3000 {
+        if last_log.elapsed().as_millis() > 3000 || count >= iterations {
             info!(
                 "total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
                 total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
@@ -387,6 +396,83 @@
         }
     }
     executor.close();
+    if reclaim_accounts {
+        let executor = TransactionExecutor::new(entrypoint_addr);
+        loop {
+            let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
+            let max_created_seed = seed_tracker.max_created.load(Ordering::Relaxed);
+            if latest_blockhash.elapsed().as_millis() > 10_000 {
+                blockhash = client.get_latest_blockhash().expect("blockhash");
+                latest_blockhash = Instant::now();
+            }
+            message.recent_blockhash = blockhash;
+            let fee = client
+                .get_fee_for_message(&message)
+                .expect("get_fee_for_message");
+            let sigs_len = executor.num_outstanding();
+            if sigs_len < batch_size && max_closed_seed < max_created_seed {
+                let num_to_close = min(
+                    batch_size - sigs_len,
+                    (max_created_seed - max_closed_seed) as usize,
+                );
+                if num_to_close >= payer_keypairs.len() {
+                    info!("closing {} accounts", num_to_close);
+                    let chunk_size = num_to_close / payer_keypairs.len();
+                    info!("{:?} chunk_size", chunk_size);
+                    if chunk_size > 0 {
+                        for (i, keypair) in payer_keypairs.iter().enumerate() {
+                            let txs: Vec<_> = (0..chunk_size)
+                                .into_par_iter()
+                                .filter_map(|_| {
+                                    let message = make_close_message(
+                                        keypair,
+                                        &base_keypair,
+                                        seed_tracker.max_created.clone(),
+                                        seed_tracker.max_closed.clone(),
+                                        num_instructions,
+                                        min_balance,
+                                        mint.is_some(),
+                                    );
+                                    if message.instructions.is_empty() {
+                                        return None;
+                                    }
+                                    let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
+                                    Some(Transaction::new(&signers, message, blockhash))
+                                })
+                                .collect();
+                            balances[i] = balances[i].saturating_sub(fee * txs.len() as u64);
+                            info!("close txs: {}", txs.len());
+                            let new_ids = executor.push_transactions(txs);
+                            info!("close ids: {}", new_ids.len());
+                            tx_sent_count += new_ids.len();
+                            total_accounts_closed += (num_instructions * new_ids.len()) as u64;
+                        }
+                    }
+                }
+            } else {
+                let _ = executor.drain_cleared();
+            }
+            count += 1;
+            if last_log.elapsed().as_millis() > 3000 || max_closed_seed >= max_created_seed {
+                info!(
+                    "total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
+                    total_accounts_closed, tx_sent_count, count, balances
+                );
+                last_log = Instant::now();
+            }
+            if max_closed_seed >= max_created_seed {
+                break;
+            }
+            if executor.num_outstanding() >= batch_size {
sleep(Duration::from_millis(500));
}
}
executor.close();
}
}
fn main() {
@@ -462,7 +548,7 @@ fn main() {
.long("iterations")
.takes_value(true)
.value_name("NUM")
.help("Number of iterations to make"),
.help("Number of iterations to make. 0 = unlimited iterations."),
)
.arg(
Arg::with_name("check_gossip")
@@ -475,6 +561,12 @@ fn main() {
.takes_value(true)
.help("Mint address to initialize account"),
)
.arg(
Arg::with_name("reclaim_accounts")
.long("reclaim-accounts")
.takes_value(false)
.help("Reclaim accounts after session ends; incompatible with --iterations 0"),
)
.get_matches();
let skip_gossip = !matches.is_present("check_gossip");
@@ -556,6 +648,7 @@ fn main() {
lamports,
num_instructions,
mint,
matches.is_present("reclaim_accounts"),
);
}
@@ -564,12 +657,18 @@ pub mod test {
use {
super::*,
solana_core::validator::ValidatorConfig,
solana_faucet::faucet::run_local_faucet,
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_measure::measure::Measure,
solana_sdk::poh_config::PohConfig,
solana_sdk::{native_token::sol_to_lamports, poh_config::PohConfig},
solana_test_validator::TestValidator,
spl_token::{
solana_program::program_pack::Pack,
state::{Account, Mint},
},
};
#[test]
@@ -605,6 +704,108 @@ pub mod test {
maybe_lamports,
num_instructions,
None,
false,
);
start.stop();
info!("{}", start);
}
#[test]
fn test_create_then_reclaim_spl_token_accounts() {
solana_logger::setup();
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
// Create funder
let funder = Keypair::new();
let latest_blockhash = rpc_client.get_latest_blockhash().unwrap();
let signature = rpc_client
.request_airdrop_with_blockhash(
&funder.pubkey(),
sol_to_lamports(1.0),
&latest_blockhash,
)
.unwrap();
rpc_client
.confirm_transaction_with_spinner(
&signature,
&latest_blockhash,
CommitmentConfig::confirmed(),
)
.unwrap();
// Create Mint
let spl_mint_keypair = Keypair::new();
let spl_mint_len = Mint::get_packed_len();
let spl_mint_rent = rpc_client
.get_minimum_balance_for_rent_exemption(spl_mint_len)
.unwrap();
let transaction = Transaction::new_signed_with_payer(
&[
system_instruction::create_account(
&funder.pubkey(),
&spl_mint_keypair.pubkey(),
spl_mint_rent,
spl_mint_len as u64,
&inline_spl_token::id(),
),
spl_token_instruction(
spl_token::instruction::initialize_mint(
&spl_token::id(),
&spl_token_pubkey(&spl_mint_keypair.pubkey()),
&spl_token_pubkey(&spl_mint_keypair.pubkey()),
None,
2,
)
.unwrap(),
),
],
Some(&funder.pubkey()),
&[&funder, &spl_mint_keypair],
latest_blockhash,
);
let _sig = rpc_client
.send_and_confirm_transaction(&transaction)
.unwrap();
let account_len = Account::get_packed_len();
let minimum_balance = rpc_client
.get_minimum_balance_for_rent_exemption(account_len)
.unwrap();
let iterations = 5;
let batch_size = 100;
let close_nth_batch = 0;
let num_instructions = 4;
let mut start = Measure::start("total accounts run");
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
run_accounts_bench(
test_validator
.rpc_url()
.replace("http://", "")
.parse()
.unwrap(),
faucet_addr,
&[&keypair0, &keypair1, &keypair2],
iterations,
Some(account_len as u64),
batch_size,
close_nth_batch,
Some(minimum_balance),
num_instructions,
Some(spl_mint_keypair.pubkey()),
true,
);
start.stop();
info!("{}", start);

View File

@@ -3,17 +3,17 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-interface"
description = "The Solana AccountsDb plugin interface."
version = "1.9.0"
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
documentation = "https://docs.rs/solana-accountsdb-plugin-interface"
[dependencies]
log = "0.4.11"
thiserror = "1.0.30"
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -3,8 +3,8 @@
/// In addition, the dynamic library must export a "C" function _create_plugin which
/// creates the implementation of the plugin.
use {
solana_sdk::{signature::Signature, transaction::SanitizedTransaction},
solana_transaction_status::TransactionStatusMeta,
solana_sdk::{clock::UnixTimestamp, signature::Signature, transaction::SanitizedTransaction},
solana_transaction_status::{Reward, TransactionStatusMeta},
std::{any::Any, error, io},
thiserror::Error,
};
@@ -12,54 +12,117 @@ use {
impl Eq for ReplicaAccountInfo<'_> {}
#[derive(Clone, PartialEq, Debug)]
/// Information about an account being updated
pub struct ReplicaAccountInfo<'a> {
/// The Pubkey for the account
pub pubkey: &'a [u8],
/// The lamports for the account
pub lamports: u64,
/// The Pubkey of the owner program account
pub owner: &'a [u8],
/// This account's data contains a loaded program (and is now read-only)
pub executable: bool,
/// The epoch at which this account will next owe rent
pub rent_epoch: u64,
/// The data held in this account.
pub data: &'a [u8],
/// A global, monotonically increasing atomic number that can be used
/// to determine the order of account updates. For example, when an
/// account is updated multiple times in the same slot, the update
/// with the higher write_version should supersede the one with the
/// lower write_version.
pub write_version: u64,
}
/// A wrapper to future-proof ReplicaAccountInfo handling.
/// If there were a change to the structure of ReplicaAccountInfo,
there would be a new enum entry for the newer version, forcing
/// plugin implementations to handle the change.
pub enum ReplicaAccountInfoVersions<'a> {
V0_0_1(&'a ReplicaAccountInfo<'a>),
}
/// Information about a transaction
#[derive(Clone, Debug)]
pub struct ReplicaTransactionInfo<'a> {
/// The first signature of the transaction, used for identifying the transaction.
pub signature: &'a Signature,
/// Indicates if the transaction is a simple vote transaction.
pub is_vote: bool,
/// The sanitized transaction.
pub transaction: &'a SanitizedTransaction,
/// Metadata of the transaction status.
pub transaction_status_meta: &'a TransactionStatusMeta,
}
/// A wrapper to future-proof ReplicaTransactionInfo handling.
/// If there were a change to the structure of ReplicaTransactionInfo,
there would be a new enum entry for the newer version, forcing
/// plugin implementations to handle the change.
pub enum ReplicaTransactionInfoVersions<'a> {
V0_0_1(&'a ReplicaTransactionInfo<'a>),
}
#[derive(Clone, Debug)]
pub struct ReplicaBlockInfo<'a> {
pub slot: u64,
pub blockhash: &'a str,
pub rewards: &'a [Reward],
pub block_time: Option<UnixTimestamp>,
pub block_height: Option<u64>,
}
pub enum ReplicaBlockInfoVersions<'a> {
V0_0_1(&'a ReplicaBlockInfo<'a>),
}
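All three *Versions wrappers follow the same pattern: today's plugins match on V0_0_1, and a future structural change would add a variant that makes the match non-exhaustive, so the compiler forces every plugin to handle it. A minimal sketch of consuming the block wrapper (the handler name is illustrative, not part of the interface):
fn on_block(block_info: ReplicaBlockInfoVersions) {
    match block_info {
        ReplicaBlockInfoVersions::V0_0_1(info) => {
            // Only fields from the V0_0_1 layout are available here.
            println!("slot={} blockhash={}", info.slot, info.blockhash);
        }
    }
}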
/// Errors returned by plugin calls
#[derive(Error, Debug)]
pub enum AccountsDbPluginError {
/// Error opening the configuration file; for example, when the file
/// is not found or when the validator process has no permission to read it.
#[error("Error opening config file. Error detail: ({0}).")]
ConfigFileOpenError(#[from] io::Error),
/// Error in reading the content of the config file or the content
/// is not in the expected format.
#[error("Error reading config file. Error message: ({msg})")]
ConfigFileReadError { msg: String },
/// Error when updating the account.
#[error("Error updating account. Error message: ({msg})")]
AccountsUpdateError { msg: String },
/// Error when updating the slot status
#[error("Error updating slot status. Error message: ({msg})")]
SlotStatusUpdateError { msg: String },
/// Any custom error defined by the plugin.
#[error("Plugin-defined custom error. Error message: ({0})")]
Custom(Box<dyn error::Error + Send + Sync>),
}
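Of these variants, only Custom is open-ended. A short, hypothetical sketch of a plugin wrapping its own failure in it (Result<T> is the alias defined just below in this file):
use std::io;

// Hypothetical: any error that is `Error + Send + Sync` can be boxed into Custom.
fn open_data_store() -> Result<()> {
    Err(AccountsDbPluginError::Custom(Box::new(io::Error::new(
        io::ErrorKind::Other,
        "data store unavailable",
    ))))
}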
/// The current status of a slot
#[derive(Debug, Clone)]
pub enum SlotStatus {
/// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is
/// not derived from a confirmed or finalized block, but if multiple forks are present, it is
/// from the fork the validator believes is most likely to finalize.
Processed,
/// The highest slot having reached max vote lockout.
Rooted,
/// The highest slot that has been voted on by a supermajority of the cluster, i.e. it is confirmed.
Confirmed,
}
@@ -75,6 +138,9 @@ impl SlotStatus {
pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;
/// Defines an AccountsDb plugin, to stream data from the runtime.
/// AccountsDb plugins must describe desired behavior for load and unload,
/// as well as how they will handle streamed data.
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
fn name(&self) -> &'static str;
@@ -93,6 +159,9 @@ pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
fn on_unload(&mut self) {}
/// Called when an account is updated at a slot.
/// When `is_startup` is true, it indicates the account is loaded from
/// snapshots when the validator starts up. When `is_startup` is false,
/// the account is updated during transaction processing.
#[allow(unused_variables)]
fn update_account(
&mut self,
@@ -129,6 +198,12 @@ pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
Ok(())
}
/// Called when block's metadata is updated.
#[allow(unused_variables)]
fn notify_block_metadata(&mut self, blockinfo: ReplicaBlockInfoVersions) -> Result<()> {
Ok(())
}
/// Check if the plugin is interested in account data
/// Default is true -- if the plugin is not interested in
/// account data, please return false.
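Putting the trait together, a bare-bones sketch of a plugin that only consumes block metadata, assuming the remaining callbacks keep the default bodies shown above (e.g. notify_block_metadata defaults to Ok(())); the type name is hypothetical:
#[derive(Debug)]
struct BlockMetadataLogger;

impl AccountsDbPlugin for BlockMetadataLogger {
    fn name(&self) -> &'static str {
        "BlockMetadataLogger"
    }

    // Same signature as the default method shown above.
    fn notify_block_metadata(&mut self, blockinfo: ReplicaBlockInfoVersions) -> Result<()> {
        let ReplicaBlockInfoVersions::V0_0_1(info) = blockinfo;
        println!("slot={} height={:?}", info.slot, info.block_height);
        Ok(())
    }
}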

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.9.0"
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,14 +17,14 @@ log = "0.4.11"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.4" }
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-measure = { path = "../measure", version = "=1.9.4" }
solana-metrics = { path = "../metrics", version = "=1.9.4" }
solana-rpc = { path = "../rpc", version = "=1.9.4" }
solana-runtime = { path = "../runtime", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
thiserror = "1.0.30"
[package.metadata.docs.rs]

View File

@@ -2,6 +2,8 @@ use {
crate::{
accounts_update_notifier::AccountsUpdateNotifierImpl,
accountsdb_plugin_manager::AccountsDbPluginManager,
block_metadata_notifier::BlockMetadataNotifierImpl,
block_metadata_notifier_interface::BlockMetadataNotifierLock,
slot_status_notifier::SlotStatusNotifierImpl, slot_status_observer::SlotStatusObserver,
transaction_notifier::TransactionNotifierImpl,
},
@@ -50,6 +52,7 @@ pub struct AccountsDbPluginService {
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
accounts_update_notifier: Option<AccountsUpdateNotifier>,
transaction_notifier: Option<TransactionNotifierLock>,
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
}
impl AccountsDbPluginService {
@@ -102,17 +105,24 @@ impl AccountsDbPluginService {
None
};
let slot_status_observer =
if account_data_notifications_enabled || transaction_notifications_enabled {
let slot_status_notifier = SlotStatusNotifierImpl::new(plugin_manager.clone());
let slot_status_notifier = Arc::new(RwLock::new(slot_status_notifier));
let (slot_status_observer, block_metadata_notifier): (
Option<SlotStatusObserver>,
Option<BlockMetadataNotifierLock>,
) = if account_data_notifications_enabled || transaction_notifications_enabled {
let slot_status_notifier = SlotStatusNotifierImpl::new(plugin_manager.clone());
let slot_status_notifier = Arc::new(RwLock::new(slot_status_notifier));
(
Some(SlotStatusObserver::new(
confirmed_bank_receiver,
slot_status_notifier,
))
} else {
None
};
)),
Some(Arc::new(RwLock::new(BlockMetadataNotifierImpl::new(
plugin_manager.clone(),
)))),
)
} else {
(None, None)
};
info!("Started AccountsDbPluginService");
Ok(AccountsDbPluginService {
@@ -120,6 +130,7 @@ impl AccountsDbPluginService {
plugin_manager,
accounts_update_notifier,
transaction_notifier,
block_metadata_notifier,
})
}
@@ -186,6 +197,10 @@ impl AccountsDbPluginService {
self.transaction_notifier.clone()
}
pub fn get_block_metadata_notifier(&self) -> Option<BlockMetadataNotifierLock> {
self.block_metadata_notifier.clone()
}
pub fn join(self) -> thread::Result<()> {
if let Some(mut slot_status_observer) = self.slot_status_observer {
slot_status_observer.join()?;

View File

@@ -0,0 +1,105 @@
use {
crate::{
accountsdb_plugin_manager::AccountsDbPluginManager,
block_metadata_notifier_interface::BlockMetadataNotifier,
},
log::*,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
ReplicaBlockInfo, ReplicaBlockInfoVersions,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_runtime::bank::RewardInfo,
solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
solana_transaction_status::{Reward, Rewards},
std::sync::{Arc, RwLock},
};
pub(crate) struct BlockMetadataNotifierImpl {
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
}
impl BlockMetadataNotifier for BlockMetadataNotifierImpl {
/// Notify plugins of updated block metadata
fn notify_block_metadata(
&self,
slot: u64,
blockhash: &str,
rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
block_time: Option<UnixTimestamp>,
block_height: Option<u64>,
) {
let mut plugin_manager = self.plugin_manager.write().unwrap();
if plugin_manager.plugins.is_empty() {
return;
}
let rewards = Self::build_rewards(rewards);
for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-update-slot");
let block_info =
Self::build_replica_block_info(slot, blockhash, &rewards, block_time, block_height);
let block_info = ReplicaBlockInfoVersions::V0_0_1(&block_info);
match plugin.notify_block_metadata(block_info) {
Err(err) => {
error!(
"Failed to update block metadata at slot {} to plugin {}, error: {}",
slot,
plugin.name(),
err
)
}
Ok(_) => {
trace!(
"Successfully updated block metadata at slot {} to plugin {}",
slot,
plugin.name()
);
}
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-update-block-metadata-us",
measure.as_us() as usize,
1000,
1000
);
}
}
}
impl BlockMetadataNotifierImpl {
fn build_rewards(rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>) -> Rewards {
let rewards = rewards.read().unwrap();
rewards
.iter()
.map(|(pubkey, reward)| Reward {
pubkey: pubkey.to_string(),
lamports: reward.lamports,
post_balance: reward.post_balance,
reward_type: Some(reward.reward_type),
commission: reward.commission,
})
.collect()
}
fn build_replica_block_info<'a>(
slot: u64,
blockhash: &'a str,
rewards: &'a [Reward],
block_time: Option<UnixTimestamp>,
block_height: Option<u64>,
) -> ReplicaBlockInfo<'a> {
ReplicaBlockInfo {
slot,
blockhash,
rewards,
block_time,
block_height,
}
}
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
Self { plugin_manager }
}
}

View File

@@ -0,0 +1,20 @@
use {
solana_runtime::bank::RewardInfo,
solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
std::sync::{Arc, RwLock},
};
/// Interface for notifying block metadata changes
pub trait BlockMetadataNotifier {
/// Notify listeners of updated block metadata
fn notify_block_metadata(
&self,
slot: u64,
blockhash: &str,
rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
block_time: Option<UnixTimestamp>,
block_height: Option<u64>,
);
}
pub type BlockMetadataNotifierLock = Arc<RwLock<dyn BlockMetadataNotifier + Sync + Send>>;

View File

@@ -1,6 +1,8 @@
pub mod accounts_update_notifier;
pub mod accountsdb_plugin_manager;
pub mod accountsdb_plugin_service;
pub mod block_metadata_notifier;
pub mod block_metadata_notifier_interface;
pub mod slot_status_notifier;
pub mod slot_status_observer;
pub mod transaction_notifier;

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.9.0"
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -22,18 +22,18 @@ postgres-types = { version = "0.2.2", features = ["derive"] }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.4" }
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-measure = { path = "../measure", version = "=1.9.4" }
solana-metrics = { path = "../metrics", version = "=1.9.4" }
solana-runtime = { path = "../runtime", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
thiserror = "1.0.30"
tokio-postgres = "0.7.4"
[dev-dependencies]
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -47,7 +47,14 @@ Create TYPE "TransactionErrorCode" AS ENUM (
'WouldExceedMaxAccountCostLimit',
'WouldExceedMaxBlockCostLimit',
'UnsupportedVersion',
'InvalidWritableAccount'
'InvalidWritableAccount',
'WouldExceedMaxAccountDataCostLimit',
'TooManyAccountLocks',
'AddressLookupError',
'AddressLookupTableNotFound',
'InvalidAddressLookupTableOwner',
'InvalidAddressLookupTableData',
'InvalidAddressLookupTableIndex'
);
CREATE TYPE "TransactionError" AS (
@@ -113,9 +120,10 @@ CREATE TYPE "TransactionMessage" AS (
instructions "CompiledInstruction"[]
);
CREATE TYPE "AddressMapIndexes" AS (
writable SMALLINT[],
readonly SMALLINT[]
CREATE TYPE "TransactionMessageAddressTableLookup" AS (
account_key BYTEA,
writable_indexes SMALLINT[],
readonly_indexes SMALLINT[]
);
CREATE TYPE "TransactionMessageV0" AS (
@@ -123,17 +131,17 @@ CREATE TYPE "TransactionMessageV0" AS (
account_keys BYTEA[],
recent_blockhash BYTEA,
instructions "CompiledInstruction"[],
address_map_indexes "AddressMapIndexes"[]
address_table_lookups "TransactionMessageAddressTableLookup"[]
);
CREATE TYPE "MappedAddresses" AS (
CREATE TYPE "LoadedAddresses" AS (
writable BYTEA[],
readonly BYTEA[]
);
CREATE TYPE "MappedMessage" AS (
CREATE TYPE "LoadedMessageV0" AS (
message "TransactionMessageV0",
mapped_addresses "MappedAddresses"
loaded_addresses "LoadedAddresses"
);
-- The table storing transactions
@@ -143,7 +151,7 @@ CREATE TABLE transaction (
is_vote BOOL NOT NULL,
message_type SMALLINT, -- 0: legacy, 1: v0 message
legacy_message "TransactionMessage",
v0_mapped_message "MappedMessage",
v0_loaded_message "LoadedMessageV0",
signatures BYTEA[],
message_hash BYTEA,
meta "TransactionStatusMeta",
@@ -151,6 +159,16 @@ CREATE TABLE transaction (
CONSTRAINT transaction_pk PRIMARY KEY (slot, signature)
);
-- The table storing block metadata
CREATE TABLE block (
slot BIGINT PRIMARY KEY,
blockhash VARCHAR(44),
rewards "Reward"[],
block_time BIGINT,
block_height BIGINT,
updated_on TIMESTAMP NOT NULL
);
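Since block is keyed by slot, each metadata notification maps to one row. A hypothetical read-back helper using the synchronous postgres client this plugin already depends on (table and column names as defined above; the helper itself is not part of the plugin):
use postgres::Client;

// Hypothetical helper: fetch the most recently stored block row.
fn latest_block(client: &mut Client) -> Result<(), postgres::Error> {
    if let Some(row) = client.query_opt(
        "SELECT slot, blockhash, block_height FROM block ORDER BY slot DESC LIMIT 1",
        &[],
    )? {
        let slot: i64 = row.get(0);
        let blockhash: Option<String> = row.get(1);
        let block_height: Option<i64> = row.get(2);
        println!("slot={} blockhash={:?} height={:?}", slot, blockhash, block_height);
    }
    Ok(())
}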
/**
* The following is for keeping historical data for accounts and is not required for plugin to work.
*/

View File

@@ -8,15 +8,16 @@ DROP TABLE account_audit;
DROP TABLE account;
DROP TABLE slot;
DROP TABLE transaction;
DROP TABLE block;
DROP TYPE "TransactionError" CASCADE;
DROP TYPE "TransactionErrorCode" CASCADE;
DROP TYPE "MappedMessage" CASCADE;
DROP TYPE "MappedAddresses" CASCADE;
DROP TYPE "LoadedMessageV0" CASCADE;
DROP TYPE "LoadedAddresses" CASCADE;
DROP TYPE "TransactionMessageV0" CASCADE;
DROP TYPE "AddressMapIndexes" CASCADE;
DROP TYPE "TransactionMessage" CASCADE;
DROP TYPE "TransactionMessageHeader" CASCADE;
DROP TYPE "TransactionMessageAddressTableLookup" CASCADE;
DROP TYPE "TransactionStatusMeta" CASCADE;
DROP TYPE "RewardType" CASCADE;
DROP TYPE "Reward" CASCADE;

View File

@@ -12,7 +12,7 @@ use {
serde_json,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions,
ReplicaTransactionInfoVersions, Result, SlotStatus,
ReplicaBlockInfoVersions, ReplicaTransactionInfoVersions, Result, SlotStatus,
},
solana_metrics::*,
std::{fs::File, io::Read},
@@ -41,6 +41,8 @@ pub struct AccountsDbPluginPostgresConfig {
pub threads: Option<usize>,
pub batch_size: Option<usize>,
pub panic_on_db_errors: Option<bool>,
/// Indicates whether to store historical data for accounts
pub store_account_historical_data: Option<bool>,
}
#[derive(Error, Debug)]
@@ -74,7 +76,7 @@ impl AccountsDbPlugin for AccountsDbPluginPostgres {
/// Accounts satisfying either the accounts condition or the owners condition will be selected.
/// When only owners is specified,
/// all accounts belonging to the owners will be streamed.
/// The accounts field support wildcard to select all accounts:
/// The accounts field supports wildcard to select all accounts:
/// "accounts_selector" : {
/// "accounts" : \["*"\],
/// }
@@ -85,6 +87,8 @@ impl AccountsDbPlugin for AccountsDbPluginPostgres {
/// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
/// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
/// `host` and `user` must be given.
/// "store_account_historical_data", optional, set it to 'true', to store historical account data to account_audit
/// table.
/// * "threads" optional, specifies the number of worker threads for the plugin. A thread
/// maintains a PostgreSQL connection to the server. The default is '10'.
/// * "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created
@@ -334,6 +338,31 @@ impl AccountsDbPlugin for AccountsDbPluginPostgres {
Ok(())
}
fn notify_block_metadata(&mut self, block_info: ReplicaBlockInfoVersions) -> Result<()> {
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => match block_info {
ReplicaBlockInfoVersions::V0_0_1(block_info) => {
let result = client.update_block_metadata(block_info);
if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to persist the update of block metadata to the PostgreSQL database. Error: {:?}", err)
});
}
}
},
}
Ok(())
}
/// Check if the plugin is interested in account data
/// Default is true -- if the plugin is not interested in
/// account data, please return false.

View File

@@ -1,4 +1,6 @@
#![allow(clippy::integer_arithmetic)]
mod postgres_client_block_metadata;
mod postgres_client_transaction;
/// A concurrent implementation for writing accounts into the PostgreSQL in parallel.
@@ -10,9 +12,10 @@ use {
crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
log::*,
postgres::{Client, NoTls, Statement},
postgres_client_block_metadata::DbBlockInfo,
postgres_client_transaction::LogTransactionRequest,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
AccountsDbPluginError, ReplicaAccountInfo, ReplicaBlockInfo, SlotStatus,
},
solana_measure::measure::Measure,
solana_metrics::*,
@@ -36,6 +39,7 @@ const DEFAULT_THREADS_COUNT: usize = 100;
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
const ACCOUNT_COLUMN_COUNT: usize = 9;
const DEFAULT_PANIC_ON_DB_ERROR: bool = false;
const DEFAULT_STORE_ACCOUNT_HISTORICAL_DATA: bool = false;
struct PostgresSqlClientWrapper {
client: Client,
@@ -44,6 +48,8 @@ struct PostgresSqlClientWrapper {
update_slot_with_parent_stmt: Statement,
update_slot_without_parent_stmt: Statement,
update_transaction_log_stmt: Statement,
update_block_metadata_stmt: Statement,
insert_account_audit_stmt: Option<Statement>,
}
pub struct SimplePostgresClient {
@@ -195,6 +201,11 @@ pub trait PostgresClient {
&mut self,
transaction_log_info: LogTransactionRequest,
) -> Result<(), AccountsDbPluginError>;
fn update_block_metadata(
&mut self,
block_info: UpdateBlockMetadataRequest,
) -> Result<(), AccountsDbPluginError>;
}
impl SimplePostgresClient {
@@ -315,6 +326,28 @@ impl SimplePostgresClient {
}
}
fn build_account_audit_insert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO account_audit (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the account_audit update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}
fn build_slot_upsert_statement_with_parent(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
@@ -361,8 +394,8 @@ impl SimplePostgresClient {
}
}
/// Internal function for updating or inserting a single account
fn upsert_account_internal(
/// Internal function for inserting an account into account_audit table.
fn insert_account_audit(
account: &DbAccountInfo,
statement: &Statement,
client: &mut Client,
@@ -370,7 +403,43 @@ impl SimplePostgresClient {
let lamports = account.lamports() as i64;
let rent_epoch = account.rent_epoch() as i64;
let updated_on = Utc::now().naive_utc();
let result = client.query(
let result = client.execute(
statement,
&[
&account.pubkey(),
&account.slot,
&account.owner(),
&lamports,
&account.executable(),
&rent_epoch,
&account.data(),
&account.write_version(),
&updated_on,
],
);
if let Err(err) = result {
let msg = format!(
"Failed to persist the insert of account_audit to the PostgreSQL database. Error: {:?}",
err
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
Ok(())
}
/// Internal function for updating or inserting a single account
fn upsert_account_internal(
account: &DbAccountInfo,
statement: &Statement,
client: &mut Client,
insert_account_audit_stmt: &Option<Statement>,
) -> Result<(), AccountsDbPluginError> {
let lamports = account.lamports() as i64;
let rent_epoch = account.rent_epoch() as i64;
let updated_on = Utc::now().naive_utc();
let result = client.execute(
statement,
&[
&account.pubkey(),
@@ -392,6 +461,11 @@ impl SimplePostgresClient {
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
} else if result.unwrap() == 0 && insert_account_audit_stmt.is_some() {
// If no records were modified (inserted or updated), the account was updated at an
// older slot; insert the record directly into the account_audit table.
let statement = insert_account_audit_stmt.as_ref().unwrap();
Self::insert_account_audit(account, statement, client)?;
}
Ok(())
@@ -400,9 +474,10 @@ impl SimplePostgresClient {
/// Update or insert a single account
fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
let client = self.client.get_mut().unwrap();
let insert_account_audit_stmt = &client.insert_account_audit_stmt;
let statement = &client.update_account_stmt;
let client = &mut client.client;
Self::upsert_account_internal(account, statement, client)
Self::upsert_account_internal(account, statement, client, insert_account_audit_stmt)
}
/// Insert accounts in batch to reduce network overhead
@@ -478,11 +553,12 @@ impl SimplePostgresClient {
}
let client = self.client.get_mut().unwrap();
let insert_account_audit_stmt = &client.insert_account_audit_stmt;
let statement = &client.update_account_stmt;
let client = &mut client.client;
for account in self.pending_account_updates.drain(..) {
Self::upsert_account_internal(&account, statement, client)?;
Self::upsert_account_internal(&account, statement, client, insert_account_audit_stmt)?;
}
Ok(())
@@ -501,10 +577,24 @@ impl SimplePostgresClient {
Self::build_slot_upsert_statement_without_parent(&mut client, config)?;
let update_transaction_log_stmt =
Self::build_transaction_info_upsert_statement(&mut client, config)?;
let update_block_metadata_stmt =
Self::build_block_metadata_upsert_statement(&mut client, config)?;
let batch_size = config
.batch_size
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
let store_account_historical_data = config
.store_account_historical_data
.unwrap_or(DEFAULT_STORE_ACCOUNT_HISTORICAL_DATA);
let insert_account_audit_stmt = if store_account_historical_data {
let stmt = Self::build_account_audit_insert_statement(&mut client, config)?;
Some(stmt)
} else {
None
};
info!("Created SimplePostgresClient.");
Ok(Self {
batch_size,
@@ -516,6 +606,8 @@ impl SimplePostgresClient {
update_slot_with_parent_stmt,
update_slot_without_parent_stmt,
update_transaction_log_stmt,
update_block_metadata_stmt,
insert_account_audit_stmt,
}),
})
}
@@ -591,6 +683,13 @@ impl PostgresClient for SimplePostgresClient {
) -> Result<(), AccountsDbPluginError> {
self.log_transaction_impl(transaction_log_info)
}
fn update_block_metadata(
&mut self,
block_info: UpdateBlockMetadataRequest,
) -> Result<(), AccountsDbPluginError> {
self.update_block_metadata_impl(block_info)
}
}
struct UpdateAccountRequest {
@@ -604,11 +703,16 @@ struct UpdateSlotRequest {
slot_status: SlotStatus,
}
pub struct UpdateBlockMetadataRequest {
pub block_info: DbBlockInfo,
}
#[warn(clippy::large_enum_variant)]
enum DbWorkItem {
UpdateAccount(Box<UpdateAccountRequest>),
UpdateSlot(Box<UpdateSlotRequest>),
LogTransaction(Box<LogTransactionRequest>),
UpdateBlockMetadata(Box<UpdateBlockMetadataRequest>),
}
impl PostgresClientWorker {
@@ -672,6 +776,14 @@ impl PostgresClientWorker {
DbWorkItem::LogTransaction(transaction_log_info) => {
self.client.log_transaction(*transaction_log_info)?;
}
DbWorkItem::UpdateBlockMetadata(block_info) => {
if let Err(err) = self.client.update_block_metadata(*block_info) {
error!("Failed to update block metadata: ({})", err);
if panic_on_db_errors {
abort();
}
}
}
},
Err(err) => match err {
RecvTimeoutError::Timeout => {
@@ -863,6 +975,25 @@ impl ParallelPostgresClient {
Ok(())
}
pub fn update_block_metadata(
&mut self,
block_info: &ReplicaBlockInfo,
) -> Result<(), AccountsDbPluginError> {
if let Err(err) = self.sender.send(DbWorkItem::UpdateBlockMetadata(Box::new(
UpdateBlockMetadataRequest {
block_info: DbBlockInfo::from(block_info),
},
))) {
return Err(AccountsDbPluginError::SlotStatusUpdateError {
msg: format!(
"Failed to update the block metadata at slot {:?}, error: {:?}",
block_info.slot, err
),
});
}
Ok(())
}
pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
info!("Notifying the end of startup");
// Ensure all items in the queue have been received by the workers

View File

@@ -0,0 +1,97 @@
use {
crate::{
accountsdb_plugin_postgres::{
AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
},
postgres_client::{
postgres_client_transaction::DbReward, SimplePostgresClient, UpdateBlockMetadataRequest,
},
},
chrono::Utc,
log::*,
postgres::{Client, Statement},
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPluginError, ReplicaBlockInfo,
},
};
#[derive(Clone, Debug)]
pub struct DbBlockInfo {
pub slot: i64,
pub blockhash: String,
pub rewards: Vec<DbReward>,
pub block_time: Option<i64>,
pub block_height: Option<i64>,
}
impl<'a> From<&ReplicaBlockInfo<'a>> for DbBlockInfo {
fn from(block_info: &ReplicaBlockInfo) -> Self {
Self {
slot: block_info.slot as i64,
blockhash: block_info.blockhash.to_string(),
rewards: block_info.rewards.iter().map(DbReward::from).collect(),
block_time: block_info.block_time,
block_height: block_info
.block_height
.map(|block_height| block_height as i64),
}
}
}
impl SimplePostgresClient {
pub(crate) fn build_block_metadata_upsert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt =
"INSERT INTO block (slot, blockhash, rewards, block_time, block_height, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6)";
let stmt = client.prepare(stmt);
match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the block metadata update PostgreSQL database: ({}) host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}
pub(crate) fn update_block_metadata_impl(
&mut self,
block_info: UpdateBlockMetadataRequest,
) -> Result<(), AccountsDbPluginError> {
let client = self.client.get_mut().unwrap();
let statement = &client.update_block_metadata_stmt;
let client = &mut client.client;
let updated_on = Utc::now().naive_utc();
let block_info = block_info.block_info;
let result = client.query(
statement,
&[
&block_info.slot,
&block_info.blockhash,
&block_info.rewards,
&block_info.block_time,
&block_info.block_height,
&updated_on,
],
);
if let Err(err) = result {
let msg = format!(
"Failed to persist the update of block metadata to the PostgreSQL database. Error: {:?}",
err);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
Ok(())
}
}
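A small illustration of the From conversion above, with made-up values; this is the conversion ParallelPostgresClient::update_block_metadata relies on when it boxes an UpdateBlockMetadataRequest:
let replica = ReplicaBlockInfo {
    slot: 42,
    blockhash: "4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM",
    rewards: &[],
    block_time: Some(1_641_600_000),
    block_height: Some(40),
};
let db_info = DbBlockInfo::from(&replica);
assert_eq!(db_info.slot, 42);
assert_eq!(db_info.block_height, Some(40));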

View File

@@ -18,8 +18,8 @@ use {
solana_sdk::{
instruction::CompiledInstruction,
message::{
v0::{self, AddressMapIndexes},
MappedAddresses, MappedMessage, Message, MessageHeader, SanitizedMessage,
v0::{self, LoadedAddresses, MessageAddressTableLookup},
Message, MessageHeader, SanitizedMessage,
},
transaction::TransactionError,
},
@@ -105,10 +105,11 @@ pub struct DbTransactionMessage {
}
#[derive(Clone, Debug, ToSql)]
#[postgres(name = "AddressMapIndexes")]
pub struct DbAddressMapIndexes {
pub writable: Vec<i16>,
pub readonly: Vec<i16>,
#[postgres(name = "TransactionMessageAddressTableLookup")]
pub struct DbTransactionMessageAddressTableLookup {
pub account_key: Vec<u8>,
pub writable_indexes: Vec<i16>,
pub readonly_indexes: Vec<i16>,
}
#[derive(Clone, Debug, ToSql)]
@@ -118,21 +119,21 @@ pub struct DbTransactionMessageV0 {
pub account_keys: Vec<Vec<u8>>,
pub recent_blockhash: Vec<u8>,
pub instructions: Vec<DbCompiledInstruction>,
pub address_map_indexes: Vec<DbAddressMapIndexes>,
pub address_table_lookups: Vec<DbTransactionMessageAddressTableLookup>,
}
#[derive(Clone, Debug, ToSql)]
#[postgres(name = "MappedAddresses")]
pub struct DbMappedAddresses {
#[postgres(name = "LoadedAddresses")]
pub struct DbLoadedAddresses {
pub writable: Vec<Vec<u8>>,
pub readonly: Vec<Vec<u8>>,
}
#[derive(Clone, Debug, ToSql)]
#[postgres(name = "MappedMessage")]
pub struct DbMappedMessage {
#[postgres(name = "LoadedMessageV0")]
pub struct DbLoadedMessageV0 {
pub message: DbTransactionMessageV0,
pub mapped_addresses: DbMappedAddresses,
pub loaded_addresses: DbLoadedAddresses,
}
pub struct DbTransaction {
@@ -141,7 +142,7 @@ pub struct DbTransaction {
pub slot: i64,
pub message_type: i16,
pub legacy_message: Option<DbTransactionMessage>,
pub v0_mapped_message: Option<DbMappedMessage>,
pub v0_loaded_message: Option<DbLoadedMessageV0>,
pub message_hash: Vec<u8>,
pub meta: DbTransactionStatusMeta,
pub signatures: Vec<Vec<u8>>,
@@ -151,32 +152,33 @@ pub struct LogTransactionRequest {
pub transaction_info: DbTransaction,
}
impl From<&AddressMapIndexes> for DbAddressMapIndexes {
fn from(address_map_indexes: &AddressMapIndexes) -> Self {
impl From<&MessageAddressTableLookup> for DbTransactionMessageAddressTableLookup {
fn from(address_table_lookup: &MessageAddressTableLookup) -> Self {
Self {
writable: address_map_indexes
.writable
account_key: address_table_lookup.account_key.as_ref().to_vec(),
writable_indexes: address_table_lookup
.writable_indexes
.iter()
.map(|address_idx| *address_idx as i16)
.map(|idx| *idx as i16)
.collect(),
readonly: address_map_indexes
.readonly
readonly_indexes: address_table_lookup
.readonly_indexes
.iter()
.map(|address_idx| *address_idx as i16)
.map(|idx| *idx as i16)
.collect(),
}
}
}
impl From<&MappedAddresses> for DbMappedAddresses {
fn from(mapped_addresses: &MappedAddresses) -> Self {
impl From<&LoadedAddresses> for DbLoadedAddresses {
fn from(loaded_addresses: &LoadedAddresses) -> Self {
Self {
writable: mapped_addresses
writable: loaded_addresses
.writable
.iter()
.map(|pubkey| pubkey.as_ref().to_vec())
.collect(),
readonly: mapped_addresses
readonly: loaded_addresses
.readonly
.iter()
.map(|pubkey| pubkey.as_ref().to_vec())
@@ -243,20 +245,20 @@ impl From<&v0::Message> for DbTransactionMessageV0 {
.iter()
.map(DbCompiledInstruction::from)
.collect(),
address_map_indexes: message
.address_map_indexes
address_table_lookups: message
.address_table_lookups
.iter()
.map(DbAddressMapIndexes::from)
.map(DbTransactionMessageAddressTableLookup::from)
.collect(),
}
}
}
impl From<&MappedMessage> for DbMappedMessage {
fn from(message: &MappedMessage) -> Self {
impl From<&v0::LoadedMessage> for DbLoadedMessageV0 {
fn from(message: &v0::LoadedMessage) -> Self {
Self {
message: DbTransactionMessageV0::from(&message.message),
mapped_addresses: DbMappedAddresses::from(&message.mapped_addresses),
loaded_addresses: DbLoadedAddresses::from(&message.loaded_addresses),
}
}
}
@@ -328,6 +330,12 @@ pub enum DbTransactionErrorCode {
WouldExceedMaxBlockCostLimit,
UnsupportedVersion,
InvalidWritableAccount,
WouldExceedMaxAccountDataCostLimit,
TooManyAccountLocks,
AddressLookupTableNotFound,
InvalidAddressLookupTableOwner,
InvalidAddressLookupTableData,
InvalidAddressLookupTableIndex,
}
impl From<&TransactionError> for DbTransactionErrorCode {
@@ -356,6 +364,18 @@ impl From<&TransactionError> for DbTransactionErrorCode {
TransactionError::WouldExceedMaxBlockCostLimit => Self::WouldExceedMaxBlockCostLimit,
TransactionError::UnsupportedVersion => Self::UnsupportedVersion,
TransactionError::InvalidWritableAccount => Self::InvalidWritableAccount,
TransactionError::WouldExceedMaxAccountDataCostLimit => {
Self::WouldExceedMaxAccountDataCostLimit
}
TransactionError::TooManyAccountLocks => Self::TooManyAccountLocks,
TransactionError::AddressLookupTableNotFound => Self::AddressLookupTableNotFound,
TransactionError::InvalidAddressLookupTableOwner => {
Self::InvalidAddressLookupTableOwner
}
TransactionError::InvalidAddressLookupTableData => Self::InvalidAddressLookupTableData,
TransactionError::InvalidAddressLookupTableIndex => {
Self::InvalidAddressLookupTableIndex
}
}
}
}
@@ -460,8 +480,8 @@ fn build_db_transaction(slot: u64, transaction_info: &ReplicaTransactionInfo) ->
}
_ => None,
},
v0_mapped_message: match transaction_info.transaction.message() {
SanitizedMessage::V0(mapped_message) => Some(DbMappedMessage::from(mapped_message)),
v0_loaded_message: match transaction_info.transaction.message() {
SanitizedMessage::V0(loaded_message) => Some(DbLoadedMessageV0::from(loaded_message)),
_ => None,
},
signatures: transaction_info
@@ -485,8 +505,16 @@ impl SimplePostgresClient {
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO transaction AS txn (signature, is_vote, slot, message_type, legacy_message, \
v0_mapped_message, signatures, message_hash, meta, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)";
v0_loaded_message, signatures, message_hash, meta, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) \
ON CONFLICT (slot, signature) DO UPDATE SET is_vote=excluded.is_vote, \
message_type=excluded.message_type, \
legacy_message=excluded.legacy_message, \
v0_loaded_message=excluded.v0_loaded_message, \
signatures=excluded.signatures, \
message_hash=excluded.message_hash, \
meta=excluded.meta, \
updated_on=excluded.updated_on";
let stmt = client.prepare(stmt);
@@ -521,7 +549,7 @@ impl SimplePostgresClient {
&transaction_info.slot,
&transaction_info.message_type,
&transaction_info.legacy_message,
&transaction_info.v0_mapped_message,
&transaction_info.v0_loaded_message,
&transaction_info.signatures,
&transaction_info.message_hash,
&transaction_info.meta,
@@ -670,42 +698,44 @@ pub(crate) mod tests {
check_inner_instructions_equality(&inner_instructions, &db_inner_instructions);
}
fn check_address_map_indexes_equality(
address_map_indexes: &AddressMapIndexes,
db_address_map_indexes: &DbAddressMapIndexes,
fn check_address_table_lookups_equality(
address_table_lookups: &MessageAddressTableLookup,
db_address_table_lookups: &DbTransactionMessageAddressTableLookup,
) {
assert_eq!(
address_map_indexes.writable.len(),
db_address_map_indexes.writable.len()
address_table_lookups.writable_indexes.len(),
db_address_table_lookups.writable_indexes.len()
);
assert_eq!(
address_map_indexes.readonly.len(),
db_address_map_indexes.readonly.len()
address_table_lookups.readonly_indexes.len(),
db_address_table_lookups.readonly_indexes.len()
);
for i in 0..address_map_indexes.writable.len() {
for i in 0..address_table_lookups.writable_indexes.len() {
assert_eq!(
address_map_indexes.writable[i],
db_address_map_indexes.writable[i] as u8
address_table_lookups.writable_indexes[i],
db_address_table_lookups.writable_indexes[i] as u8
)
}
for i in 0..address_map_indexes.readonly.len() {
for i in 0..address_table_lookups.readonly_indexes.len() {
assert_eq!(
address_map_indexes.readonly[i],
db_address_map_indexes.readonly[i] as u8
address_table_lookups.readonly_indexes[i],
db_address_table_lookups.readonly_indexes[i] as u8
)
}
}
#[test]
fn test_transform_address_map_indexes() {
let address_map_indexes = AddressMapIndexes {
writable: vec![1, 2, 3],
readonly: vec![4, 5, 6],
fn test_transform_address_table_lookups() {
let address_table_lookups = MessageAddressTableLookup {
account_key: Pubkey::new_unique(),
writable_indexes: vec![1, 2, 3],
readonly_indexes: vec![4, 5, 6],
};
let db_address_map_indexes = DbAddressMapIndexes::from(&address_map_indexes);
check_address_map_indexes_equality(&address_map_indexes, &db_address_map_indexes);
let db_address_table_lookups =
DbTransactionMessageAddressTableLookup::from(&address_table_lookups);
check_address_table_lookups_equality(&address_table_lookups, &db_address_table_lookups);
}
fn check_reward_equality(reward: &Reward, db_reward: &DbReward) {
@@ -1089,7 +1119,7 @@ pub(crate) mod tests {
check_transaction_message_equality(&message, &db_message);
}
fn check_transaction_messagev0_equality(
fn check_transaction_message_v0_equality(
message: &v0::Message,
db_message: &DbTransactionMessageV0,
) {
@@ -1106,18 +1136,18 @@ pub(crate) mod tests {
);
}
assert_eq!(
message.address_map_indexes.len(),
db_message.address_map_indexes.len()
message.address_table_lookups.len(),
db_message.address_table_lookups.len()
);
for i in 0..message.address_map_indexes.len() {
check_address_map_indexes_equality(
&message.address_map_indexes[i],
&db_message.address_map_indexes[i],
for i in 0..message.address_table_lookups.len() {
check_address_table_lookups_equality(
&message.address_table_lookups[i],
&db_message.address_table_lookups[i],
);
}
}
fn build_transaction_messagev0() -> v0::Message {
fn build_transaction_message_v0() -> v0::Message {
v0::Message {
header: MessageHeader {
num_readonly_signed_accounts: 2,
@@ -1144,71 +1174,76 @@ pub(crate) mod tests {
data: vec![14, 15, 16],
},
],
address_map_indexes: vec![
AddressMapIndexes {
writable: vec![0],
readonly: vec![1, 2],
address_table_lookups: vec![
MessageAddressTableLookup {
account_key: Pubkey::new_unique(),
writable_indexes: vec![0],
readonly_indexes: vec![1, 2],
},
AddressMapIndexes {
writable: vec![1],
readonly: vec![0, 2],
MessageAddressTableLookup {
account_key: Pubkey::new_unique(),
writable_indexes: vec![1],
readonly_indexes: vec![0, 2],
},
],
}
}
#[test]
fn test_transform_transaction_messagev0() {
let message = build_transaction_messagev0();
fn test_transform_transaction_message_v0() {
let message = build_transaction_message_v0();
let db_message = DbTransactionMessageV0::from(&message);
check_transaction_messagev0_equality(&message, &db_message);
check_transaction_message_v0_equality(&message, &db_message);
}
fn check_mapped_addresses(
mapped_addresses: &MappedAddresses,
db_mapped_addresses: &DbMappedAddresses,
fn check_loaded_addresses(
loaded_addresses: &LoadedAddresses,
db_loaded_addresses: &DbLoadedAddresses,
) {
assert_eq!(
mapped_addresses.writable.len(),
db_mapped_addresses.writable.len()
loaded_addresses.writable.len(),
db_loaded_addresses.writable.len()
);
for i in 0..mapped_addresses.writable.len() {
for i in 0..loaded_addresses.writable.len() {
assert_eq!(
mapped_addresses.writable[i].as_ref(),
db_mapped_addresses.writable[i]
loaded_addresses.writable[i].as_ref(),
db_loaded_addresses.writable[i]
);
}
assert_eq!(
mapped_addresses.readonly.len(),
db_mapped_addresses.readonly.len()
loaded_addresses.readonly.len(),
db_loaded_addresses.readonly.len()
);
for i in 0..mapped_addresses.readonly.len() {
for i in 0..loaded_addresses.readonly.len() {
assert_eq!(
mapped_addresses.readonly[i].as_ref(),
db_mapped_addresses.readonly[i]
loaded_addresses.readonly[i].as_ref(),
db_loaded_addresses.readonly[i]
);
}
}
fn check_mapped_message_equality(message: &MappedMessage, db_message: &DbMappedMessage) {
check_transaction_messagev0_equality(&message.message, &db_message.message);
check_mapped_addresses(&message.mapped_addresses, &db_message.mapped_addresses);
fn check_loaded_message_v0_equality(
message: &v0::LoadedMessage,
db_message: &DbLoadedMessageV0,
) {
check_transaction_message_v0_equality(&message.message, &db_message.message);
check_loaded_addresses(&message.loaded_addresses, &db_message.loaded_addresses);
}
#[test]
fn test_transform_mapped_message() {
let message = MappedMessage {
message: build_transaction_messagev0(),
mapped_addresses: MappedAddresses {
fn test_transform_loaded_message_v0() {
let message = v0::LoadedMessage {
message: build_transaction_message_v0(),
loaded_addresses: LoadedAddresses {
writable: vec![Pubkey::new_unique(), Pubkey::new_unique()],
readonly: vec![Pubkey::new_unique(), Pubkey::new_unique()],
},
};
let db_message = DbMappedMessage::from(&message);
check_mapped_message_equality(&message, &db_message);
let db_message = DbLoadedMessageV0::from(&message);
check_loaded_message_v0_equality(&message, &db_message);
}
fn check_transaction(
@@ -1229,9 +1264,9 @@ pub(crate) mod tests {
}
SanitizedMessage::V0(message) => {
assert_eq!(db_transaction.message_type, 1);
check_mapped_message_equality(
check_loaded_message_v0_equality(
message,
db_transaction.v0_mapped_message.as_ref().unwrap(),
db_transaction.v0_loaded_message.as_ref().unwrap(),
);
}
}
@@ -1298,7 +1333,7 @@ pub(crate) mod tests {
Signature::new(&[2u8; 64]),
Signature::new(&[3u8; 64]),
],
message: VersionedMessage::V0(build_transaction_messagev0()),
message: VersionedMessage::V0(build_transaction_message_v0()),
}
}
@@ -1313,7 +1348,7 @@ pub(crate) mod tests {
let transaction =
SanitizedTransaction::try_create(transaction, message_hash, Some(true), |_message| {
Ok(MappedAddresses {
Ok(LoadedAddresses {
writable: vec![Pubkey::new_unique(), Pubkey::new_unique()],
readonly: vec![Pubkey::new_unique(), Pubkey::new_unique()],
})

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-banking-bench"
version = "1.9.0"
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-core = { path = "../core", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-ledger = { path = "../ledger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-poh = { path = "../poh", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.4" }
solana-gossip = { path = "../gossip", version = "=1.9.4" }
solana-ledger = { path = "../ledger", version = "=1.9.4" }
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-measure = { path = "../measure", version = "=1.9.4" }
solana-perf = { path = "../perf", version = "=1.9.4" }
solana-poh = { path = "../poh", version = "=1.9.4" }
solana-runtime = { path = "../runtime", version = "=1.9.4" }
solana-streamer = { path = "../streamer", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-version = { path = "../version", version = "=1.9.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -13,7 +13,7 @@ use {
get_tmp_ledger_path,
},
solana_measure::measure::Measure,
solana_perf::packet::to_packets_chunked,
solana_perf::packet::to_packet_batches,
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
@@ -212,7 +212,7 @@ fn main() {
bank.clear_signatures();
}
let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
@@ -364,7 +364,7 @@ fn main() {
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
}
start += chunk_len;

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.9.0"
version = "1.9.4"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,16 +12,17 @@ edition = "2021"
[dependencies]
borsh = "0.9.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" }
solana-program = { path = "../sdk/program", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.4" }
solana-program = { path = "../sdk/program", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
tarpc = { version = "0.27.2", features = ["full"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-banks-server = { path = "../banks-server", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.4" }
solana-banks-server = { path = "../banks-server", version = "=1.9.4" }
[lib]
crate-type = ["lib"]

banks-client/src/error.rs (new file)
View File

@@ -0,0 +1,73 @@
use {
solana_sdk::{transaction::TransactionError, transport::TransportError},
std::io,
tarpc::client::RpcError,
thiserror::Error,
};
/// Errors from BanksClient
#[derive(Error, Debug)]
pub enum BanksClientError {
#[error("client error: {0}")]
ClientError(&'static str),
#[error(transparent)]
Io(#[from] io::Error),
#[error(transparent)]
RpcError(#[from] RpcError),
#[error("transport transaction error: {0}")]
TransactionError(#[from] TransactionError),
#[error("simulation error: {err:?}, logs: {logs:?}, units_consumed: {units_consumed:?}")]
SimulationError {
err: TransactionError,
logs: Vec<String>,
units_consumed: u64,
},
}
impl BanksClientError {
pub fn unwrap(&self) -> TransactionError {
match self {
BanksClientError::TransactionError(err)
| BanksClientError::SimulationError { err, .. } => err.clone(),
_ => panic!("unexpected transport error"),
}
}
}
impl From<BanksClientError> for io::Error {
fn from(err: BanksClientError) -> Self {
match err {
BanksClientError::ClientError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
BanksClientError::Io(err) => err,
BanksClientError::RpcError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
BanksClientError::TransactionError(err) => {
Self::new(io::ErrorKind::Other, err.to_string())
}
BanksClientError::SimulationError { err, .. } => {
Self::new(io::ErrorKind::Other, err.to_string())
}
}
}
}
impl From<BanksClientError> for TransportError {
fn from(err: BanksClientError) -> Self {
match err {
BanksClientError::ClientError(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::Io(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::RpcError(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::TransactionError(err) => Self::TransactionError(err),
BanksClientError::SimulationError { err, .. } => Self::TransactionError(err),
}
}
}
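The `From` conversions above let existing callers that still return `io::Result` or `transport::Result` compile unchanged while the crate migrates to `BanksClientError`. A hedged sketch of consuming the new type directly (the `describe` helper is hypothetical, not part of the crate):

fn describe(err: BanksClientError) -> String {
    match err {
        BanksClientError::SimulationError { err, logs, units_consumed } => format!(
            "preflight failed: {:?} ({} units consumed); logs: {:?}",
            err, units_consumed, logs
        ),
        BanksClientError::TransactionError(err) => format!("transaction failed: {:?}", err),
        // ClientError, Io, and RpcError are all transport-level failures;
        // thiserror's derive provides Display for each variant.
        other => format!("transport failure: {}", other),
    }
}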

View File

@@ -7,9 +7,10 @@
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
use {
crate::error::BanksClientError,
borsh::BorshDeserialize,
futures::{future::join_all, Future, FutureExt},
solana_banks_interface::{BanksRequest, BanksResponse},
futures::{future::join_all, Future, FutureExt, TryFutureExt},
solana_banks_interface::{BanksRequest, BanksResponse, BanksTransactionResultWithSimulation},
solana_program::{
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
rent::Rent, sysvar::Sysvar,
@@ -22,7 +23,7 @@ use {
transaction::{self, Transaction},
transport,
},
std::io::{self, Error, ErrorKind},
std::io,
tarpc::{
client::{self, NewClient, RequestDispatch},
context::{self, Context},
@@ -33,6 +34,8 @@ use {
tokio_serde::formats::Bincode,
};
mod error;
// This exists only for backward compatibility
pub trait BanksClientExt {}
@@ -58,7 +61,10 @@ impl BanksClient {
ctx: Context,
transaction: Transaction,
) -> impl Future<Output = io::Result<()>> + '_ {
self.inner.send_transaction_with_context(ctx, transaction)
self.inner
.send_transaction_with_context(ctx, transaction)
.map_err(BanksClientError::from) // Remove this once the return type is updated to BanksClientError
.map_err(Into::into)
}
#[deprecated(
@@ -73,6 +79,8 @@ impl BanksClient {
#[allow(deprecated)]
self.inner
.get_fees_with_commitment_and_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this once the return type is updated to BanksClientError
.map_err(Into::into)
}
pub fn get_transaction_status_with_context(
@@ -82,6 +90,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
self.inner
.get_transaction_status_with_context(ctx, signature)
.map_err(BanksClientError::from) // Remove this once the return type is updated to BanksClientError
.map_err(Into::into)
}
pub fn get_slot_with_context(
@@ -89,7 +99,10 @@ impl BanksClient {
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_slot_with_context(ctx, commitment)
self.inner
.get_slot_with_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this once the return type is updated to BanksClientError
.map_err(Into::into)
}
pub fn get_block_height_with_context(
@@ -97,7 +110,10 @@ impl BanksClient {
ctx: Context,
commitment: CommitmentLevel,
) -> impl Future<Output = io::Result<Slot>> + '_ {
self.inner.get_block_height_with_context(ctx, commitment)
self.inner
.get_block_height_with_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this once the return type is updated to BanksClientError
.map_err(Into::into)
}
pub fn process_transaction_with_commitment_and_context(
@@ -108,6 +124,24 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
self.inner
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.map_err(BanksClientError::from) // Remove this once the return type is updated to BanksClientError
.map_err(Into::into)
}
pub fn process_transaction_with_preflight_and_commitment_and_context(
&mut self,
ctx: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<BanksTransactionResultWithSimulation, BanksClientError>> + '_
{
self.inner
.process_transaction_with_preflight_and_commitment_and_context(
ctx,
transaction,
commitment,
)
.map_err(Into::into)
}
pub fn get_account_with_commitment_and_context(
@@ -118,6 +152,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
self.inner
.get_account_with_commitment_and_context(ctx, address, commitment)
.map_err(BanksClientError::from) // Remove this once the return type is updated to BanksClientError
.map_err(Into::into)
}
/// Send a transaction and return immediately. The server will resend the
@@ -148,9 +184,13 @@ impl BanksClient {
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(T::id()).map(|result| {
let sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
.ok_or(BanksClientError::ClientError("Sysvar not present"))
.map_err(io::Error::from)?; // Remove this map once the return type is updated to BanksClientError
from_account::<T, _>(&sysvar)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
.ok_or(BanksClientError::ClientError(
"Failed to deserialize sysvar",
))
.map_err(Into::into) // Remove this once the return type is updated to BanksClientError
})
}
@@ -164,7 +204,8 @@ impl BanksClient {
/// method to get both a blockhash and the blockhash's last valid slot.
#[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")]
pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
self.get_latest_blockhash()
#[allow(deprecated)]
self.get_fees().map(|result| Ok(result?.1))
}
/// Send a transaction and return after the transaction has been rejected or
@@ -178,11 +219,60 @@ impl BanksClient {
ctx.deadline += Duration::from_secs(50);
self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.map(|result| match result? {
None => {
Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into())
}
None => Err(BanksClientError::ClientError(
"invalid blockhash or fee-payer",
)),
Some(transaction_result) => Ok(transaction_result?),
})
.map_err(Into::into) // Remove this once the return type is updated to BanksClientError
}
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
/// after the transaction has been rejected or reached the given level of commitment.
pub fn process_transaction_with_preflight_and_commitment(
&mut self,
transaction: Transaction,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
let mut ctx = context::current();
ctx.deadline += Duration::from_secs(50);
self.process_transaction_with_preflight_and_commitment_and_context(
ctx,
transaction,
commitment,
)
.map(|result| match result? {
BanksTransactionResultWithSimulation {
result: None,
simulation_details: _,
} => Err(BanksClientError::ClientError(
"invalid blockhash or fee-payer",
)),
BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: Some(simulation_details),
} => Err(BanksClientError::SimulationError {
err,
logs: simulation_details.logs,
units_consumed: simulation_details.units_consumed,
}),
BanksTransactionResultWithSimulation {
result: Some(result),
simulation_details: _,
} => result.map_err(Into::into),
})
}
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
/// after the transaction has been finalized or rejected.
pub fn process_transaction_with_preflight(
&mut self,
transaction: Transaction,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
self.process_transaction_with_preflight_and_commitment(
transaction,
CommitmentLevel::default(),
)
}
/// Send a transaction and return after the transaction has been finalized or rejected.
@@ -255,10 +345,12 @@ impl BanksClient {
address: Pubkey,
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Account not found"))?;
let account = result?
.ok_or(BanksClientError::ClientError("Account not found"))
.map_err(io::Error::from)?; // Remove this map once the return type is updated to BanksClientError
T::unpack_from_slice(&account.data)
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Failed to deserialize account"))
.map_err(|_| BanksClientError::ClientError("Failed to deserialize account"))
.map_err(Into::into) // Remove this once the return type is updated to BanksClientError
})
}
@@ -269,9 +361,8 @@ impl BanksClient {
address: Pubkey,
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))?;
T::try_from_slice(&account.data)
let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
T::try_from_slice(&account.data).map_err(Into::into)
})
}
@@ -330,7 +421,8 @@ impl BanksClient {
.map(|result| {
result?
.map(|x| x.0)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))
.ok_or(BanksClientError::ClientError("valid blockhash not found"))
.map_err(Into::into)
})
}
@@ -348,6 +440,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
self.inner
.get_latest_blockhash_with_commitment_and_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this once the return type is updated to BanksClientError
.map_err(Into::into)
}
pub fn get_fee_for_message_with_commitment_and_context(
@@ -358,6 +452,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<u64>>> + '_ {
self.inner
.get_fee_for_message_with_commitment_and_context(ctx, commitment, message)
.map_err(BanksClientError::from) // Remove this once the return type is updated to BanksClientError
.map_err(Into::into)
}
}
@@ -399,7 +495,7 @@ mod tests {
}
#[test]
fn test_banks_server_transfer_via_server() -> io::Result<()> {
fn test_banks_server_transfer_via_server() -> Result<(), BanksClientError> {
// This test shows the preferred way to interact with BanksServer.
// It creates a runtime explicitly (no globals via tokio macros) and calls
// `runtime.block_on()` just once, to run all the async code.
@@ -432,7 +528,7 @@ mod tests {
}
#[test]
fn test_banks_server_transfer_via_client() -> io::Result<()> {
fn test_banks_server_transfer_via_client() -> Result<(), BanksClientError> {
// The caller may not want to hold the connection open until the transaction
// is processed (or the blockhash expires). In this test, we verify that the
// server-side functionality is available to the client.
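A sketch of exercising the new preflight entry point from a test like the ones above; the transfer construction is illustrative, not upstream test code:

use solana_sdk::{
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

async fn transfer_with_preflight(
    client: &mut BanksClient,
    payer: &Keypair,
    recipient: Pubkey,
) -> Result<(), BanksClientError> {
    let recent_blockhash = client.get_latest_blockhash().await?;
    let transaction = Transaction::new_signed_with_payer(
        &[system_instruction::transfer(&payer.pubkey(), &recipient, 1)],
        Some(&payer.pubkey()),
        &[payer],
        recent_blockhash,
    );
    // On a failed simulation this surfaces SimulationError, complete with
    // logs and units_consumed, instead of a bare transaction error.
    client.process_transaction_with_preflight(transaction).await
}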

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.9.0"
version = "1.9.4"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2021"
[dependencies]
serde = { version = "1.0.130", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
tarpc = { version = "0.27.2", features = ["full"] }
[lib]
crate-type = ["lib"]

View File

@@ -30,6 +30,19 @@ pub struct TransactionStatus {
pub confirmation_status: Option<TransactionConfirmationStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TransactionSimulationDetails {
pub logs: Vec<String>,
pub units_consumed: u64,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BanksTransactionResultWithSimulation {
pub result: Option<transaction::Result<()>>,
pub simulation_details: Option<TransactionSimulationDetails>,
}
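Note that `#[serde(rename_all = "camelCase")]` only shows up in self-describing formats; the tarpc transport used here is bincode, which never writes field names. A sketch of the JSON shape via serde_json (illustrative only; serde_json is not a dependency of this crate):

fn simulation_details_shape() -> serde_json::Result<String> {
    let details = TransactionSimulationDetails {
        logs: vec!["Program log: hello".to_string()],
        units_consumed: 1200,
    };
    // Yields {"logs":["Program log: hello"],"unitsConsumed":1200}
    serde_json::to_string(&details)
}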
#[tarpc::service]
pub trait Banks {
async fn send_transaction_with_context(transaction: Transaction);
@@ -44,6 +57,10 @@ pub trait Banks {
-> Option<TransactionStatus>;
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
async fn process_transaction_with_preflight_and_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,
) -> BanksTransactionResultWithSimulation;
async fn process_transaction_with_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.9.0"
version = "1.9.4"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2021"
[dependencies]
bincode = "1.3.3"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.4" }
solana-runtime = { path = "../runtime", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.4" }
tarpc = { version = "0.27.2", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
tokio-stream = "0.1"

View File

@@ -2,9 +2,14 @@ use {
bincode::{deserialize, serialize},
futures::{future, prelude::stream::StreamExt},
solana_banks_interface::{
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
Banks, BanksRequest, BanksResponse, BanksTransactionResultWithSimulation,
TransactionConfirmationStatus, TransactionSimulationDetails, TransactionStatus,
},
solana_runtime::{
bank::{Bank, TransactionSimulationResult},
bank_forks::BankForks,
commitment::BlockCommitmentCache,
},
solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache},
solana_sdk::{
account::Account,
clock::Slot,
@@ -15,7 +20,7 @@ use {
message::{Message, SanitizedMessage},
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
transaction::{self, SanitizedTransaction, Transaction},
},
solana_send_transaction_service::{
send_transaction_service::{SendTransactionService, TransactionInfo},
@@ -35,7 +40,7 @@ use {
tarpc::{
context::Context,
serde_transport::tcp,
server::{self, Channel, Incoming},
server::{self, incoming::Incoming, Channel},
transport::{self, channel::UnboundedChannel},
ClientMessage, Response,
},
@@ -242,6 +247,47 @@ impl Banks for BanksServer {
self.bank(commitment).block_height()
}
async fn process_transaction_with_preflight_and_commitment_and_context(
self,
ctx: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> BanksTransactionResultWithSimulation {
let sanitized_transaction =
match SanitizedTransaction::try_from_legacy_transaction(transaction.clone()) {
Err(err) => {
return BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: None,
};
}
Ok(tx) => tx,
};
if let TransactionSimulationResult {
result: Err(err),
logs,
post_simulation_accounts: _,
units_consumed,
} = self
.bank(commitment)
.simulate_transaction_unchecked(sanitized_transaction)
{
return BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: Some(TransactionSimulationDetails {
logs,
units_consumed,
}),
};
}
BanksTransactionResultWithSimulation {
result: self
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.await,
simulation_details: None,
}
}
async fn process_transaction_with_commitment_and_context(
self,
_: Context,

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-streamer"
version = "1.9.0"
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" }
solana-streamer = { path = "../streamer", version = "=1.9.4" }
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-net-utils = { path = "../net-utils", version = "=1.9.4" }
solana-version = { path = "../version", version = "=1.9.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -2,8 +2,8 @@
use {
clap::{crate_description, crate_name, App, Arg},
solana_streamer::{
packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketReceiver},
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
streamer::{receiver, PacketBatchReceiver},
},
std::{
cmp::max,
@@ -20,19 +20,19 @@ use {
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut msgs = Packets::default();
msgs.packets.resize(10, Packet::default());
for w in msgs.packets.iter_mut() {
let mut packet_batch = PacketBatch::default();
packet_batch.packets.resize(10, Packet::default());
for w in packet_batch.packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(addr);
}
let msgs = Arc::new(msgs);
let packet_batch = Arc::new(packet_batch);
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let mut num = 0;
for p in &msgs.packets {
for p in &packet_batch.packets {
let a = p.meta.addr();
assert!(p.meta.size <= PACKET_DATA_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
@@ -42,14 +42,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
})
}
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
let timer = Duration::new(1, 0);
if let Ok(msgs) = r.recv_timeout(timer) {
rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
if let Ok(packet_batch) = r.recv_timeout(timer) {
rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
}
})
}
@@ -81,7 +81,7 @@ fn main() -> Result<()> {
let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
for _ in 0..num_sockets {
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-tps"
version = "1.9.0"
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,23 +14,23 @@ log = "0.4.14"
rayon = "1.5.1"
serde_json = "1.0.72"
serde_yaml = "0.8.21"
solana-core = { path = "../core", version = "=1.9.0" }
solana-genesis = { path = "../genesis", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-core = { path = "../core", version = "=1.9.4" }
solana-genesis = { path = "../genesis", version = "=1.9.4" }
solana-client = { path = "../client", version = "=1.9.4" }
solana-faucet = { path = "../faucet", version = "=1.9.4" }
solana-gossip = { path = "../gossip", version = "=1.9.4" }
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-metrics = { path = "../metrics", version = "=1.9.4" }
solana-measure = { path = "../measure", version = "=1.9.4" }
solana-net-utils = { path = "../net-utils", version = "=1.9.4" }
solana-runtime = { path = "../runtime", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-streamer = { path = "../streamer", version = "=1.9.4" }
solana-version = { path = "../version", version = "=1.9.4" }
[dev-dependencies]
serial_test = "0.5.1"
solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-bucket-map"
version = "1.9.0"
version = "1.9.4"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
@@ -12,11 +12,11 @@ edition = "2021"
[dependencies]
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
memmap2 = "0.5.0"
log = { version = "0.4.11" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.4" }
rand = "0.7.0"
fs_extra = "1.2.0"
tempfile = "3.2.0"

View File

@@ -9,5 +9,8 @@ for a in "$@"; do
fi
done
set -x
set -ex
if [[ ! -f sdk/bpf/syscalls.txt ]]; then
"$here"/cargo build --manifest-path "$here"/programs/bpf_loader/gen-syscall-list/Cargo.toml
fi
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"

View File

@@ -226,6 +226,19 @@ EOF
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi
# Wasm support
if affects \
^ci/test-wasm.sh \
^ci/test-stable.sh \
^sdk/ \
; then
command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
else
annotate --style info \
"wasm skipped as no relevant files were modified"
fi
# Benches...
if affects \
.rs$ \
@@ -243,7 +256,15 @@ EOF
command_step "local-cluster" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
50
40
command_step "local-cluster-flakey" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
10
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
}
pull_or_push_steps() {

View File

@@ -19,3 +19,8 @@ steps:
timeout_in_minutes: 240
name: "publish crate"
branches: "!master"
- command: "ci/publish-tarball.sh"
agents:
- "queue=release-build-aarch64-apple-darwin"
timeout_in_minutes: 60
name: "publish tarball (aarch64-apple-darwin)"

View File

@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.56.1
FROM solanalabs/rust:1.57.0
ARG date
RUN set -x \

View File

@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.56.1
FROM rust:1.57.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
@@ -11,6 +11,7 @@ RUN set -x \
&& apt-get install apt-transport-https \
&& echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \
&& apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \
&& curl -fsSL https://deb.nodesource.com/setup_current.x | bash - \
&& apt update \
&& apt install -y \
buildkite-agent \
@@ -19,15 +20,20 @@ RUN set -x \
lcov \
libudev-dev \
mscgen \
nodejs \
net-tools \
rsync \
sudo \
golang \
unzip \
\
&& apt remove -y libcurl4-openssl-dev \
&& rm -rf /var/lib/apt/lists/* \
&& node --version \
&& npm --version \
&& rustup component add rustfmt \
&& rustup component add clippy \
&& rustup target add wasm32-unknown-unknown \
&& cargo install cargo-audit \
&& cargo install mdbook \
&& cargo install mdbook-linkcheck \

View File

@@ -23,6 +23,9 @@ if [[ -n $CI ]]; then
elif [[ -n $BUILDKITE ]]; then
export CI_BRANCH=$BUILDKITE_BRANCH
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
if [[ $BUILDKITE_COMMIT = HEAD ]]; then
BUILDKITE_COMMIT="$(git rev-parse HEAD)"
fi
export CI_COMMIT=$BUILDKITE_COMMIT
export CI_JOB_ID=$BUILDKITE_JOB_ID
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
@@ -35,7 +38,18 @@ if [[ -n $CI ]]; then
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
export CI_PULL_REQUEST=
fi
export CI_OS_NAME=linux
case "$(uname -s)" in
Linux)
export CI_OS_NAME=linux
;;
Darwin)
export CI_OS_NAME=osx
;;
*)
;;
esac
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
# The solana-secondary pipeline should use the slug of the pipeline that
# triggered it

View File

@@ -39,7 +39,11 @@ fi
case "$CI_OS_NAME" in
osx)
TARGET=x86_64-apple-darwin
_cputype="$(uname -m)"
if [[ $_cputype = arm64 ]]; then
_cputype=aarch64
fi
TARGET=${_cputype}-apple-darwin
;;
linux)
TARGET=x86_64-unknown-linux-gnu

View File

@@ -27,6 +27,8 @@ steps+=(test-stable-perf)
steps+=(test-downstream-builds)
steps+=(test-bench)
steps+=(test-local-cluster)
steps+=(test-local-cluster-flakey)
steps+=(test-local-cluster-slow)
step_index=0
if [[ -n "$1" ]]; then

View File

@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.56.1
stable_version=1.57.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2021-11-30
nightly_version=2021-12-03
fi

View File

@@ -0,0 +1 @@
test-stable.sh

View File

@@ -0,0 +1 @@
test-stable.sh

View File

@@ -100,7 +100,30 @@ test-stable-perf)
;;
test-local-cluster)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-flakey)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-local-cluster-slow)
_ "$cargo" stable build --release --bins ${V:+--verbose}
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow ${V:+--verbose} -- --nocapture --test-threads=1
exit 0
;;
test-wasm)
_ node --version
_ npm --version
for dir in sdk/{program,}; do
if [[ -r "$dir"/package.json ]]; then
pushd "$dir"
_ npm install
_ npm test
popd
fi
done
exit 0
;;
*)

ci/test-wasm.sh Symbolic link
View File

@@ -0,0 +1 @@
test-stable.sh

View File

@@ -19,13 +19,24 @@ upload-ci-artifact() {
upload-s3-artifact() {
echo "--- artifact: $1 to $2"
(
set -x
docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
args=(
--rm
--env AWS_ACCESS_KEY_ID
--env AWS_SECRET_ACCESS_KEY
--volume "$PWD:/solana"
)
if [[ $(uname -m) = arm64 ]]; then
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
args+=(
--platform linux/amd64
)
fi
args+=(
eremite/aws-cli:2018.12.18
/usr/bin/s3cmd --acl-public put "$1" "$2"
)
set -x
docker run "${args[@]}"
)
}

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.9.0"
version = "1.9.4"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,9 +12,9 @@ edition = "2021"
[dependencies]
clap = "2.33.0"
rpassword = "5.0"
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.4" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
uriparse = "0.6.3"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -19,12 +19,12 @@ Inflector = "0.11.4"
indicatif = "0.16.2"
serde = "1.0.130"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" }
solana-client = { path = "../client", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.4" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
[package.metadata.docs.rs]

View File

@@ -99,7 +99,7 @@ impl OutputFormat {
pub struct CliAccount {
#[serde(flatten)]
pub keyed_account: RpcKeyedAccount,
#[serde(skip_serializing)]
#[serde(skip_serializing, skip_deserializing)]
pub use_lamports_unit: bool,
}
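The added `skip_deserializing` matters because `skip_serializing` alone is one-way: serialization omits the field, but deserialization still demands it, so a previously dumped account would fail to load. With both attributes, serde falls back to `Default::default()`. A minimal sketch of the pattern with a hypothetical struct:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Demo {
    name: String,
    // Omitted from output and filled with bool::default() on input.
    #[serde(skip_serializing, skip_deserializing)]
    use_lamports_unit: bool,
}

fn load_demo() -> serde_json::Result<Demo> {
    // Parses even though "use_lamports_unit" is absent from the JSON.
    serde_json::from_str(r#"{"name":"vote account"}"#)
}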

View File

@@ -139,7 +139,7 @@ fn format_account_mode(message: &Message, index: usize) -> String {
} else {
"-"
},
if message.is_writable(index, /*demote_program_write_locks=*/ true) {
if message.is_writable(index) {
"w" // comment for consistent rust fmt (no joking; lol)
} else {
"-"

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,29 +26,29 @@ semver = "1.0.4"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-cli-config = { path = "../cli-config", version = "=1.9.0" }
solana-cli-output = { path = "../cli-output", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-config-program = { path = "../programs/config", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" }
solana_rbpf = "=0.2.16"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.4" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" }
solana-cli-config = { path = "../cli-config", version = "=1.9.4" }
solana-cli-output = { path = "../cli-output", version = "=1.9.4" }
solana-client = { path = "../client", version = "=1.9.4" }
solana-config-program = { path = "../programs/config", version = "=1.9.4" }
solana-faucet = { path = "../faucet", version = "=1.9.4" }
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.4" }
solana_rbpf = "=0.2.21"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
solana-version = { path = "../version", version = "=1.9.4" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.4" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
[dev-dependencies]
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-test-validator = { path = "../test-validator", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.4" }
solana-test-validator = { path = "../test-validator", version = "=1.9.4" }
tempfile = "3.2.0"
[[bin]]

View File

@@ -98,10 +98,7 @@ pub fn get_fee_for_messages(
) -> Result<u64, CliError> {
Ok(messages
.iter()
.map(|message| {
println!("msg {:?}", message.recent_blockhash);
rpc_client.get_fee_for_message(message)
})
.map(|message| rpc_client.get_fee_for_message(message))
.collect::<Result<Vec<_>, _>>()?
.iter()
.sum())

View File

@@ -298,7 +298,13 @@ pub enum CliCommand {
authorized_voter: Option<Pubkey>,
authorized_withdrawer: Pubkey,
commission: u8,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
ShowVoteAccount {
pubkey: Pubkey,
@@ -310,19 +316,32 @@ pub enum CliCommand {
destination_account_pubkey: Pubkey,
withdraw_authority: SignerIndex,
withdraw_amount: SpendAmount,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
CloseVoteAccount {
vote_account_pubkey: Pubkey,
destination_account_pubkey: Pubkey,
withdraw_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
VoteAuthorize {
vote_account_pubkey: Pubkey,
new_authorized_pubkey: Pubkey,
vote_authorize: VoteAuthorize,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
authorized: SignerIndex,
new_authorized: Option<SignerIndex>,
},
@@ -330,13 +349,25 @@ pub enum CliCommand {
vote_account_pubkey: Pubkey,
new_identity_account: SignerIndex,
withdraw_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
VoteUpdateCommission {
vote_account_pubkey: Pubkey,
commission: u8,
withdraw_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
memo: Option<String>,
fee_payer: SignerIndex,
},
// Wallet Commands
Address,
@@ -1384,7 +1415,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
authorized_voter,
authorized_withdrawer,
commission,
sign_only,
dump_transaction_message,
blockhash_query,
ref nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_create_vote_account(
&rpc_client,
config,
@@ -1394,7 +1431,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
authorized_voter,
*authorized_withdrawer,
*commission,
*sign_only,
*dump_transaction_message,
blockhash_query,
nonce_account.as_ref(),
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::ShowVoteAccount {
pubkey: vote_account_pubkey,
@@ -1412,7 +1455,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
withdraw_authority,
withdraw_amount,
destination_account_pubkey,
sign_only,
dump_transaction_message,
blockhash_query,
ref nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_withdraw_from_vote_account(
&rpc_client,
config,
@@ -1420,13 +1469,20 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*withdraw_authority,
*withdraw_amount,
destination_account_pubkey,
*sign_only,
*dump_transaction_message,
blockhash_query,
nonce_account.as_ref(),
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::CloseVoteAccount {
vote_account_pubkey,
withdraw_authority,
destination_account_pubkey,
memo,
fee_payer,
} => process_close_vote_account(
&rpc_client,
config,
@@ -1434,12 +1490,19 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*withdraw_authority,
destination_account_pubkey,
memo.as_ref(),
*fee_payer,
),
CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey,
vote_authorize,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
memo,
fee_payer,
authorized,
new_authorized,
} => process_vote_authorize(
@@ -1450,33 +1513,63 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*vote_authorize,
*authorized,
*new_authorized,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account,
withdraw_authority,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_vote_update_validator(
&rpc_client,
config,
vote_account_pubkey,
*new_identity_account,
*withdraw_authority,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
CliCommand::VoteUpdateCommission {
vote_account_pubkey,
commission,
withdraw_authority,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
memo,
fee_payer,
} => process_vote_update_commission(
&rpc_client,
config,
vote_account_pubkey,
*commission,
*withdraw_authority,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
memo.as_ref(),
*fee_payer,
),
// Wallet Commands
@@ -1975,7 +2068,13 @@ mod tests {
authorized_voter: Some(bob_pubkey),
authorized_withdrawer: bob_pubkey,
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
let result = process_command(&config);
@@ -2006,7 +2105,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_authorized_pubkey,
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
@@ -2019,7 +2124,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
let result = process_command(&config);
assert!(result.is_ok());
@@ -2195,7 +2306,13 @@ mod tests {
authorized_voter: Some(bob_pubkey),
authorized_withdrawer: bob_pubkey,
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
assert!(process_command(&config).is_err());
@@ -2204,7 +2321,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_authorized_pubkey: bob_pubkey,
vote_authorize: VoteAuthorize::Voter,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
@@ -2214,7 +2337,13 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_identity_account: 1,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
assert!(process_command(&config).is_err());

View File

@@ -5,7 +5,7 @@ use {
},
clap::{App, AppSettings, Arg, ArgMatches, SubCommand},
console::style,
serde::{Deserialize, Serialize},
serde::{Deserialize, Deserializer, Serialize, Serializer},
solana_clap_utils::{input_parsers::*, input_validators::*, keypair::*},
solana_cli_output::{QuietDisplay, VerboseDisplay},
solana_client::{client_error::ClientError, rpc_client::RpcClient},
@@ -23,6 +23,7 @@ use {
cmp::Ordering,
collections::{HashMap, HashSet},
fmt,
str::FromStr,
sync::Arc,
},
};
@@ -45,7 +46,7 @@ pub enum FeatureCliCommand {
},
}
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase", tag = "status", content = "sinceSlot")]
pub enum CliFeatureStatus {
Inactive,
@@ -53,7 +54,29 @@ pub enum CliFeatureStatus {
Active(Slot),
}
#[derive(Serialize, Deserialize)]
impl PartialOrd for CliFeatureStatus {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for CliFeatureStatus {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Self::Inactive, Self::Inactive) => Ordering::Equal,
(Self::Inactive, _) => Ordering::Greater,
(_, Self::Inactive) => Ordering::Less,
(Self::Pending, Self::Pending) => Ordering::Equal,
(Self::Pending, _) => Ordering::Greater,
(_, Self::Pending) => Ordering::Less,
(Self::Active(self_active_slot), Self::Active(other_active_slot)) => {
self_active_slot.cmp(other_active_slot)
}
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct CliFeature {
pub id: String,
@@ -62,11 +85,28 @@ pub struct CliFeature {
pub status: CliFeatureStatus,
}
impl PartialOrd for CliFeature {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for CliFeature {
fn cmp(&self, other: &Self) -> Ordering {
match self.status.cmp(&other.status) {
Ordering::Equal => self.id.cmp(&other.id),
ordering => ordering,
}
}
}
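The net effect of these `Ord` impls, once `process_status` calls `features.sort_unstable()` below, is that active features list first (ordered by activation slot), then pending, then inactive, with ties broken by feature id. A small check of the status ordering:

fn sort_order_demo() {
    let mut statuses = vec![
        CliFeatureStatus::Inactive,
        CliFeatureStatus::Active(200),
        CliFeatureStatus::Pending,
        CliFeatureStatus::Active(100),
    ];
    statuses.sort_unstable();
    // Ascending order: Active(100), Active(200), Pending, Inactive.
    assert!(matches!(statuses[0], CliFeatureStatus::Active(100)));
    assert!(matches!(statuses[3], CliFeatureStatus::Inactive));
}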
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeatures {
pub features: Vec<CliFeature>,
pub feature_activation_allowed: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub cluster_feature_sets: Option<CliClusterFeatureSets>,
#[serde(skip)]
pub inactive: bool,
}
@@ -93,11 +133,16 @@ impl fmt::Display for CliFeatures {
CliFeatureStatus::Inactive => style("inactive".to_string()).red(),
CliFeatureStatus::Pending => style("activation pending".to_string()).yellow(),
CliFeatureStatus::Active(activation_slot) =>
style(format!("active since slot {}", activation_slot)).green(),
style(format!("active since slot {:>9}", activation_slot)).green(),
},
feature.description,
)?;
}
if let Some(feature_sets) = &self.cluster_feature_sets {
write!(f, "{}", feature_sets)?;
}
if self.inactive && !self.feature_activation_allowed {
writeln!(
f,
@@ -114,6 +159,191 @@ impl fmt::Display for CliFeatures {
impl QuietDisplay for CliFeatures {}
impl VerboseDisplay for CliFeatures {}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliClusterFeatureSets {
pub tool_feature_set: u32,
pub feature_sets: Vec<CliFeatureSet>,
#[serde(skip)]
pub stake_allowed: bool,
#[serde(skip)]
pub rpc_allowed: bool,
}
impl fmt::Display for CliClusterFeatureSets {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut tool_feature_set_matches_cluster = false;
let software_versions_title = "Software Version";
let feature_set_title = "Feature Set";
let stake_percent_title = "Stake";
let rpc_percent_title = "RPC";
let mut max_software_versions_len = software_versions_title.len();
let mut max_feature_set_len = feature_set_title.len();
let mut max_stake_percent_len = stake_percent_title.len();
let mut max_rpc_percent_len = rpc_percent_title.len();
let feature_sets: Vec<_> = self
.feature_sets
.iter()
.map(|feature_set_info| {
let me = if self.tool_feature_set == feature_set_info.feature_set {
tool_feature_set_matches_cluster = true;
true
} else {
false
};
let software_versions: Vec<_> = feature_set_info
.software_versions
.iter()
.map(ToString::to_string)
.collect();
let software_versions = software_versions.join(", ");
let feature_set = if feature_set_info.feature_set == 0 {
"unknown".to_string()
} else {
feature_set_info.feature_set.to_string()
};
let stake_percent = format!("{:.2}%", feature_set_info.stake_percent);
let rpc_percent = format!("{:.2}%", feature_set_info.rpc_percent);
max_software_versions_len = max_software_versions_len.max(software_versions.len());
max_feature_set_len = max_feature_set_len.max(feature_set.len());
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
(
software_versions,
feature_set,
stake_percent,
rpc_percent,
me,
)
})
.collect();
if !tool_feature_set_matches_cluster {
writeln!(
f,
"\n{}",
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
.bold())?;
} else {
if !self.stake_allowed {
write!(
f,
"\n{}",
style("To activate features the stake must be >= 95%")
.bold()
.red()
)?;
}
if !self.rpc_allowed {
write!(
f,
"\n{}",
style("To activate features the RPC nodes must be >= 95%")
.bold()
.red()
)?;
}
}
writeln!(
f,
"\n\n{}",
style(format!("Tool Feature Set: {}", self.tool_feature_set)).bold()
)?;
writeln!(
f,
"{}",
style(format!(
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
max_software_versions_len,
software_versions_title,
max_feature_set_len,
feature_set_title,
max_stake_percent_len,
stake_percent_title,
max_rpc_percent_len,
rpc_percent_title,
))
.bold(),
)?;
for (software_versions, feature_set, stake_percent, rpc_percent, me) in feature_sets {
writeln!(
f,
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
max_software_versions_len,
software_versions,
max_feature_set_len,
feature_set,
max_stake_percent_len,
stake_percent,
max_rpc_percent_len,
rpc_percent,
if me { "<-- me" } else { "" },
)?;
}
writeln!(f)
}
}
impl QuietDisplay for CliClusterFeatureSets {}
impl VerboseDisplay for CliClusterFeatureSets {}
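The format strings above lean on positional width arguments: `{1:<0$}` means "render argument 1, padded to the width supplied by argument 0", with `<` left-aligning and `>` right-aligning. A minimal illustration:

fn width_args_demo() {
    let width = 16usize;
    // Prints "1.9.4" left-aligned in a 16-column field.
    println!("{1:<0$}", width, "1.9.4");
    // Prints "95.00%" right-aligned in the same 16-column field.
    println!("{1:>0$}", width, "95.00%");
}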
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeatureSet {
software_versions: Vec<CliVersion>,
feature_set: u32,
stake_percent: f64,
rpc_percent: f32,
}
#[derive(Eq, PartialEq, Ord, PartialOrd)]
struct CliVersion(Option<semver::Version>);
impl fmt::Display for CliVersion {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let s = match &self.0 {
None => "unknown".to_string(),
Some(version) => version.to_string(),
};
write!(f, "{}", s)
}
}
impl FromStr for CliVersion {
type Err = semver::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let version_option = if s == "unknown" {
None
} else {
Some(semver::Version::from_str(s)?)
};
Ok(CliVersion(version_option))
}
}
impl Serialize for CliVersion {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
impl<'de> Deserialize<'de> for CliVersion {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s: &str = Deserialize::deserialize(deserializer)?;
CliVersion::from_str(s).map_err(serde::de::Error::custom)
}
}
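Taken together, `FromStr`, `Display`, and the custom serde impls make `CliVersion` round-trip as a plain string, with "unknown" standing in for a missing version. A quick sketch:

fn cli_version_round_trip() {
    // "unknown" maps to CliVersion(None); anything else must parse as semver.
    let unknown: CliVersion = "unknown".parse().unwrap();
    assert_eq!(unknown.to_string(), "unknown");
    let known: CliVersion = "1.9.4".parse().unwrap();
    assert_eq!(known.to_string(), "1.9.4");
    assert!("one.nine.four".parse::<CliVersion>().is_err());
}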
pub trait FeatureSubCommands {
fn feature_subcommands(self) -> Self;
}
@@ -330,7 +560,10 @@ fn feature_set_stats(rpc_client: &RpcClient) -> Result<FeatureSetStats, ClientEr
}
// Feature activation is only allowed when 95% of the active stake is on the current feature set
fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<bool, ClientError> {
fn feature_activation_allowed(
rpc_client: &RpcClient,
quiet: bool,
) -> Result<(bool, Option<CliClusterFeatureSets>), ClientError> {
let my_feature_set = solana_version::Version::default().feature_set;
let feature_set_stats = feature_set_stats(rpc_client)?;
@@ -346,54 +579,43 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
)
.unwrap_or((false, false));
if !quiet {
if feature_set_stats.get(&my_feature_set).is_none() {
println!(
"{}",
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
.bold());
} else {
if !stake_allowed {
print!(
"\n{}",
style("To activate features the stake must be >= 95%")
.bold()
.red()
);
}
if !rpc_allowed {
print!(
"\n{}",
style("To activate features the RPC nodes must be >= 95%")
.bold()
.red()
);
}
}
println!(
"\n\n{}",
style(format!("Tool Feature Set: {}", my_feature_set)).bold()
);
let mut feature_set_stats = feature_set_stats.into_iter().collect::<Vec<_>>();
feature_set_stats.sort_by(|l, r| {
match l.1.software_versions[0]
.cmp(&r.1.software_versions[0])
let cluster_feature_sets = if quiet {
None
} else {
let mut feature_sets = feature_set_stats
.into_iter()
.map(
|(
feature_set,
FeatureSetStatsEntry {
stake_percent,
rpc_nodes_percent: rpc_percent,
software_versions,
},
)| {
CliFeatureSet {
software_versions: software_versions.into_iter().map(CliVersion).collect(),
feature_set,
stake_percent,
rpc_percent,
}
},
)
.collect::<Vec<_>>();
feature_sets.sort_by(|l, r| {
match l.software_versions[0]
.cmp(&r.software_versions[0])
.reverse()
{
Ordering::Equal => {
match l
.1
.stake_percent
.partial_cmp(&r.1.stake_percent)
.partial_cmp(&r.stake_percent)
.unwrap()
.reverse()
{
Ordering::Equal => {
l.1.rpc_nodes_percent
.partial_cmp(&r.1.rpc_nodes_percent)
.unwrap()
.reverse()
l.rpc_percent.partial_cmp(&r.rpc_percent).unwrap().reverse()
}
o => o,
}
@@ -401,96 +623,15 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
o => o,
}
});
Some(CliClusterFeatureSets {
tool_feature_set: my_feature_set,
feature_sets,
stake_allowed,
rpc_allowed,
})
};
let software_versions_title = "Software Version";
let feature_set_title = "Feature Set";
let stake_percent_title = "Stake";
let rpc_percent_title = "RPC";
let mut stats_output = Vec::new();
let mut max_software_versions_len = software_versions_title.len();
let mut max_feature_set_len = feature_set_title.len();
let mut max_stake_percent_len = stake_percent_title.len();
let mut max_rpc_percent_len = rpc_percent_title.len();
for (
feature_set,
FeatureSetStatsEntry {
stake_percent,
rpc_nodes_percent,
software_versions,
},
) in feature_set_stats.into_iter()
{
let me = feature_set == my_feature_set;
let feature_set = if feature_set == 0 {
"unknown".to_string()
} else {
feature_set.to_string()
};
let stake_percent = format!("{:.2}%", stake_percent);
let rpc_percent = format!("{:.2}%", rpc_nodes_percent);
let mut has_unknown = false;
let mut software_versions = software_versions
.iter()
.filter_map(|v| {
if v.is_none() {
has_unknown = true;
}
v.as_ref()
})
.map(ToString::to_string)
.collect::<Vec<_>>();
if has_unknown {
software_versions.push("unknown".to_string());
}
let software_versions = software_versions.join(", ");
max_software_versions_len = max_software_versions_len.max(software_versions.len());
max_feature_set_len = max_feature_set_len.max(feature_set.len());
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
stats_output.push((
software_versions,
feature_set,
stake_percent,
rpc_percent,
me,
));
}
println!(
"{}",
style(format!(
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
max_software_versions_len,
software_versions_title,
max_feature_set_len,
feature_set_title,
max_stake_percent_len,
stake_percent_title,
max_rpc_percent_len,
rpc_percent_title,
))
.bold(),
);
for (software_versions, feature_set, stake_percent, rpc_percent, me) in stats_output {
println!(
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
max_software_versions_len,
software_versions,
max_feature_set_len,
feature_set,
max_stake_percent_len,
stake_percent,
max_rpc_percent_len,
rpc_percent,
if me { "<-- me" } else { "" },
);
}
println!();
}
Ok(stake_allowed && rpc_allowed)
Ok((stake_allowed && rpc_allowed, cluster_feature_sets))
}
fn status_from_account(account: Account) -> Option<CliFeatureStatus> {
@@ -550,10 +691,14 @@ fn process_status(
});
}
let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?;
features.sort_unstable();
let (feature_activation_allowed, cluster_feature_sets) =
feature_activation_allowed(rpc_client, features.len() <= 1)?;
let feature_set = CliFeatures {
features,
feature_activation_allowed,
cluster_feature_sets,
inactive,
};
Ok(config.output_format.formatted_string(&feature_set))
@@ -577,7 +722,7 @@ fn process_activate(
}
}
if !feature_activation_allowed(rpc_client, false)? {
if !feature_activation_allowed(rpc_client, false)?.0 {
match force {
ForceActivation::Almost =>
return Err("Add force argument once more to override the sanity check to force feature activation ".into()),

View File

@@ -16,6 +16,7 @@ use {
pub enum SpendAmount {
All,
Some(u64),
RentExempt,
}
impl Default for SpendAmount {
@@ -90,6 +91,7 @@ where
0,
from_pubkey,
fee_pubkey,
0,
build_message,
)?;
Ok((message, spend))
@@ -97,6 +99,12 @@ where
let from_balance = rpc_client
.get_balance_with_commitment(from_pubkey, commitment)?
.value;
let from_rent_exempt_minimum = if amount == SpendAmount::RentExempt {
let data = rpc_client.get_account_data(from_pubkey)?;
rpc_client.get_minimum_balance_for_rent_exemption(data.len())?
} else {
0
};
let (message, SpendAndFee { spend, fee }) = resolve_spend_message(
rpc_client,
amount,
@@ -104,6 +112,7 @@ where
from_balance,
from_pubkey,
fee_pubkey,
from_rent_exempt_minimum,
build_message,
)?;
if from_pubkey == fee_pubkey {
@@ -140,6 +149,7 @@ fn resolve_spend_message<F>(
from_balance: u64,
from_pubkey: &Pubkey,
fee_pubkey: &Pubkey,
from_rent_exempt_minimum: u64,
build_message: F,
) -> Result<(Message, SpendAndFee), CliError>
where
@@ -176,5 +186,20 @@ where
},
))
}
SpendAmount::RentExempt => {
let mut lamports = if from_pubkey == fee_pubkey {
from_balance.saturating_sub(fee)
} else {
from_balance
};
lamports = lamports.saturating_sub(from_rent_exempt_minimum);
Ok((
build_message(lamports),
SpendAndFee {
spend: lamports,
fee,
},
))
}
}
}
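The `RentExempt` arm reduces to simple saturating arithmetic: leave the rent-exempt minimum behind, and take the fee off the top first when the sender is also the fee payer. A standalone sketch of that math:

fn rent_exempt_spend(from_balance: u64, fee: u64, rent_exempt_minimum: u64, pays_own_fee: bool) -> u64 {
    let available = if pays_own_fee {
        from_balance.saturating_sub(fee)
    } else {
        from_balance
    };
    available.saturating_sub(rent_exempt_minimum)
}

// e.g. balance 10_000, fee 5_000, minimum 2_439, self-funded fee:
// spendable = 10_000 - 5_000 - 2_439 = 2_561 lamports.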

File diff suppressed because it is too large

View File

@@ -462,18 +462,27 @@ pub fn process_show_account(
let mut account_string = config.output_format.formatted_string(&cli_account);
if config.output_format == OutputFormat::Display
|| config.output_format == OutputFormat::DisplayVerbose
{
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
match config.output_format {
OutputFormat::Json | OutputFormat::JsonCompact => {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(account_string.as_bytes())?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account to {}", output_file)?;
}
}
OutputFormat::Display | OutputFormat::DisplayVerbose => {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
}
}
OutputFormat::DisplayQuiet => (),
}
Ok(account_string)
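The display path keeps using the pretty_hex crate to render raw account bytes. A small standalone sketch of that call, assuming pretty_hex is declared as a dependency:

use pretty_hex::*;

fn main() {
    let data = vec![0u8, 1, 2, 3, 0xde, 0xad, 0xbe, 0xef];
    // hex_dump() wraps the bytes in a type whose Debug impl is a
    // canonical offset/hex/ascii dump, matching the CLI's output.
    println!("{:?}", data.hex_dump());
}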


@@ -59,7 +59,13 @@ fn test_stake_delegation_force() {
authorized_voter: None,
authorized_withdrawer,
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();


@@ -4,6 +4,7 @@ use {
spend_utils::SpendAmount,
test_utils::check_recent_balance,
},
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
solana_client::{
blockhash_query::{self, BlockhashQuery},
rpc_client::RpcClient,
@@ -12,7 +13,7 @@ use {
solana_sdk::{
account_utils::StateMut,
commitment_config::CommitmentConfig,
signature::{Keypair, Signer},
signature::{Keypair, NullSigner, Signer},
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidator,
@@ -49,7 +50,13 @@ fn test_vote_authorize_and_withdraw() {
authorized_voter: None,
authorized_withdrawer: config.signers[0].pubkey(),
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
let vote_account = rpc_client
@@ -93,7 +100,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_authorized_pubkey: first_withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
@@ -112,7 +125,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 1,
new_authorized: Some(1),
};
@@ -126,7 +145,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 1,
new_authorized: Some(2),
};
@@ -146,7 +171,13 @@ fn test_vote_authorize_and_withdraw() {
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(100),
destination_account_pubkey: destination_account,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
let expected_balance = expected_balance - 100;
@@ -160,7 +191,13 @@ fn test_vote_authorize_and_withdraw() {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
@@ -172,8 +209,283 @@ fn test_vote_authorize_and_withdraw() {
withdraw_authority: 1,
destination_account_pubkey: destination_account,
memo: None,
fee_payer: 0,
};
process_command(&config).unwrap();
check_recent_balance(0, &rpc_client, &vote_account_pubkey);
check_recent_balance(expected_balance, &rpc_client, &destination_account);
}
#[test]
fn test_offline_vote_authorize_and_withdraw() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
let default_signer = Keypair::new();
let mut config_payer = CliConfig::recent_for_tests();
config_payer.json_rpc_url = test_validator.rpc_url();
config_payer.signers = vec![&default_signer];
let mut config_offline = CliConfig::recent_for_tests();
config_offline.json_rpc_url = String::default();
config_offline.command = CliCommand::ClusterVersion;
let offline_keypair = Keypair::new();
config_offline.signers = vec![&offline_keypair];
// Verify that we cannot reach the cluster
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(
&rpc_client,
&config_payer,
&config_payer.signers[0].pubkey(),
100_000,
)
.unwrap();
check_recent_balance(100_000, &rpc_client, &config_payer.signers[0].pubkey());
request_and_confirm_airdrop(
&rpc_client,
&config_offline,
&config_offline.signers[0].pubkey(),
100_000,
)
.unwrap();
check_recent_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey());
// Create vote account with specific withdrawer
let vote_account_keypair = Keypair::new();
let vote_account_pubkey = vote_account_keypair.pubkey();
config_payer.signers = vec![&default_signer, &vote_account_keypair];
config_payer.command = CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 0,
authorized_voter: None,
authorized_withdrawer: offline_keypair.pubkey(),
commission: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
let vote_account = rpc_client
.get_account(&vote_account_keypair.pubkey())
.unwrap();
let vote_state: VoteStateVersions = vote_account.state().unwrap();
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
assert_eq!(authorized_withdrawer, offline_keypair.pubkey());
let expected_balance = rpc_client
.get_minimum_balance_for_rent_exemption(VoteState::size_of())
.unwrap()
.max(1);
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
// Transfer in some more SOL
config_payer.signers = vec![&default_signer];
config_payer.command = CliCommand::Transfer {
amount: SpendAmount::Some(1_000),
to: vote_account_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
derived_address_seed: None,
derived_address_program_id: None,
};
process_command(&config_payer).unwrap();
let expected_balance = expected_balance + 1_000;
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
// Authorize vote account withdrawal to another signer, offline
let withdraw_authority = Keypair::new();
let blockhash = rpc_client.get_latest_blockhash().unwrap();
config_offline.command = CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[0].pubkey())
.unwrap();
config_payer.signers = vec![&offline_presigner];
config_payer.command = CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
authorized: 0,
new_authorized: None,
};
process_command(&config_payer).unwrap();
let vote_account = rpc_client
.get_account(&vote_account_keypair.pubkey())
.unwrap();
let vote_state: VoteStateVersions = vote_account.state().unwrap();
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
assert_eq!(authorized_withdrawer, withdraw_authority.pubkey());
// Withdraw from vote account offline
let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
let blockhash = rpc_client.get_latest_blockhash().unwrap();
let fee_payer_null_signer = NullSigner::new(&default_signer.pubkey());
config_offline.signers = vec![&fee_payer_null_signer, &withdraw_authority];
config_offline.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(100),
destination_account_pubkey: destination_account,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[1].pubkey())
.unwrap();
config_payer.signers = vec![&default_signer, &offline_presigner];
config_payer.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(100),
destination_account_pubkey: destination_account,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
let expected_balance = expected_balance - 100;
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
check_recent_balance(100, &rpc_client, &destination_account);
// Re-assign validator identity offline
let blockhash = rpc_client.get_latest_blockhash().unwrap();
let new_identity_keypair = Keypair::new();
let new_identity_null_signer = NullSigner::new(&new_identity_keypair.pubkey());
config_offline.signers = vec![
&fee_payer_null_signer,
&withdraw_authority,
&new_identity_null_signer,
];
config_offline.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_offline).unwrap();
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[1].pubkey())
.unwrap();
config_payer.signers = vec![&default_signer, &offline_presigner, &new_identity_keypair];
config_payer.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_payer).unwrap();
// Close vote account offline. Must use WithdrawFromVoteAccount and specify amount, since
// CloseVoteAccount requires RpcClient
let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
config_offline.signers = vec![&fee_payer_null_signer, &withdraw_authority];
config_offline.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(expected_balance),
destination_account_pubkey: destination_account,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
process_command(&config_offline).unwrap();
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[1].pubkey())
.unwrap();
config_payer.signers = vec![&default_signer, &offline_presigner];
config_payer.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
withdraw_amount: SpendAmount::Some(expected_balance),
destination_account_pubkey: destination_account,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
memo: None,
fee_payer: 0,
};
let result = process_command(&config_payer).unwrap();
println!("{:?}", result);
check_recent_balance(0, &rpc_client, &vote_account_pubkey);
println!("what");
check_recent_balance(expected_balance, &rpc_client, &destination_account);
}
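The test drives the CLI's generic offline flow: the offline host signs with sign_only: true against a pinned blockhash, listing a NullSigner for keys it does not hold, and the online host substitutes a Presigner built from the emitted signature. A minimal sketch of just the signature hand-off, independent of the CLI plumbing:

use solana_sdk::{
    hash::Hash,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Presigner, Signer},
    system_instruction,
    transaction::Transaction,
};

fn main() {
    // The offline host holds the authority; the fee payer key lives online.
    let authority = Keypair::new();
    let fee_payer = Keypair::new();

    let ix = system_instruction::transfer(&authority.pubkey(), &Pubkey::new_unique(), 1);
    let message = Message::new(&[ix], Some(&fee_payer.pubkey()));
    let blockhash = Hash::new_unique(); // stands in for the pinned cluster blockhash

    // Offline: partial sign; only the authority's signature slot is filled.
    let mut offline_tx = Transaction::new_unsigned(message.clone());
    offline_tx.try_partial_sign(&[&authority], blockhash).unwrap();
    let authority_sig = offline_tx.signatures[1]; // slot 0 belongs to the fee payer

    // Online: a Presigner replays (pubkey, signature) in the authority's place.
    let presigner = Presigner::new(&authority.pubkey(), &authority_sig);
    let signers: Vec<&dyn Signer> = vec![&fee_payer, &presigner];
    let mut online_tx = Transaction::new_unsigned(message);
    online_tx.try_sign(&signers, blockhash).unwrap();
    assert!(online_tx.verify().is_ok());
}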


@@ -1,6 +1,6 @@
[package]
name = "solana-client-test"
version = "1.9.0"
version = "1.9.4"
description = "Solana RPC Test"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,22 +12,24 @@ edition = "2021"
[dependencies]
serde_json = "1.0.72"
serial_test = "0.5.1"
solana-client = { path = "../client", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-test-validator = { path = "../test-validator", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.4" }
solana-ledger = { path = "../ledger", version = "=1.9.4" }
solana-measure = { path = "../measure", version = "=1.9.4" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.4" }
solana-metrics = { path = "../metrics", version = "=1.9.4" }
solana-perf = { path = "../perf", version = "=1.9.4" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.4" }
solana-rpc = { path = "../rpc", version = "=1.9.4" }
solana-runtime = { path = "../runtime", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-streamer = { path = "../streamer", version = "=1.9.4" }
solana-test-validator = { path = "../test-validator", version = "=1.9.4" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
solana-version = { path = "../version", version = "=1.9.4" }
systemstat = "0.1.10"
[dev-dependencies]
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -4,11 +4,16 @@ use {
solana_client::{
pubsub_client::PubsubClient,
rpc_client::RpcClient,
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
rpc_response::SlotInfo,
rpc_config::{
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
RpcProgramAccountsConfig,
},
rpc_response::{RpcBlockUpdate, SlotInfo},
},
solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
solana_rpc::{
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
rpc::create_test_transactions_and_populate_blockstore,
rpc_pubsub_service::{PubSubConfig, PubSubService},
rpc_subscriptions::RpcSubscriptions,
},
@@ -20,7 +25,7 @@ use {
},
solana_sdk::{
clock::Slot,
commitment_config::CommitmentConfig,
commitment_config::{CommitmentConfig, CommitmentLevel},
native_token::sol_to_lamports,
pubkey::Pubkey,
rpc_port,
@@ -29,11 +34,12 @@ use {
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidator,
solana_transaction_status::{TransactionDetails, UiTransactionEncoding},
std::{
collections::HashSet,
net::{IpAddr, SocketAddr},
sync::{
atomic::{AtomicBool, Ordering},
atomic::{AtomicBool, AtomicU64, Ordering},
Arc, RwLock,
},
thread::sleep,
@@ -119,9 +125,10 @@ fn test_account_subscription() {
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bob = Keypair::new();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@@ -194,6 +201,112 @@ fn test_account_subscription() {
assert_eq!(errors, [].to_vec());
}
#[test]
#[serial]
fn test_block_subscription() {
// setup BankForks
let exit = Arc::new(AtomicBool::new(false));
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
..
} = create_genesis_config(10_000);
let bank = Bank::new_for_tests(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
// setup Blockstore
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let blockstore = Arc::new(blockstore);
// populate ledger with test txs
let bank = bank_forks.read().unwrap().working_bank();
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
vec![&alice, &keypair1, &keypair2, &keypair3],
0,
bank,
blockstore.clone(),
max_complete_transaction_status_slot,
);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
// setup RpcSubscriptions && PubSubService
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests_with_blockstore(
&exit,
max_complete_transaction_status_slot,
blockstore.clone(),
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
));
let pubsub_addr = SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
rpc_port::DEFAULT_RPC_PUBSUB_PORT,
);
let pub_cfg = PubSubConfig {
enable_block_subscription: true,
..PubSubConfig::default()
};
let (trigger, pubsub_service) = PubSubService::new(pub_cfg, &subscriptions, pubsub_addr);
std::thread::sleep(Duration::from_millis(400));
// setup PubsubClient
let (mut client, receiver) = PubsubClient::block_subscribe(
&format!("ws://0.0.0.0:{}/", pubsub_addr.port()),
RpcBlockSubscribeFilter::All,
Some(RpcBlockSubscribeConfig {
commitment: Some(CommitmentConfig {
commitment: CommitmentLevel::Confirmed,
}),
encoding: Some(UiTransactionEncoding::Json),
transaction_details: Some(TransactionDetails::Signatures),
show_rewards: None,
}),
)
.unwrap();
// trigger Gossip notification
let slot = bank_forks.read().unwrap().highest_slot();
subscriptions.notify_gossip_subscribers(slot);
let maybe_actual = receiver.recv_timeout(Duration::from_millis(400));
match maybe_actual {
Ok(actual) => {
let complete_block = blockstore.get_complete_block(slot, false).unwrap();
let block = complete_block.clone().configure(
UiTransactionEncoding::Json,
TransactionDetails::Signatures,
false,
);
let expected = RpcBlockUpdate {
slot,
block: Some(block),
err: None,
};
let block = complete_block.configure(
UiTransactionEncoding::Json,
TransactionDetails::Signatures,
false,
);
assert_eq!(actual.value.slot, expected.slot);
assert!(block.eq(&actual.value.block.unwrap()));
}
Err(e) => {
eprintln!("unexpected websocket receive timeout");
assert_eq!(Some(e), None);
}
}
// cleanup
exit.store(true, Ordering::Relaxed);
trigger.cancel();
client.shutdown().unwrap();
pubsub_service.close().unwrap();
}
#[test]
#[serial]
fn test_program_subscription() {
@@ -215,9 +328,10 @@ fn test_program_subscription() {
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bob = Keypair::new();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@@ -300,9 +414,10 @@ fn test_root_subscription() {
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@@ -350,8 +465,10 @@ fn test_slot_subscription() {
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,


@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.9.0"
version = "1.9.4"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -23,15 +23,15 @@ semver = "1.0.4"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" }
solana-faucet = { path = "../faucet", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" }
solana-faucet = { path = "../faucet", version = "=1.9.4" }
solana-net-utils = { path = "../net-utils", version = "=1.9.4" }
solana-measure = { path = "../measure", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
solana-version = { path = "../version", version = "=1.9.4" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.4" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tungstenite = { version = "0.16.0", features = ["rustls-tls-webpki-roots"] }
@@ -40,7 +40,7 @@ url = "2.2.2"
[dev-dependencies]
assert_matches = "1.5.0"
jsonrpc-http-server = "18.0.0"
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,12 +1,13 @@
use {
crate::{
rpc_config::{
RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig,
RpcTransactionLogsConfig, RpcTransactionLogsFilter,
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig,
RpcTransactionLogsFilter,
},
rpc_response::{
Response as RpcResponse, RpcKeyedAccount, RpcLogsResponse, RpcSignatureResult,
SlotInfo, SlotUpdate,
Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse,
RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate,
},
},
log::*,
@@ -173,6 +174,12 @@ pub type SignatureSubscription = (
Receiver<RpcResponse<RpcSignatureResult>>,
);
pub type PubsubBlockClientSubscription = PubsubClientSubscription<RpcResponse<RpcBlockUpdate>>;
pub type BlockSubscription = (
PubsubBlockClientSubscription,
Receiver<RpcResponse<RpcBlockUpdate>>,
);
pub type PubsubProgramClientSubscription = PubsubClientSubscription<RpcResponse<RpcKeyedAccount>>;
pub type ProgramSubscription = (
PubsubProgramClientSubscription,
@@ -185,6 +192,9 @@ pub type AccountSubscription = (
Receiver<RpcResponse<UiAccount>>,
);
pub type PubsubVoteClientSubscription = PubsubClientSubscription<RpcVote>;
pub type VoteSubscription = (PubsubVoteClientSubscription, Receiver<RpcVote>);
pub type PubsubRootClientSubscription = PubsubClientSubscription<Slot>;
pub type RootSubscription = (PubsubRootClientSubscription, Receiver<Slot>);
@@ -266,6 +276,45 @@ impl PubsubClient {
Ok((result, receiver))
}
pub fn block_subscribe(
url: &str,
filter: RpcBlockSubscribeFilter,
config: Option<RpcBlockSubscribeConfig>,
) -> Result<BlockSubscription, PubsubClientError> {
let url = Url::parse(url)?;
let socket = connect_with_retry(url)?;
let (sender, receiver) = channel();
let socket = Arc::new(RwLock::new(socket));
let socket_clone = socket.clone();
let exit = Arc::new(AtomicBool::new(false));
let exit_clone = exit.clone();
let body = json!({
"jsonrpc":"2.0",
"id":1,
"method":"blockSubscribe",
"params":[filter, config]
})
.to_string();
let subscription_id = PubsubBlockClientSubscription::send_subscribe(&socket_clone, body)?;
let t_cleanup = std::thread::spawn(move || {
Self::cleanup_with_sender(exit_clone, &socket_clone, sender)
});
let result = PubsubClientSubscription {
message_type: PhantomData,
operation: "blocks",
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};
Ok((result, receiver))
}
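The new block_subscribe helper mirrors the other subscriptions: open a websocket, send a blockSubscribe JSON-RPC request, and forward notifications over a channel. A usage sketch against a node with block subscriptions enabled (the endpoint is a placeholder):

use solana_client::{
    pubsub_client::PubsubClient,
    rpc_config::{RpcBlockSubscribeConfig, RpcBlockSubscribeFilter},
};

fn main() {
    let (mut client, receiver) = PubsubClient::block_subscribe(
        "ws://127.0.0.1:8900/", // placeholder pubsub endpoint
        RpcBlockSubscribeFilter::All,
        Some(RpcBlockSubscribeConfig::default()),
    )
    .unwrap();

    // Each notification carries the slot and, when retrievable, the block.
    if let Ok(update) = receiver.recv() {
        println!(
            "slot {}: block present: {}",
            update.value.slot,
            update.value.block.is_some()
        );
    }
    client.shutdown().unwrap();
}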
pub fn logs_subscribe(
url: &str,
filter: RpcTransactionLogsFilter,
@@ -346,6 +395,39 @@ impl PubsubClient {
Ok((result, receiver))
}
pub fn vote_subscribe(url: &str) -> Result<VoteSubscription, PubsubClientError> {
let url = Url::parse(url)?;
let socket = connect_with_retry(url)?;
let (sender, receiver) = channel();
let socket = Arc::new(RwLock::new(socket));
let socket_clone = socket.clone();
let exit = Arc::new(AtomicBool::new(false));
let exit_clone = exit.clone();
let body = json!({
"jsonrpc":"2.0",
"id":1,
"method":"voteSubscribe",
})
.to_string();
let subscription_id = PubsubVoteClientSubscription::send_subscribe(&socket_clone, body)?;
let t_cleanup = std::thread::spawn(move || {
Self::cleanup_with_sender(exit_clone, &socket_clone, sender)
});
let result = PubsubClientSubscription {
message_type: PhantomData,
operation: "vote",
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};
Ok((result, receiver))
}
pub fn root_subscribe(url: &str) -> Result<RootSubscription, PubsubClientError> {
let url = Url::parse(url)?;
let socket = connect_with_retry(url)?;


@@ -1329,7 +1329,7 @@ impl RpcClient {
/// # Ok::<(), ClientError>(())
/// ```
pub fn get_highest_snapshot_slot(&self) -> ClientResult<RpcSnapshotSlotInfo> {
if self.get_node_version()? < semver::Version::new(1, 8, 0) {
if self.get_node_version()? < semver::Version::new(1, 9, 0) {
#[allow(deprecated)]
self.get_snapshot_slot().map(|full| RpcSnapshotSlotInfo {
full,
@@ -4747,7 +4747,7 @@ impl RpcClient {
commitment: CommitmentConfig,
) -> ClientResult<(Hash, u64)> {
let (blockhash, last_valid_block_height) =
if self.get_node_version()? < semver::Version::new(1, 8, 0) {
if self.get_node_version()? < semver::Version::new(1, 9, 0) {
let Fees {
blockhash,
last_valid_block_height,
@@ -4781,7 +4781,7 @@ impl RpcClient {
blockhash: &Hash,
commitment: CommitmentConfig,
) -> ClientResult<bool> {
let result = if self.get_node_version()? < semver::Version::new(1, 8, 0) {
let result = if self.get_node_version()? < semver::Version::new(1, 9, 0) {
self.get_fee_calculator_for_blockhash_with_commitment(blockhash, commitment)?
.value
.is_some()
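These hunks raise the client's version gate from 1.8.0 to 1.9.0, so the deprecated fee RPCs are only used against pre-1.9 nodes. The shape of the check, sketched standalone with the semver crate:

fn main() {
    let node_version = semver::Version::parse("1.8.14").unwrap();

    // Nodes older than 1.9.0 do not serve the newer blockhash endpoints,
    // so the client routes through the deprecated equivalents.
    if node_version < semver::Version::new(1, 9, 0) {
        println!("fall back to deprecated fee RPCs");
    } else {
        println!("use getLatestBlockhash / isBlockhashValid");
    }
}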


@@ -182,6 +182,23 @@ pub struct RpcSignatureSubscribeConfig {
pub enable_received_notification: Option<bool>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcBlockSubscribeFilter {
All,
MentionsAccountOrProgram(String),
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockSubscribeConfig {
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub encoding: Option<UiTransactionEncoding>,
pub transaction_details: Option<TransactionDetails>,
pub show_rewards: Option<bool>,
}
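Because commitment carries #[serde(flatten)] and the struct renames to camelCase, a populated config serializes with the commitment key inlined beside the other options. An approximate sketch of the resulting wire shape via serde_json (all values invented):

use serde_json::json;

fn main() {
    // Approximate params array for blockSubscribe: the filter enum is
    // externally tagged, and `commitment` flattens into the config object.
    let params = json!([
        { "mentionsAccountOrProgram": "11111111111111111111111111111111" },
        {
            "commitment": "confirmed",
            "encoding": "json",
            "transactionDetails": "signatures",
            "showRewards": false
        }
    ]);
    println!("{}", params);
}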
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignaturesForAddressConfig {


@@ -9,9 +9,10 @@ use {
transaction::{Result, TransactionError},
},
solana_transaction_status::{
ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus,
ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock,
},
std::{collections::HashMap, fmt, net::SocketAddr},
thiserror::Error,
};
pub type RpcResult<T> = client_error::Result<Response<T>>;
@@ -424,6 +425,20 @@ pub struct RpcInflationReward {
pub commission: Option<u8>, // Vote account commission when the reward was credited
}
#[derive(Clone, Deserialize, Serialize, Debug, Error, Eq, PartialEq)]
pub enum RpcBlockUpdateError {
#[error("block store error")]
BlockStoreError,
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockUpdate {
pub slot: Slot,
pub block: Option<UiConfirmedBlock>,
pub err: Option<RpcBlockUpdateError>,
}
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
let ConfirmedTransactionStatusWithSignature {


@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.9.0"
version = "1.9.4"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"
@@ -26,7 +26,7 @@ fs_extra = "1.2.0"
histogram = "0.6.9"
itertools = "0.10.1"
log = "0.4.14"
lru = "0.7.0"
lru = "0.7.1"
rand = "0.7.0"
rand_chacha = "0.2.2"
raptorq = "1.6.4"
@@ -34,30 +34,31 @@ rayon = "1.5.1"
retain_mut = "0.1.5"
serde = "1.0.130"
serde_derive = "1.0.103"
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.0" }
solana-client = { path = "../client", version = "=1.9.0" }
solana-entry = { path = "../entry", version = "=1.9.0" }
solana-gossip = { path = "../gossip", version = "=1.9.0" }
solana-ledger = { path = "../ledger", version = "=1.9.0" }
solana-logger = { path = "../logger", version = "=1.9.0" }
solana-measure = { path = "../measure", version = "=1.9.0" }
solana-metrics = { path = "../metrics", version = "=1.9.0" }
solana-net-utils = { path = "../net-utils", version = "=1.9.0" }
solana-perf = { path = "../perf", version = "=1.9.0" }
solana-poh = { path = "../poh", version = "=1.9.0" }
solana-rpc = { path = "../rpc", version = "=1.9.0" }
solana-replica-lib = { path = "../replica-lib", version = "=1.9.0" }
solana-runtime = { path = "../runtime", version = "=1.9.0" }
solana-sdk = { path = "../sdk", version = "=1.9.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" }
solana-streamer = { path = "../streamer", version = "=1.9.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.0" }
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.4" }
solana-client = { path = "../client", version = "=1.9.4" }
solana-entry = { path = "../entry", version = "=1.9.4" }
solana-gossip = { path = "../gossip", version = "=1.9.4" }
solana-ledger = { path = "../ledger", version = "=1.9.4" }
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-measure = { path = "../measure", version = "=1.9.4" }
solana-metrics = { path = "../metrics", version = "=1.9.4" }
solana-net-utils = { path = "../net-utils", version = "=1.9.4" }
solana-perf = { path = "../perf", version = "=1.9.4" }
solana-poh = { path = "../poh", version = "=1.9.4" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.4" }
solana-rpc = { path = "../rpc", version = "=1.9.4" }
solana-replica-lib = { path = "../replica-lib", version = "=1.9.4" }
solana-runtime = { path = "../runtime", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.4" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.4" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.4" }
solana-streamer = { path = "../streamer", version = "=1.9.4" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.4" }
tempfile = "3.2.0"
thiserror = "1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.4" }
sys-info = "0.9.1"
tokio = { version = "1", features = ["full"] }
trees = "0.4.2"
@@ -71,9 +72,9 @@ matches = "0.1.9"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde_json = "1.0.72"
serial_test = "0.5.1"
solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.0" }
solana-version = { path = "../version", version = "=1.9.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.4" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.4" }
solana-version = { path = "../version", version = "=1.9.4" }
static_assertions = "1.1.0"
systemstat = "0.1.10"


@@ -20,7 +20,7 @@ use {
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
},
solana_perf::{packet::to_packets_chunked, test_tx::test_tx},
solana_perf::{packet::to_packet_batches, test_tx::test_tx},
solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry},
solana_runtime::{bank::Bank, cost_model::CostModel},
solana_sdk::{
@@ -77,11 +77,11 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let tx = test_tx();
let len = 4096;
let chunk_size = 1024;
let batches = to_packets_chunked(&vec![tx; len], chunk_size);
let mut packets = VecDeque::new();
let batches = to_packet_batches(&vec![tx; len], chunk_size);
let mut packet_batches = VecDeque::new();
for batch in batches {
let batch_len = batch.packets.len();
packets.push_back((batch, vec![0usize; batch_len], false));
packet_batches.push_back((batch, vec![0usize; batch_len], false));
}
let (s, _r) = unbounded();
// This tests the performance of buffering packets.
@@ -91,7 +91,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
&my_pubkey,
std::u128::MAX,
&poh_recorder,
&mut packets,
&mut packet_batches,
None,
&s,
None::<Box<dyn Fn()>>,
@@ -206,7 +206,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions, PACKETS_PER_BATCH);
let verified: Vec<_> = to_packet_batches(&transactions, PACKETS_PER_BATCH);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(


@@ -100,7 +100,11 @@ fn bench_retransmitter(bencher: &mut Bencher) {
let slot = 0;
let parent = 0;
let shredder = Shredder::new(slot, parent, 0, 0).unwrap();
let mut data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
let (mut data_shreds, _) = shredder.entries_to_shreds(
&keypair, &entries, true, // is_last_in_slot
0, // next_shred_index
0, // next_code_index
);
let num_packets = data_shreds.len();


@@ -40,16 +40,14 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
);
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
let data_shreds = shredder
.entries_to_data_shreds(
&Keypair::new(),
&entries,
true, // is_last_in_slot
0, // next_shred_index
0, // fec_set_offset
&mut ProcessShredsStats::default(),
)
.0;
let data_shreds = shredder.entries_to_data_shreds(
&Keypair::new(),
&entries,
true, // is_last_in_slot
0, // next_shred_index
0, // fec_set_offset
&mut ProcessShredsStats::default(),
);
assert!(data_shreds.len() >= num_shreds);
data_shreds
}
@@ -76,7 +74,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
let entries = create_ticks(num_ticks, 0, Hash::default());
bencher.iter(|| {
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
shredder.entries_to_shreds(&kp, &entries, true, 0);
shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
})
}
@@ -95,7 +93,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
// 1Mb
bencher.iter(|| {
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
shredder.entries_to_shreds(&kp, &entries, true, 0);
shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
})
}
@@ -108,7 +106,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
let data_shreds = shredder.entries_to_shreds(&kp, &entries, true, 0).0;
let (data_shreds, _) = shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
bencher.iter(|| {
let raw = &mut Shredder::deshred(&data_shreds).unwrap();
assert_ne!(raw.len(), 0);
@@ -135,6 +133,7 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
Shredder::generate_coding_shreds(
&data_shreds[..symbol_count],
true, // is_last_in_slot
0, // next_code_index
)
.len();
})
@@ -147,6 +146,7 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
let coding_shreds = Shredder::generate_coding_shreds(
&data_shreds[..symbol_count],
true, // is_last_in_slot
0, // next_code_index
);
bencher.iter(|| {
Shredder::try_recovery(coding_shreds[..].to_vec()).unwrap();


@@ -8,7 +8,7 @@ use {
log::*,
rand::{thread_rng, Rng},
solana_core::{sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage},
solana_perf::{packet::to_packets_chunked, test_tx::test_tx},
solana_perf::{packet::to_packet_batches, test_tx::test_tx},
solana_sdk::{
hash::Hash,
signature::{Keypair, Signer},
@@ -28,7 +28,7 @@ fn bench_packet_discard(bencher: &mut Bencher) {
let len = 30 * 1000;
let chunk_size = 1024;
let tx = test_tx();
let mut batches = to_packets_chunked(&vec![tx; len], chunk_size);
let mut batches = to_packet_batches(&vec![tx; len], chunk_size);
let mut total = 0;
@@ -37,7 +37,7 @@ fn bench_packet_discard(bencher: &mut Bencher) {
.map(|_| {
let mut addr = [0u16; 8];
thread_rng().fill(&mut addr);
addr
std::net::IpAddr::from(addr)
})
.collect();
@@ -54,7 +54,7 @@ fn bench_packet_discard(bencher: &mut Bencher) {
SigVerifyStage::discard_excess_packets(&mut batches, 10_000);
for batch in batches.iter_mut() {
for p in batch.packets.iter_mut() {
p.meta.discard = false;
p.meta.set_discard(false);
}
}
});
@@ -74,7 +74,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
let chunk_size = 1024;
let mut batches = if use_same_tx {
let tx = test_tx();
to_packets_chunked(&vec![tx; len], chunk_size)
to_packet_batches(&vec![tx; len], chunk_size)
} else {
let from_keypair = Keypair::new();
let to_keypair = Keypair::new();
@@ -89,7 +89,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
)
})
.collect();
to_packets_chunked(&txs, chunk_size)
to_packet_batches(&txs, chunk_size)
};
trace!(


@@ -14,7 +14,7 @@ use {
solana_ledger::{blockstore::Blockstore, shred::SIZE_OF_NONCE},
solana_measure::measure::Measure,
solana_perf::{
packet::{limited_deserialize, Packet, Packets},
packet::{limited_deserialize, Packet, PacketBatch},
recycler::Recycler,
},
solana_runtime::bank::Bank,
@@ -23,7 +23,7 @@ use {
pubkey::Pubkey,
timing::timestamp,
},
solana_streamer::streamer::{self, PacketReceiver},
solana_streamer::streamer::{self, PacketBatchReceiver},
std::{
collections::HashSet,
net::UdpSocket,
@@ -197,7 +197,7 @@ impl AncestorHashesService {
/// Listen for responses to our ancestors hashes repair requests
fn run_responses_listener(
ancestor_hashes_request_statuses: Arc<DashMap<Slot, DeadSlotAncestorRequestStatus>>,
response_receiver: PacketReceiver,
response_receiver: PacketBatchReceiver,
blockstore: Arc<Blockstore>,
outstanding_requests: Arc<RwLock<OutstandingAncestorHashesRepairs>>,
exit: Arc<AtomicBool>,
@@ -240,7 +240,7 @@ impl AncestorHashesService {
/// Process messages from the network
fn process_new_packets_from_channel(
ancestor_hashes_request_statuses: &DashMap<Slot, DeadSlotAncestorRequestStatus>,
response_receiver: &PacketReceiver,
response_receiver: &PacketBatchReceiver,
blockstore: &Blockstore,
outstanding_requests: &RwLock<OutstandingAncestorHashesRepairs>,
stats: &mut AncestorHashesResponsesStats,
@@ -249,17 +249,17 @@ impl AncestorHashesService {
retryable_slots_sender: &RetryableSlotsSender,
) -> Result<()> {
let timeout = Duration::new(1, 0);
let mut responses = vec![response_receiver.recv_timeout(timeout)?];
let mut total_packets = responses[0].packets.len();
let mut packet_batches = vec![response_receiver.recv_timeout(timeout)?];
let mut total_packets = packet_batches[0].packets.len();
let mut dropped_packets = 0;
while let Ok(more) = response_receiver.try_recv() {
total_packets += more.packets.len();
while let Ok(batch) = response_receiver.try_recv() {
total_packets += batch.packets.len();
if total_packets < *max_packets {
// Drop the rest in the channel in case of DOS
responses.push(more);
packet_batches.push(batch);
} else {
dropped_packets += more.packets.len();
dropped_packets += batch.packets.len();
}
}
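The loop above drains whatever is queued on the channel but stops buffering once max_packets is reached, counting the overflow as dropped rather than accumulating it. The same pattern in isolation, using the crossbeam_channel crate (names and limits invented):

use crossbeam_channel::unbounded;

fn main() {
    let (sender, receiver) = unbounded::<Vec<u8>>();
    for i in 0..10 {
        sender.send(vec![0u8; i]).unwrap();
    }

    let max_items = 5;
    let mut kept = Vec::new();
    let mut dropped = 0;
    // Drain everything queued; keep at most max_items, count the rest.
    while let Ok(batch) = receiver.try_recv() {
        if kept.len() < max_items {
            kept.push(batch);
        } else {
            dropped += 1;
        }
    }
    println!("kept {}, dropped {}", kept.len(), dropped);
}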
@@ -267,10 +267,10 @@ impl AncestorHashesService {
stats.total_packets += total_packets;
let mut time = Measure::start("ancestor_hashes::handle_packets");
for response in responses {
Self::process_single_packets(
for packet_batch in packet_batches {
Self::process_packet_batch(
ancestor_hashes_request_statuses,
response,
packet_batch,
stats,
outstanding_requests,
blockstore,
@@ -289,16 +289,16 @@ impl AncestorHashesService {
Ok(())
}
fn process_single_packets(
fn process_packet_batch(
ancestor_hashes_request_statuses: &DashMap<Slot, DeadSlotAncestorRequestStatus>,
packets: Packets,
packet_batch: PacketBatch,
stats: &mut AncestorHashesResponsesStats,
outstanding_requests: &RwLock<OutstandingAncestorHashesRepairs>,
blockstore: &Blockstore,
duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
retryable_slots_sender: &RetryableSlotsSender,
) {
packets.packets.iter().for_each(|packet| {
packet_batch.packets.iter().for_each(|packet| {
let decision = Self::verify_and_process_ancestor_response(
packet,
ancestor_hashes_request_statuses,
@@ -328,7 +328,7 @@ impl AncestorHashesService {
blockstore: &Blockstore,
) -> Option<(Slot, DuplicateAncestorDecision)> {
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size - SIZE_OF_NONCE])
limited_deserialize(&packet.data[..packet.meta.size.saturating_sub(SIZE_OF_NONCE)])
.ok()
.and_then(|ancestor_hashes_response| {
// Verify the response
@@ -871,7 +871,7 @@ mod test {
t_listen: JoinHandle<()>,
exit: Arc<AtomicBool>,
responder_info: ContactInfo,
response_receiver: PacketReceiver,
response_receiver: PacketBatchReceiver,
correct_bank_hashes: HashMap<Slot, Hash>,
}
@@ -1033,15 +1033,6 @@ mod test {
is_frozen,
);
/*{
let w_bank_forks = bank_forks.write().unwrap();
assert!(w_bank_forks.get(dead_slot).is_none());
let parent = w_bank_forks.get(dead_slot - 1).unwrap().clone();
let dead_bank = Bank::new_from_parent(&parent, &Pubkey::default(), dead_slot);
bank_forks.insert(dead_bank);
}*/
// Create slots [slot, slot + num_ancestors) with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(dead_slot, dead_slot, 5);
blockstore
@@ -1369,6 +1360,34 @@ mod test {
assert!(ancestor_hashes_request_statuses.is_empty());
}
#[test]
fn test_verify_and_process_ancestor_responses_invalid_packet() {
let bank0 = Bank::default_for_tests();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
let ManageAncestorHashesState {
ancestor_hashes_request_statuses,
outstanding_requests,
..
} = ManageAncestorHashesState::new(bank_forks);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
// Create invalid packet with fewer bytes than the size of the nonce
let mut packet = Packet::default();
packet.meta.size = 0;
assert!(AncestorHashesService::verify_and_process_ancestor_response(
&packet,
&ancestor_hashes_request_statuses,
&mut AncestorHashesResponsesStats::default(),
&outstanding_requests,
&blockstore,
)
.is_none());
}
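The saturating_sub change is what this new test targets: a malformed packet whose size is smaller than the trailing nonce previously underflowed the slice bound, panicking in debug builds (and producing an out-of-range slice in release). The arithmetic in isolation, with the nonce size inlined for illustration:

fn main() {
    const SIZE_OF_NONCE: usize = 4; // illustrative; the real constant lives in the shred module
    let packet_size: usize = 0; // malformed: smaller than the trailing nonce

    // Plain subtraction would underflow here; saturating_sub clamps to
    // zero, yielding an empty payload slice instead of a panic.
    let payload_len = packet_size.saturating_sub(SIZE_OF_NONCE);
    assert_eq!(payload_len, 0);

    let data = [0u8; 8];
    let payload = &data[..payload_len];
    assert!(payload.is_empty());
}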
#[test]
fn test_ancestor_hashes_service_manage_ancestor_hashes_after_replay_dump() {
let dead_slot = MAX_ANCESTOR_RESPONSES as Slot;

File diff suppressed because it is too large


@@ -496,6 +496,7 @@ pub mod test {
&keypair,
&data_shreds[0..],
true, // is_last_in_slot
0, // next_code_index
&mut ProcessShredsStats::default(),
)
.unwrap();


@@ -28,6 +28,7 @@ pub(super) struct BroadcastDuplicatesRun {
config: BroadcastDuplicatesConfig,
current_slot: Slot,
next_shred_index: u32,
next_code_index: u32,
shred_version: u16,
recent_blockhash: Option<Hash>,
prev_entry_hash: Option<Hash>,
@@ -46,6 +47,7 @@ impl BroadcastDuplicatesRun {
Self {
config,
next_shred_index: u32::MAX,
next_code_index: 0,
shred_version,
current_slot: 0,
recent_blockhash: None,
@@ -74,6 +76,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
if bank.slot() != self.current_slot {
self.next_shred_index = 0;
self.next_code_index = 0;
self.current_slot = bank.slot();
self.prev_entry_hash = None;
self.num_slots_broadcasted += 1;
@@ -154,22 +157,26 @@ impl BroadcastRun for BroadcastDuplicatesRun {
)
.expect("Expected to create a new shredder");
let (data_shreds, _, _) = shredder.entries_to_shreds(
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
keypair,
&receive_results.entries,
last_tick_height == bank.max_tick_height() && last_entries.is_none(),
self.next_shred_index,
self.next_code_index,
);
self.next_shred_index += data_shreds.len() as u32;
if let Some(index) = coding_shreds.iter().map(Shred::index).max() {
self.next_code_index = index + 1;
}
let last_shreds = last_entries.map(|(original_last_entry, duplicate_extra_last_entries)| {
let (original_last_data_shred, _, _) =
shredder.entries_to_shreds(keypair, &[original_last_entry], true, self.next_shred_index);
let (original_last_data_shred, _) =
shredder.entries_to_shreds(keypair, &[original_last_entry], true, self.next_shred_index, self.next_code_index);
let (partition_last_data_shred, _, _) =
let (partition_last_data_shred, _) =
// Don't mark the last shred as last so that validators won't know that
// they've gotten all the shreds, and will continue trying to repair
shredder.entries_to_shreds(keypair, &duplicate_extra_last_entries, true, self.next_shred_index);
shredder.entries_to_shreds(keypair, &duplicate_extra_last_entries, true, self.next_shred_index, self.next_code_index);
let sigs: Vec<_> = partition_last_data_shred.iter().map(|s| (s.signature(), s.index())).collect();
info!(


@@ -10,6 +10,7 @@ pub(super) struct BroadcastFakeShredsRun {
last_blockhash: Hash,
partition: usize,
shred_version: u16,
next_code_index: u32,
}
impl BroadcastFakeShredsRun {
@@ -18,6 +19,7 @@ impl BroadcastFakeShredsRun {
last_blockhash: Hash::default(),
partition,
shred_version,
next_code_index: 0,
}
}
}
@@ -52,11 +54,12 @@ impl BroadcastRun for BroadcastFakeShredsRun {
)
.expect("Expected to create a new shredder");
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
keypair,
&receive_results.entries,
last_tick_height == bank.max_tick_height(),
next_shred_index,
self.next_code_index,
);
// If the last blockhash is default, a new block is being created
@@ -69,13 +72,23 @@ impl BroadcastRun for BroadcastFakeShredsRun {
.map(|_| Entry::new(&self.last_blockhash, 0, vec![]))
.collect();
let (fake_data_shreds, fake_coding_shreds, _) = shredder.entries_to_shreds(
let (fake_data_shreds, fake_coding_shreds) = shredder.entries_to_shreds(
keypair,
&fake_entries,
last_tick_height == bank.max_tick_height(),
next_shred_index,
self.next_code_index,
);
if let Some(index) = coding_shreds
.iter()
.chain(&fake_coding_shreds)
.map(Shred::index)
.max()
{
self.next_code_index = index + 1;
}
// If it's the last tick, reset the last block hash to default
// this will cause next run to grab last bank's blockhash
if last_tick_height == bank.max_tick_height() {


@@ -21,6 +21,7 @@ pub(super) struct ReceiveResults {
#[derive(Clone)]
pub struct UnfinishedSlotInfo {
pub next_shred_index: u32,
pub(crate) next_code_index: u32,
pub slot: Slot,
pub parent: Slot,
// Data shreds buffered to make a batch of size


@@ -15,6 +15,7 @@ pub(super) struct FailEntryVerificationBroadcastRun {
good_shreds: Vec<Shred>,
current_slot: Slot,
next_shred_index: u32,
next_code_index: u32,
cluster_nodes_cache: Arc<ClusterNodesCache<BroadcastStage>>,
}
@@ -29,6 +30,7 @@ impl FailEntryVerificationBroadcastRun {
good_shreds: vec![],
current_slot: 0,
next_shred_index: 0,
next_code_index: 0,
cluster_nodes_cache,
}
}
@@ -50,6 +52,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
if bank.slot() != self.current_slot {
self.next_shred_index = 0;
self.next_code_index = 0;
self.current_slot = bank.slot();
}
@@ -83,22 +86,26 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
)
.expect("Expected to create a new shredder");
let (data_shreds, _, _) = shredder.entries_to_shreds(
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
keypair,
&receive_results.entries,
last_tick_height == bank.max_tick_height() && last_entries.is_none(),
self.next_shred_index,
self.next_code_index,
);
self.next_shred_index += data_shreds.len() as u32;
if let Some(index) = coding_shreds.iter().map(Shred::index).max() {
self.next_code_index = index + 1;
}
let last_shreds = last_entries.map(|(good_last_entry, bad_last_entry)| {
let (good_last_data_shred, _, _) =
shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index);
let (good_last_data_shred, _) =
shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index, self.next_code_index);
let (bad_last_data_shred, _, _) =
let (bad_last_data_shred, _) =
// Don't mark the last shred as last so that validators won't know that
// they've gotten all the shreds, and will continue trying to repair
shredder.entries_to_shreds(keypair, &[bad_last_entry], false, self.next_shred_index);
shredder.entries_to_shreds(keypair, &[bad_last_entry], false, self.next_shred_index, self.next_code_index);
self.next_shred_index += 1;
(good_last_data_shred, bad_last_data_shred)


@@ -119,17 +119,16 @@ impl StandardBroadcastRun {
None => (0, 0),
},
};
let (data_shreds, next_shred_index) =
Shredder::new(slot, parent_slot, reference_tick, self.shred_version)
.unwrap()
.entries_to_data_shreds(
keypair,
entries,
is_slot_end,
next_shred_index,
fec_set_offset,
process_stats,
);
let data_shreds = Shredder::new(slot, parent_slot, reference_tick, self.shred_version)
.unwrap()
.entries_to_data_shreds(
keypair,
entries,
is_slot_end,
next_shred_index,
fec_set_offset,
process_stats,
);
let mut data_shreds_buffer = match &mut self.unfinished_slot {
Some(state) => {
assert_eq!(state.slot, slot);
@@ -138,8 +137,17 @@ impl StandardBroadcastRun {
None => Vec::default(),
};
data_shreds_buffer.extend(data_shreds.clone());
let next_shred_index = match data_shreds.iter().map(Shred::index).max() {
Some(index) => index + 1,
None => next_shred_index,
};
let next_code_index = match &self.unfinished_slot {
Some(state) => state.next_code_index,
None => 0,
};
self.unfinished_slot = Some(UnfinishedSlotInfo {
next_shred_index,
next_code_index,
slot,
parent: parent_slot,
data_shreds_buffer,
@@ -446,23 +454,40 @@ fn make_coding_shreds(
is_slot_end: bool,
stats: &mut ProcessShredsStats,
) -> Vec<Shred> {
let data_shreds = match unfinished_slot {
None => Vec::default(),
Some(unfinished_slot) => {
let size = unfinished_slot.data_shreds_buffer.len();
// Consume a multiple of 32, unless this is the slot end.
let offset = if is_slot_end {
0
} else {
size % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize
};
unfinished_slot
.data_shreds_buffer
.drain(0..size - offset)
.collect()
}
let unfinished_slot = match unfinished_slot {
None => return Vec::default(),
Some(state) => state,
};
Shredder::data_shreds_to_coding_shreds(keypair, &data_shreds, is_slot_end, stats).unwrap()
let data_shreds: Vec<_> = {
let size = unfinished_slot.data_shreds_buffer.len();
// Consume a multiple of 32, unless this is the slot end.
let offset = if is_slot_end {
0
} else {
size % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize
};
unfinished_slot
.data_shreds_buffer
.drain(0..size - offset)
.collect()
};
let shreds = Shredder::data_shreds_to_coding_shreds(
keypair,
&data_shreds,
is_slot_end,
unfinished_slot.next_code_index,
stats,
)
.unwrap();
if let Some(index) = shreds
.iter()
.filter(|shred| shred.is_code())
.map(Shred::index)
.max()
{
unfinished_slot.next_code_index = unfinished_slot.next_code_index.max(index + 1);
}
shreds
}
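make_coding_shreds consumes buffered data shreds only in multiples of MAX_DATA_SHREDS_PER_FEC_BLOCK (32) until the slot ends, then bumps next_code_index past the largest coding index it produced. The drain arithmetic in isolation (constant inlined for illustration):

fn main() {
    const MAX_DATA_SHREDS_PER_FEC_BLOCK: usize = 32;

    let buffered: usize = 70; // data shreds waiting in data_shreds_buffer
    for is_slot_end in [false, true] {
        // Consume a multiple of 32, unless this is the slot end.
        let offset = if is_slot_end {
            0
        } else {
            buffered % MAX_DATA_SHREDS_PER_FEC_BLOCK
        };
        let consumed = buffered - offset;
        // slot_end=false: consume 64, keep 6 buffered
        // slot_end=true:  consume 70, keep 0 buffered
        println!("slot_end={}: consume {}, keep {}", is_slot_end, consumed, offset);
    }
}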
impl BroadcastRun for StandardBroadcastRun {
@@ -579,6 +604,7 @@ mod test {
let parent = 0;
run.unfinished_slot = Some(UnfinishedSlotInfo {
next_shred_index,
next_code_index: 17,
slot,
parent,
data_shreds_buffer: Vec::default(),
@@ -596,7 +622,7 @@ mod test {
.expect("Expected a shred that signals an interrupt");
// Validate the shred
assert_eq!(shred.parent(), Some(parent));
assert_eq!(shred.parent().unwrap(), parent);
assert_eq!(shred.slot(), slot);
assert_eq!(shred.index(), next_shred_index);
assert!(shred.is_data());


@@ -13,7 +13,6 @@ use {
unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Select,
Sender as CrossbeamSender,
},
itertools::izip,
log::*,
solana_gossip::{
cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
@@ -22,7 +21,7 @@ use {
solana_ledger::blockstore::Blockstore,
solana_measure::measure::Measure,
solana_metrics::inc_new_counter_debug,
solana_perf::packet::{self, Packets},
solana_perf::packet::{self, PacketBatch},
solana_poh::poh_recorder::PohRecorder,
solana_rpc::{
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
@@ -32,12 +31,11 @@ use {
bank::Bank,
bank_forks::BankForks,
commitment::VOTE_THRESHOLD_SIZE,
epoch_stakes::{EpochAuthorizedVoters, EpochStakes},
epoch_stakes::EpochStakes,
vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
},
solana_sdk::{
clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT},
epoch_schedule::EpochSchedule,
clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT},
hash::Hash,
pubkey::Pubkey,
signature::Signature,
@@ -47,6 +45,7 @@ use {
solana_vote_program::{self, vote_state::Vote, vote_transaction},
std::{
collections::{HashMap, HashSet},
iter::repeat,
sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex, RwLock,
@@ -58,7 +57,6 @@ use {
// Map from a vote account to the authorized voter for an epoch
pub type ThresholdConfirmedSlots = Vec<(Slot, Hash)>;
pub type VotedHashUpdates = HashMap<Hash, Vec<Pubkey>>;
pub type VerifiedLabelVotePacketsSender = CrossbeamSender<Vec<VerifiedVoteMetadata>>;
pub type VerifiedLabelVotePacketsReceiver = CrossbeamReceiver<Vec<VerifiedVoteMetadata>>;
pub type VerifiedVoteTransactionsSender = CrossbeamSender<Vec<Transaction>>;
@@ -85,14 +83,14 @@ pub struct SlotVoteTracker {
}
impl SlotVoteTracker {
pub fn get_voted_slot_updates(&mut self) -> Option<Vec<Pubkey>> {
pub(crate) fn get_voted_slot_updates(&mut self) -> Option<Vec<Pubkey>> {
self.voted_slot_updates.take()
}
pub fn get_or_insert_optimistic_votes_tracker(&mut self, hash: Hash) -> &mut VoteStakeTracker {
fn get_or_insert_optimistic_votes_tracker(&mut self, hash: Hash) -> &mut VoteStakeTracker {
self.optimistic_votes_tracker.entry(hash).or_default()
}
pub fn optimistic_votes_tracker(&self, hash: &Hash) -> Option<&VoteStakeTracker> {
pub(crate) fn optimistic_votes_tracker(&self, hash: &Hash) -> Option<&VoteStakeTracker> {
self.optimistic_votes_tracker.get(hash)
}
}
@@ -101,82 +99,29 @@ impl SlotVoteTracker {
pub struct VoteTracker {
// Map from a slot to a set of validators who have voted for that slot
slot_vote_trackers: RwLock<HashMap<Slot, Arc<RwLock<SlotVoteTracker>>>>,
// Don't track votes from people who are not staked, acts as a spam filter
epoch_authorized_voters: RwLock<HashMap<Epoch, Arc<EpochAuthorizedVoters>>>,
leader_schedule_epoch: RwLock<Epoch>,
current_epoch: RwLock<Epoch>,
epoch_schedule: EpochSchedule,
}
impl VoteTracker {
pub fn new(root_bank: &Bank) -> Self {
let current_epoch = root_bank.epoch();
let vote_tracker = Self {
leader_schedule_epoch: RwLock::new(current_epoch),
current_epoch: RwLock::new(current_epoch),
epoch_schedule: *root_bank.epoch_schedule(),
..VoteTracker::default()
};
pub(crate) fn new(root_bank: &Bank) -> Self {
let vote_tracker = VoteTracker::default();
vote_tracker.progress_with_new_root_bank(root_bank);
assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(),
root_bank.get_leader_schedule_epoch(root_bank.slot())
);
assert_eq!(*vote_tracker.current_epoch.read().unwrap(), current_epoch,);
vote_tracker
}
pub fn get_or_insert_slot_tracker(&self, slot: Slot) -> Arc<RwLock<SlotVoteTracker>> {
let mut slot_tracker = self.slot_vote_trackers.read().unwrap().get(&slot).cloned();
if slot_tracker.is_none() {
let new_slot_tracker = Arc::new(RwLock::new(SlotVoteTracker {
voted: HashMap::new(),
optimistic_votes_tracker: HashMap::default(),
voted_slot_updates: None,
gossip_only_stake: 0,
}));
self.slot_vote_trackers
.write()
.unwrap()
.insert(slot, new_slot_tracker.clone());
slot_tracker = Some(new_slot_tracker);
fn get_or_insert_slot_tracker(&self, slot: Slot) -> Arc<RwLock<SlotVoteTracker>> {
if let Some(slot_vote_tracker) = self.slot_vote_trackers.read().unwrap().get(&slot) {
return slot_vote_tracker.clone();
}
slot_tracker.unwrap()
let mut slot_vote_trackers = self.slot_vote_trackers.write().unwrap();
slot_vote_trackers.entry(slot).or_default().clone()
}
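The rewritten get_or_insert_slot_tracker takes the shared read lock on the common hit path and only upgrades to the write lock on a miss, where entry(slot).or_default() tolerates a racing insert between the two lock acquisitions. A std-only sketch of the same pattern, with an empty Default-able stand-in for SlotVoteTracker:

```rust
use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
};

#[derive(Default)]
struct SlotTracker; // stand-in for SlotVoteTracker

struct Trackers {
    slots: RwLock<HashMap<u64, Arc<RwLock<SlotTracker>>>>,
}

impl Trackers {
    fn get_or_insert(&self, slot: u64) -> Arc<RwLock<SlotTracker>> {
        // Fast path: shared read lock for the common case of an existing entry.
        if let Some(tracker) = self.slots.read().unwrap().get(&slot) {
            return tracker.clone();
        }
        // Slow path: exclusive lock; entry() reuses a tracker inserted by a
        // racing writer between dropping the read guard and locking for write.
        let mut slots = self.slots.write().unwrap();
        slots.entry(slot).or_default().clone()
    }
}

fn main() {
    let trackers = Trackers { slots: RwLock::new(HashMap::new()) };
    let a = trackers.get_or_insert(7);
    let b = trackers.get_or_insert(7);
    assert!(Arc::ptr_eq(&a, &b)); // same tracker on repeat lookups
}
```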
pub fn get_slot_vote_tracker(&self, slot: Slot) -> Option<Arc<RwLock<SlotVoteTracker>>> {
pub(crate) fn get_slot_vote_tracker(&self, slot: Slot) -> Option<Arc<RwLock<SlotVoteTracker>>> {
self.slot_vote_trackers.read().unwrap().get(&slot).cloned()
}
pub fn get_authorized_voter(&self, pubkey: &Pubkey, slot: Slot) -> Option<Pubkey> {
let epoch = self.epoch_schedule.get_epoch(slot);
self.epoch_authorized_voters
.read()
.unwrap()
.get(&epoch)
.map(|epoch_authorized_voters| epoch_authorized_voters.get(pubkey))
.unwrap_or(None)
.cloned()
}
pub fn vote_contains_authorized_voter(
vote_tx: &Transaction,
authorized_voter: &Pubkey,
) -> bool {
let message = &vote_tx.message;
for (i, key) in message.account_keys.iter().enumerate() {
if message.is_signer(i) && key == authorized_voter {
return true;
}
}
false
}
#[cfg(test)]
pub fn insert_vote(&self, slot: Slot, pubkey: Pubkey) {
pub(crate) fn insert_vote(&self, slot: Slot, pubkey: Pubkey) {
let mut w_slot_vote_trackers = self.slot_vote_trackers.write().unwrap();
let slot_vote_tracker = w_slot_vote_trackers.entry(slot).or_default();
@@ -191,59 +136,16 @@ impl VoteTracker {
}
}
fn progress_leader_schedule_epoch(&self, root_bank: &Bank) {
// Update with any newly calculated epoch state about future epochs
let start_leader_schedule_epoch = *self.leader_schedule_epoch.read().unwrap();
let mut greatest_leader_schedule_epoch = start_leader_schedule_epoch;
for leader_schedule_epoch in
start_leader_schedule_epoch..=root_bank.get_leader_schedule_epoch(root_bank.slot())
{
let exists = self
.epoch_authorized_voters
.read()
.unwrap()
.contains_key(&leader_schedule_epoch);
if !exists {
let epoch_authorized_voters = root_bank
.epoch_stakes(leader_schedule_epoch)
.unwrap()
.epoch_authorized_voters()
.clone();
self.epoch_authorized_voters
.write()
.unwrap()
.insert(leader_schedule_epoch, epoch_authorized_voters);
greatest_leader_schedule_epoch = leader_schedule_epoch;
}
}
if greatest_leader_schedule_epoch != start_leader_schedule_epoch {
*self.leader_schedule_epoch.write().unwrap() = greatest_leader_schedule_epoch;
}
}
fn purge_stale_state(&self, root_bank: &Bank) {
// Purge any outdated slot data
let new_root = root_bank.slot();
let root_epoch = root_bank.epoch();
self.slot_vote_trackers
.write()
.unwrap()
.retain(|slot, _| *slot >= new_root);
let current_epoch = *self.current_epoch.read().unwrap();
if root_epoch != current_epoch {
// If root moved to a new epoch, purge outdated state
self.epoch_authorized_voters
.write()
.unwrap()
.retain(|epoch, _| *epoch >= root_epoch);
*self.current_epoch.write().unwrap() = root_epoch;
}
}
fn progress_with_new_root_bank(&self, root_bank: &Bank) {
self.progress_leader_schedule_epoch(root_bank);
self.purge_stale_state(root_bank);
}
}
@@ -294,10 +196,10 @@ pub struct ClusterInfoVoteListener {
impl ClusterInfoVoteListener {
#[allow(clippy::too_many_arguments)]
pub fn new(
exit: &Arc<AtomicBool>,
exit: Arc<AtomicBool>,
cluster_info: Arc<ClusterInfo>,
verified_packets_sender: CrossbeamSender<Vec<Packets>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_packets_sender: CrossbeamSender<Vec<PacketBatch>>,
poh_recorder: Arc<Mutex<PohRecorder>>,
vote_tracker: Arc<VoteTracker>,
bank_forks: Arc<RwLock<BankForks>>,
subscriptions: Arc<RpcSubscriptions>,
@@ -308,25 +210,26 @@ impl ClusterInfoVoteListener {
bank_notification_sender: Option<BankNotificationSender>,
cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender,
) -> Self {
let exit_ = exit.clone();
let (verified_vote_label_packets_sender, verified_vote_label_packets_receiver) =
unbounded();
let (verified_vote_transactions_sender, verified_vote_transactions_receiver) = unbounded();
let listen_thread = Builder::new()
.name("solana-cluster_info_vote_listener".to_string())
.spawn(move || {
let _ = Self::recv_loop(
exit_,
&cluster_info,
verified_vote_label_packets_sender,
verified_vote_transactions_sender,
);
})
.unwrap();
let listen_thread = {
let exit = exit.clone();
let bank_forks = bank_forks.clone();
Builder::new()
.name("solana-cluster_info_vote_listener".to_string())
.spawn(move || {
let _ = Self::recv_loop(
exit,
&cluster_info,
&bank_forks,
verified_vote_label_packets_sender,
verified_vote_transactions_sender,
);
})
.unwrap()
};
let exit_ = exit.clone();
let poh_recorder = poh_recorder.clone();
let bank_send_thread = Builder::new()
.name("solana-cluster_info_bank_send".to_string())
.spawn(move || {
@@ -339,12 +242,11 @@ impl ClusterInfoVoteListener {
})
.unwrap();
let exit_ = exit.clone();
let send_thread = Builder::new()
.name("solana-cluster_info_process_votes".to_string())
.spawn(move || {
let _ = Self::process_votes_loop(
exit_,
exit,
verified_vote_transactions_receiver,
vote_tracker,
bank_forks,
@@ -364,16 +266,14 @@ impl ClusterInfoVoteListener {
}
}
pub fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls {
thread_hdl.join()?;
}
Ok(())
pub(crate) fn join(self) -> thread::Result<()> {
self.thread_hdls.into_iter().try_for_each(JoinHandle::join)
}
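join now folds the handles through try_for_each, which short-circuits on the first panicked thread exactly like the removed loop but drops the explicit Ok(()) tail. A small sketch of the idiom:

```rust
use std::thread::{self, JoinHandle};

fn join_all(handles: Vec<JoinHandle<()>>) -> thread::Result<()> {
    // Short-circuits on the first Err, like the removed for-loop with `?`.
    handles.into_iter().try_for_each(JoinHandle::join)
}

fn main() {
    let handles: Vec<_> = (0..4).map(|i| thread::spawn(move || drop(i))).collect();
    join_all(handles).expect("no thread panicked");
}
```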
fn recv_loop(
exit: Arc<AtomicBool>,
cluster_info: &ClusterInfo,
bank_forks: &RwLock<BankForks>,
verified_vote_label_packets_sender: VerifiedLabelVotePacketsSender,
verified_vote_transactions_sender: VerifiedVoteTransactionsSender,
) -> Result<()> {
@@ -382,7 +282,7 @@ impl ClusterInfoVoteListener {
let votes = cluster_info.get_votes(&mut cursor);
inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len());
if !votes.is_empty() {
let (vote_txs, packets) = Self::verify_votes(votes);
let (vote_txs, packets) = Self::verify_votes(votes, bank_forks);
verified_vote_transactions_sender.send(vote_txs)?;
verified_vote_label_packets_sender.send(packets)?;
}
@@ -392,50 +292,52 @@ impl ClusterInfoVoteListener {
}
#[allow(clippy::type_complexity)]
fn verify_votes(votes: Vec<Transaction>) -> (Vec<Transaction>, Vec<VerifiedVoteMetadata>) {
let mut msgs = packet::to_packets_chunked(&votes, 1);
fn verify_votes(
votes: Vec<Transaction>,
bank_forks: &RwLock<BankForks>,
) -> (Vec<Transaction>, Vec<VerifiedVoteMetadata>) {
let mut packet_batches = packet::to_packet_batches(&votes, 1);
// Votes should already be filtered by this point.
let reject_non_vote = false;
sigverify::ed25519_verify_cpu(&mut msgs, reject_non_vote);
let (vote_txs, vote_metadata) = izip!(votes.into_iter(), msgs,)
.filter_map(|(vote_tx, packet)| {
let (vote, vote_account_key) = vote_transaction::parse_vote_transaction(&vote_tx)
.and_then(|(vote_account_key, vote, _)| {
if vote.slots.is_empty() {
None
} else {
Some((vote, vote_account_key))
}
})?;
// to_packets_chunked() above split into 1 packet long chunks
assert_eq!(packet.packets.len(), 1);
if !packet.packets[0].meta.discard {
if let Some(signature) = vote_tx.signatures.first().cloned() {
return Some((
vote_tx,
VerifiedVoteMetadata {
vote_account_key,
vote,
packet,
signature,
},
));
}
}
None
sigverify::ed25519_verify_cpu(&mut packet_batches, /*reject_non_vote=*/ false);
let root_bank = bank_forks.read().unwrap().root_bank();
let epoch_schedule = root_bank.epoch_schedule();
votes
.into_iter()
.zip(packet_batches)
.filter(|(_, packet_batch)| {
// to_packet_batches() above splits into 1 packet long batches
assert_eq!(packet_batch.packets.len(), 1);
!packet_batch.packets[0].meta.discard()
})
.unzip();
(vote_txs, vote_metadata)
.filter_map(|(tx, packet_batch)| {
let (vote_account_key, vote, _) = vote_transaction::parse_vote_transaction(&tx)?;
let slot = vote.last_voted_slot()?;
let epoch = epoch_schedule.get_epoch(slot);
let authorized_voter = root_bank
.epoch_stakes(epoch)?
.epoch_authorized_voters()
.get(&vote_account_key)?;
let mut keys = tx.message.account_keys.iter().enumerate();
if !keys.any(|(i, key)| tx.message.is_signer(i) && key == authorized_voter) {
return None;
}
let verified_vote_metadata = VerifiedVoteMetadata {
vote_account_key,
vote,
packet_batch,
signature: *tx.signatures.first()?,
};
Some((tx, verified_vote_metadata))
})
.unzip()
}
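verify_votes now resolves the authorized voter from the root bank's epoch stakes and checks inline that this key actually signed the transaction, replacing the removed vote_contains_authorized_voter helper. A std-only sketch of that signer check, assuming the Solana message layout in which the first num_required_signatures account keys are the signers (the types here are local stand-ins):

```rust
// Local stand-ins for Pubkey and the message header layout.
type Pubkey = [u8; 32];

struct Message {
    account_keys: Vec<Pubkey>,
    num_required_signatures: u8,
}

impl Message {
    fn is_signer(&self, i: usize) -> bool {
        // Signer keys come first in account_keys.
        i < self.num_required_signatures as usize
    }
}

fn signed_by_authorized_voter(message: &Message, authorized_voter: &Pubkey) -> bool {
    message
        .account_keys
        .iter()
        .enumerate()
        .any(|(i, key)| message.is_signer(i) && key == authorized_voter)
}

fn main() {
    let voter: Pubkey = [7; 32];
    let other: Pubkey = [9; 32];
    let message = Message {
        account_keys: vec![voter, other],
        num_required_signatures: 1,
    };
    assert!(signed_by_authorized_voter(&message, &voter));
    assert!(!signed_by_authorized_voter(&message, &other)); // present but not a signer
}
```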
fn bank_send_loop(
exit: Arc<AtomicBool>,
verified_vote_label_packets_receiver: VerifiedLabelVotePacketsReceiver,
poh_recorder: Arc<Mutex<PohRecorder>>,
verified_packets_sender: &CrossbeamSender<Vec<Packets>>,
verified_packets_sender: &CrossbeamSender<Vec<PacketBatch>>,
) -> Result<()> {
let mut verified_vote_packets = VerifiedVotePackets::default();
let mut time_since_lock = Instant::now();
@@ -457,7 +359,7 @@ impl ClusterInfoVoteListener {
) {
match e {
Error::CrossbeamRecvTimeout(RecvTimeoutError::Disconnected)
| Error::ReadyTimeout => (),
| Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout) => (),
_ => {
error!("thread {:?} error {:?}", thread::current().name(), e);
}
@@ -483,7 +385,7 @@ impl ClusterInfoVoteListener {
fn check_for_leader_bank_and_send_votes(
bank_vote_sender_state_option: &mut Option<BankVoteSenderState>,
current_working_bank: Arc<Bank>,
verified_packets_sender: &CrossbeamSender<Vec<Packets>>,
verified_packets_sender: &CrossbeamSender<Vec<PacketBatch>>,
verified_vote_packets: &VerifiedVotePackets,
) -> Result<()> {
// We will take this lock at most once every `BANK_SEND_VOTES_LOOP_SLEEP_MS`
@@ -555,7 +457,7 @@ impl ClusterInfoVoteListener {
return Ok(());
}
let root_bank = bank_forks.read().unwrap().root_bank().clone();
let root_bank = bank_forks.read().unwrap().root_bank();
if last_process_root.elapsed().as_millis() > DEFAULT_MS_PER_SLOT as u128 {
let unrooted_optimistic_slots = confirmation_verifier
.verify_for_unrooted_optimistic_slots(&root_bank, &blockstore);
@@ -786,39 +688,6 @@ impl ClusterInfoVoteListener {
}
}
fn filter_gossip_votes(
vote_tracker: &VoteTracker,
vote_pubkey: &Pubkey,
vote: &Vote,
gossip_tx: &Transaction,
) -> bool {
if vote.slots.is_empty() {
return false;
}
let last_vote_slot = vote.slots.last().unwrap();
// Votes from gossip need to be verified as they have not been
// verified by the replay pipeline. Determine the authorized voter
// based on the last vote slot. This will drop votes from authorized
// voters trying to make votes for slots earlier than the epoch for
// which they are authorized
let actual_authorized_voter =
vote_tracker.get_authorized_voter(vote_pubkey, *last_vote_slot);
if actual_authorized_voter.is_none() {
return false;
}
// Voting without the correct authorized pubkey, dump the vote
if !VoteTracker::vote_contains_authorized_voter(
gossip_tx,
&actual_authorized_voter.unwrap(),
) {
return false;
}
true
}
fn filter_and_confirm_with_new_votes(
vote_tracker: &VoteTracker,
gossip_vote_txs: Vec<Transaction>,
@@ -834,17 +703,12 @@ impl ClusterInfoVoteListener {
let mut new_optimistic_confirmed_slots = vec![];
// Process votes from gossip and ReplayStage
for (is_gossip, (vote_pubkey, vote, _)) in gossip_vote_txs
let votes = gossip_vote_txs
.iter()
.filter_map(|gossip_tx| {
vote_transaction::parse_vote_transaction(gossip_tx)
.filter(|(vote_pubkey, vote, _)| {
Self::filter_gossip_votes(vote_tracker, vote_pubkey, vote, gossip_tx)
})
.map(|v| (true, v))
})
.chain(replayed_votes.into_iter().map(|v| (false, v)))
{
.filter_map(vote_transaction::parse_vote_transaction)
.zip(repeat(/*is_gossip:*/ true))
.chain(replayed_votes.into_iter().zip(repeat(/*is_gossip:*/ false)));
for ((vote_pubkey, vote, _), is_gossip) in votes {
Self::track_new_votes_and_notify_confirmations(
vote,
&vote_pubkey,
@@ -960,7 +824,11 @@ mod tests {
signature::{Keypair, Signature, Signer},
},
solana_vote_program::vote_state::Vote,
std::{collections::BTreeSet, sync::Arc},
std::{
collections::BTreeSet,
iter::repeat_with,
sync::{atomic::AtomicU64, Arc},
},
};
#[test]
@@ -983,76 +851,9 @@ mod tests {
use bincode::serialized_size;
info!("max vote size {}", serialized_size(&vote_tx).unwrap());
let msgs = packet::to_packets_chunked(&[vote_tx], 1); // panics if won't fit
let packet_batches = packet::to_packet_batches(&[vote_tx], 1); // panics if won't fit
assert_eq!(msgs.len(), 1);
}
fn run_vote_contains_authorized_voter(hash: Option<Hash>) {
let node_keypair = Keypair::new();
let vote_keypair = Keypair::new();
let authorized_voter = Keypair::new();
let vote_tx = vote_transaction::new_vote_transaction(
vec![0],
Hash::default(),
Hash::default(),
&node_keypair,
&vote_keypair,
&authorized_voter,
hash,
);
// Check that the two signing keys pass the check
assert!(VoteTracker::vote_contains_authorized_voter(
&vote_tx,
&node_keypair.pubkey()
));
assert!(VoteTracker::vote_contains_authorized_voter(
&vote_tx,
&authorized_voter.pubkey()
));
// Non signing key shouldn't pass the check
assert!(!VoteTracker::vote_contains_authorized_voter(
&vote_tx,
&vote_keypair.pubkey()
));
// Set the authorized voter == vote keypair
let vote_tx = vote_transaction::new_vote_transaction(
vec![0],
Hash::default(),
Hash::default(),
&node_keypair,
&vote_keypair,
&vote_keypair,
hash,
);
// Check that the node_keypair and vote keypair pass the authorized voter check
assert!(VoteTracker::vote_contains_authorized_voter(
&vote_tx,
&node_keypair.pubkey()
));
assert!(VoteTracker::vote_contains_authorized_voter(
&vote_tx,
&vote_keypair.pubkey()
));
// The other keypair should not pass the check
assert!(!VoteTracker::vote_contains_authorized_voter(
&vote_tx,
&authorized_voter.pubkey()
));
}
#[test]
fn test_vote_contains_authorized_voter() {
run_vote_contains_authorized_voter(None);
run_vote_contains_authorized_voter(Some(Hash::default()));
assert_eq!(packet_batches.len(), 1);
}
#[test]
@@ -1088,15 +889,11 @@ mod tests {
.get_first_slot_in_epoch(current_epoch + 1),
);
vote_tracker.progress_with_new_root_bank(&new_epoch_bank);
assert_eq!(
*vote_tracker.current_epoch.read().unwrap(),
current_epoch + 1
);
}
#[test]
fn test_update_new_leader_schedule_epoch() {
let (vote_tracker, bank, _, _) = setup();
let (_, bank, _, _) = setup();
// Check outdated slots are purged with new root
let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot());
@@ -1114,25 +911,6 @@ mod tests {
bank.get_leader_schedule_epoch(next_leader_schedule_computed),
next_leader_schedule_epoch
);
let next_leader_schedule_bank =
Bank::new_from_parent(&bank, &Pubkey::default(), next_leader_schedule_computed);
vote_tracker.progress_leader_schedule_epoch(&next_leader_schedule_bank);
assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(),
next_leader_schedule_epoch
);
assert_eq!(
vote_tracker
.epoch_authorized_voters
.read()
.unwrap()
.get(&next_leader_schedule_epoch)
.unwrap(),
next_leader_schedule_bank
.epoch_stakes(next_leader_schedule_epoch)
.unwrap()
.epoch_authorized_voters()
);
}
#[test]
@@ -1574,59 +1352,6 @@ mod tests {
run_test_process_votes3(Some(Hash::default()));
}
#[test]
fn test_get_voters_by_epoch() {
// Create some voters at genesis
let (vote_tracker, bank, validator_voting_keypairs, _) = setup();
let last_known_epoch = bank.get_leader_schedule_epoch(bank.slot());
let last_known_slot = bank
.epoch_schedule()
.get_last_slot_in_epoch(last_known_epoch);
// Check we can get the authorized voters
for keypairs in &validator_voting_keypairs {
assert!(vote_tracker
.get_authorized_voter(&keypairs.vote_keypair.pubkey(), last_known_slot)
.is_some());
assert!(vote_tracker
.get_authorized_voter(&keypairs.vote_keypair.pubkey(), last_known_slot + 1)
.is_none());
}
// Create the set of relevant voters for the next epoch
let new_epoch = last_known_epoch + 1;
let first_slot_in_new_epoch = bank.epoch_schedule().get_first_slot_in_epoch(new_epoch);
let new_keypairs: Vec<_> = (0..10).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
let new_epoch_authorized_voters: HashMap<_, _> = new_keypairs
.iter()
.chain(validator_voting_keypairs[0..5].iter())
.map(|keypair| (keypair.vote_keypair.pubkey(), keypair.vote_keypair.pubkey()))
.collect();
vote_tracker
.epoch_authorized_voters
.write()
.unwrap()
.insert(new_epoch, Arc::new(new_epoch_authorized_voters));
// These keypairs made it into the new epoch
for keypairs in new_keypairs
.iter()
.chain(validator_voting_keypairs[0..5].iter())
{
assert!(vote_tracker
.get_authorized_voter(&keypairs.vote_keypair.pubkey(), first_slot_in_new_epoch)
.is_some());
}
// These keypairs were not refreshed in new epoch
for keypairs in validator_voting_keypairs[5..10].iter() {
assert!(vote_tracker
.get_authorized_voter(&keypairs.vote_keypair.pubkey(), first_slot_in_new_epoch)
.is_none());
}
}
#[test]
fn test_vote_tracker_references() {
// Create some voters at genesis
@@ -1646,8 +1371,10 @@ mod tests {
let vote_tracker = VoteTracker::new(&bank);
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,
@@ -1690,17 +1417,6 @@ mod tests {
// Setup next epoch
let old_epoch = bank.get_leader_schedule_epoch(bank.slot());
let new_epoch = old_epoch + 1;
let new_epoch_vote_accounts: HashMap<_, _> = vec![(
validator0_keypairs.vote_keypair.pubkey(),
validator0_keypairs.vote_keypair.pubkey(),
)]
.into_iter()
.collect();
vote_tracker
.epoch_authorized_voters
.write()
.unwrap()
.insert(new_epoch, Arc::new(new_epoch_vote_accounts));
// Test with votes across two epochs
let first_slot_in_new_epoch = bank.epoch_schedule().get_first_slot_in_epoch(new_epoch);
@@ -1765,36 +1481,15 @@ mod tests {
let bank = bank_forks.read().unwrap().get(0).unwrap().clone();
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,
));
// Integrity Checks
let current_epoch = bank.epoch();
let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot());
// Check the vote tracker has all the known epoch state on construction
for epoch in current_epoch..=leader_schedule_epoch {
assert_eq!(
vote_tracker
.epoch_authorized_voters
.read()
.unwrap()
.get(&epoch)
.unwrap(),
bank.epoch_stakes(epoch).unwrap().epoch_authorized_voters()
);
}
// Check the epoch state is correct
assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(),
leader_schedule_epoch,
);
assert_eq!(*vote_tracker.current_epoch.read().unwrap(), current_epoch);
(
Arc::new(vote_tracker),
bank,
@@ -1806,8 +1501,11 @@ mod tests {
#[test]
fn test_verify_votes_empty() {
solana_logger::setup();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new_for_tests(&genesis_config);
let bank_forks = RwLock::new(BankForks::new(bank));
let votes = vec![];
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes);
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks);
assert!(vote_txs.is_empty());
assert!(packets.is_empty());
}
@@ -1815,30 +1513,45 @@ mod tests {
fn verify_packets_len(packets: &[VerifiedVoteMetadata], ref_value: usize) {
let num_packets: usize = packets
.iter()
.map(|vote_metadata| vote_metadata.packet.packets.len())
.map(|vote_metadata| vote_metadata.packet_batch.packets.len())
.sum();
assert_eq!(num_packets, ref_value);
}
fn test_vote_tx(hash: Option<Hash>) -> Transaction {
let node_keypair = Keypair::new();
let vote_keypair = Keypair::new();
let auth_voter_keypair = Keypair::new();
fn test_vote_tx(
validator_vote_keypairs: Option<&ValidatorVoteKeypairs>,
hash: Option<Hash>,
) -> Transaction {
let other = ValidatorVoteKeypairs::new_rand();
let validator_vote_keypair = validator_vote_keypairs.unwrap_or(&other);
// TODO authorized_voter_keypair should be different from vote-keypair
// but that is what create_genesis_... currently generates.
vote_transaction::new_vote_transaction(
vec![0],
Hash::default(),
Hash::default(),
&node_keypair,
&vote_keypair,
&auth_voter_keypair,
&validator_vote_keypair.node_keypair,
&validator_vote_keypair.vote_keypair,
&validator_vote_keypair.vote_keypair, // authorized_voter_keypair
hash,
)
}
fn run_test_verify_votes_1_pass(hash: Option<Hash>) {
let vote_tx = test_vote_tx(hash);
let voting_keypairs: Vec<_> = repeat_with(ValidatorVoteKeypairs::new_rand)
.take(10)
.collect();
let GenesisConfigInfo { genesis_config, .. } =
genesis_utils::create_genesis_config_with_vote_accounts(
10_000, // mint_lamports
&voting_keypairs,
vec![100; voting_keypairs.len()], // stakes
);
let bank = Bank::new_for_tests(&genesis_config);
let bank_forks = RwLock::new(BankForks::new(bank));
let vote_tx = test_vote_tx(voting_keypairs.first(), hash);
let votes = vec![vote_tx];
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes);
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks);
assert_eq!(vote_txs.len(), 1);
verify_packets_len(&packets, 1);
}
@@ -1850,11 +1563,22 @@ mod tests {
}
fn run_test_bad_vote(hash: Option<Hash>) {
let vote_tx = test_vote_tx(hash);
let voting_keypairs: Vec<_> = repeat_with(ValidatorVoteKeypairs::new_rand)
.take(10)
.collect();
let GenesisConfigInfo { genesis_config, .. } =
genesis_utils::create_genesis_config_with_vote_accounts(
10_000, // mint_lamports
&voting_keypairs,
vec![100; voting_keypairs.len()], // stakes
);
let bank = Bank::new_for_tests(&genesis_config);
let bank_forks = RwLock::new(BankForks::new(bank));
let vote_tx = test_vote_tx(voting_keypairs.first(), hash);
let mut bad_vote = vote_tx.clone();
bad_vote.signatures[0] = Signature::default();
let votes = vec![vote_tx.clone(), bad_vote, vote_tx];
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes);
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks);
assert_eq!(vote_txs.len(), 2);
verify_packets_len(&packets, 2);
}

View File

@@ -97,11 +97,8 @@ impl AggregateCommitmentService {
return Ok(());
}
let mut aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
while let Ok(new_data) = receiver.try_recv() {
aggregation_data = new_data;
}
let aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
let aggregation_data = receiver.try_iter().last().unwrap_or(aggregation_data);
let ancestors = aggregation_data.bank.status_cache_ancestors();
if ancestors.is_empty() {
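This hunk, like LedgerCleanupService::receive_new_roots further below, replaces a hand-rolled while let Ok(...) drain with try_iter().last(): after one blocking recv_timeout, everything already queued is skimmed without blocking and only the newest value is kept. A minimal sketch:

```rust
use std::{sync::mpsc, time::Duration};

fn recv_latest(receiver: &mpsc::Receiver<u64>) -> Result<u64, mpsc::RecvTimeoutError> {
    let first = receiver.recv_timeout(Duration::from_secs(1))?;
    // try_iter() yields whatever else is already queued without blocking;
    // last() keeps only the newest value, falling back to `first`.
    Ok(receiver.try_iter().last().unwrap_or(first))
}

fn main() {
    let (sender, receiver) = mpsc::channel();
    for root in [10, 11, 12] {
        sender.send(root).unwrap();
    }
    assert_eq!(recv_latest(&receiver).unwrap(), 12);
}
```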
@@ -506,11 +503,7 @@ mod tests {
let validator_vote_keypairs = ValidatorVoteKeypairs::new_rand();
let validator_keypairs = vec![&validator_vote_keypairs];
let GenesisConfigInfo {
genesis_config,
mint_keypair: _,
voting_keypair: _,
} = create_genesis_config_with_vote_accounts(
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts(
1_000_000_000,
&validator_keypairs,
vec![100; 1],

View File

@@ -6,10 +6,8 @@
use {
solana_ledger::blockstore::Blockstore,
solana_measure::measure::Measure,
solana_runtime::{
bank::{Bank, ExecuteTimings},
cost_model::CostModel,
},
solana_program_runtime::timings::ExecuteTimings,
solana_runtime::{bank::Bank, cost_model::CostModel},
solana_sdk::timing::timestamp,
std::{
sync::{
@@ -71,8 +69,12 @@ impl CostUpdateServiceTiming {
}
pub enum CostUpdate {
FrozenBank { bank: Arc<Bank> },
ExecuteTiming { execute_timings: ExecuteTimings },
FrozenBank {
bank: Arc<Bank>,
},
ExecuteTiming {
execute_timings: Box<ExecuteTimings>,
},
}
pub type CostUpdateReceiver = Receiver<CostUpdate>;
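Boxing the ExecuteTiming payload matters because an enum is as large as its largest variant, and every CostUpdate sent over the channel pays that size. A sketch of the effect, with `BigTimings` as an illustrative stand-in for ExecuteTimings:

```rust
// BigTimings is a stand-in for a large ExecuteTimings struct.
struct BigTimings {
    _per_program: [u64; 64],
}

enum Unboxed {
    FrozenBank { _slot: u64 },
    ExecuteTiming { _timings: BigTimings },
}

enum Boxed {
    FrozenBank { _slot: u64 },
    ExecuteTiming { _timings: Box<BigTimings> },
}

fn main() {
    // Boxing shrinks every variant to roughly pointer size plus the tag.
    assert!(std::mem::size_of::<Boxed>() < std::mem::size_of::<Unboxed>());
    println!(
        "unboxed: {} bytes, boxed: {} bytes",
        std::mem::size_of::<Unboxed>(),
        std::mem::size_of::<Boxed>()
    );
}
```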
@@ -127,8 +129,10 @@ impl CostUpdateService {
CostUpdate::FrozenBank { bank } => {
bank.read_cost_tracker().unwrap().report_stats(bank.slot());
}
CostUpdate::ExecuteTiming { execute_timings } => {
dirty |= Self::update_cost_model(&cost_model, &execute_timings);
CostUpdate::ExecuteTiming {
mut execute_timings,
} => {
dirty |= Self::update_cost_model(&cost_model, &mut execute_timings);
update_count += 1;
}
}
@@ -151,16 +155,27 @@ impl CostUpdateService {
}
}
fn update_cost_model(cost_model: &RwLock<CostModel>, execute_timings: &ExecuteTimings) -> bool {
fn update_cost_model(
cost_model: &RwLock<CostModel>,
execute_timings: &mut ExecuteTimings,
) -> bool {
let mut dirty = false;
{
let mut cost_model_mutable = cost_model.write().unwrap();
for (program_id, timing) in &execute_timings.details.per_program_timings {
if timing.count < 1 {
for (program_id, program_timings) in &mut execute_timings.details.per_program_timings {
let current_estimated_program_cost =
cost_model.read().unwrap().find_instruction_cost(program_id);
program_timings.coalesce_error_timings(current_estimated_program_cost);
if program_timings.count < 1 {
continue;
}
let units = timing.accumulated_units / timing.count as u64;
match cost_model_mutable.upsert_instruction_cost(program_id, units) {
let units = program_timings.accumulated_units / program_timings.count as u64;
match cost_model
.write()
.unwrap()
.upsert_instruction_cost(program_id, units)
{
Ok(c) => {
debug!(
"after replayed into bank, instruction {:?} has averaged cost {}",
@@ -213,8 +228,8 @@ mod tests {
#[test]
fn test_update_cost_model_with_empty_execute_timings() {
let cost_model = Arc::new(RwLock::new(CostModel::default()));
let empty_execute_timings = ExecuteTimings::default();
CostUpdateService::update_cost_model(&cost_model, &empty_execute_timings);
let mut empty_execute_timings = ExecuteTimings::default();
CostUpdateService::update_cost_model(&cost_model, &mut empty_execute_timings);
assert_eq!(
0,
@@ -238,6 +253,7 @@ mod tests {
{
let accumulated_us: u64 = 1000;
let accumulated_units: u64 = 100;
let total_errored_units = 0;
let count: u32 = 10;
expected_cost = accumulated_units / count as u64;
@@ -247,9 +263,11 @@ mod tests {
accumulated_us,
accumulated_units,
count,
errored_txs_compute_consumed: vec![],
total_errored_units,
},
);
CostUpdateService::update_cost_model(&cost_model, &execute_timings);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
@@ -282,9 +300,11 @@ mod tests {
accumulated_us,
accumulated_units,
count,
errored_txs_compute_consumed: vec![],
total_errored_units: 0,
},
);
CostUpdateService::update_cost_model(&cost_model, &execute_timings);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
@@ -303,4 +323,106 @@ mod tests {
);
}
}
#[test]
fn test_update_cost_model_with_error_execute_timings() {
let cost_model = Arc::new(RwLock::new(CostModel::default()));
let mut execute_timings = ExecuteTimings::default();
let program_key_1 = Pubkey::new_unique();
// Test updating cost model with a `ProgramTiming` with no compute units accumulated, i.e.
// `accumulated_units` == 0
{
execute_timings.details.per_program_timings.insert(
program_key_1,
ProgramTiming {
accumulated_us: 1000,
accumulated_units: 0,
count: 0,
errored_txs_compute_consumed: vec![],
total_errored_units: 0,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
// If both the `errored_txs_compute_consumed` is empty and `count == 0`, then
// nothing should be inserted into the cost model
assert!(cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.is_empty());
}
// Test updating cost model with only erroring compute costs where the `cost_per_error` is
// greater than the current instruction cost for the program. Should update with the
// new erroring compute costs
let cost_per_error = 1000;
{
let errored_txs_compute_consumed = vec![cost_per_error; 3];
let total_errored_units = errored_txs_compute_consumed.iter().sum();
execute_timings.details.per_program_timings.insert(
program_key_1,
ProgramTiming {
accumulated_us: 1000,
accumulated_units: 0,
count: 0,
errored_txs_compute_consumed,
total_errored_units,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
);
assert_eq!(
Some(&cost_per_error),
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
);
}
// Test updating cost model with only erroring compute costs where the error cost is
// `smaller_cost_per_error`, less than the current instruction cost for the program.
// The cost should not decrease for these new lesser errors
let smaller_cost_per_error = cost_per_error - 10;
{
let errored_txs_compute_consumed = vec![smaller_cost_per_error; 3];
let total_errored_units = errored_txs_compute_consumed.iter().sum();
execute_timings.details.per_program_timings.insert(
program_key_1,
ProgramTiming {
accumulated_us: 1000,
accumulated_units: 0,
count: 0,
errored_txs_compute_consumed,
total_errored_units,
},
);
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(
1,
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.len()
);
assert_eq!(
Some(&cost_per_error),
cost_model
.read()
.unwrap()
.get_instruction_cost_table()
.get(&program_key_1)
);
}
}
}
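The tests above pin down the coalescing rule: a program with only errored executions adopts the errored compute costs, but a cheaper error must never drag an existing estimate down. A hedged std-only sketch of one reading consistent with those tests; `coalesce` here is illustrative, not the actual ProgramTiming::coalesce_error_timings implementation:

```rust
// Illustrative stand-in for ProgramTiming and its error coalescing.
struct ProgramTiming {
    accumulated_units: u64,
    count: u32,
    errored_txs_compute_consumed: Vec<u64>,
}

fn coalesce(timing: &mut ProgramTiming, current_estimate: Option<u64>) {
    if timing.count == 0 {
        for &errored in &timing.errored_txs_compute_consumed {
            // Never let a cheap error lower an existing estimate.
            let units = current_estimate.map_or(errored, |cost| cost.max(errored));
            timing.accumulated_units = timing.accumulated_units.saturating_add(units);
            timing.count = timing.count.saturating_add(1);
        }
    }
}

fn main() {
    let mut timing = ProgramTiming {
        accumulated_units: 0,
        count: 0,
        errored_txs_compute_consumed: vec![990; 3],
    };
    coalesce(&mut timing, Some(1000));
    // Average stays at the prior 1000 estimate despite the cheaper errors.
    assert_eq!(timing.accumulated_units / timing.count as u64, 1000);
}
```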

View File

@@ -6,10 +6,13 @@ use {
result::{Error, Result},
},
solana_metrics::{inc_new_counter_debug, inc_new_counter_info},
solana_perf::{packet::PacketsRecycler, recycler::Recycler},
solana_perf::{packet::PacketBatchRecycler, recycler::Recycler},
solana_poh::poh_recorder::PohRecorder,
solana_sdk::clock::DEFAULT_TICKS_PER_SLOT,
solana_streamer::streamer::{self, PacketReceiver, PacketSender},
solana_sdk::{
clock::DEFAULT_TICKS_PER_SLOT,
packet::{Packet, PacketFlags},
},
solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender},
std::{
net::UdpSocket,
sync::{
@@ -34,7 +37,7 @@ impl FetchStage {
exit: &Arc<AtomicBool>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
coalesce_ms: u64,
) -> (Self, PacketReceiver, PacketReceiver) {
) -> (Self, PacketBatchReceiver, PacketBatchReceiver) {
let (sender, receiver) = channel();
let (vote_sender, vote_receiver) = channel();
(
@@ -58,8 +61,8 @@ impl FetchStage {
tpu_forwards_sockets: Vec<UdpSocket>,
tpu_vote_sockets: Vec<UdpSocket>,
exit: &Arc<AtomicBool>,
sender: &PacketSender,
vote_sender: &PacketSender,
sender: &PacketBatchSender,
vote_sender: &PacketBatchSender,
poh_recorder: &Arc<Mutex<PohRecorder>>,
coalesce_ms: u64,
) -> Self {
@@ -79,18 +82,24 @@ impl FetchStage {
}
fn handle_forwarded_packets(
recvr: &PacketReceiver,
sendr: &PacketSender,
recvr: &PacketBatchReceiver,
sendr: &PacketBatchSender,
poh_recorder: &Arc<Mutex<PohRecorder>>,
) -> Result<()> {
let msgs = recvr.recv()?;
let mut len = msgs.packets.len();
let mut batch = vec![msgs];
while let Ok(more) = recvr.try_recv() {
len += more.packets.len();
batch.push(more);
let mark_forwarded = |packet: &mut Packet| {
packet.meta.flags |= PacketFlags::FORWARDED;
};
let mut packet_batch = recvr.recv()?;
let mut num_packets = packet_batch.packets.len();
packet_batch.packets.iter_mut().for_each(mark_forwarded);
let mut packet_batches = vec![packet_batch];
while let Ok(mut packet_batch) = recvr.try_recv() {
packet_batch.packets.iter_mut().for_each(mark_forwarded);
num_packets += packet_batch.packets.len();
packet_batches.push(packet_batch);
// Read at most 1K transactions in a loop
if len > 1024 {
if num_packets > 1024 {
break;
}
}
@@ -100,33 +109,33 @@ impl FetchStage {
.unwrap()
.would_be_leader(HOLD_TRANSACTIONS_SLOT_OFFSET.saturating_mul(DEFAULT_TICKS_PER_SLOT))
{
inc_new_counter_debug!("fetch_stage-honor_forwards", len);
for packets in batch {
inc_new_counter_debug!("fetch_stage-honor_forwards", num_packets);
for packet_batch in packet_batches {
#[allow(clippy::question_mark)]
if sendr.send(packets).is_err() {
if sendr.send(packet_batch).is_err() {
return Err(Error::Send);
}
}
} else {
inc_new_counter_info!("fetch_stage-discard_forwards", len);
inc_new_counter_info!("fetch_stage-discard_forwards", num_packets);
}
Ok(())
}
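handle_forwarded_packets now tags every packet as forwarded before batching, and still caps one drain pass near 1K packets so a busy channel cannot starve the loop. A std-only sketch of the drain-mark-cap flow, with local stand-ins for PacketBatch and the PacketFlags::FORWARDED bit:

```rust
use std::sync::mpsc::Receiver;

// Local stand-ins: a boolean flag in place of PacketFlags::FORWARDED.
struct Packet {
    forwarded: bool,
}
struct PacketBatch {
    packets: Vec<Packet>,
}

fn drain_forwarded(recvr: &Receiver<PacketBatch>) -> Option<Vec<PacketBatch>> {
    let mark_forwarded = |packet: &mut Packet| packet.forwarded = true;

    let mut batch = recvr.recv().ok()?; // block for the first batch
    let mut num_packets = batch.packets.len();
    batch.packets.iter_mut().for_each(mark_forwarded);
    let mut batches = vec![batch];

    // Opportunistically drain what is already queued, capped near 1K packets.
    while let Ok(mut batch) = recvr.try_recv() {
        batch.packets.iter_mut().for_each(mark_forwarded);
        num_packets += batch.packets.len();
        batches.push(batch);
        if num_packets > 1024 {
            break;
        }
    }
    Some(batches)
}

fn main() {
    let (sender, receiver) = std::sync::mpsc::channel();
    sender
        .send(PacketBatch { packets: vec![Packet { forwarded: false }] })
        .unwrap();
    let batches = drain_forwarded(&receiver).unwrap();
    assert!(batches[0].packets[0].forwarded);
}
```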
fn new_multi_socket(
sockets: Vec<Arc<UdpSocket>>,
tpu_sockets: Vec<Arc<UdpSocket>>,
tpu_forwards_sockets: Vec<Arc<UdpSocket>>,
tpu_vote_sockets: Vec<Arc<UdpSocket>>,
exit: &Arc<AtomicBool>,
sender: &PacketSender,
vote_sender: &PacketSender,
sender: &PacketBatchSender,
vote_sender: &PacketBatchSender,
poh_recorder: &Arc<Mutex<PohRecorder>>,
coalesce_ms: u64,
) -> Self {
let recycler: PacketsRecycler = Recycler::warmed(1000, 1024);
let recycler: PacketBatchRecycler = Recycler::warmed(1000, 1024);
let tpu_threads = sockets.into_iter().map(|socket| {
let tpu_threads = tpu_sockets.into_iter().map(|socket| {
streamer::receiver(
socket,
exit,

View File

@@ -164,12 +164,9 @@ impl LedgerCleanupService {
}
fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
Ok(root)
Ok(new_root_receiver.try_iter().last().unwrap_or(root))
}
pub fn cleanup_ledger(

View File

@@ -81,45 +81,133 @@ impl ReplaySlotStats {
i64
),
(
"serialize_us",
"execute_details_serialize_us",
self.execute_timings.details.serialize_us,
i64
),
(
"create_vm_us",
"execute_details_create_vm_us",
self.execute_timings.details.create_vm_us,
i64
),
(
"execute_inner_us",
"execute_details_execute_inner_us",
self.execute_timings.details.execute_us,
i64
),
(
"deserialize_us",
"execute_details_deserialize_us",
self.execute_timings.details.deserialize_us,
i64
),
(
"changed_account_count",
"execute_details_get_or_create_executor_us",
self.execute_timings.details.get_or_create_executor_us,
i64
),
(
"execute_details_changed_account_count",
self.execute_timings.details.changed_account_count,
i64
),
(
"total_account_count",
"execute_details_total_account_count",
self.execute_timings.details.total_account_count,
i64
),
(
"total_data_size",
"execute_details_total_data_size",
self.execute_timings.details.total_data_size,
i64
),
(
"data_size_changed",
"execute_details_data_size_changed",
self.execute_timings.details.data_size_changed,
i64
),
(
"execute_details_create_executor_register_syscalls_us",
self.execute_timings
.details
.create_executor_register_syscalls_us,
i64
),
(
"execute_details_create_executor_load_elf_us",
self.execute_timings.details.create_executor_load_elf_us,
i64
),
(
"execute_details_create_executor_verify_code_us",
self.execute_timings.details.create_executor_verify_code_us,
i64
),
(
"execute_details_create_executor_jit_compile_us",
self.execute_timings.details.create_executor_jit_compile_us,
i64
),
(
"execute_accessories_feature_set_clone_us",
self.execute_timings
.execute_accessories
.feature_set_clone_us,
i64
),
(
"execute_accessories_compute_budget_process_transaction_us",
self.execute_timings
.execute_accessories
.compute_budget_process_transaction_us,
i64
),
(
"execute_accessories_get_executors_us",
self.execute_timings.execute_accessories.get_executors_us,
i64
),
(
"execute_accessories_process_message_us",
self.execute_timings.execute_accessories.process_message_us,
i64
),
(
"execute_accessories_update_executors_us",
self.execute_timings.execute_accessories.update_executors_us,
i64
),
(
"execute_accessories_process_instructions_total_us",
self.execute_timings
.execute_accessories
.process_instructions
.total_us,
i64
),
(
"execute_accessories_process_instructions_verify_caller_us",
self.execute_timings
.execute_accessories
.process_instructions
.verify_caller_us,
i64
),
(
"execute_accessories_process_instructions_process_executable_chain_us",
self.execute_timings
.execute_accessories
.process_instructions
.process_executable_chain_us,
i64
),
(
"execute_accessories_process_instructions_verify_callee_us",
self.execute_timings
.execute_accessories
.process_instructions
.verify_callee_us,
i64
),
);
let mut per_pubkey_timings: Vec<_> = self
@@ -129,16 +217,19 @@ impl ReplaySlotStats {
.iter()
.collect();
per_pubkey_timings.sort_by(|a, b| b.1.accumulated_us.cmp(&a.1.accumulated_us));
let (total_us, total_units, total_count) =
per_pubkey_timings
.iter()
.fold((0, 0, 0), |(sum_us, sum_units, sum_count), a| {
let (total_us, total_units, total_count, total_errored_units, total_errored_count) =
per_pubkey_timings.iter().fold(
(0, 0, 0, 0, 0),
|(sum_us, sum_units, sum_count, sum_errored_units, sum_errored_count), a| {
(
sum_us + a.1.accumulated_us,
sum_units + a.1.accumulated_units,
sum_count + a.1.count,
sum_errored_units + a.1.total_errored_units,
sum_errored_count + a.1.errored_txs_compute_consumed.len(),
)
});
},
);
for (pubkey, time) in per_pubkey_timings.iter().take(5) {
datapoint_info!(
@@ -147,7 +238,13 @@ impl ReplaySlotStats {
("pubkey", pubkey.to_string(), String),
("execute_us", time.accumulated_us, i64),
("accumulated_units", time.accumulated_units, i64),
("count", time.count, i64)
("errored_units", time.total_errored_units, i64),
("count", time.count, i64),
(
"errored_count",
time.errored_txs_compute_consumed.len(),
i64
),
);
}
datapoint_info!(
@@ -156,7 +253,9 @@ impl ReplaySlotStats {
("pubkey", "all", String),
("execute_us", total_us, i64),
("accumulated_units", total_units, i64),
("count", total_count, i64)
("count", total_count, i64),
("errored_units", total_errored_units, i64),
("count", total_errored_count, i64)
);
}
}

View File

@@ -78,13 +78,12 @@ impl QosService {
pub fn compute_transaction_costs<'a>(
&self,
transactions: impl Iterator<Item = &'a SanitizedTransaction>,
demote_program_write_locks: bool,
) -> Vec<TransactionCost> {
let mut compute_cost_time = Measure::start("compute_cost_time");
let cost_model = self.cost_model.read().unwrap();
let txs_costs: Vec<_> = transactions
.map(|tx| {
let cost = cost_model.calculate_cost(tx, demote_program_write_locks);
let cost = cost_model.calculate_cost(tx);
debug!(
"transaction {:?}, cost {:?}, cost sum {}",
tx,
@@ -250,7 +249,7 @@ mod tests {
let cost_model = Arc::new(RwLock::new(CostModel::default()));
let qos_service = QosService::new(cost_model.clone());
let txs_costs = qos_service.compute_transaction_costs(txs.iter(), false);
let txs_costs = qos_service.compute_transaction_costs(txs.iter());
// verify the size of txs_costs and its contents
assert_eq!(txs_costs.len(), txs.len());
@@ -260,11 +259,7 @@ mod tests {
.map(|(index, cost)| {
assert_eq!(
cost.sum(),
cost_model
.read()
.unwrap()
.calculate_cost(&txs[index], false)
.sum()
cost_model.read().unwrap().calculate_cost(&txs[index]).sum()
);
})
.collect_vec();
@@ -295,14 +290,14 @@ mod tests {
let transfer_tx_cost = cost_model
.read()
.unwrap()
.calculate_cost(&transfer_tx, false)
.calculate_cost(&transfer_tx)
.sum();
// make a vec of txs
let txs = vec![transfer_tx.clone(), vote_tx.clone(), transfer_tx, vote_tx];
let qos_service = QosService::new(cost_model);
let txs_costs = qos_service.compute_transaction_costs(txs.iter(), false);
let txs_costs = qos_service.compute_transaction_costs(txs.iter());
// set cost tracker limit to fit 1 transfer tx, vote tx bypasses limit check
let cost_limit = transfer_tx_cost;
@@ -348,7 +343,7 @@ mod tests {
.name("test-producer-1".to_string())
.spawn(move || {
debug!("thread 1 starts with {} txs", txs_1.len());
let tx_costs = qos_service_1.compute_transaction_costs(txs_1.iter(), false);
let tx_costs = qos_service_1.compute_transaction_costs(txs_1.iter());
assert_eq!(txs_count, tx_costs.len());
debug!(
"thread 1 done, generated {} count, see service count as {}",
@@ -365,7 +360,7 @@ mod tests {
.name("test-producer-2".to_string())
.spawn(move || {
debug!("thread 2 starts with {} txs", txs_2.len());
let tx_costs = qos_service_2.compute_transaction_costs(txs_2.iter(), false);
let tx_costs = qos_service_2.compute_transaction_costs(txs_2.iter());
assert_eq!(txs_count, tx_costs.len());
debug!(
"thread 2 done, generated {} count, see service count as {}",

View File

@@ -57,7 +57,7 @@ pub fn get_unknown_last_index(
.entry(slot)
.or_insert_with(|| blockstore.meta(slot).unwrap());
if let Some(slot_meta) = slot_meta {
if slot_meta.known_last_index().is_none() {
if slot_meta.last_index.is_none() {
let shred_index = blockstore.get_index(slot).unwrap();
let num_processed_shreds = if let Some(shred_index) = shred_index {
shred_index.data().num_shreds() as u64
@@ -86,17 +86,17 @@ fn get_unrepaired_path(
) -> Vec<Slot> {
let mut path = Vec::new();
let mut slot = start_slot;
while !visited.contains(&slot) {
visited.insert(slot);
while visited.insert(slot) {
let slot_meta = slot_meta_cache
.entry(slot)
.or_insert_with(|| blockstore.meta(slot).unwrap());
if let Some(slot_meta) = slot_meta {
if slot_meta.is_full() {
break;
if !slot_meta.is_full() {
path.push(slot);
if let Some(parent_slot) = slot_meta.parent_slot {
slot = parent_slot
}
}
path.push(slot);
slot = slot_meta.parent_slot;
}
}
path.reverse();
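The reworked traversal leans on HashSet::insert returning false for an element already present, so while visited.insert(slot) fuses the removed contains/insert pair and ends the walk at the first repeated slot. A sketch walking a parent chain the same way, with a HashMap standing in for the blockstore's parent links:

```rust
use std::collections::{HashMap, HashSet};

// `parents` stands in for the blockstore's SlotMeta.parent_slot links.
fn unrepaired_path(start: u64, parents: &HashMap<u64, u64>) -> Vec<u64> {
    let mut visited = HashSet::new();
    let mut path = Vec::new();
    let mut slot = start;
    // insert() returns false once `slot` repeats, ending the loop.
    while visited.insert(slot) {
        path.push(slot);
        match parents.get(&slot) {
            Some(&parent) => slot = parent,
            None => break,
        }
    }
    path.reverse();
    path
}

fn main() {
    let parents = HashMap::from([(5, 3), (3, 1)]);
    assert_eq!(unrepaired_path(5, &parents), vec![1, 3, 5]);
}
```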
@@ -123,7 +123,7 @@ pub fn get_closest_completion(
if slot_meta.is_full() {
continue;
}
if let Some(last_index) = slot_meta.known_last_index() {
if let Some(last_index) = slot_meta.last_index {
let shred_index = blockstore.get_index(slot).unwrap();
let dist = if let Some(shred_index) = shred_index {
let shred_count = shred_index.data().num_shreds() as u64;

View File

@@ -56,7 +56,10 @@ mod test {
shred::{Shred, Shredder},
sigverify_shreds::verify_shred_cpu,
},
solana_sdk::signature::{Keypair, Signer},
solana_sdk::{
packet::PacketFlags,
signature::{Keypair, Signer},
},
std::{
collections::HashMap,
net::{IpAddr, Ipv4Addr},
@@ -87,7 +90,7 @@ mod test {
nonce,
)
.unwrap();
packet.meta.repair = true;
packet.meta.flags |= PacketFlags::REPAIR;
let leader_slots = [(slot, keypair.pubkey().to_bytes())]
.iter()

View File

@@ -201,6 +201,7 @@ impl RepairService {
blockstore: Arc<Blockstore>,
exit: Arc<AtomicBool>,
repair_socket: Arc<UdpSocket>,
ancestor_hashes_socket: Arc<UdpSocket>,
repair_info: RepairInfo,
verified_vote_receiver: VerifiedVoteReceiver,
outstanding_requests: Arc<RwLock<OutstandingShredRepairs>>,
@@ -225,11 +226,10 @@ impl RepairService {
.unwrap()
};
let ancestor_hashes_request_socket = Arc::new(UdpSocket::bind("0.0.0.0:0").unwrap());
let ancestor_hashes_service = AncestorHashesService::new(
exit,
blockstore,
ancestor_hashes_request_socket,
ancestor_hashes_socket,
repair_info,
ancestor_hashes_replay_update_receiver,
);

View File

@@ -26,6 +26,7 @@ use {
voting_service::VoteOp,
window_service::DuplicateSlotReceiver,
},
solana_accountsdb_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
solana_client::rpc_response::SlotUpdate,
solana_entry::entry::VerifyRecyclers,
solana_gossip::cluster_info::ClusterInfo,
@@ -38,13 +39,14 @@ use {
solana_measure::measure::Measure,
solana_metrics::inc_new_counter_info,
solana_poh::poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
solana_program_runtime::timings::ExecuteTimings,
solana_rpc::{
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
rpc_subscriptions::RpcSubscriptions,
},
solana_runtime::{
accounts_background_service::AbsRequestSender,
bank::{Bank, ExecuteTimings, NewBankOptions},
bank::{Bank, NewBankOptions},
bank_forks::BankForks,
commitment::BlockCommitmentCache,
vote_sender_types::ReplayVoteSender,
@@ -327,6 +329,7 @@ impl ReplayStage {
cost_update_sender: Sender<CostUpdate>,
voting_sender: Sender<VoteOp>,
drop_bank_sender: Sender<Vec<Arc<Bank>>>,
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
) -> Self {
let ReplayStageConfig {
vote_account,
@@ -432,6 +435,7 @@ impl ReplayStage {
&cost_update_sender,
&mut duplicate_slots_to_repair,
&ancestor_hashes_replay_update_sender,
block_metadata_notifier.clone(),
);
replay_active_banks_time.stop();
@@ -1459,7 +1463,7 @@ impl ReplayStage {
);
let root_distance = poh_slot - root_slot;
const MAX_ROOT_DISTANCE_FOR_VOTE_ONLY: Slot = 500;
const MAX_ROOT_DISTANCE_FOR_VOTE_ONLY: Slot = 400;
let vote_only_bank = if root_distance > MAX_ROOT_DISTANCE_FOR_VOTE_ONLY {
datapoint_info!("vote-only-bank", ("slot", poh_slot, i64));
true
@@ -1988,6 +1992,7 @@ impl ReplayStage {
cost_update_sender: &Sender<CostUpdate>,
duplicate_slots_to_repair: &mut DuplicateSlotsToRepair,
ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender,
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
) -> bool {
let mut did_complete_bank = false;
let mut tx_count = 0;
@@ -2143,6 +2148,16 @@ impl ReplayStage {
}
}
Self::record_rewards(&bank, rewards_recorder_sender);
if let Some(ref block_metadata_notifier) = block_metadata_notifier {
let block_metadata_notifier = block_metadata_notifier.read().unwrap();
block_metadata_notifier.notify_block_metadata(
bank.slot(),
&bank.last_blockhash().to_string(),
&bank.rewards,
Some(bank.clock().unix_timestamp),
Some(bank.block_height()),
)
}
} else {
trace!(
"bank {} not completed tick_height: {}, max_tick_height: {}",
@@ -2156,7 +2171,9 @@ impl ReplayStage {
// send accumulated execute-timings to cost_update_service
if !execute_timings.details.per_program_timings.is_empty() {
cost_update_sender
.send(CostUpdate::ExecuteTiming { execute_timings })
.send(CostUpdate::ExecuteTiming {
execute_timings: Box::new(execute_timings),
})
.unwrap_or_else(|err| warn!("cost_update_sender failed: {:?}", err));
}
@@ -3033,8 +3050,10 @@ pub mod tests {
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(bank_forks);
let exit = Arc::new(AtomicBool::new(false));
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,
@@ -3568,8 +3587,10 @@ pub mod tests {
&replay_vote_sender,
&VerifyRecyclers::default(),
);
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
block_commitment_cache,
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
@@ -3636,8 +3657,10 @@ pub mod tests {
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
&exit,
max_complete_transaction_status_slot,
bank_forks.clone(),
block_commitment_cache.clone(),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),

View File

@@ -24,10 +24,10 @@ use {
solana_ledger::{
blockstore::Blockstore,
leader_schedule_cache::LeaderScheduleCache,
shred::{Shred, ShredType},
shred::{Shred, ShredId},
},
solana_measure::measure::Measure,
solana_perf::packet::Packets,
solana_perf::packet::PacketBatch,
solana_rayon_threadlimit::get_thread_count,
solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions},
solana_runtime::{bank::Bank, bank_forks::BankForks},
@@ -145,13 +145,13 @@ impl RetransmitStats {
}
// Map of shred (slot, index, type) => list of hash values seen for that key.
type ShredFilter = LruCache<(Slot, u32, ShredType), Vec<u64>>;
type ShredFilter = LruCache<ShredId, Vec<u64>>;
type ShredFilterAndHasher = (ShredFilter, PacketHasher);
// Returns true if shred is already received and should skip retransmit.
fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool {
let key = (shred.slot(), shred.index(), shred.shred_type());
let key = shred.id();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
match cache.get_mut(&key) {
@@ -433,7 +433,8 @@ impl RetransmitStage {
cluster_info: Arc<ClusterInfo>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
verified_receiver: Receiver<Vec<Packets>>,
ancestor_hashes_socket: Arc<UdpSocket>,
verified_receiver: Receiver<Vec<PacketBatch>>,
exit: Arc<AtomicBool>,
cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
epoch_schedule: EpochSchedule,
@@ -486,6 +487,7 @@ impl RetransmitStage {
verified_receiver,
retransmit_sender,
repair_socket,
ancestor_hashes_socket,
exit,
repair_info,
leader_schedule_cache,
@@ -610,10 +612,10 @@ mod tests {
let shred = Shred::new_from_data(0, 0, 0, None, true, true, 0, 0x20, 0);
// it should send this over the sockets.
retransmit_sender.send(vec![shred]).unwrap();
let mut packets = Packets::new(vec![]);
solana_streamer::packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert!(!packets.packets[0].meta.repair);
let mut packet_batch = PacketBatch::new(vec![]);
solana_streamer::packet::recv_from(&mut packet_batch, &me_retransmit, 1).unwrap();
assert_eq!(packet_batch.packets.len(), 1);
assert!(!packet_batch.packets[0].meta.repair());
}
#[test]
@@ -639,19 +641,19 @@ mod tests {
assert!(should_skip_retransmit(&shred, &shreds_received));
assert!(should_skip_retransmit(&shred, &shreds_received));
let shred = Shred::new_empty_coding(slot, index, 0, 1, 1, version);
let shred = Shred::new_empty_coding(slot, index, 0, 1, 1, 0, version);
// Coding at (1, 5) passes
assert!(!should_skip_retransmit(&shred, &shreds_received));
// then blocked
assert!(should_skip_retransmit(&shred, &shreds_received));
let shred = Shred::new_empty_coding(slot, index, 2, 1, 1, version);
let shred = Shred::new_empty_coding(slot, index, 2, 1, 1, 0, version);
// 2nd unique coding at (1, 5) passes
assert!(!should_skip_retransmit(&shred, &shreds_received));
// same again is blocked
assert!(should_skip_retransmit(&shred, &shreds_received));
let shred = Shred::new_empty_coding(slot, index, 3, 1, 1, version);
let shred = Shred::new_empty_coding(slot, index, 3, 1, 1, 0, version);
// Another unique coding at (1, 5) always blocked
assert!(should_skip_retransmit(&shred, &shreds_received));
assert!(should_skip_retransmit(&shred, &shreds_received));
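These tests encode the retransmit dedup rule: at most two distinct payloads per (slot, index, type) key pass, and any repeat is blocked. A std-only sketch of that filter, with a plain HashMap standing in for the bounded LruCache and a tuple standing in for ShredId:

```rust
use std::collections::HashMap;

type ShredId = (u64, u32, u8); // (slot, index, shred_type) stand-in

// HashMap stands in for the bounded LruCache keyed by ShredId.
struct ShredFilter {
    seen: HashMap<ShredId, Vec<u64>>,
}

impl ShredFilter {
    // Returns true if the shred should skip retransmit: either this exact
    // payload hash was seen, or two distinct payloads already passed.
    fn should_skip(&mut self, key: ShredId, hash: u64) -> bool {
        let hashes = self.seen.entry(key).or_default();
        if hashes.contains(&hash) {
            return true;
        }
        if hashes.len() >= 2 {
            return true; // only two unique payloads per key are retransmitted
        }
        hashes.push(hash);
        false
    }
}

fn main() {
    let mut filter = ShredFilter { seen: HashMap::new() };
    let key = (1, 5, 0);
    assert!(!filter.should_skip(key, 0xaa)); // first unique passes
    assert!(filter.should_skip(key, 0xaa));  // repeat blocked
    assert!(!filter.should_skip(key, 0xbb)); // second unique passes
    assert!(filter.should_skip(key, 0xcc));  // third unique always blocked
}
```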

View File

@@ -25,11 +25,11 @@ use {
},
solana_measure::measure::Measure,
solana_metrics::inc_new_counter_debug,
solana_perf::packet::{limited_deserialize, Packets, PacketsRecycler},
solana_perf::packet::{limited_deserialize, PacketBatch, PacketBatchRecycler},
solana_sdk::{
clock::Slot, hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms,
},
solana_streamer::streamer::{PacketReceiver, PacketSender},
solana_streamer::streamer::{PacketBatchReceiver, PacketBatchSender},
std::{
collections::HashSet,
net::SocketAddr,
@@ -229,12 +229,12 @@ impl ServeRepair {
fn handle_repair(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
request: RepairProtocol,
stats: &mut ServeRepairStats,
) -> Option<Packets> {
) -> Option<PacketBatch> {
let now = Instant::now();
let my_id = me.read().unwrap().my_id();
@@ -317,10 +317,10 @@ impl ServeRepair {
/// Process messages from the network
fn run_listen(
obj: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
blockstore: Option<&Arc<Blockstore>>,
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
requests_receiver: &PacketBatchReceiver,
response_sender: &PacketBatchSender,
stats: &mut ServeRepairStats,
max_packets: &mut usize,
) -> Result<()> {
@@ -392,12 +392,12 @@ impl ServeRepair {
pub fn listen(
me: Arc<RwLock<Self>>,
blockstore: Option<Arc<Blockstore>>,
requests_receiver: PacketReceiver,
response_sender: PacketSender,
requests_receiver: PacketBatchReceiver,
response_sender: PacketBatchSender,
exit: &Arc<AtomicBool>,
) -> JoinHandle<()> {
let exit = exit.clone();
let recycler = PacketsRecycler::default();
let recycler = PacketBatchRecycler::default();
Builder::new()
.name("solana-repair-listen".to_string())
.spawn(move || {
@@ -432,14 +432,14 @@ impl ServeRepair {
fn handle_packets(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
blockstore: Option<&Arc<Blockstore>>,
packets: Packets,
response_sender: &PacketSender,
packet_batch: PacketBatch,
response_sender: &PacketBatchSender,
stats: &mut ServeRepairStats,
) {
// iter over the packets
packets.packets.iter().for_each(|packet| {
packet_batch.packets.iter().for_each(|packet| {
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size])
.into_iter()
@@ -609,7 +609,7 @@ impl ServeRepair {
}
fn run_window_request(
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
from: &ContactInfo,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
@@ -617,7 +617,7 @@ impl ServeRepair {
slot: Slot,
shred_index: u64,
nonce: Nonce,
) -> Option<Packets> {
) -> Option<PacketBatch> {
if let Some(blockstore) = blockstore {
// Try to find the requested index in one of the slots
let packet = repair_response::repair_response_packet(
@@ -630,7 +630,7 @@ impl ServeRepair {
if let Some(packet) = packet {
inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
return Some(Packets::new_unpinned_with_recycler_data(
return Some(PacketBatch::new_unpinned_with_recycler_data(
recycler,
"run_window_request",
vec![packet],
@@ -651,13 +651,13 @@ impl ServeRepair {
}
fn run_highest_window_request(
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
highest_index: u64,
nonce: Nonce,
) -> Option<Packets> {
) -> Option<PacketBatch> {
let blockstore = blockstore?;
// Try to find the requested index in one of the slots
let meta = blockstore.meta(slot).ok()??;
@@ -670,7 +670,7 @@ impl ServeRepair {
from_addr,
nonce,
)?;
return Some(Packets::new_unpinned_with_recycler_data(
return Some(PacketBatch::new_unpinned_with_recycler_data(
recycler,
"run_highest_window_request",
vec![packet],
@@ -680,14 +680,14 @@ impl ServeRepair {
}
fn run_orphan(
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
mut slot: Slot,
max_responses: usize,
nonce: Nonce,
) -> Option<Packets> {
let mut res = Packets::new_unpinned_with_recycler(recycler.clone(), 64, "run_orphan");
) -> Option<PacketBatch> {
let mut res = PacketBatch::new_unpinned_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blockstore) = blockstore {
// Try to find the next "n" parent slots of the input slot
while let Ok(Some(meta)) = blockstore.meta(slot) {
@@ -706,8 +706,8 @@ impl ServeRepair {
} else {
break;
}
if meta.is_parent_set() && res.packets.len() <= max_responses {
slot = meta.parent_slot;
if meta.parent_slot.is_some() && res.packets.len() <= max_responses {
slot = meta.parent_slot.unwrap();
} else {
break;
}
@@ -720,12 +720,12 @@ impl ServeRepair {
}
fn run_ancestor_hashes(
recycler: &PacketsRecycler,
recycler: &PacketBatchRecycler,
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
nonce: Nonce,
) -> Option<Packets> {
) -> Option<PacketBatch> {
let blockstore = blockstore?;
let ancestor_slot_hashes = if blockstore.is_duplicate_confirmed(slot) {
let ancestor_iterator =
@@ -746,7 +746,7 @@ impl ServeRepair {
from_addr,
nonce,
)?;
- Some(Packets::new_unpinned_with_recycler_data(
+ Some(PacketBatch::new_unpinned_with_recycler_data(
recycler,
"run_ancestor_hashes",
vec![packet],
@@ -778,7 +778,7 @@ mod tests {
/// test run_highest_window_request responds with the right shred, and does not overrun
fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Nonce) {
- let recycler = PacketsRecycler::default();
+ let recycler = PacketBatchRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
@@ -848,7 +848,7 @@ mod tests {
/// test window requests respond with the right shred, and do not overrun
fn run_window_request(slot: Slot, nonce: Nonce) {
- let recycler = PacketsRecycler::default();
+ let recycler = PacketBatchRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
@@ -1017,7 +1017,7 @@ mod tests {
fn run_orphan(slot: Slot, num_slots: u64, nonce: Nonce) {
solana_logger::setup();
- let recycler = PacketsRecycler::default();
+ let recycler = PacketBatchRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
@@ -1091,7 +1091,7 @@ mod tests {
#[test]
fn run_orphan_corrupted_shred_size() {
solana_logger::setup();
- let recycler = PacketsRecycler::default();
+ let recycler = PacketBatchRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
@@ -1152,7 +1152,7 @@ mod tests {
#[test]
fn test_run_ancestor_hashes() {
solana_logger::setup();
- let recycler = PacketsRecycler::default();
+ let recycler = PacketBatchRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let slot = 0;


@@ -6,12 +6,12 @@ use {
solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats},
solana_perf::{
cuda_runtime::PinnedVec,
- packet::{Packet, PacketsRecycler},
+ packet::{Packet, PacketBatchRecycler, PacketFlags},
recycler::Recycler,
},
solana_runtime::bank_forks::BankForks,
solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT},
- solana_streamer::streamer::{self, PacketReceiver, PacketSender},
+ solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender},
std::{
net::UdpSocket,
sync::{atomic::AtomicBool, mpsc::channel, Arc, RwLock},
@@ -40,7 +40,7 @@ impl ShredFetchStage {
) where
F: Fn(&mut Packet),
{
- p.meta.discard = true;
+ p.meta.set_discard(true);
if let Some((slot, _index, _shred_type)) = get_shred_slot_index_type(p, stats) {
// Seems reasonable to limit shreds to 2 epochs away
if slot > last_root && slot < (last_slot + 2 * slots_per_epoch) {
@@ -50,7 +50,7 @@ impl ShredFetchStage {
if shreds_received.get(&hash).is_none() {
shreds_received.put(hash, ());
- p.meta.discard = false;
+ p.meta.set_discard(false);
modify(p);
} else {
stats.duplicate_shred += 1;
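`process_packet` filters duplicate shreds by hashing each packet and consulting an LRU cache. A standalone sketch of that check using the `lru` crate; `is_duplicate` is a hypothetical helper, and `LruCache::unbounded()` stands in for however the stage actually sizes and resets its cache:

    use lru::LruCache;

    // Report whether `hash` was already seen; record it otherwise.
    fn is_duplicate(shreds_received: &mut LruCache<u64, ()>, hash: u64) -> bool {
        if shreds_received.get(&hash).is_some() {
            return true; // would bump stats.duplicate_shred and discard
        }
        shreds_received.put(hash, ());
        false
    }

    fn main() {
        let mut cache = LruCache::unbounded();
        assert!(!is_duplicate(&mut cache, 42)); // first sighting passes
        assert!(is_duplicate(&mut cache, 42)); // repeat is filtered
    }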
@@ -63,8 +63,8 @@ impl ShredFetchStage {
// updates packets received on a channel and sends them on another channel
fn modify_packets<F>(
- recvr: PacketReceiver,
- sendr: PacketSender,
+ recvr: PacketBatchReceiver,
+ sendr: PacketBatchSender,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
modify: F,
@@ -83,7 +83,7 @@ impl ShredFetchStage {
let mut stats = ShredFetchStats::default();
let mut packet_hasher = PacketHasher::default();
- while let Some(mut p) = recvr.iter().next() {
+ while let Some(mut packet_batch) = recvr.iter().next() {
if last_updated.elapsed().as_millis() as u64 > DEFAULT_MS_PER_SLOT {
last_updated = Instant::now();
packet_hasher.reset();
@@ -97,8 +97,8 @@ impl ShredFetchStage {
slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch());
}
}
- stats.shred_count += p.packets.len();
- p.packets.iter_mut().for_each(|packet| {
+ stats.shred_count += packet_batch.packets.len();
+ packet_batch.packets.iter_mut().for_each(|packet| {
Self::process_packet(
packet,
&mut shreds_received,
@@ -124,7 +124,7 @@ impl ShredFetchStage {
stats = ShredFetchStats::default();
last_stats = Instant::now();
}
- if sendr.send(p).is_err() {
+ if sendr.send(packet_batch).is_err() {
break;
}
}
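`modify_packets` is a receive-modify-forward loop: take a batch off one channel, run a caller-supplied closure over each packet, then push the whole batch downstream. A generic model of that shape using std `mpsc` (the `modify_loop` name and the bare `Vec<T>` batch type are illustrative):

    use std::sync::mpsc::{Receiver, Sender};
    use std::thread::{self, JoinHandle};

    // Forward each batch after applying `modify` to every item; stop when
    // either side of the pipeline hangs up.
    fn modify_loop<T, F>(recvr: Receiver<Vec<T>>, sendr: Sender<Vec<T>>, modify: F) -> JoinHandle<()>
    where
        T: Send + 'static,
        F: Fn(&mut T) + Send + 'static,
    {
        thread::spawn(move || {
            while let Ok(mut batch) = recvr.recv() {
                batch.iter_mut().for_each(|item| modify(item));
                if sendr.send(batch).is_err() {
                    break;
                }
            }
        })
    }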
@@ -133,7 +133,7 @@ impl ShredFetchStage {
fn packet_modifier<F>(
sockets: Vec<Arc<UdpSocket>>,
exit: &Arc<AtomicBool>,
- sender: PacketSender,
+ sender: PacketBatchSender,
recycler: Recycler<PinnedVec<Packet>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
@@ -169,11 +169,11 @@ impl ShredFetchStage {
sockets: Vec<Arc<UdpSocket>>,
forward_sockets: Vec<Arc<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
- sender: &PacketSender,
+ sender: &PacketBatchSender,
bank_forks: Option<Arc<RwLock<BankForks>>>,
exit: &Arc<AtomicBool>,
) -> Self {
- let recycler: PacketsRecycler = Recycler::warmed(100, 1024);
+ let recycler: PacketBatchRecycler = Recycler::warmed(100, 1024);
let (mut tvu_threads, tvu_filter) = Self::packet_modifier(
sockets,
@@ -192,7 +192,7 @@ impl ShredFetchStage {
recycler.clone(),
bank_forks.clone(),
"shred_fetch_tvu_forwards",
- |p| p.meta.forward = true,
+ |p| p.meta.flags.insert(PacketFlags::FORWARDED),
);
let (repair_receiver, repair_handler) = Self::packet_modifier(
@@ -202,7 +202,7 @@ impl ShredFetchStage {
recycler,
bank_forks,
"shred_fetch_repair",
- |p| p.meta.repair = true,
+ |p| p.meta.flags.insert(PacketFlags::REPAIR),
);
tvu_threads.extend(tvu_forwards_threads.into_iter());
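The closures above replace the old `meta.forward` and `meta.repair` booleans with bitflags plus accessors. A self-contained model of that pattern; the types below only illustrate the shape and are not the solana-perf implementation:

    // Bit positions mirroring the flags that appear in this diff.
    #[derive(Default)]
    struct PacketFlagsModel(u8);

    impl PacketFlagsModel {
        const DISCARD: u8 = 1 << 0;
        const FORWARDED: u8 = 1 << 1;
        const REPAIR: u8 = 1 << 2;

        fn insert(&mut self, flag: u8) {
            self.0 |= flag;
        }
        fn remove(&mut self, flag: u8) {
            self.0 &= !flag;
        }
        fn contains(&self, flag: u8) -> bool {
            self.0 & flag == flag
        }
    }

    #[derive(Default)]
    struct MetaModel {
        flags: PacketFlagsModel,
    }

    impl MetaModel {
        // same shape as the set_discard(true/false) calls above
        fn set_discard(&mut self, discard: bool) {
            if discard {
                self.flags.insert(PacketFlagsModel::DISCARD);
            } else {
                self.flags.remove(PacketFlagsModel::DISCARD);
            }
        }
        // same shape as the discard() reads in the tests below
        fn discard(&self) -> bool {
            self.flags.contains(PacketFlagsModel::DISCARD)
        }
    }

One packed byte replaces three separate booleans, and adding a flag no longer grows the struct.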
@@ -266,10 +266,11 @@ mod tests {
&|_p| {},
&hasher,
);
- assert!(!packet.meta.discard);
+ assert!(!packet.meta.discard());
let coding = solana_ledger::shred::Shredder::generate_coding_shreds(
&[shred],
false, // is_last_in_slot
3, // next_code_index
);
coding[0].copy_to_packet(&mut packet);
ShredFetchStage::process_packet(
@@ -282,7 +283,7 @@ mod tests {
&|_p| {},
&hasher,
);
- assert!(!packet.meta.discard);
+ assert!(!packet.meta.discard());
}
#[test]
@@ -309,7 +310,7 @@ mod tests {
&hasher,
);
assert_eq!(stats.index_overrun, 1);
- assert!(packet.meta.discard);
+ assert!(packet.meta.discard());
let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
shred.copy_to_packet(&mut packet);
@@ -324,7 +325,7 @@ mod tests {
&|_p| {},
&hasher,
);
- assert!(packet.meta.discard);
+ assert!(packet.meta.discard());
// Accepted for 1,3
ShredFetchStage::process_packet(
@@ -337,7 +338,7 @@ mod tests {
&|_p| {},
&hasher,
);
- assert!(!packet.meta.discard);
+ assert!(!packet.meta.discard());
// shreds_received should filter duplicate
ShredFetchStage::process_packet(
@@ -350,7 +351,7 @@ mod tests {
&|_p| {},
&hasher,
);
- assert!(packet.meta.discard);
+ assert!(packet.meta.discard());
let shred = Shred::new_from_data(1_000_000, 3, 0, None, true, true, 0, 0, 0);
shred.copy_to_packet(&mut packet);
@@ -366,7 +367,7 @@ mod tests {
&|_p| {},
&hasher,
);
- assert!(packet.meta.discard);
+ assert!(packet.meta.discard());
let index = MAX_DATA_SHREDS_PER_SLOT as u32;
let shred = Shred::new_from_data(5, index, 0, None, true, true, 0, 0, 0);
@@ -381,6 +382,6 @@ mod tests {
&|_p| {},
&hasher,
);
- assert!(packet.meta.discard);
+ assert!(packet.meta.discard());
}
}


@@ -5,11 +5,11 @@
//!
pub use solana_perf::sigverify::{
- batch_size, ed25519_verify_cpu, ed25519_verify_disabled, init, TxOffset,
+ count_packets_in_batches, ed25519_verify_cpu, ed25519_verify_disabled, init, TxOffset,
};
use {
crate::sigverify_stage::SigVerifier,
- solana_perf::{cuda_runtime::PinnedVec, packet::Packets, recycler::Recycler, sigverify},
+ solana_perf::{cuda_runtime::PinnedVec, packet::PacketBatch, recycler::Recycler, sigverify},
};
#[derive(Clone)]
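`batch_size` is renamed to `count_packets_in_batches` to match the new batch vocabulary. Given the `batch.packets` field used throughout these hunks, it plausibly reduces to a sum over batches; a sketch, not the verified solana-perf body:

    use solana_perf::packet::PacketBatch;

    // Total packets across all batches.
    fn count_packets_in_batches(batches: &[PacketBatch]) -> usize {
        batches.iter().map(|batch| batch.packets.len()).sum()
    }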
@@ -40,13 +40,13 @@ impl Default for TransactionSigVerifier {
}
impl SigVerifier for TransactionSigVerifier {
- fn verify_batch(&self, mut batch: Vec<Packets>) -> Vec<Packets> {
+ fn verify_batches(&self, mut batches: Vec<PacketBatch>) -> Vec<PacketBatch> {
sigverify::ed25519_verify(
- &mut batch,
+ &mut batches,
&self.recycler,
&self.recycler_out,
self.reject_non_vote,
);
- batch
+ batches
}
}


@@ -5,7 +5,7 @@ use {
leader_schedule_cache::LeaderScheduleCache, shred::Shred,
sigverify_shreds::verify_shreds_gpu,
},
- solana_perf::{self, packet::Packets, recycler_cache::RecyclerCache},
+ solana_perf::{self, packet::PacketBatch, recycler_cache::RecyclerCache},
solana_runtime::bank_forks::BankForks,
std::{
collections::{HashMap, HashSet},
@@ -32,7 +32,7 @@ impl ShredSigVerifier {
recycler_cache: RecyclerCache::warmed(),
}
}
- fn read_slots(batches: &[Packets]) -> HashSet<u64> {
+ fn read_slots(batches: &[PacketBatch]) -> HashSet<u64> {
batches
.iter()
.flat_map(|batch| batch.packets.iter().filter_map(Shred::get_slot_from_packet))
@@ -41,7 +41,7 @@ impl ShredSigVerifier {
}
impl SigVerifier for ShredSigVerifier {
- fn verify_batch(&self, mut batches: Vec<Packets>) -> Vec<Packets> {
+ fn verify_batches(&self, mut batches: Vec<PacketBatch>) -> Vec<PacketBatch> {
let r_bank = self.bank_forks.read().unwrap().working_bank();
let slots: HashSet<u64> = Self::read_slots(&batches);
let mut leader_slots: HashMap<u64, [u8; 32]> = slots
@@ -88,13 +88,13 @@ pub mod tests {
0,
0xc0de,
);
- let mut batch = [Packets::default(), Packets::default()];
+ let mut batches = [PacketBatch::default(), PacketBatch::default()];
let keypair = Keypair::new();
Shredder::sign_shred(&keypair, &mut shred);
- batch[0].packets.resize(1, Packet::default());
- batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
- batch[0].packets[0].meta.size = shred.payload.len();
+ batches[0].packets.resize(1, Packet::default());
+ batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
+ batches[0].packets[0].meta.size = shred.payload.len();
let mut shred = Shred::new_from_data(
0xc0de_dead,
@@ -108,16 +108,16 @@ pub mod tests {
0xc0de,
);
Shredder::sign_shred(&keypair, &mut shred);
- batch[1].packets.resize(1, Packet::default());
- batch[1].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
- batch[1].packets[0].meta.size = shred.payload.len();
+ batches[1].packets.resize(1, Packet::default());
+ batches[1].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
+ batches[1].packets[0].meta.size = shred.payload.len();
let expected: HashSet<u64> = [0xc0de_dead, 0xdead_c0de].iter().cloned().collect();
- assert_eq!(ShredSigVerifier::read_slots(&batch), expected);
+ assert_eq!(ShredSigVerifier::read_slots(&batches), expected);
}
#[test]
- fn test_sigverify_shreds_verify_batch() {
+ fn test_sigverify_shreds_verify_batches() {
let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey();
let bank = Bank::new_for_tests(
@@ -127,8 +127,8 @@ pub mod tests {
let bf = Arc::new(RwLock::new(BankForks::new(bank)));
let verifier = ShredSigVerifier::new(bf, cache);
- let mut batch = vec![Packets::default()];
- batch[0].packets.resize(2, Packet::default());
+ let mut batches = vec![PacketBatch::default()];
+ batches[0].packets.resize(2, Packet::default());
let mut shred = Shred::new_from_data(
0,
@@ -142,8 +142,8 @@ pub mod tests {
0xc0de,
);
Shredder::sign_shred(&leader_keypair, &mut shred);
- batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
- batch[0].packets[0].meta.size = shred.payload.len();
+ batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
+ batches[0].packets[0].meta.size = shred.payload.len();
let mut shred = Shred::new_from_data(
0,
@@ -158,11 +158,11 @@ pub mod tests {
);
let wrong_keypair = Keypair::new();
Shredder::sign_shred(&wrong_keypair, &mut shred);
- batch[0].packets[1].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
- batch[0].packets[1].meta.size = shred.payload.len();
+ batches[0].packets[1].data[0..shred.payload.len()].copy_from_slice(&shred.payload);
+ batches[0].packets[1].meta.size = shred.payload.len();
- let rv = verifier.verify_batch(batch);
- assert!(!rv[0].packets[0].meta.discard);
- assert!(rv[0].packets[1].meta.discard);
+ let rv = verifier.verify_batches(batches);
+ assert!(!rv[0].packets[0].meta.discard());
+ assert!(rv[0].packets[1].meta.discard());
}
}


@@ -9,9 +9,9 @@ use {
crate::sigverify,
crossbeam_channel::{SendError, Sender as CrossbeamSender},
solana_measure::measure::Measure,
- solana_perf::packet::Packets,
+ solana_perf::packet::PacketBatch,
solana_sdk::timing,
- solana_streamer::streamer::{self, PacketReceiver, StreamerError},
+ solana_streamer::streamer::{self, PacketBatchReceiver, StreamerError},
std::{
collections::HashMap,
sync::mpsc::{Receiver, RecvTimeoutError},
@@ -26,7 +26,7 @@ const MAX_SIGVERIFY_BATCH: usize = 10_000;
#[derive(Error, Debug)]
pub enum SigVerifyServiceError {
#[error("send packets batch error")]
- Send(#[from] SendError<Vec<Packets>>),
+ Send(#[from] SendError<Vec<PacketBatch>>),
#[error("streamer error")]
Streamer(#[from] StreamerError),
@@ -39,7 +39,7 @@ pub struct SigVerifyStage {
}
pub trait SigVerifier {
- fn verify_batch(&self, batch: Vec<Packets>) -> Vec<Packets>;
+ fn verify_batches(&self, batches: Vec<PacketBatch>) -> Vec<PacketBatch>;
}
#[derive(Default, Clone)]
@@ -49,7 +49,7 @@ pub struct DisabledSigVerifier {}
struct SigVerifierStats {
recv_batches_us_hist: histogram::Histogram, // time to call recv_batch
verify_batches_pp_us_hist: histogram::Histogram, // per-packet time to call verify_batch
- batches_hist: histogram::Histogram, // number of Packets structures per verify call
+ batches_hist: histogram::Histogram, // number of packet batches per verify call
packets_hist: histogram::Histogram, // number of packets per verify call
total_batches: usize,
total_packets: usize,
@@ -122,24 +122,24 @@ impl SigVerifierStats {
}
impl SigVerifier for DisabledSigVerifier {
- fn verify_batch(&self, mut batch: Vec<Packets>) -> Vec<Packets> {
- sigverify::ed25519_verify_disabled(&mut batch);
- batch
+ fn verify_batches(&self, mut batches: Vec<PacketBatch>) -> Vec<PacketBatch> {
+ sigverify::ed25519_verify_disabled(&mut batches);
+ batches
}
}
impl SigVerifyStage {
#[allow(clippy::new_ret_no_self)]
pub fn new<T: SigVerifier + 'static + Send + Clone>(
- packet_receiver: Receiver<Packets>,
- verified_sender: CrossbeamSender<Vec<Packets>>,
+ packet_receiver: Receiver<PacketBatch>,
+ verified_sender: CrossbeamSender<Vec<PacketBatch>>,
verifier: T,
) -> Self {
let thread_hdl = Self::verifier_services(packet_receiver, verified_sender, verifier);
Self { thread_hdl }
}
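Given the constructor above, wiring the stage under the renamed types could look like the following; the channel names are illustrative, and the sketch assumes the solana-core and solana-perf crates at this revision plus `crossbeam-channel`:

    use crossbeam_channel::unbounded;
    use solana_perf::packet::PacketBatch;
    use std::sync::mpsc::channel;

    fn wire_stage() -> SigVerifyStage {
        // Packet batches arrive over a std mpsc channel...
        let (_packet_sender, packet_receiver) = channel::<PacketBatch>();
        // ...and verified batches leave over a crossbeam channel.
        let (verified_sender, _verified_receiver) = unbounded::<Vec<PacketBatch>>();
        SigVerifyStage::new(packet_receiver, verified_sender, DisabledSigVerifier::default())
    }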
- pub fn discard_excess_packets(batches: &mut Vec<Packets>, max_packets: usize) {
+ pub fn discard_excess_packets(batches: &mut Vec<PacketBatch>, max_packets: usize) {
let mut received_ips = HashMap::new();
for (batch_index, batch) in batches.iter().enumerate() {
for (packet_index, packets) in batch.packets.iter().enumerate() {
@@ -163,18 +163,20 @@ impl SigVerifyStage {
}
for (_addr, indexes) in received_ips {
for (batch_index, packet_index) in indexes {
- batches[batch_index].packets[packet_index].meta.discard = true;
+ batches[batch_index].packets[packet_index]
+ .meta
+ .set_discard(true);
}
}
}
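`discard_excess_packets` buckets packet indexes by source address before flagging overflow with `set_discard(true)`. A simplified standalone model of that bucket-then-flag shape; the real stage enforces a global `MAX_SIGVERIFY_BATCH` budget rather than the flat per-source cap used here:

    use std::collections::HashMap;
    use std::net::IpAddr;

    // Flag everything past `max_per_source` packets from one address,
    // returning one discard flag per input packet.
    fn discard_excess(addrs: &[IpAddr], max_per_source: usize) -> Vec<bool> {
        let mut by_source: HashMap<IpAddr, Vec<usize>> = HashMap::new();
        for (index, addr) in addrs.iter().enumerate() {
            by_source.entry(*addr).or_default().push(index);
        }
        let mut discard = vec![false; addrs.len()];
        for indexes in by_source.values() {
            for &index in indexes.iter().skip(max_per_source) {
                discard[index] = true;
            }
        }
        discard
    }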
fn verifier<T: SigVerifier>(
- recvr: &PacketReceiver,
- sendr: &CrossbeamSender<Vec<Packets>>,
+ recvr: &PacketBatchReceiver,
+ sendr: &CrossbeamSender<Vec<PacketBatch>>,
verifier: &T,
stats: &mut SigVerifierStats,
) -> Result<()> {
- let (mut batches, num_packets, recv_duration) = streamer::recv_batch(recvr)?;
+ let (mut batches, num_packets, recv_duration) = streamer::recv_packet_batches(recvr)?;
let batches_len = batches.len();
debug!(
@@ -187,7 +189,7 @@ impl SigVerifyStage {
}
let mut verify_batch_time = Measure::start("sigverify_batch_time");
- sendr.send(verifier.verify_batch(batches))?;
+ sendr.send(verifier.verify_batches(batches))?;
verify_batch_time.stop();
debug!(
@@ -216,8 +218,8 @@ impl SigVerifyStage {
}
fn verifier_service<T: SigVerifier + 'static + Send + Clone>(
- packet_receiver: PacketReceiver,
- verified_sender: CrossbeamSender<Vec<Packets>>,
+ packet_receiver: PacketBatchReceiver,
+ verified_sender: CrossbeamSender<Vec<PacketBatch>>,
verifier: &T,
) -> JoinHandle<()> {
let verifier = verifier.clone();
@@ -252,8 +254,8 @@ impl SigVerifyStage {
}
fn verifier_services<T: SigVerifier + 'static + Send + Clone>(
- packet_receiver: PacketReceiver,
- verified_sender: CrossbeamSender<Vec<Packets>>,
+ packet_receiver: PacketBatchReceiver,
+ verified_sender: CrossbeamSender<Vec<PacketBatch>>,
verifier: T,
) -> JoinHandle<()> {
Self::verifier_service(packet_receiver, verified_sender, &verifier)
@@ -268,13 +270,14 @@ impl SigVerifyStage {
mod tests {
use {super::*, solana_perf::packet::Packet};
- fn count_non_discard(packets: &[Packets]) -> usize {
- packets
+ fn count_non_discard(packet_batches: &[PacketBatch]) -> usize {
+ packet_batches
.iter()
- .map(|pp| {
- pp.packets
+ .map(|batch| {
+ batch
+ .packets
.iter()
- .map(|p| if p.meta.discard { 0 } else { 1 })
+ .map(|p| if p.meta.discard() { 0 } else { 1 })
.sum::<usize>()
})
.sum::<usize>()
@@ -283,14 +286,14 @@ mod tests {
#[test]
fn test_packet_discard() {
solana_logger::setup();
- let mut p = Packets::default();
- p.packets.resize(10, Packet::default());
- p.packets[3].meta.addr = [1u16; 8];
- let mut packets = vec![p];
+ let mut batch = PacketBatch::default();
+ batch.packets.resize(10, Packet::default());
+ batch.packets[3].meta.addr = std::net::IpAddr::from([1u16; 8]);
+ let mut batches = vec![batch];
let max = 3;
- SigVerifyStage::discard_excess_packets(&mut packets, max);
- assert_eq!(count_non_discard(&packets), max);
- assert!(!packets[0].packets[0].meta.discard);
- assert!(!packets[0].packets[3].meta.discard);
+ SigVerifyStage::discard_excess_packets(&mut batches, max);
+ assert_eq!(count_non_discard(&batches), max);
+ assert!(!batches[0].packets[0].meta.discard());
+ assert!(!batches[0].packets[3].meta.discard());
}
}


@@ -15,7 +15,7 @@ use {
};
const MS_PER_S: u64 = 1_000;
- const SAMPLE_INTERVAL_UDP_MS: u64 = 60 * MS_PER_S;
+ const SAMPLE_INTERVAL_UDP_MS: u64 = 2 * MS_PER_S;
const SAMPLE_INTERVAL_MEM_MS: u64 = MS_PER_S;
const SLEEP_INTERVAL: Duration = Duration::from_millis(500);
@@ -130,7 +130,7 @@ impl SystemMonitorService {
#[cfg(target_os = "linux")]
fn report_udp_stats(old_stats: &UdpStats, new_stats: &UdpStats) {
datapoint_info!(
"net-stats",
"net-stats-validator",
(
"in_datagrams_delta",
new_stats.in_datagrams - old_stats.in_datagrams,

Some files were not shown because too many files have changed in this diff.