Compare commits

...

403 Commits

Author SHA1 Message Date
mergify[bot]
5eb085fcaf Implement forwarding via TpuConnection (#23817) (#23936)
(cherry picked from commit 6b85c2104c)

Co-authored-by: ryleung-solana <91908731+ryleung-solana@users.noreply.github.com>
2022-03-28 16:38:44 +02:00
mergify[bot]
c66d086db1 fix: thread enforce_ulimit_nofile config down when opening blockstore (#23925) (#23958)
(cherry picked from commit f44c8f296f)

Co-authored-by: Steven Luscher <steveluscher@users.noreply.github.com>
2022-03-26 20:09:49 +00:00
mergify[bot]
0c740ebba6 Specify if archive size datapoint is for full or incremental snapshots (#23941) (#23957)
(cherry picked from commit 31b707b625)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-03-26 19:25:39 +00:00
Will Hickey
fd49ed1959 Bump version to 1.10.5 (#23955) 2022-03-26 11:34:12 -05:00
Michael Vines
df9f4193af improve arg documentation 2022-03-25 21:37:35 -07:00
Trent Nelson
be99d1d55d cli: allow skipping fee-checks when writing program buffers (hidden) 2022-03-25 18:17:15 -06:00
mergify[bot]
22af384700 Add get_confirmed_blocks_with_data method (backport #23618) (#23940)
* add get_confirmed_blocks_with_data and get_protobuf_or_bincode_cells

(cherry picked from commit f3219fb695)

* appease clippy

(cherry picked from commit 5533e9393c)

* use &[T] instead of Vec<T> where appropriate

clippy

(cherry picked from commit fbcf6a0802)

* modify get_protobuf_or_bincode_cells to accept and return an iterator

(cherry picked from commit f717fda9a3)

* make get_protobuf_or_bincode_cells accept IntoIter on row_keys, make get_confirmed_blocks_with_data return an Iterator

(cherry picked from commit d8be0d9430)

Co-authored-by: Edgar Xi <edgarxi97@gmail.com>
2022-03-25 22:45:01 +00:00
mergify[bot]
30059510cc Add solana-faucet to the list of dependencies referenced by downstream projects (#23935) (#23938)
(cherry picked from commit c6dda3b324)

Co-authored-by: Will Hickey <will.hickey@solana.com>
2022-03-25 13:36:29 -05:00
mergify[bot]
c0d3cd145e Optimize TpuConnection and its implementations and refactor connection-cache to not use dyn in order to enable those changes (#23877) (#23909)
Co-authored-by: ryleung-solana <91908731+ryleung-solana@users.noreply.github.com>
2022-03-25 19:09:26 +01:00
mergify[bot]
af79a86a72 ci: don't allow mergify to add automerge label to merged PRs (#23931)
(cherry picked from commit e34c52934c)

Co-authored-by: Trent Nelson <trent@solana.com>
2022-03-25 16:20:09 +00:00
Michael Vines
86acc8c59b vote-authorize-voter now accepts either the vote or withdraw authority
(cherry picked from commit c8c3c4359f)
2022-03-25 08:37:44 -07:00
mergify[bot]
8222f3a675 Update TpuConnection interface to be compatible with versioned txs (#23760) (#23913)
* Update TpuConnection interface to be compatible with versioned txs

* Add convenience method for sending txs

* use parallel iterator to serialize transactions

(cherry picked from commit 016d3c450a)

Co-authored-by: Justin Starry <justin@solana.com>
2022-03-24 22:09:34 +00:00
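The "parallel iterator" bullet above corresponds to a common rayon pattern. A minimal sketch under assumed dependencies (rayon, serde, bincode); the helper name is illustrative, not the actual TpuConnection code:

```rust
// Hedged sketch: serialize a batch of transactions on the rayon thread pool.
// Assumes rayon, serde, and bincode are available as dependencies.
use rayon::prelude::*;

fn serialize_batch<T: serde::Serialize + Sync>(transactions: &[T]) -> Vec<Vec<u8>> {
    transactions
        .par_iter()
        .map(|tx| bincode::serialize(tx).expect("transaction should serialize"))
        .collect()
}
```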
mergify[bot]
d135e3b839 Use QUIC client in voting service (#23713) (#23813)
* Use QUIC client in voting service

* guard quic-client usage with a flag

* add measure to time the quic client

* move time measure outside if block

* remove quic vs UDP flag from voting service

(cherry picked from commit 5d03b188c8)

Co-authored-by: Pankaj Garg <pankaj@solana.com>
2022-03-24 22:03:33 +00:00
Lijun Wang
821261a2d1 Use connection cache in send transaction (#23712) (#23900)
Use connection cache in send transaction (#23712)
2022-03-24 13:24:57 -07:00
mergify[bot]
f0c5962817 disable 'check_hash' on accounts hash calc (#23873) (#23902)
(cherry picked from commit 5a892af2fe)

Co-authored-by: Jeff Washington (jwash) <wash678@gmail.com>
2022-03-24 18:33:50 +00:00
mergify[bot]
1b930a1485 Make find_program_address client example runnable (#23492) (#23901)
(cherry picked from commit 6428602cd9)

Co-authored-by: Brian Anderson <andersrb@gmail.com>
2022-03-24 15:00:32 +00:00
mergify[bot]
2ed9655958 Set accounts_data_len on feature activation (#23730) (#23810)
(cherry picked from commit cb06126388)

Co-authored-by: Brooks Prumo <brooks@solana.com>
2022-03-21 21:50:31 +00:00
mergify[bot]
c63782f833 Made connection cache configurable. (#23783) (#23812)
Added the tpu-use-quic command-line argument.
Changed connection cache to return different connections based on the config.

(cherry picked from commit ae76fe2bd7)

Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com>
2022-03-21 21:42:53 +00:00
mergify[bot]
258f752e5d Add ability to get the latest incremental snapshot via RPC (#23788) (#23809)
(cherry picked from commit 739e43ba58)

Co-authored-by: DimAn <diman@diman.io>
2022-03-21 20:19:30 +00:00
Justin Starry
15357480ec Refactor instruction compilation and update message account key ordering (#23729)
* Refactor: Make instruction compilation usable for other message versions

* apply trents feedback

* Fix tests

* Fix bpf compatibility
2022-03-21 20:53:32 +08:00
axleiro
a1a29b0b86 Increased timeout limit of coverage and stable-perf by 10 mins each (#23797)
* Increased timeout limit of coverage and stable-perf by 10 mins each

* Increasing timeout for in disk CI by 10 min
2022-03-21 15:08:23 +05:30
Jeff Washington (jwash)
258db77100 AcctIdx: factor 'scan' out of flush_internal (#23777) 2022-03-20 22:00:38 -05:00
carllin
f34434f96b Drop lock (#23765) 2022-03-20 21:27:24 -04:00
Jeff Washington (jwash)
dd69f3baf5 throttle index adding to allow disk flushing to keep up and reduce startup ram usage (#23773) 2022-03-20 19:56:20 -05:00
Brooks Prumo
335c4b668b Fix bug in bank/sysvar_cache tests (#23780) 2022-03-19 21:38:18 -05:00
Ikko Ashimine
848093b9fd Fix typo in processor.rs (#23786)
relavant -> relevant
2022-03-19 15:24:40 -05:00
Jeff Washington (jwash)
df29276eb0 AcctIdx: remove -> evict (#23775) 2022-03-18 17:13:21 -05:00
Tao Zhu
71ea05c176 replace nested for_each with flat_map 2022-03-18 16:37:41 -05:00
Tao Zhu
1c369fb55f Scan entire UnprocessedPacketBatches buffer to produce stake and locator of each packet 2022-03-18 16:37:41 -05:00
Jack May
1f052c6234 disable deprecated BPF loader deploys (#23757) 2022-03-18 14:29:49 -07:00
Jack May
7e358c654f add test to assert type assumption (#23769) 2022-03-18 14:15:59 -07:00
g1stavo
c556811c0f docs: fix stake state typo (#23776) 2022-03-18 13:45:07 -06:00
Jeff Washington (jwash)
a419374fa4 factor out function (#23742) 2022-03-18 14:10:52 -05:00
Jack May
0e64fb1fab don't rely on align_offset to check alignment (#23770) 2022-03-18 11:30:52 -07:00
Brian Anderson
fcea92ec6c Improve correctness of Rust-side type definitions for C invoke syscall (#23624)
* Make Rust definitions of C types repr(C)

* Make SolInstruction field types agree with C definitions

* Use correct SolSignerSeedsC type in SyscallInvokeSignedC

* rustfmt

* Change asserts to debug asserts in syscall.rs
2022-03-18 11:30:30 -07:00
Yueh-Hsuan Chiang
f999eef452 (LedgerStore) Rename BlockstoreAdvancedOptions to LedgerColumnOptions (#23764)
This PR renames BlockstoreAdvancedOptions to LedgerColumnOptions, as we will
pass this struct down to LedgerColumn to allow it to perform metric reporting.
2022-03-18 11:13:35 -07:00
Tao Zhu
56428be629 Not exposing inner cost_table, to encapsulate implementation details and
make future changes easier.
2022-03-18 12:58:43 -05:00
dependabot[bot]
00ddf6576c chore: bump crossbeam-channel from 0.5.2 to 0.5.3 (#23698)
* chore: bump crossbeam-channel from 0.5.2 to 0.5.3

Bumps [crossbeam-channel](https://github.com/crossbeam-rs/crossbeam) from 0.5.2 to 0.5.3.
- [Release notes](https://github.com/crossbeam-rs/crossbeam/releases)
- [Changelog](https://github.com/crossbeam-rs/crossbeam/blob/master/CHANGELOG.md)
- [Commits](https://github.com/crossbeam-rs/crossbeam/compare/crossbeam-channel-0.5.2...crossbeam-channel-0.5.3)

---
updated-dependencies:
- dependency-name: crossbeam-channel
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <you@example.com>
2022-03-18 11:44:33 -06:00
Jeff Washington (jwash)
998e7d18f9 AcctIdx: never retry a bucket flush (#23732) 2022-03-18 12:20:42 -05:00
Brian Anderson
c9b8977226 Add crate docs for solana-program (#23363)
* Add crate docs for solana-program

* Rework solana-program docs for pr feedback

* Clarify log module docs

* Remove address lookup table program from solana-program docs
2022-03-18 08:27:51 -07:00
HaoranYi
f54e746fc5 Support u8 slice digester in frozen abi struct. (#23726)
* support u8 slice in frozen abi digester

* use slice in account struct

* add bpf cargo lock file

* no need to pass account.data to serializer

* fix comments
2022-03-18 09:31:07 -05:00
Kirill Lykov
c694703e14 address PR comments 2022-03-18 14:55:33 +01:00
Kirill Lykov
2da896fa40 add documentation 2022-03-18 14:55:33 +01:00
Kirill Lykov
7074ebf45a address PR comments 2022-03-18 14:55:33 +01:00
klykov
957bc0db6b add tests to dos tool 2022-03-18 14:55:33 +01:00
klykov
d9dbfc83d5 upd Cargo.lock 2022-03-18 14:55:33 +01:00
klykov
f5339882cb refactor cmdline interface 2022-03-18 14:55:33 +01:00
klykov
a63dee87ec add transaction parameters dump 2022-03-18 14:55:33 +01:00
klykov
1b0c9ad4c0 add option payer to dos tool 2022-03-18 14:55:33 +01:00
klykov
cf73f6dc74 fix typo in dos 2022-03-18 14:55:33 +01:00
klykov
dce5d1c1fa avoid signatures if unnecessary in dos 2022-03-18 14:55:33 +01:00
klykov
1641d1d329 fix: cache blockhash in dos tool 2022-03-18 14:55:33 +01:00
klykov
cb537e80d7 add transaction options to dos 2022-03-18 14:55:33 +01:00
klykov
d4d95f1811 add valid blockhash option to dos 2022-03-18 14:55:33 +01:00
klykov
797c3324f0 add number of signatures to dos 2022-03-18 14:55:33 +01:00
Tao Zhu
0ed23899e7 directly use compute_budget MAX_UNITS and DEFAULT_UNITS 2022-03-18 08:53:11 -05:00
Tao Zhu
a4cacf3389 add deterministic default cost 2022-03-18 08:53:11 -05:00
Trent Nelson
ce2e82cfb6 validator: --only-known-rpc requires a --known-validator ... 2022-03-18 07:02:16 +00:00
Jeff Washington (jwash)
857576d76f AcctIdx: move write to disk outside in mem write lock (#23731) 2022-03-17 23:09:41 -05:00
Brooks Prumo
7ff8c80e25 Add accounts_data_len to bank snapshot (#23714) 2022-03-17 20:14:54 -05:00
Tao Zhu
c478fe2047 add timing metrics, some renaming 2022-03-17 19:31:28 -05:00
Tao Zhu
fd515097d8 leader qos part 2: add stage to find sender stake, set to packet meta 2022-03-17 19:31:28 -05:00
Stephen Akridge
976b138e76 Add tx weighting stage 2022-03-17 19:31:28 -05:00
Jeff Washington (jwash)
664deb2157 AcctIdx: get rid of unused is_dirty (#23733) 2022-03-17 16:29:36 -05:00
Lijun Wang
8b230b86cc Use borrow instead of move in interfaces defined by TpuConnection (#23734)
* Use borrow instead of move in interfaces defined by TpuConnection to avoid data copy

* Removed a few more unnecessary whole array slicing.
2022-03-17 13:31:11 -07:00
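A hedged sketch of the borrow-vs-move distinction this commit describes; the trait and method names are hypothetical, not the real TpuConnection interface:

```rust
// Illustrative only: the borrowed variant lets callers keep ownership of the
// serialized buffers, so no copy is forced on every send.
trait SendsWireTransactions {
    // Move-based API: the caller must clone if it still needs the buffers.
    fn send_owned(&self, wire_transactions: Vec<Vec<u8>>) -> std::io::Result<()>;

    // Borrow-based API: the implementation reads the buffers in place.
    fn send_borrowed(&self, wire_transactions: &[Vec<u8>]) -> std::io::Result<()>;
}
```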
behzad nouri
6b0d34d70d removes redundant Arcs from Blockstore (#23735) 2022-03-17 19:43:57 +00:00
Jeff Washington (jwash)
342f1ab1cb clean up/add comments (#23727) 2022-03-17 14:23:08 -05:00
Will Hickey
2f58c9e501 Bump version to 1.10.4 (#23743) 2022-03-17 14:02:13 -05:00
Jeff Washington (jwash)
bb9f9c8dd5 AccountSharedData::serialize (#23685) 2022-03-17 09:29:49 -05:00
Jeff Washington (jwash)
66b1f55351 AcctIdx: cheaper check for should evict while flushing (#23705) 2022-03-17 08:46:32 -05:00
Jeff Washington (jwash)
3a46f45650 AcctIdx: add stats for flushing and estimated memory (#23709) 2022-03-17 08:46:00 -05:00
Justin Starry
9ed056424c Remove unused InstructionRecorder and some SanitizedMessage methods (#23723) 2022-03-17 20:25:12 +08:00
Justin Starry
0eccacbd5b Add CLI support for versioned transactions (#23606) 2022-03-17 11:43:04 +08:00
Tyera Eulberg
330d6db19a Pin version for publish cargo-check (#23716) 2022-03-16 18:46:41 -06:00
Justin Starry
b4350a2522 Make solana-address-lookup-table-program crate bpf compatible (#23700) 2022-03-17 08:21:07 +08:00
Alexander Meißner
dda3a463a2 Fix duplicate account translation in CPI (#23701)
* Use visit_each_account_once() in CPI syscall to prevent duplicate accounts from being translated twice.

* remove unwrap() from runtime

Co-authored-by: Jack May <jack@solana.com>
2022-03-16 22:45:01 +00:00
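A minimal sketch of the dedup idea behind visit_each_account_once(); the free-standing helper below is illustrative, not the actual syscall code:

```rust
// Each unique account index is visited once, so an account that appears twice
// in an instruction's account list is not translated twice.
use std::collections::HashSet;

fn visit_each_account_once(account_indexes: &[usize], mut visit: impl FnMut(usize)) {
    let mut seen = HashSet::new();
    for &index in account_indexes {
        // `insert` returns false for indexes we have already seen.
        if seen.insert(index) {
            visit(index);
        }
    }
}
```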
Jeff Washington (jwash)
be0aeea01a AcctIdx: check for range holds outside lock (#23706) 2022-03-16 17:44:59 -05:00
Jeff Washington (jwash)
caddb851be acct idx flush keeps slot_list read lock open (#23704) 2022-03-16 17:29:04 -05:00
Brooks Prumo
18bddbc730 Stake tests respect MINIMUM_STAKE_DELEGATION (#23426) 2022-03-16 16:36:23 -05:00
Yueh-Hsuan Chiang
86c695268e (LedgerStore) Improve the function API of new_cf_descriptor (#23696)
As we start adding more options into BlockstoreOptions, it's better to allow
new_cf_descriptor to take a reference to BlockstoreOptions so that
we can avoid future function API changes to new_cf_descriptor.
2022-03-16 11:47:49 -07:00
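A hedged sketch of the API shape described above; the struct, fields, and return type are hypothetical stand-ins, not the real blockstore code:

```rust
// Passing a reference to the whole options struct means adding a new option
// later does not change this function's signature.
struct BlockstoreOptionsSketch {
    fifo_compaction: bool,
    disable_compaction: bool,
}

fn new_cf_descriptor_sketch(cf_name: &str, options: &BlockstoreOptionsSketch) -> String {
    // Stand-in for building a rocksdb::ColumnFamilyDescriptor from the options.
    format!(
        "{cf_name}: fifo={}, compaction_disabled={}",
        options.fifo_compaction, options.disable_compaction
    )
}
```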
dependabot[bot]
45337885c1 chore: bump gethostname from 0.2.1 to 0.2.3 (#23697)
* chore: bump gethostname from 0.2.1 to 0.2.3

Bumps gethostname from 0.2.1 to 0.2.3.

---
updated-dependencies:
- dependency-name: gethostname
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-16 12:35:39 -06:00
Tyera Eulberg
072a8c99ea Bump openssl-src (#23708) 2022-03-16 12:34:47 -06:00
Brooks Prumo
74bb527203 Refactor and use MINIMUM_STAKE_DELEGATION constant (#22663) 2022-03-16 10:56:48 -05:00
Jeff Washington (jwash)
fa7926580a minor VoteAccount refactoring (#23686) 2022-03-16 09:50:12 -05:00
Justin Starry
cffc32af3e Make payer and system program optional when extending lookup tables (#23678) 2022-03-16 21:40:16 +08:00
behzad nouri
3252dc7203 uses structural sharing for stake-delegations hash-map (#23585)
StakeDelegations is using Arc to implement copy-on-write semantics:
https://github.com/solana-labs/solana/blob/58c0db970/runtime/src/stake_delegations.rs#L14-L16

However a single delegation change will still clone the entire hash-map,
resulting in excessive memory use as observed in:
https://github.com/solana-labs/solana/issues/23061#issuecomment-1063444072

This commit instead uses an immutable hash-map implementing structural
sharing:
> which means that if two data structures are mostly copies of each
> other, most of the memory they take up will be shared between them.
https://docs.rs/im/latest/im/
2022-03-16 12:58:05 +00:00
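A minimal sketch of structural sharing with the `im` crate cited above; the key/value types are placeholders, not the actual StakeDelegations code:

```rust
// Cloning an im::HashMap is cheap because the copy shares its internal nodes.
use im::HashMap as ImHashMap;

fn main() {
    let mut delegations: ImHashMap<u64, u64> = ImHashMap::new();
    for stake_id in 0..100_000u64 {
        delegations.insert(stake_id, 42);
    }

    // Cheap clone: the new map shares almost all nodes with the old one,
    // so a single delegation change no longer copies the whole table.
    let mut next = delegations.clone();
    next.insert(100_000, 7);

    assert_eq!(delegations.len(), 100_000);
    assert_eq!(next.len(), 100_001);
}
```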
Alexander Meißner
584ac80b1e Revert: KeyedAccount refactorings in builtin programs (#23649)
* Revert "Replaces KeyedAccount by BorrowedAccount in the BPF loader. (#23056)"

6c56eb9663

* Revert "Replaces `KeyedAccount` by `BorrowedAccount` in `system_instruction_processor`. (#23217)"

ee7e411d68

* Revert "Replaces `KeyedAccount` by `BorrowedAccount` in `nonce_keyed_account`. (#23214)"

1a68f81f89

* Revert "Replaces KeyedAccount by BorrowedAccount in the config processor. (#23302)"

a14c7c37ee

* Revert "Replaces `KeyedAccount` by `BorrowedAccount` in vote processor (#23348)"

e2fa6a0f7a

* Revert "Refactor: Prepare stake_instruction.rs to remove `KeyedAccount`s (#23375)"

ee3fc39f1c
2022-03-16 11:30:01 +01:00
dependabot[bot]
44ab660172 chore: bump libc from 0.2.119 to 0.2.120 (#23694)
* chore: bump libc from 0.2.119 to 0.2.120

Bumps [libc](https://github.com/rust-lang/libc) from 0.2.119 to 0.2.120.
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.119...0.2.120)

---
updated-dependencies:
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-16 00:34:40 -06:00
Michael Vines
3773b753d1 Configure shrink paths during blockstore load 2022-03-15 23:08:07 -07:00
Michael Vines
ab373bb1a9 Refactor new_banks_from_ledger() into load and process steps 2022-03-15 23:08:07 -07:00
Michael Vines
390c5667f7 Setup bank hard_forks in load_bank_forks() 2022-03-15 23:08:07 -07:00
dependabot[bot]
455313584f chore: bump sha3 from 0.10.0 to 0.10.1 (#23480)
* chore: bump sha3 from 0.10.0 to 0.10.1

Bumps [sha3](https://github.com/RustCrypto/hashes) from 0.10.0 to 0.10.1.
- [Release notes](https://github.com/RustCrypto/hashes/releases)
- [Commits](https://github.com/RustCrypto/hashes/compare/sha3-v0.10.0...sha3-v0.10.1)

---
updated-dependencies:
- dependency-name: sha3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

* Revert bump in zk-token-sdk

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-16 06:01:01 +00:00
dependabot[bot]
e4590c5be6 chore: bump rpassword from 6.0.0 to 6.0.1 (#23683)
* chore: bump rpassword from 6.0.0 to 6.0.1

Bumps [rpassword](https://github.com/conradkleinespel/rpassword) from 6.0.0 to 6.0.1.
- [Release notes](https://github.com/conradkleinespel/rpassword/releases)
- [Commits](https://github.com/conradkleinespel/rpassword/compare/v6.0.0...v6.0.1)

---
updated-dependencies:
- dependency-name: rpassword
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-15 21:41:27 -06:00
Tyera Eulberg
410463fb72 Update InvalidRentPayingAccount error (#23680) 2022-03-15 21:41:16 -06:00
Michael Vines
2da4e3eb6c Add --no-os-memory-stats-reporting 2022-03-15 17:07:40 -07:00
Michael Vines
dbc62f2e28 Use consistent variable naming for DropBankService 2022-03-15 17:07:13 -07:00
Michael Vines
d44f3d7216 Remove unhelpful log message 2022-03-15 17:07:13 -07:00
ryleung-solana
9b46f9b2da Quic Connection Cache (#23598)
Add a connection cache to allow modules that send data to get or create connections (e.g. for quic) associated with a certain SocketAddr
2022-03-15 18:16:35 -04:00
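A hedged sketch of a get-or-create cache keyed by SocketAddr as described; names are hypothetical and a String stands in for a real connection handle:

```rust
// Repeated sends to the same TPU address reuse one entry instead of redialing.
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::{Arc, Mutex};

#[derive(Default)]
struct ConnectionCacheSketch {
    // The String stands in for an actual connection handle.
    connections: Mutex<HashMap<SocketAddr, Arc<String>>>,
}

impl ConnectionCacheSketch {
    fn get_or_add_connection(&self, addr: SocketAddr) -> Arc<String> {
        let mut map = self.connections.lock().unwrap();
        map.entry(addr)
            .or_insert_with(|| Arc::new(format!("connection to {addr}")))
            .clone()
    }
}
```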
Tao Zhu
2d3501dff9 make upsert infallible op 2022-03-15 17:05:41 -05:00
Jeff Washington (jwash)
b5a99b9b09 avoid data copies in writing to cache (#23674) 2022-03-15 16:42:26 -05:00
dependabot[bot]
29af42f428 chore: bump zstd from 0.11.0+zstd.1.5.2 to 0.11.1+zstd.1.5.2 (#23679)
* chore: bump zstd from 0.11.0+zstd.1.5.2 to 0.11.1+zstd.1.5.2

Bumps [zstd](https://github.com/gyscos/zstd-rs) from 0.11.0+zstd.1.5.2 to 0.11.1+zstd.1.5.2.
- [Release notes](https://github.com/gyscos/zstd-rs/releases)
- [Commits](https://github.com/gyscos/zstd-rs/commits)

---
updated-dependencies:
- dependency-name: zstd
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <you@example.com>
2022-03-15 15:31:14 -06:00
dependabot[bot]
62b26f012e chore: bump reqwest from 0.11.9 to 0.11.10 (#23671)
* chore: bump reqwest from 0.11.9 to 0.11.10

Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.11.9 to 0.11.10.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.11.9...v0.11.10)

---
updated-dependencies:
- dependency-name: reqwest
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-15 12:54:23 -06:00
Tao Zhu
61cead9b9b Remove injection of exit signal into cost_update_service 2022-03-15 09:58:56 -05:00
Tao Zhu
eb73dacd58 harden banking tests 2022-03-15 09:58:08 -05:00
Alexander Meißner
e9040d2766 Adds a missing check_number_of_instruction_accounts() in StakeInstruction::Authorize. (#23672) 2022-03-15 15:53:11 +01:00
HaoranYi
8c4f010b8d fix logging: only walk remote dir if it exist (#23663) 2022-03-15 08:56:22 -05:00
Zayyan Faizal
64e2d9dc47 docs: update sysvar docs for load_instruction_at_checked (#22925)
* docs: update sysvar docs for load_instruction_at_checked

Update the instruction introspection docs to use the updated load_instruction_at_checked function instead of deprecated load_instruction_at

* Update to load_current_index_checked
2022-03-15 20:22:34 +08:00
steviez
e7cf84a593 Remove AccessType arg from create_new_ledger (#23591)
Creating a new ledger implicitly means that no other process could have
previously held access to it. Additionally, creating a new ledger
implicitly requires writing, so it follows that Primary access is
required and we can drop access type as an argument.
2022-03-15 00:07:16 -05:00
page18
c1bf85b109 Explorer: Add Swim Swap Program to known addresses (#23660) 2022-03-15 12:18:35 +08:00
Justin Starry
8c8f9694e0 Refactor: Sanitized transaction creation (#23558)
* Refactor: SanitizedTransaction::try_create optionally computes hash

* Refactor: Add SimpleAddressLoader
2022-03-15 12:02:22 +08:00
Jeff Washington (jwash)
f05ac7a899 move to get_slots_in_epoch (#23657) 2022-03-14 22:47:45 -05:00
Tyera Eulberg
102dd68a03 Rename AccountsDb plugins to Geyser plugins (#23604) 2022-03-14 19:18:46 -06:00
Brooks Prumo
bcc5890182 Fix comments in bootstrap for downloading snapshots (#23664) 2022-03-14 19:35:57 -05:00
dependabot[bot]
7651ae5039 chore: bump rpassword from 5.0.1 to 6.0.0 (#23638)
* chore: bump rpassword from 5.0.1 to 6.0.0

Bumps [rpassword](https://github.com/conradkleinespel/rpassword) from 5.0.1 to 6.0.0.
- [Release notes](https://github.com/conradkleinespel/rpassword/releases)
- [Commits](https://github.com/conradkleinespel/rpassword/compare/v5.0.1...v6.0.0)

---
updated-dependencies:
- dependency-name: rpassword
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

* Update api

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-15 00:03:30 +00:00
Michael Vines
17cc095d28 Slot warping doesn't need to be in new_banks_from_ledger 2022-03-14 15:29:58 -07:00
Michael Vines
2e7ee0f177 Tower loading doesn't need to be in new_banks_from_ledger 2022-03-14 15:29:58 -07:00
Michael Vines
390dc24608 Create leader schedule before processing blockstore 2022-03-14 15:29:58 -07:00
Michael Vines
543d5d4a5d Reduce new_banks_from_ledger arguments 2022-03-14 15:29:58 -07:00
Michael Vines
115f376465 Factor out bank_forks_utils::load_bank_forks() 2022-03-14 15:29:58 -07:00
Michael Vines
63324be5b3 Remove last_full_snapshot_slot return value when it can be derived by the caller 2022-03-14 15:29:58 -07:00
Michael Vines
cf5048faaa Eliminate to_loadresult() 2022-03-14 15:29:58 -07:00
Michael Vines
3d4bf1d00a Refactor bank_forks_utils::load() to invoke a common process_blockstore_from_root() 2022-03-14 15:29:58 -07:00
Michael Vines
c2ce152be8 Inline do_process_blockstore_from_root 2022-03-14 15:29:58 -07:00
Michael Vines
07d5ee062d Push recyclers down the stack 2022-03-14 15:29:58 -07:00
HaoranYi
fc2d1a61f3 signing_coding_time is not used (#23655) 2022-03-14 17:16:30 -05:00
Jack May
c68c0e881e consolidate use-jit flag (#23652) 2022-03-14 15:00:00 -07:00
Jacob Creech
c5eb8ed7d1 docs: add web3 source docs copy 2022-03-14 15:18:08 -06:00
Enrique Fynn
7eaec26a1c Add space for keys in calculation for rent exempt in process_set_validator_info (#23485)
* Fix `process_set_validator_info`

Add space for keys in calculation for rent exempt in
`process_set_validator_info`.

The space required for allocating the `(ConfigKeys, ValidatorInfo)`
tuple only accounted for `ValidatorInfo`, but
`config_instruction::create_account` also requires space for `n` keys.

* Remove one clone call from closure
2022-03-14 15:10:31 -06:00
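A hedged illustration of the fix's idea; the sizes below are arbitrary placeholders, not the real ConfigKeys layout:

```rust
// The rent-exempt reserve must cover the whole (ConfigKeys, ValidatorInfo)
// tuple, i.e. the serialized keys plus the info payload.
use solana_sdk::rent::Rent;

fn required_lamports(rent: &Rent, keys_space: usize, info_space: usize) -> u64 {
    // Including keys_space is the fix: previously only info_space was counted.
    rent.minimum_balance(keys_space + info_space)
}

fn main() {
    let rent = Rent::default();
    // Placeholder sizes purely for illustration.
    println!("{}", required_lamports(&rent, 2 * 34, 566));
}
```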
HaoranYi
0c684721d8 Separate remotely downloaded snapshot archives (#23510)
* separate remotely downloaded snapshot archives

* add str const for snapshot download dir

* only walk remote sub directory

* move directory creation outside of loop

* move is_remote to traits

* clippy simplify

* clippy

* clippy

* add unittest

* fix local cluster tests

* look for remote snapshot archive in remote folder

* create remote dir in tests

* use snapshot download dir constant

* extract build_remote_dir fn

* fix build

* code review - walking snapshot archived dirs explicitly

* fix build

* fix build

* fix comments

* Update runtime/src/snapshot_utils.rs

Co-authored-by: Brooks Prumo <brooks@prumo.org>

* clippy

* borrow to avoid copy

Co-authored-by: Brooks Prumo <brooks@prumo.org>
2022-03-14 14:03:59 -05:00
Tao Zhu
5ea6a1e500 code review 2022-03-14 13:14:27 -05:00
Tao Zhu
8590911b0a Replace type alias with newtype for UnprocessedPacketBatches 2022-03-14 13:14:27 -05:00
Jeff Washington (jwash)
a3242b6b86 remove unnecessary namespaces (#23646) 2022-03-14 12:28:55 -05:00
Jack May
c369f8b871 limit secp256k1 recover id (#23621) 2022-03-14 09:34:43 -07:00
Will Hickey
63bf0f66af Bump version to 1.10.3 (#23648) 2022-03-14 11:18:45 -05:00
Brooks Prumo
11be3fb7fa Add FlushGuard to ensure flushing_active is used safely (#23571) 2022-03-14 10:35:00 -05:00
Jeff Washington (jwash)
e8a8f4e9e2 fix filler acct slots in epoch (#23536) 2022-03-14 09:57:44 -05:00
Ashwin Ramaswami
accc64ebcf chore: fix typo retring -> retrying (#23630) 2022-03-13 23:25:27 -06:00
Michael Vines
bc9882a78b Disable publish of test crates 2022-03-13 20:47:50 -07:00
Michael Vines
01b6f97f0b Pin histogram crate version to fix cargo publish 2022-03-13 16:19:30 -07:00
Brooks Prumo
7758c32035 Banking Stage drops transactions that'll exceed the total account data size limit (#23537) 2022-03-13 15:58:57 +00:00
Jack May
bf57252298 deny slice indexing (#23622) 2022-03-13 08:43:07 -07:00
axleiro
0d369616e7 add script for the traceability (#23626)
merging for traceability
2022-03-12 16:20:22 +05:30
Tyera Eulberg
48d4c8eb5c Clippy 2022-03-11 20:04:05 -07:00
Tyera Eulberg
e2e54ef64b Add token-balance unit test 2022-03-11 19:56:37 -07:00
Yueh-Hsuan Chiang
1e20bd8f9a (LedgerStore) Include storage type as a tag in RocksDB metric reporting (#23523)
#### Summary of Changes
This PR further enables the group-by operation on storage type in blockstore_rocksdb_cfs metrics.
Such a group-by allows us to further compare the performance metrics between rocks-level and
rocks-fifo.

To make things extensible, this PR introduces BlockstoreAdvancedOptions and moves shred_storage_type.
All fields in BlockstoreAdvancedOptions will support the group-by operation in blockstore_rocksdb_cfs.

Dependency: #23580
2022-03-11 15:17:34 -08:00
dependabot[bot]
b1da7cff66 chore: bump base64 from 0.12.3 to 0.13.0 (#23616)
* chore: bump base64 from 0.12.3 to 0.13.0

Bumps [base64](https://github.com/marshallpierce/rust-base64) from 0.12.3 to 0.13.0.
- [Release notes](https://github.com/marshallpierce/rust-base64/releases)
- [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md)
- [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.12.3...v0.13.0)

---
updated-dependencies:
- dependency-name: base64
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update Cargo.lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-03-11 16:01:15 -07:00
samkim-crypto
4e02ec342c zk-token-sdk: fixing a range proof edge case (#23605)
* zk-token-sdk: fixing a range proof edge case

* zk-token-sdk: clippy
2022-03-11 16:57:56 -05:00
Jordan Sexton
3cf31fa9b8 fix: web3.js browser cjs build (#23500) 2022-03-11 14:35:15 -07:00
Giorgio Gambino
ccff006948 Add more logs to purge_old_snapshot_archives (#23531) 2022-03-11 15:01:48 -06:00
Brooks Prumo
3eca66a1f8 Remove extra line from Pull Request template 2022-03-11 14:56:52 -06:00
Brooks Prumo
4d08234603 Remove extra newline from Issue template 2022-03-11 14:28:26 -06:00
kirill lykov
5d75ef4766 fix deadlink in doc (#23607) 2022-03-11 12:20:04 -08:00
Jack May
7ee7fc6f58 CI clippy and fmt for all (#23599) 2022-03-11 12:07:06 -08:00
Brooks Prumo
d20dd21600 Sort tables in Cargo.toml files (#23602) 2022-03-11 11:05:57 -06:00
Marc Jaramillo
2bff36dfba feat: add getEstimatedFee to Transaction (#23579) 2022-03-11 10:05:22 -07:00
Justin Starry
4f18d73281 Explorer: Add support for cluster specific program ids (#23609) 2022-03-11 23:22:31 +08:00
behzad nouri
043086081f checks if --entrypoint is consistent with --allow-private-addr (#23559)
If entrypoints are excluded by SocketAddrSpace, the node cannot start.
Private --entrypoint addresses require --allow-private-addr.
2022-03-11 14:15:05 +00:00
Richard Patel
3d021cffa3 explorer: add Pyth instruction support (#23551) 2022-03-11 21:59:16 +08:00
sakridge
9b591286d7 Revert "chore: bump dashmap from 4.0.2 to 5.1.0 (#23372)" (#23592)
This reverts commit 3a0271c113.
2022-03-11 09:12:27 +01:00
Yueh-Hsuan Chiang
99f1a22b9d (LedgerStore) Generalize RocksDB metric reporting macro (#23580)
#### Summary of Changes

This PR generalizes RocksDB metric reporting macro so that future RocksDB metrics can
reuse the same macro.
2022-03-10 23:13:59 -08:00
Brooks Prumo
e6a67cd091 Remove unused dependencies (#23595) 2022-03-10 22:20:52 -06:00
dependabot[bot]
631be1ffdd chore: bump zstd from 0.10.0+zstd.1.5.2 to 0.11.0+zstd.1.5.2 (#23581)
* chore: bump zstd from 0.10.0+zstd.1.5.2 to 0.11.0+zstd.1.5.2

Bumps [zstd](https://github.com/gyscos/zstd-rs) from 0.10.0+zstd.1.5.2 to 0.11.0+zstd.1.5.2.
- [Release notes](https://github.com/gyscos/zstd-rs/releases)
- [Commits](https://github.com/gyscos/zstd-rs/compare/v0.10.0...v0.11.0)

---
updated-dependencies:
- dependency-name: zstd
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-10 20:01:05 -07:00
Justin Starry
021135978d Refactor: Split up cli transaction display methods (#23547) 2022-03-11 10:49:53 +08:00
Will Hickey
b444836a97 Bump version to 1.10.2 (#23597) 2022-03-10 16:41:06 -06:00
HaoranYi
83f5f8bfc3 Move test_purge_huge test (#23587)
* ignore test_purge_huge tests; they are expensive.

* move test_purge to integration tests
2022-03-10 15:31:43 -06:00
Jack May
ddd9d5a5a5 deny slice indexing (#23565) 2022-03-10 11:48:33 -08:00
Jack May
ead8cc4366 Check for physical region overlap (#23568) 2022-03-10 11:48:09 -08:00
Russell Wong
7b238b3645 fix: renamed the internal wasm_bindgen init function to avoid collision 2022-03-10 11:33:24 -08:00
Tao Zhu
35d1235ed0 - move unprocessed_packet_batches from BankingStage to its own module (#23508)
- deserialize packets during receiving and buffering
2022-03-10 18:47:46 +00:00
Brooks Prumo
3c6840050c Ensure blocks do not exceed the max accounts data size during Replay Stage (#23422) 2022-03-10 10:24:31 -06:00
steviez
58c0db9704 Cleanup several blockstore functions (#23390)
* Rename excludes_from_compaction to should_exclude_from_compaction
* Make subfunction to create all cf descriptors
* Condense logic for when to disable compactions
2022-03-10 02:08:38 -06:00
sethgirvan
37189f20c5 Fix error message typo (#23574) 2022-03-10 05:03:02 +00:00
carllin
588414a776 Report even if slot begins and ends in process_buffered_packets() (#23549) 2022-03-09 23:42:35 -05:00
Brian Anderson
72af687aa6 Fix broken links in solana-client (#23503)
* Fix broken links in solana-client

* Revise docs for RpcClient::new_sender

* Add docs for RpcClient::new_mock etc

* Fix doc warnings on RpcSender

* rustfmt
2022-03-09 21:42:23 -07:00
Tao Zhu
f68c5a274d remove persist_cost_table code 2022-03-09 21:05:47 -07:00
Tao Zhu
9f71958d7d Patch validator from loading persisted program costs 2022-03-09 21:05:47 -07:00
ryleung-solana
17b00ad3a4 Add quic-client module (#23166)
* Add quic-client module to send transactions via quic, abstracted behind the TpuConnection trait (along with a legacy UDP implementation of TpuConnection) and change thin-client to use TpuConnection
2022-03-09 21:33:05 -05:00
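A hedged sketch of the abstraction this commit describes; the trait and struct below use hypothetical, simplified signatures rather than the real quic-client API:

```rust
// Callers program against one trait and do not care whether the transport
// underneath is UDP or QUIC.
use std::net::SocketAddr;

trait TpuConnectionSketch {
    fn tpu_addr(&self) -> &SocketAddr;
    fn send_wire_transaction(&self, wire_transaction: &[u8]) -> std::io::Result<()>;
}

struct UdpConnectionSketch {
    tpu_addr: SocketAddr,
}

impl TpuConnectionSketch for UdpConnectionSketch {
    fn tpu_addr(&self) -> &SocketAddr {
        &self.tpu_addr
    }
    fn send_wire_transaction(&self, _wire_transaction: &[u8]) -> std::io::Result<()> {
        // A real implementation would write to a UdpSocket here.
        Ok(())
    }
}
```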
Brooks Prumo
1fe0d6eeeb Set ordering flushing_active.swap() to AcqRel (#23567) 2022-03-10 01:22:42 +00:00
Alexander Meißner
e60c9b97c9 Removes KeyedAccount from tests in stake instruction. (Part 1) (#23473)
* Migrates test_stake_delegate().

* Migrates test_stake_initialize().

* Migrates test_initialize_incorrect_account_sizes().

* Migrates test_authorize().

* Migrates test_authorize_lockup().

* Migrates test_authorize_override().

* Migrates test_authorize_with_seed().

* Migrates test_split().

* Migrates test_split_fake_stake_dest().

* Migrates test_deactivate().

* Migrates test_set_lockup().

* Migrates test_optional_lockup_for_stake_program().

* Migrates test_withdraw_stake().

* Migrates test_withdraw_stake_invalid_state().

* Migrates test_withdraw_stake_before_warmup().

* Migrates test_withdraw_lockup().

* Migrates test_withdraw_identical_authorities().

* Migrates test_withdraw_rent_exempt().

* Migrates test_authorize_delegated_stake().

* Migrates test_redelegate_consider_balance_changes().
2022-03-09 23:48:10 +01:00
Brooks Prumo
ea1bcd3d59 Add newlines in templates (#23540) 2022-03-09 16:10:23 -06:00
Brooks Prumo
1eddb6d1e9 Move ArchiveFormat into own module (#23562) 2022-03-09 16:09:34 -06:00
dependabot[bot]
26ef6111bb chore: bump follow-redirects from 1.13.1 to 1.14.8 in /web3.js (#23122)
Bumps [follow-redirects](https://github.com/follow-redirects/follow-redirects) from 1.13.1 to 1.14.8.
- [Release notes](https://github.com/follow-redirects/follow-redirects/releases)
- [Commits](https://github.com/follow-redirects/follow-redirects/compare/v1.13.1...v1.14.8)

---
updated-dependencies:
- dependency-name: follow-redirects
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-09 14:09:06 -08:00
Brooks Prumo
9bbccbe27c Use AsRef<Path> instead of PathBuf for parameters (#23560) 2022-03-09 16:08:33 -06:00
SolanaMonkeyBusiness
fb974489a5 fix(explorer): metaplexNFTHeader for unverified collection (#23498)
Current behavior: 
When the NFT is in an unverified collection, the metaplex NFT header displays a 0.
Example : https://explorer.solana.com/address/ARA6zvFJZAydh6mdAgc7A6pMpZotCQj6eYugUMpEWKYh

Expected behavior:
The metaplex NFT header should display nothing
2022-03-09 13:44:12 -08:00
HaoranYi
ba54b30101 clippy (#23563) 2022-03-09 15:33:50 -06:00
HaoranYi
a1c45d5acb typo (#23564) 2022-03-09 15:11:21 -06:00
Brian Anderson
176fd23002 Continue making it possible to implement RpcSender (#23561)
* Make nonblocking RpcClient::new_sender pub

As is the blocking RpcClient::new_sender, per #23503.

* Make solana_client::rpc_sender pub

Per #17631 it is supposed to be possible to implement RpcSender in
order to call RpcClient::new_sender. As of now though it is
not possible to name RpcSender in order to implement it.
2022-03-09 14:07:51 -07:00
klykov
3688ac4eae upd Cargo.lock 2022-03-09 17:53:06 +01:00
klykov
cc55684f5f update clap to v3: rbpf-cli 2022-03-09 17:53:06 +01:00
klykov
e2bc326d58 update clap to v3: test-bpf 2022-03-09 17:53:06 +01:00
klykov
8abaa5d350 update clap to v3: bpf-tools 2022-03-09 17:53:06 +01:00
Richard Patel
949006b5a2 explorer: format Instructions title as singular/plural (#23553) 2022-03-09 20:53:29 +08:00
axleiro
afc41c7b11 fix: vercel preview "PR commit URL" on slack (#23519)
* fix: vercel preview "PR commit URL" on slack

added command to get the "PR commit URL" on slack along with the vercel preview deploy link on the PR.

* fix: error trailing whitespace on buildkite
2022-03-09 17:06:36 +05:30
sakridge
7a9884c831 Quic limit connections (#23283)
* quic server limit connections

* bump per_ip

* Review comments

* Make the connections per port
2022-03-09 10:52:31 +01:00
dependabot[bot]
8a4b019ded chore: bump byte-unit from 4.0.13 to 4.0.14 (#23550)
Bumps [byte-unit](https://github.com/magiclen/byte-unit) from 4.0.13 to 4.0.14.
- [Release notes](https://github.com/magiclen/byte-unit/releases)
- [Commits](https://github.com/magiclen/byte-unit/compare/v4.0.13...v4.0.14)

---
updated-dependencies:
- dependency-name: byte-unit
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-09 08:28:42 +00:00
Justin Starry
249d926d1b Refactor: Add transaction binary encoding enum (#23546) 2022-03-09 16:09:08 +08:00
dependabot[bot]
65e2d9b2f2 chore: bump hidapi from 1.3.3 to 1.3.4 (#23544)
Bumps [hidapi](https://github.com/ruabmbua/hidapi-rs) from 1.3.3 to 1.3.4.
- [Release notes](https://github.com/ruabmbua/hidapi-rs/releases)
- [Commits](https://github.com/ruabmbua/hidapi-rs/commits)

---
updated-dependencies:
- dependency-name: hidapi
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-09 05:57:59 +00:00
dependabot[bot]
5c722519cf chore: bump etcd-client from 0.8.3 to 0.8.4
Bumps [etcd-client](https://github.com/etcdv3/etcd-client) from 0.8.3 to 0.8.4.
- [Release notes](https://github.com/etcdv3/etcd-client/releases)
- [Commits](https://github.com/etcdv3/etcd-client/compare/0.8.3...v0.8.4)

---
updated-dependencies:
- dependency-name: etcd-client
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-03-08 17:33:24 -08:00
Carl Lin
5a0cd05866 Revert "- estimate a program cost as 2 standard deviation above mean"
This reverts commit a25ac1c988.
2022-03-08 17:18:44 -08:00
Carl Lin
9acbfa5eb1 Revert "use EMA in place of Welford"
This reverts commit 6587dbfa47.
2022-03-08 17:18:44 -08:00
Carl Lin
c878c9e2cb Revert "1. Persist to blockstore less frequently;"
This reverts commit 7aa1fb4e24.
2022-03-08 17:18:44 -08:00
Carl Lin
0a17edcc1f Revert "fix tests after merge"
This reverts commit ba2d83f580.
2022-03-08 17:18:44 -08:00
Jeff Washington (jwash)
cc4d75a16f fix typo (#23535) 2022-03-08 18:28:00 -06:00
Michael Vines
b719d6a2ad solana-validator set-identity no longer writes a tower file unnecessarily 2022-03-08 15:34:23 -08:00
Mark Percival
8438366d1b fix: update 'borsh' dependency to v0.7.0 (#22425)
Fixes issue with usage of 'global' when used in the browser.

Currently the web3.js distributable is built with a commonJS rollup, but if you
use the npm package with another packager, it will fail when it hits the call
to 'global' inside the browser. Borsh fixed this in v0.7.0
2022-03-08 15:07:33 -08:00
dependabot[bot]
46ec5d563b chore: bump libc from 0.2.117 to 0.2.119 (#23527)
* chore: bump libc from 0.2.117 to 0.2.119

Bumps [libc](https://github.com/rust-lang/libc) from 0.2.117 to 0.2.119.
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.117...0.2.119)

---
updated-dependencies:
- dependency-name: libc
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-08 14:25:38 -07:00
Brooks Prumo
9b80452c7c Fix getting the golden snapshot hashes during bootstrap with Incremental Snapshots (#23518) 2022-03-08 15:16:53 -06:00
Jack May
c2ec294401 featurize loader sat math (#23516) 2022-03-08 11:48:22 -08:00
Michael Vines
536a99705b Update regex to v1.5.5 2022-03-08 10:45:47 -08:00
dependabot[bot]
00558227be chore: bump pbkdf2 from 0.10.0 to 0.10.1 (#23522)
* chore: bump pbkdf2 from 0.10.0 to 0.10.1

Bumps [pbkdf2](https://github.com/RustCrypto/password-hashes) from 0.10.0 to 0.10.1.
- [Release notes](https://github.com/RustCrypto/password-hashes/releases)
- [Commits](https://github.com/RustCrypto/password-hashes/compare/pbkdf2-v0.10.0...pbkdf2-v0.10.1)

---
updated-dependencies:
- dependency-name: pbkdf2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-08 10:50:33 -07:00
Michael Vines
5599bd9442 Remove BankFromArchiveTimings from ledger/ 2022-03-08 08:11:50 -08:00
Michael Vines
9cfa21f7d1 Inline load_from_genesis 2022-03-08 08:11:50 -08:00
dependabot[bot]
12337d8daf chore: bump curve25519-dalek from 3.2.0 to 3.2.1 (#23517)
Bumps [curve25519-dalek](https://github.com/dalek-cryptography/curve25519-dalek) from 3.2.0 to 3.2.1.
- [Release notes](https://github.com/dalek-cryptography/curve25519-dalek/releases)
- [Changelog](https://github.com/dalek-cryptography/curve25519-dalek/blob/main/CHANGELOG.md)
- [Commits](https://github.com/dalek-cryptography/curve25519-dalek/compare/3.2.0...3.2.1)

---
updated-dependencies:
- dependency-name: curve25519-dalek
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-08 01:14:37 -07:00
Justin Starry
3114c199bd Add RPC support for versioned transactions (#22530)
* Add RPC support for versioned transactions

* fix doc tests

* Add rpc test for versioned txs

* Switch to preflight bank
2022-03-08 15:20:34 +08:00
Tao Zhu
e790d0fc53 Optional Cli parameter to add additional-fee to solana ping (#23513)
* Optional Cli parameter to add additional-fee to solana ping

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* Update cli/src/cluster_query.rs

correct the help message for arg

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
2022-03-08 03:49:36 +00:00
dependabot[bot]
7933c7fc24 chore: bump anyhow from 1.0.55 to 1.0.56 (#23514)
* chore: bump anyhow from 1.0.55 to 1.0.56

Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.55 to 1.0.56.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.55...1.0.56)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-07 16:26:30 -07:00
Brian Anderson
ddbf5c782f Fix broken doc links in solana-runtime (#23504) 2022-03-07 22:21:40 +00:00
dependabot[bot]
38d8bbb19c chore: bump sysctl from 0.4.3 to 0.4.4 (#23505)
Bumps [sysctl](https://github.com/johalun/sysctl-rs) from 0.4.3 to 0.4.4.
- [Release notes](https://github.com/johalun/sysctl-rs/releases)
- [Changelog](https://github.com/johalun/sysctl-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/johalun/sysctl-rs/commits)

---
updated-dependencies:
- dependency-name: sysctl
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-07 14:24:19 -07:00
HaoranYi
181fffb916 rename status filename to be consistent (#23501) 2022-03-07 17:34:35 +00:00
samkim-crypto
08c9a650db zk-token-sdk: generalize range proof (#23506)
* zk-token-sdk: update range proof in transfers for more flexible setting of params

* zk-token-sdk: clippy
2022-03-07 12:27:56 -05:00
dependabot[bot]
3a0271c113 chore: bump dashmap from 4.0.2 to 5.1.0 (#23372)
* chore: bump dashmap from 4.0.2 to 5.1.0

Bumps [dashmap](https://github.com/xacrimon/dashmap) from 4.0.2 to 5.1.0.
- [Release notes](https://github.com/xacrimon/dashmap/releases)
- [Commits](https://github.com/xacrimon/dashmap/commits/v5.1.0)

---
updated-dependencies:
- dependency-name: dashmap
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-07 00:00:26 -05:00
HaoranYi
463cd564cf fix typos (#23495)
* fix typos
2022-03-05 20:46:46 -06:00
Yueh-Hsuan Chiang
b8b7163b66 (Ledger Store) Report RocksDB Column Family Metrics (#22503)
This PR enables blockstore to periodically report RocksDB column family properties.
The reported properties are under blockstore_rocksdb_cfs, and the properties also
support group by operation on cf_name.
2022-03-05 16:13:03 -08:00
Will Roeder
ba771cdc45 feat(explorer): adding verified on-chain collection support (#23490)
* Adding Verified On-chain Collection tag to help consumers check if their NFTs are authentic

* On-chain isn't quite the same as NO-chain

* Grammar fix

* Added Collection Owner verification to guarantee this is actually a Collection Mint
2022-03-04 18:35:35 -08:00
samkim-crypto
d2b23da9ea Zk token sdk clean decryption (#23478)
* zk-token-sdk: add decryption for pod elgamal ciphertexts

* zk-token-sdk: add decryption for pod elgamal ciphertexts

* zk-token-sdk: cargo fmt

* zk-token-sdk: minor update to docs

* zk-token-sdk: minor

* zk-token-sdk: fix bpf build error

* zk-token-sdk: more simplifying discrete log

* zk-token-sdk: fmt

* zk-token-sdk: minor update to doc
2022-03-04 15:57:19 -04:00
Jack May
09b58e1cfb sha-2:0.9.8 yanked, bump to 0.9.9 (#23477) 2022-03-04 08:13:52 -08:00
Alexander Meißner
e23c6ce62b Increases check_number_of_instruction_accounts() in BPF loader. (#23440) 2022-03-04 14:33:27 +01:00
Justin Starry
38db1dead4 Refactor RPC tests (#23483) 2022-03-04 18:20:11 +08:00
Michael Vines
36ad59673c drop mut 2022-03-04 09:52:46 +01:00
Michael Vines
0d33b54d74 Rework do_process_blockstore_from_root to use BankForks 2022-03-04 09:52:46 +01:00
Michael Vines
93c8e04d51 Simplify do_process_blockstore_from_root slightly 2022-03-04 09:52:46 +01:00
Michael Vines
b28acd2d4d Reduce fn visibility 2022-03-04 09:52:46 +01:00
dependabot[bot]
360f6466a3 chore: bump futures from 0.3.19 to 0.3.21 (#23476)
* chore: bump futures from 0.3.19 to 0.3.21

Bumps [futures](https://github.com/rust-lang/futures-rs) from 0.3.19 to 0.3.21.
- [Release notes](https://github.com/rust-lang/futures-rs/releases)
- [Changelog](https://github.com/rust-lang/futures-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/futures-rs/compare/0.3.19...0.3.21)

---
updated-dependencies:
- dependency-name: futures
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-03 16:44:43 -07:00
HaoranYi
aad73f1f2e fix recycler free stat report (#23159)
* fix recycler free stat report

* update comments for ewma window size

* one atomic update with aggregated local counter

* fix clippy integer arithmetic error

* move free calcs outside of loop

* fix integer arithmetic error
2022-03-03 17:08:59 -06:00
dependabot[bot]
afda8c4020 chore: bump memmap2 from 0.5.2 to 0.5.3 (#23475)
* chore: bump memmap2 from 0.5.2 to 0.5.3

Bumps [memmap2](https://github.com/RazrFalcon/memmap2-rs) from 0.5.2 to 0.5.3.
- [Release notes](https://github.com/RazrFalcon/memmap2-rs/releases)
- [Changelog](https://github.com/RazrFalcon/memmap2-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/RazrFalcon/memmap2-rs/compare/v0.5.2...v0.5.3)

---
updated-dependencies:
- dependency-name: memmap2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <you@example.com>
2022-03-03 14:45:07 -07:00
Yueh-Hsuan Chiang
62d2a4cd88 Make ShredStorageType::RocksLevel public (#23272)
#### Summary of Changes
This PR adds two hidden arguments to the validator that allow users to use RocksDB's FIFO compaction for storing shreds.

        --shred-storage <SHRED_STORAGE>
            EXPERIMENTAL: Controls how RocksDB compacts shreds.  *WARNING*: You will lose your ledger data
            when you switch between options. Possible values are: 'level': stores shreds using RocksDB's default (level)
            compaction. 'fifo': stores shreds under RocksDB's FIFO compaction. This option is more efficient on
            disk-write-bytes of the ledger store. [default: level]  [possible values: level, fifo]

        --shred-storage-size <SHRED_STORAGE_SIZE_BYTES>
            The shred storage size in bytes. The suggested value is 50% of your ledger storage size in bytes. [default:
            268435456000]
2022-03-03 12:43:58 -08:00
samkim-crypto
8d53ea81e9 zk-token-sdk: change variable names to use suffix rather than prefix (#23474)
* zk-token-sdk: change variable names to use suffix rather than prefix for type

* zk-token-sdk: cargo fmt
2022-03-03 15:07:27 -05:00
dependabot[bot]
f2fa49a771 chore: bump lru from 0.7.2 to 0.7.3 (#23462)
Bumps [lru](https://github.com/jeromefroe/lru-rs) from 0.7.2 to 0.7.3.
- [Release notes](https://github.com/jeromefroe/lru-rs/releases)
- [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/jeromefroe/lru-rs/compare/0.7.2...0.7.3)

---
updated-dependencies:
- dependency-name: lru
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-03 12:01:49 -07:00
mooori
7b7160448b Rename AccountsDataMeter::consume (#23037) 2022-03-03 12:23:06 -06:00
Jeff Washington (jwash)
7f608965ef new counter data point for unexpected rent_epoch (#23449) 2022-03-03 09:09:31 -06:00
Jeff Washington (jwash)
ddfd4f86f3 exit early from acct bg svc if no shrink candidates (#23459) 2022-03-03 08:47:39 -06:00
Jeff Washington (jwash)
a99fd09c16 allow index update to change storage slot # (#23311) 2022-03-03 08:40:48 -06:00
axleiro
4b59bfe6d8 testing latest changes on explorer (#23470) 2022-03-03 16:48:18 +05:30
axleiro
5ac3466f26 added PAT token
fixing Error: Resource not accessible by integration
2022-03-03 16:24:09 +05:30
axleiro
2e750722c7 resolving promote to prod issue (#23469) 2022-03-03 15:40:49 +05:30
axleiro
a9fd807f61 adding default working dir to fix promote to prod issue 2022-03-03 15:23:41 +05:30
axleiro
011472a8e8 add comment to trigger the file to check the previous merge (#23468)
* fix the issue of "promote to Production" on vercel

* fix the issue "promote to production" on vercel for production deployment

* add comment to trigger the file to check the previous merge for vercel "promote to prod" issue
2022-03-03 15:05:02 +05:30
axleiro
b4480e6b70 fixing the vercel " promote to production" issue (#23466)
* fix the issue of "promote to Production" on vercel

* fix the issue "promote to production" on vercel for production deployment
2022-03-03 13:20:32 +05:30
dependabot[bot]
61d7bdd66f chore: bump serde_json from 1.0.78 to 1.0.79 (#23461)
* chore: bump serde_json from 1.0.78 to 1.0.79

Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.78 to 1.0.79.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.78...v1.0.79)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <you@example.com>
2022-03-02 23:38:06 -07:00
Josh
43347f3da6 fix(explorer): change title to average ping time (#23463) 2022-03-03 03:28:23 +00:00
Yueh-Hsuan Chiang
634f4eb37d (LedgerStore) Use different path for different blockstore storage type. (#23236)
#### Summary of Changes
To avoid mixing the use of different shred storage types, each shred storage type
will have its blockstore in a different directory.

This PR still keeps the RocksFifo setting hidden.  The default ShredStorageType and
blockstore directory are still RocksLevel and `rocksdb`.

Will follow-up with PRs on making FIFO option public in ledger-tool and validator.

#### Test Plan
* Added a new test to verify the existence of `rocksdb-fifo` directory when FIFO compaction is used.
* Updated existing test to verify the current setting still store ledger under `rocksdb` directory.
* Manually ran ledger_cleanup_test with both level and fifo compaction and verified the resulting ledger.
* Ran a validator with this PR.
2022-03-02 18:30:22 -08:00
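A minimal sketch of the directory split described in this commit; the enum name is hypothetical, while the `rocksdb` and `rocksdb-fifo` directory names come from the text above:

```rust
// Level-compacted shreds stay under `rocksdb`, FIFO-compacted shreds go under
// `rocksdb-fifo`, so the two storage types never share a database directory.
use std::path::{Path, PathBuf};

enum ShredStorageKind {
    RocksLevel,
    RocksFifo,
}

fn blockstore_directory(ledger_path: &Path, storage: &ShredStorageKind) -> PathBuf {
    match storage {
        ShredStorageKind::RocksLevel => ledger_path.join("rocksdb"),
        ShredStorageKind::RocksFifo => ledger_path.join("rocksdb-fifo"),
    }
}
```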
dependabot[bot]
39387e8446 chore: bump blake3 from 1.3.0 to 1.3.1 (#23460)
* chore: bump blake3 from 1.3.0 to 1.3.1

Bumps [blake3](https://github.com/BLAKE3-team/BLAKE3) from 1.3.0 to 1.3.1.
- [Release notes](https://github.com/BLAKE3-team/BLAKE3/releases)
- [Commits](https://github.com/BLAKE3-team/BLAKE3/compare/1.3.0...1.3.1)

---
updated-dependencies:
- dependency-name: blake3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-02 18:20:30 -07:00
Brooks Prumo
79a515e88e Add tests for split() from Uninitialized account where dest=source (#23442) 2022-03-02 18:32:15 -06:00
Brooks Prumo
e87b941a51 Test stake split: destination delegation is at least the minimum (#23456) 2022-03-02 17:29:40 -06:00
dependabot[bot]
fe7604589d chore: bump retain_mut from 0.1.5 to 0.1.7 (#23450)
Bumps [retain_mut](https://github.com/upsuper/retain_mut) from 0.1.5 to 0.1.7.
- [Release notes](https://github.com/upsuper/retain_mut/releases)
- [Commits](https://github.com/upsuper/retain_mut/commits)

---
updated-dependencies:
- dependency-name: retain_mut
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-02 23:15:33 +00:00
Jack May
e9912744ef use saturating arithmetic (#23435) 2022-03-02 14:50:16 -08:00
Jeff Washington (jwash)
8184f755ae 2 more rent collection tests don't skip epochs (#23446) 2022-03-02 15:04:34 -06:00
Jack May
97d40ba3da Resized accounts must be rent exempt 2022-03-02 13:02:02 -08:00
Kirill Fomichev
82cb61dc36 Add Copy/Eq derive traits to SlotStatus in accountsdb-plugin (#23100)
* Add copy/eq derive traits to SlotStatus in accountsdb-plugin

* move Eq to derive

* remove not required clone
2022-03-02 12:49:11 -08:00
Will Hickey
1a99251498 Bump version to 1.10.1 (#23453) 2022-03-02 13:47:01 -06:00
Krešimir Klas
41ab690a61 feat: add getMultipleAccountsInfoAndContext method to Connection
Similar to `getAccountInfoAndContext`.
2022-03-02 20:39:48 +01:00
Brian Long
7dbde2247d Adds comments related to the public RPC endpoints (#22797)
* Adds comments related to the public RPC endpoints

* Update docs/src/cluster/rpc-endpoints.md

Co-authored-by: Michael Vines <mvines@gmail.com>

Co-authored-by: Brian Long <bl@triton.one>
Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2022-03-02 19:15:35 +00:00
dependabot[bot]
da00d29de0 chore: bump bytemuck from 1.7.3 to 1.8.0 (#23437)
* chore: bump bytemuck from 1.7.3 to 1.8.0

Bumps [bytemuck](https://github.com/Lokathor/bytemuck) from 1.7.3 to 1.8.0.
- [Release notes](https://github.com/Lokathor/bytemuck/releases)
- [Changelog](https://github.com/Lokathor/bytemuck/blob/main/changelog.md)
- [Commits](https://github.com/Lokathor/bytemuck/compare/v1.7.3...v1.8.0)

---
updated-dependencies:
- dependency-name: bytemuck
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-02 19:04:54 +00:00
Jeff Washington (jwash)
e88da2ec0a minor test cleanup (#23447)
* minor test cleanup

* fmt
2022-03-02 12:10:05 -06:00
Jeff Washington (jwash)
26aa18b3f3 fmt (#23448) 2022-03-02 11:54:58 -06:00
Jeff Washington (jwash)
e630eb73d7 rework test to avoid multi-epoch gap (#23393) 2022-03-02 11:12:10 -06:00
Jeff Washington (jwash)
ef8b7d9c62 ledger tool halt at slot verify hash (#23424) 2022-03-02 11:11:18 -06:00
Jeff Washington (jwash)
d909b7c80b log hash and lamport result of calculate_accounts_hash_without_index (#23425) 2022-03-02 11:10:40 -06:00
Jordan Sexton
8eefe60c44 chore: web3.js: fix cjs file reference in package.json (#21940) 2022-03-02 10:00:55 -07:00
Josh
d43786edcf fix(explorer): remove purple from ping (#23445) 2022-03-02 16:57:41 +00:00
Michael Vines
9ec514f6c5 Restore solana-test-validator informational output 2022-03-02 17:27:27 +01:00
Josh
3ddd018452 feat(explorer): reenable solana ping widget (#23443) 2022-03-02 16:11:35 +00:00
HaoranYi
41f78b9925 small optimization. use shift for pow of 2. (#22975) 2022-03-02 09:11:12 -06:00
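
For reference, the optimization named in that commit title, shown as a standalone Rust example rather than the code it actually touched:

    fn main() {
        let n: u32 = 10;
        // 2^n via the general-purpose integer power...
        let via_pow = 2u64.pow(n);
        // ...and via a single left shift, which is what "use shift for pow of 2" means.
        let via_shift = 1u64 << n;
        assert_eq!(via_pow, via_shift); // both are 1024
    }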
HaoranYi
4f0070a5c6 unittest for bind two consecutive ports (#23008)
* minor fix of comments in fork-selection tests

* fix doc link

* add unittest for bind_two_consecutive_in_range
2022-03-02 09:10:29 -06:00
HaoranYi
8de88d0a55 Refactor packet_threshold adjustment code into its own struct (#23216)
* refactor packet_threshold adjustment code into its own struct and add a unit test for it

* fix a typo in error message

* code review feedbacks

* another code review feedback

* Update core/src/ancestor_hashes_service.rs

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* share packet threshold with repair service (credit to carl)

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
2022-03-02 09:09:06 -06:00
HaoranYi
86e2f728c3 Fix a batch limits bug in banking (#23327)
* add thread index in thread name for debugging

* fix batch_limit

* use NUM_VOTE_THREAD instead of hardcoded number (credit to carllin)
2022-03-02 09:08:08 -06:00
Jeff Washington (jwash)
7b7fdb42d9 minor test refactoring (#23423) 2022-03-02 08:31:14 -06:00
Jeff Washington (jwash)
c8cb940b4e tweak 2 rent tests to narrow epoch ranges (#23420) 2022-03-02 08:22:33 -06:00
sakridge
a4f4ac5279 add plumbing to allow for arbitrary tpu address in gossip (#22703)
* add plumbing to allow for arbitrary tpu address in gossip

* make clippy happy

* Review comments

Co-authored-by: CherryWorm <nico.gruendel@web.de>
2022-03-02 09:42:14 +01:00
Tyera Eulberg
d3ebe8d8f5 Remove unneeded jsonrpc dependencies/features; update do-audit (#23436)
* Update generic-array note

* Remove unneeded jsonrpc deps

* Remove unneeded jsonrpc features

* Rewrite slot-update test without websocket crate

* Rewrite rpc-subscription test without websocket crate, and remove jsonrpc deps

* Update expected balance to accommodate rent-exempt minimum transfer amount

* Remove obsolete audit ignores
2022-03-02 01:42:01 -07:00
dependabot[bot]
7d1a090cfb chore: bump semver from 1.0.5 to 1.0.6 (#23429)
* chore: bump semver from 1.0.5 to 1.0.6

Bumps [semver](https://github.com/dtolnay/semver) from 1.0.5 to 1.0.6.
- [Release notes](https://github.com/dtolnay/semver/releases)
- [Commits](https://github.com/dtolnay/semver/compare/1.0.5...1.0.6)

---
updated-dependencies:
- dependency-name: semver
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-01 22:16:44 -07:00
Jeff Biseda
c69e3b73ff bench get_retransmit_peers (#23292) 2022-03-01 19:10:29 -08:00
Tyera Eulberg
2a17a661e6 Remove failure audit ignore (#23431) 2022-03-01 17:02:55 -07:00
dependabot[bot]
a0d68ef60e chore: bump futures-util from 0.3.19 to 0.3.21 (#23418)
* chore: bump futures-util from 0.3.19 to 0.3.21

Bumps [futures-util](https://github.com/rust-lang/futures-rs) from 0.3.19 to 0.3.21.
- [Release notes](https://github.com/rust-lang/futures-rs/releases)
- [Changelog](https://github.com/rust-lang/futures-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/futures-rs/compare/0.3.19...0.3.21)

---
updated-dependencies:
- dependency-name: futures-util
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-01 16:44:30 -07:00
dependabot[bot]
7d1810bbcc chore: bump hidapi from 1.3.2 to 1.3.3 (#23416)
Bumps [hidapi](https://github.com/ruabmbua/hidapi-rs) from 1.3.2 to 1.3.3.
- [Release notes](https://github.com/ruabmbua/hidapi-rs/releases)
- [Commits](https://github.com/ruabmbua/hidapi-rs/commits/v1.3.3)

---
updated-dependencies:
- dependency-name: hidapi
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-01 16:06:56 -07:00
Alexander Meißner
6c56eb9663 Replaces KeyedAccount by BorrowedAccount in the BPF loader. (#23056) 2022-03-01 22:55:26 +01:00
Josh
d0ba914d2b fix(explorer): rollback ping until api is more stable (#23419) 2022-03-01 19:57:23 +00:00
Josh
7943e8a1c3 fix(explorer): retry should trigger loading state (#23417)
* fix(explorer): retry should trigger loading state

* fix(explorer): Solana ping add refreshing state
2022-03-01 11:41:11 -08:00
dependabot[bot]
3e48cc4e00 chore: bump dialoguer from 0.9.0 to 0.10.0 (#23413)
* chore: bump dialoguer from 0.9.0 to 0.10.0

Bumps [dialoguer](https://github.com/mitsuhiko/dialoguer) from 0.9.0 to 0.10.0.
- [Release notes](https://github.com/mitsuhiko/dialoguer/releases)
- [Changelog](https://github.com/mitsuhiko/dialoguer/blob/master/CHANGELOG.md)
- [Commits](https://github.com/mitsuhiko/dialoguer/compare/v0.9.0...v0.10.0)

---
updated-dependencies:
- dependency-name: dialoguer
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-01 19:31:03 +00:00
dependabot[bot]
ce4d579499 chore: bump serial_test from 0.5.1 to 0.6.0 (#23414)
Bumps [serial_test](https://github.com/palfrey/serial_test) from 0.5.1 to 0.6.0.
- [Release notes](https://github.com/palfrey/serial_test/releases)
- [Commits](https://github.com/palfrey/serial_test/compare/v0.5.1...v0.6.0)

---
updated-dependencies:
- dependency-name: serial_test
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-01 12:13:40 -07:00
Josh
f6a06826d8 feat(explorer): add Solana Ping to cluster stats page (#23239)
* feat: add Solana Ping to explorer

* fix: remove br tags in label
2022-03-01 10:50:48 -08:00
dependabot[bot]
93c5642f9f chore: bump rcgen from 0.9.1 to 0.9.2 (#23406)
Bumps [rcgen](https://github.com/est31/rcgen) from 0.9.1 to 0.9.2.
- [Release notes](https://github.com/est31/rcgen/releases)
- [Changelog](https://github.com/est31/rcgen/blob/master/CHANGELOG.md)
- [Commits](https://github.com/est31/rcgen/compare/v0.9.1...v0.9.2)

---
updated-dependencies:
- dependency-name: rcgen
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-03-01 10:09:14 -07:00
behzad nouri
1282277126 bumps up crds-shards-bits (#23220)
The commit adjusts CRDS_SHARDS_BITS upward to be in line with mask_bits in
gossip pull requests. This avoids redundant filtering of irrelevant
crds entries when responding to pull requests.
2022-03-01 15:14:11 +00:00
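
A rough standalone sketch of why more shard bits helps; the constants and layout below are illustrative, not the real CrdsShards implementation:

    // Bucket entries by the top SHARD_BITS bits of their hash. A pull request
    // asking for "everything whose top bits equal this prefix" then touches only
    // the matching bucket instead of filtering the whole table.
    const SHARD_BITS: u32 = 4;

    fn shard_index(hash: u64) -> usize {
        (hash >> (64 - SHARD_BITS)) as usize
    }

    fn main() {
        let mut shards: Vec<Vec<u64>> = vec![Vec::new(); 1usize << SHARD_BITS];
        for hash in [0x10u64 << 56, 0x20u64 << 56, 0xF0u64 << 56] {
            shards[shard_index(hash)].push(hash);
        }
        // With mask_bits == SHARD_BITS, a query for prefix 0x1 scans one shard only.
        assert_eq!(shards[0x1].len(), 1);
    }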
Jeff Washington (jwash)
454e82683e refactor rent test (#23392) 2022-03-01 08:47:09 -06:00
Jeff Washington (jwash)
6b2683f7da refactor test (#23384) 2022-03-01 08:22:10 -06:00
dependabot[bot]
ec798f5aad chore: bump anyhow from 1.0.53 to 1.0.55 (#23402)
* chore: bump anyhow from 1.0.53 to 1.0.55

Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.53 to 1.0.55.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.53...1.0.55)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-01 00:16:26 -07:00
Tyera Eulberg
3b5b71ce44 Improve UX querying rpc for blocks at or before the snapshot from boot (#23403)
* Bump first-available block to first complete block

* Remove obsolete purges in tests (PrimaryIndex toggling is no longer in use)

* Check first-available block in Rpc check_slot_cleaned_up
2022-02-28 23:57:41 -07:00
Tyera Eulberg
19448ba078 Allow sub-rent-exempt-minimum transfers to 1nc1nerator (#23382)
* Add failing test

* Allow small burns to incinerator

* Use check_id method
2022-02-28 23:56:34 -07:00
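
A hedged sketch of the exception that commit describes: destinations below the rent-exempt minimum are rejected unless they are the incinerator. `solana_sdk::incinerator::check_id` is a real helper; the function shape and example values are illustrative.

    // Requires the solana-sdk crate; illustrative check only, not the runtime code.
    use solana_sdk::{incinerator, pubkey::Pubkey};

    fn transfer_allowed(destination: &Pubkey, lamports_after: u64, rent_exempt_minimum: u64) -> bool {
        // Burns to the incinerator may leave it below the rent-exempt minimum;
        // every other destination must end up at or above it.
        incinerator::check_id(destination) || lamports_after >= rent_exempt_minimum
    }

    fn main() {
        let minimum = 1_000; // stand-in value, not the real rent-exempt minimum
        assert!(transfer_allowed(&incinerator::id(), 1, minimum));
        assert!(!transfer_allowed(&Pubkey::new_unique(), 1, minimum));
    }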
dependabot[bot]
e3fa55f88d chore: bump hmac from 0.12.0 to 0.12.1 (#23396)
* chore: bump hmac from 0.12.0 to 0.12.1

Bumps [hmac](https://github.com/RustCrypto/MACs) from 0.12.0 to 0.12.1.
- [Release notes](https://github.com/RustCrypto/MACs/releases)
- [Commits](https://github.com/RustCrypto/MACs/compare/hmac-v0.12.0...hmac-v0.12.1)

---
updated-dependencies:
- dependency-name: hmac
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-03-01 02:07:33 +00:00
Trent Nelson
5877e38baa docs: resolve svgbob binary 2022-02-28 17:33:30 -07:00
Tyera Eulberg
0de7b757d0 Update durable-nonce docs (#23397)
* Use parsable blockhash string

* Remove deprecated cli command from docs
2022-02-28 17:03:26 -07:00
dependabot[bot]
6dfd1b9883 chore: bump quinn from 0.8.0 to 0.8.1 (#23380)
Bumps [quinn](https://github.com/quinn-rs/quinn) from 0.8.0 to 0.8.1.
- [Release notes](https://github.com/quinn-rs/quinn/releases)
- [Commits](https://github.com/quinn-rs/quinn/compare/0.8.0...0.8.1)

---
updated-dependencies:
- dependency-name: quinn
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-02-28 15:50:58 -07:00
Trent Nelson
6666f23c01 client: expose creating RpcClients with custom RpcSender impls 2022-02-28 22:04:15 +00:00
Jeff Washington (jwash)
f0a235d16f add comment (#23378) 2022-02-28 15:18:42 -06:00
Jeff Washington (jwash)
4eeb9f4648 acct idx comments (#23377) 2022-02-28 15:00:53 -06:00
Trent Nelson
f814c4a082 ci: move all formerly-default-queue jobs to solana queue 2022-02-28 13:50:34 -07:00
dependabot[bot]
911c5a8362 chore: bump cipher from 0.3.0 to 0.4.3 (#23362)
* chore: bump cipher from 0.3.0 to 0.4.3

Bumps [cipher](https://github.com/RustCrypto/traits) from 0.3.0 to 0.4.3.
- [Release notes](https://github.com/RustCrypto/traits/releases)
- [Commits](https://github.com/RustCrypto/traits/compare/cipher-v0.3.0...cipher-v0.4.3)

---
updated-dependencies:
- dependency-name: cipher
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update Cargo.lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-28 11:06:18 -07:00
tigarcia
22d2a40133 Updating known validators in the docs for testnet 2022-02-28 09:57:57 -07:00
Jeff Washington (jwash)
611d745241 refactor rent_due for normal case of exempt (#23350) 2022-02-28 09:42:42 -06:00
Alexander Meißner
ee3fc39f1c Refactor: Prepare stake_instruction.rs to remove KeyedAccounts (#23375)
* Adds instruction_account_indices to stake instruction.

* Uses instruction_account_indices in stake instruction.

* Replaces get_sysvar_with_account_check by get_sysvar_with_account_check2 in stake instruction.

* Adds check_number_of_instruction_accounts().
2022-02-28 16:24:40 +01:00
dependabot[bot]
fe18ea35a2 chore: bump cargo_metadata from 0.14.1 to 0.14.2 (#23365)
Bumps [cargo_metadata](https://github.com/oli-obk/cargo_metadata) from 0.14.1 to 0.14.2.
- [Release notes](https://github.com/oli-obk/cargo_metadata/releases)
- [Commits](https://github.com/oli-obk/cargo_metadata/compare/0.14.1...0.14.2)

---
updated-dependencies:
- dependency-name: cargo_metadata
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-02-28 05:46:47 +00:00
Jeff Washington (jwash)
30dafc7135 list -> slot_list (#23355) 2022-02-27 23:08:58 -06:00
dependabot[bot]
186bb19965 chore:(deps): bump url-parse from 1.5.7 to 1.5.10 in /explorer (#23371)
Bumps [url-parse](https://github.com/unshiftio/url-parse) from 1.5.7 to 1.5.10.
- [Release notes](https://github.com/unshiftio/url-parse/releases)
- [Commits](https://github.com/unshiftio/url-parse/compare/1.5.7...1.5.10)

---
updated-dependencies:
- dependency-name: url-parse
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-02-28 04:33:23 +00:00
Michael Vines
87b76aeeb0 Align the solana validators output columns for -ud,-ut, and -um 2022-02-27 16:54:13 -08:00
SAITO Kosuke | Coinfra NFT Creator
a57c7ba5df Change comments (#23366)
* change url

* change comment

Co-authored-by: cosuke2000 <saitou@matchingood.co.jp>
2022-02-27 10:36:25 -07:00
dependabot[bot]
2efb909051 chore: bump fd-lock from 3.0.3 to 3.0.4 (#23347)
Bumps [fd-lock](https://github.com/yoshuawuyts/fd-lock) from 3.0.3 to 3.0.4.
- [Release notes](https://github.com/yoshuawuyts/fd-lock/releases)
- [Commits](https://github.com/yoshuawuyts/fd-lock/commits/v3.0.4)

---
updated-dependencies:
- dependency-name: fd-lock
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-02-27 08:16:43 +00:00
dependabot[bot]
418076ab3e chore: bump ed25519-dalek-bip32 from 0.1.1 to 0.2.0 (#23346)
* chore: bump ed25519-dalek-bip32 from 0.1.1 to 0.2.0

Bumps [ed25519-dalek-bip32](https://github.com/jpopesculian/ed25519-dalek-bip32) from 0.1.1 to 0.2.0.
- [Release notes](https://github.com/jpopesculian/ed25519-dalek-bip32/releases)
- [Commits](https://github.com/jpopesculian/ed25519-dalek-bip32/commits/v0.2.0)

---
updated-dependencies:
- dependency-name: ed25519-dalek-bip32
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

* Bump derivation-path

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-27 00:15:04 +00:00
Tyera Eulberg
36484f4f08 Prevent new RentPaying state created by paying fees (#23358)
* Add failing test

* Check fee-payer rent-state change on load

* Add more test cases

* Review comments
2022-02-26 12:10:01 -07:00
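
A hedged sketch of the rule that commit adds; the types and thresholds below are made up for illustration, while the real check compares the fee payer's rent state before and after fees are deducted during account loading.

    // Illustrative rent-state model; not the runtime's actual types.
    #[derive(PartialEq)]
    enum RentState {
        Uninitialized, // zero lamports
        RentPaying,    // below the rent-exempt minimum
        RentExempt,
    }

    fn rent_state(lamports: u64, rent_exempt_minimum: u64) -> RentState {
        if lamports == 0 {
            RentState::Uninitialized
        } else if lamports < rent_exempt_minimum {
            RentState::RentPaying
        } else {
            RentState::RentExempt
        }
    }

    // Paying fees must not turn a rent-exempt fee payer into a rent-paying one.
    fn fee_payment_allowed(before: u64, fee: u64, rent_exempt_minimum: u64) -> bool {
        let after = before.saturating_sub(fee);
        !(rent_state(before, rent_exempt_minimum) == RentState::RentExempt
            && rent_state(after, rent_exempt_minimum) == RentState::RentPaying)
    }

    fn main() {
        assert!(!fee_payment_allowed(1_000, 600, 500)); // would drop below the minimum
        assert!(fee_payment_allowed(1_000, 400, 500)); // stays rent-exempt
    }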
Alexander Meißner
569d531573 Code cleanup: In vote and stake processor (#23353)
* Enable benchmarks of vote processor.

* Inlines from_keyed_account() in stake instruction.
2022-02-25 21:05:05 +01:00
Jeff Washington (jwash)
0ad4757159 plumbing for 'other_slot' in 'update_index' (#23330) 2022-02-25 12:58:08 -06:00
Alexander Meißner
e2fa6a0f7a Replaces KeyedAccount by BorrowedAccount in vote processor (#23348)
* Use instruction_account_indices, get_sysvar_with_account_check2 and instruction_context.get_signers in vote processor.

* Replaces KeyedAccount by BorrowedAccount in vote processor.

* Removes KeyedAccount from benches in vote processor.
2022-02-25 17:22:54 +01:00
Brooks Prumo
533eca3b4c Simplify replay_blockstore_into_bank() (#23282) 2022-02-25 06:57:04 -06:00
axleiro
ff04a5b989 changing permission on "buildkite-pipeline-in-disk" 2022-02-25 15:35:47 +05:30
axleiro
cf18292fd3 adding a new script with in_disk_env var 2022-02-25 15:28:04 +05:30
axleiro
92216c01ff Create buildkite-pipeline-in-disk.sh 2022-02-25 15:26:45 +05:30
axleiro
0e5a58b883 adding a copy of "pipeline-upload.sh"
for the new pipeline
2022-02-25 15:25:37 +05:30
Trent Nelson
d4292774c5 checks 2022-02-25 08:05:28 +00:00
Trent Nelson
97b5a71ceb bump rust to 1.59.0 2022-02-25 08:05:28 +00:00
Alexander Meißner
804fac8ea9 Removes KeyedAccount from tests in vote processor. (#23333) 2022-02-25 08:21:28 +01:00
dependabot[bot]
985af71241 chore: bump sha2 from 0.10.1 to 0.10.2 (#23343)
* chore: bump sha2 from 0.10.1 to 0.10.2

Bumps [sha2](https://github.com/RustCrypto/hashes) from 0.10.1 to 0.10.2.
- [Release notes](https://github.com/RustCrypto/hashes/releases)
- [Commits](https://github.com/RustCrypto/hashes/compare/sha2-v0.10.1...sha2-v0.10.2)

---
updated-dependencies:
- dependency-name: sha2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-02-24 23:57:42 -07:00
dependabot[bot]
98f059e89c chore: bump reqwest from 0.11.6 to 0.11.9 (#23337)
* chore: bump reqwest from 0.11.6 to 0.11.9

Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.11.6 to 0.11.9.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.11.6...v0.11.9)

---
updated-dependencies:
- dependency-name: reqwest
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-02-24 22:40:01 -07:00
Michael Vines
274a6a5ccb Narrow scope of client_targets build 2022-02-24 21:08:43 -08:00
Tyera Eulberg
2d55a8e1c3 Refresh blockhash for fee calculations (#23338) 2022-02-24 21:21:38 -07:00
dependabot[bot]
3f35d7fad9 chore: bump rustls from 0.20.2 to 0.20.4 (#23324)
* chore: bump rustls from 0.20.2 to 0.20.4

Bumps [rustls](https://github.com/rustls/rustls) from 0.20.2 to 0.20.4.
- [Release notes](https://github.com/rustls/rustls/releases)
- [Changelog](https://github.com/rustls/rustls/blob/main/RELEASE_NOTES.md)
- [Commits](https://github.com/rustls/rustls/compare/v/0.20.2...v/0.20.4)

---
updated-dependencies:
- dependency-name: rustls
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-02-24 19:57:35 -07:00
Michael Vines
7111918596 solana gossip now includes feature set information 2022-02-24 18:04:38 -08:00
Trent Nelson
5e0086c1ee followup safety checks for #23295 2022-02-24 17:50:58 -07:00
alnoki
d1f141484e Update derivation path integer sign specification (#23336)
Previously, `ACCOUNT` and `CHANGE` were specified as being positive integers, but since both can assume a value of 0 (as in the given example), they should be specified as nonnegative integers.
2022-02-24 16:49:58 -07:00
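
For context, a tiny Rust example of such a path with both fields at zero; the `44'/501'` prefix is the usual Solana BIP44 prefix, and the rest is illustrative:

    fn main() {
        // ACCOUNT and CHANGE are hardened indices that may both legitimately be 0,
        // hence "nonnegative" rather than "positive".
        let (account, change) = (0u32, 0u32);
        let path = format!("m/44'/501'/{account}'/{change}'");
        assert_eq!(path, "m/44'/501'/0'/0'");
    }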
dependabot[bot]
db12d90735 chore: bump ouroboros from 0.13.0 to 0.14.2 (#23328)
* chore: bump ouroboros from 0.13.0 to 0.14.2

Bumps [ouroboros](https://github.com/joshua-maros/ouroboros) from 0.13.0 to 0.14.2.
- [Release notes](https://github.com/joshua-maros/ouroboros/releases)
- [Commits](https://github.com/joshua-maros/ouroboros/commits/0.14.2)

---
updated-dependencies:
- dependency-name: ouroboros
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-02-24 15:56:13 -07:00
Jeff Washington (jwash)
2207e49633 all TestValidators act like validators (#23318) 2022-02-24 12:12:47 -06:00
Jeff Washington (jwash)
39c86c6c44 fmt (#23329) 2022-02-24 12:12:29 -06:00
dependabot[bot]
6c53f7f588 chore: bump tokio from 1.16.1 to 1.17.0 (#23322)
* chore: bump tokio from 1.16.1 to 1.17.0

Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.16.1 to 1.17.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.16.1...tokio-1.17.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* [auto-commit] Update all Cargo lock files

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot-buildkite <dependabot-buildkite@noreply.solana.com>
2022-02-24 10:54:50 -07:00
Jeff Washington (jwash)
2b0f16e7c3 slot per account in StorableAccounts (#23313) 2022-02-24 10:34:38 -06:00
Jeff Washington (jwash)
017170c99d DiskIdx: new items are not dirty by default (#23123) 2022-02-24 10:17:35 -06:00
Alexander Meißner
a14c7c37ee Replaces KeyedAccount by BorrowedAccount in the config processor. (#23302) 2022-02-24 09:44:25 +01:00
dependabot[bot]
9d2232306e chore: bump tungstenite from 0.16.0 to 0.17.2 (#23268)
* chore: bump tungstenite from 0.16.0 to 0.17.2

Bumps [tungstenite](https://github.com/snapview/tungstenite-rs) from 0.16.0 to 0.17.2.
- [Release notes](https://github.com/snapview/tungstenite-rs/releases)
- [Changelog](https://github.com/snapview/tungstenite-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/snapview/tungstenite-rs/compare/v0.16.0...v0.17.2)

---
updated-dependencies:
- dependency-name: tungstenite
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Bump tokio-tungstenite in tandem

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-24 01:39:55 -07:00
steviez
12bddbc4ec Use rocksdb property constant instead of local constant (#23147) 2022-02-23 23:02:11 -06:00
Brooks Prumo
fcaf01e243 Add test to check destination for stake splitting (#23303) 2022-02-23 22:47:57 -06:00
dependabot[bot]
856d29c7b7 chore: bump parking_lot from 0.11.2 to 0.12.0 (#23087)
* chore: bump parking_lot from 0.11.2 to 0.12.0

Bumps [parking_lot](https://github.com/Amanieu/parking_lot) from 0.11.2 to 0.12.0.
- [Release notes](https://github.com/Amanieu/parking_lot/releases)
- [Changelog](https://github.com/Amanieu/parking_lot/blob/master/CHANGELOG.md)
- [Commits](https://github.com/Amanieu/parking_lot/compare/0.11.2...0.12.0)

---
updated-dependencies:
- dependency-name: parking_lot
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Sync parking_lot

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2022-02-23 20:07:07 -07:00
Trent Nelson
6a0d2fcfa7 docs: post merge review for #23286 2022-02-24 01:53:54 +00:00
Jeff Washington (jwash)
4bc440666a add arg --disable_accounts_disk_index (#23308) 2022-02-23 18:07:24 -06:00
Jeff Washington (jwash)
c0d0724be0 clippy (#23310) 2022-02-23 18:07:00 -06:00
Jeff Washington (jwash)
7ee549e5ae fix params for store_accounts_frozen (#23312) 2022-02-23 17:40:51 -06:00
Jeff Washington (jwash)
227df52213 update comment (#23314) 2022-02-23 17:33:06 -06:00
Jeff Washington (jwash)
99a057927c started_from_validator, ignore env var for cli (#23309) 2022-02-23 17:15:43 -06:00
Jeff Washington (jwash)
cafc18c3f9 update_index uses ReadableAccount to reduce params (#23305) 2022-02-23 17:01:23 -06:00
Masaya Funakoshi
0dd36f3201 Grammar corrections PR#23206 Review Fixes (#23291)
* Accounts page grammar edits

* calling-between-programs page grammar edits

* PR #23206 "Grammar Corrections" review fixes

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Update docs/src/developing/programming-model/accounts.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-02-23 15:34:13 -07:00
Michael Vines
3ea9ca35fa grant public access to ProofError enum 2022-02-23 14:22:42 -08:00
Jeff Washington (jwash)
a245efe83d trait for acct data with slot per item (#23285) 2022-02-23 15:22:29 -06:00
Trent Nelson
c81dd602c4 Hack fix for ICE as seen in CI 2022-02-23 11:17:39 -07:00
Lijun Wang
084fb79ad8 Unhide AccountsDb plugin parameters (#23284)
* Unhide AccountsDb plugin parameters

* addressed feedback from Trent
2022-02-23 10:01:14 -08:00
Bruno
2e3eafaa11 RPC Client: Use Tokio's RwLock instead of std to make using in async easier (#23299) 2022-02-23 09:33:32 -08:00
Jeff Washington (jwash)
2996f1f783 add comments (#23275) 2022-02-23 10:53:11 -06:00
kirill lykov
05f04a22b7 Fix small problems in transaction-dos tool (#23234)
* Fix small problems in transaction-dos tool
2022-02-23 17:17:06 +01:00
Justin Starry
d0e85c293f Fix rustfmt check (#23296) 2022-02-23 16:38:53 +08:00
Michael Vines
6872fc79ba Derive Clone for AeCiphertext (#23293) 2022-02-22 22:47:26 -08:00
Dmitri Makarov
0a3a18744f Update the consumed compute units cost for hashing syscalls
This change prevents zero-cost computation of hash functions on an
unbounded number of zero-length slices of data. The cost for each slice
is at least equal to the base cost of a memory operation, but could be
more for longer slices.
2022-02-23 02:32:41 +00:00
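
A hedged sketch of that pricing rule: each slice passed to a hashing syscall is charged at least the base memory-operation cost, so hashing many zero-length slices is no longer free. The constant names and values below are illustrative, not the actual compute-budget parameters.

    // Illustrative cost model only.
    const MEM_OP_BASE_COST: u64 = 10; // floor charged per slice
    const BYTES_PER_UNIT: u64 = 2;    // longer slices cost len / BYTES_PER_UNIT units

    fn slice_cost(len: u64) -> u64 {
        MEM_OP_BASE_COST.max(len / BYTES_PER_UNIT)
    }

    fn main() {
        assert_eq!(slice_cost(0), 10);  // a zero-length slice still pays the floor
        assert_eq!(slice_cost(64), 32); // long slices are charged by size
    }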
Trent Nelson
09d064c090 docs: clarify spl token account creation handling for exchange integrations 2022-02-23 00:22:44 +00:00
Masaya Funakoshi
ff604efc44 Grammar corrections (#23206)
* Accounts page grammar edits

* Runtime page grammar edits

* calling-between-programs page grammar edits

* transactions page grammar edits

* small changes with e.g. following Chicago Manual
2022-02-22 15:23:47 -08:00
Gavin Chan
20d031e2b8 Refactor ExecuteTimings w/ enum-indexed array (#23085) 2022-02-22 14:46:56 -08:00
Trent Nelson
5766567e9f ci: revert cache version hack 2022-02-22 19:01:29 +00:00
Brooks Prumo
88e1192c6b Refactor new() and default() for CostTracker (#23267) 2022-02-22 10:44:23 -06:00
Anton Lazarev
cadc2de77d add github workflow to check solana-client compilation on android/ios targets 2022-02-22 09:05:51 -07:00
Jeff Washington (jwash)
8b32e80ee2 DiskIdx: rename 'remove' to 'evict' (#23188) 2022-02-22 09:40:25 -06:00
Jeff Washington (jwash)
7ebf398ed7 AcctIdx: env var "SOLANA_TEST_ACCOUNTS_INDEX_MEMORY_LIMIT_MB" (#23194)
* AcctIdx: env var "SOLANA_TEST_ACCOUNTS_INDEX_MEMORY_LIMIT_MB"

* ignore env var when starting as validator

* Update runtime/src/bucket_map_holder.rs

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
2022-02-22 09:40:12 -06:00
Brooks Prumo
ac70070e5b Add MINIMUM_STAKE_DELEGATION constant (#23259)
Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2022-02-22 09:21:23 -06:00
microwavedcola1
f00016b647 add support for time in force order type (#23255)
Signed-off-by: microwavedcola1 <microwavedcola@gmail.com>
2022-02-22 21:12:02 +08:00
Justin Starry
bcda74f42f Fix builtin handling on epoch boundaries (#23256) 2022-02-22 20:54:08 +08:00
Justin Starry
c97f34a0fd Add script for running nightly rustfmt on all workspaces (#23244)
* Add script for running nightly rustfmt on all workspaces

* invalidate ci cache
2022-02-22 11:59:06 +08:00
Michael Vines
72c68695b5 Do not keep oldest full snapshot 2022-02-21 17:10:31 -07:00
Alexander Meißner
d0d256ee9a Bump rbpf to v0.2.24 (#23263) 2022-02-21 23:38:12 +01:00
Tyera Eulberg
7e08ae1d0c Revert "Add simulation detection countermeasure (#22880)" (#23261)
This reverts commit c42b80f099.
2022-02-21 21:15:37 +00:00
Pierre
ebe3d2d59d fix: simulateTransaction accounts items can be null (#23229)
* fix: simulated accounts can be null

* Use Missing rather than token program id

Co-authored-by: Arrowana <8245419+Arrowana@users.noreply.github.com>
2022-02-21 14:20:11 +08:00
HaoranYi
04d23a1597 fix memory ordering in append_vec (#23215) 2022-02-20 20:30:49 -06:00
dependabot[bot]
42bdf1d864 chore:(deps): bump url-parse from 1.5.1 to 1.5.7 in /explorer (#23245)
Bumps [url-parse](https://github.com/unshiftio/url-parse) from 1.5.1 to 1.5.7.
- [Release notes](https://github.com/unshiftio/url-parse/releases)
- [Commits](https://github.com/unshiftio/url-parse/compare/1.5.1...1.5.7)

---
updated-dependencies:
- dependency-name: url-parse
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-02-19 11:34:37 +00:00
Yueh-Hsuan Chiang
8c872e9ce0 (LedgerStore/FIFO) Refactor FIFO options and sanity check. (#23131) 2022-02-19 00:58:38 -08:00
buffalu
70ebab2c82 Add rustfmt.toml and cargo fmt (#23238)
* fmt

* formatted

Co-authored-by: Lucas B <buffalu@jito.network>
2022-02-19 13:32:29 +08:00
Yueh-Hsuan Chiang
1add82aa9e (Ledger Store Benchmark) Add flags for checking shred storage size. (#22451) 2022-02-18 19:35:28 -08:00
Trent Nelson
e2d0ede630 cli: accept 'system' program id mnemonic for with seed 2022-02-18 19:51:18 -07:00
Trent Nelson
dcd0a39cb6 cli: coerce with seed program id mnemonics to uppercase 2022-02-18 19:51:18 -07:00
Justin Starry
1719d2349f Skip adding builtins if they will be removed (#23233)
* Add failing test for precompile transition

* Skip adding builtins if they will be removed

* cargo clean

* nits

* fix abi check

* remove workaround

Co-authored-by: Jack May <jack@solana.com>
2022-02-18 18:36:59 -08:00
Alexander Meißner
ee7e411d68 Replaces KeyedAccount by BorrowedAccount in system_instruction_processor. (#23217)
* Adds InstructionContext::check_number_of_instruction_accounts() and InstructionContext::get_instruction_account_key().
Bases check_sysvar_account() on instuction account indices.

* Adds instruction_account_indices enums to system_instruction_processor.

* Reorders parameters and adds InstructionContext.

* Replaces KeyedAccount by BorrowedAccount in system_instruction_processor (part 1).

* Replaces KeyedAccount by BorrowedAccount in system_instruction_processor (part 2).

* Replaces KeyedAccount by BorrowedAccount in system_instruction_processor (part 3).

* Replaces KeyedAccount by BorrowedAccount in system_instruction_processor (part 4).

* Replaces KeyedAccount by BorrowedAccount in system_instruction_processor (part 5).

* Code cleanup
2022-02-19 02:06:33 +01:00
Jack May
970f543ef6 Precompiles owned by the native loader (#23237) 2022-02-18 15:41:59 -08:00
Justin Starry
1351c1bbcf Explorer: Add ed25519 sigverify precompile to search results (#23228) 2022-02-18 07:18:01 +00:00
Will Hickey
c696944d36 Add --locked to spl-token-cli install (#23223) 2022-02-17 22:09:29 -06:00
mooori
5726f42a7c feat(stake-program): support splitWithSeed (#23213) 2022-02-17 12:21:07 -07:00
Dmitri Makarov
ae7fedf0b1 Extend buildkite cargo cache name to include host side rust version 2022-02-17 09:53:51 -08:00
samkim-crypto
b4100a9b5d Add additional zkp for fee (#23112)
* zk-token-sdk: add equality proof for fee

* zk-token-sdk: tweak some naming conventions for readability

* zk-token-sdk: add verify withdraw withheld instruction

* zk-token-sdk: add test for withdraw withheld verification

* zk-token-sdk: more renaming of variables for readability

* zk-token-sdk: cargo fmt

* zk-token-sdk: minor

* zk-token-sdk: resolve bpf compilation warnings

* zk-token-sdk: minor update to doc
2022-02-17 12:45:07 -05:00
Alexander Meißner
1a68f81f89 Replaces KeyedAccount by BorrowedAccount in nonce_keyed_account. (#23214)
* Adds get_sysvar_with_account_check2 for ABIv2.

* Replaces get_signers() and get_sysvar_with_account_check() in system_instruction_processor.

* Replaces KeyedAccount by BorrowedAccount in nonce_keyed_account.
2022-02-17 17:36:55 +01:00
Alexander Meißner
da00b39f4f Cleanup: get_program_key() and get_loader_key() in TransactionContext (#23191)
* Moves TransactionContext::get_program_key() to InstructionContext::get_program_key().

* Removes TransactionContext::get_loader_key().

* Test full program and loader executable account chain in BPF loader.
2022-02-17 10:16:28 +01:00
carllin
619335df1a Add execute timings (#23097) 2022-02-17 01:14:32 -05:00
Trent Nelson
fa680a35ea docs: remove wallet ads 2022-02-16 22:19:10 -07:00
sethgirvan
b66a304e7b docs: fix typos (#22320) 2022-02-16 22:57:22 -06:00
Tao Zhu
3c235503de remove block limit for bench tests to avoid skewing test results (#23204) 2022-02-17 03:44:34 +00:00
556 changed files with 30434 additions and 20139 deletions


@@ -0,0 +1,19 @@
#!/usr/bin/env bash
#
# This script is used to upload the full buildkite pipeline. The steps defined
# in the buildkite UI should simply be:
#
# steps:
# - command: ".buildkite/pipeline-upload.sh"
#
set -e
cd "$(dirname "$0")"/..
source ci/_
sudo chmod 0777 ci/buildkite-pipeline-in-disk.sh
_ ci/buildkite-pipeline-in-disk.sh pipeline.yml
echo +++ pipeline
cat pipeline.yml
_ buildkite-agent pipeline upload pipeline.yml


@@ -13,7 +13,13 @@ export PS4="++"
#
eval "$(ci/channel-info.sh)"
eval "$(ci/sbf-tools-info.sh)"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"
source "ci/rust-version.sh"
HOST_RUST_VERSION="$rust_stable"
pattern='^[0-9]+\.[0-9]+\.[0-9]+$'
if [[ ${HOST_RUST_VERSION} =~ ${pattern} ]]; then
HOST_RUST_VERSION="${rust_stable%.*}"
fi
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"-"$HOST_RUST_VERSION"
(
set -x
MAX_CACHE_SIZE=18 # gigabytes


@@ -1,5 +1,9 @@
#### Problem
#### Summary of Changes
Fixes #

.github/workflows/client-targets.yml (new vendored file, 66 lines)

@@ -0,0 +1,66 @@
name: client_targets

on:
  pull_request:
    branches:
      - master
    paths:
      - "client/**"
      - "sdk/**"
      - ".github/workflows/client-targets.yml"

env:
  CARGO_TERM_COLOR: always

jobs:
  check_compilation:
    name: Client compilation
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        target: [aarch64-apple-ios, x86_64-apple-ios, aarch64-apple-darwin, x86_64-apple-darwin, aarch64-linux-android, armv7-linux-androideabi, i686-linux-android, x86_64-linux-android]
        include:
          - target: aarch64-apple-ios
            platform: ios
            os: macos-latest
          - target: x86_64-apple-ios
            platform: ios
            os: macos-latest
          - target: aarch64-apple-darwin
            platform: ios
            os: macos-latest
          - target: x86_64-apple-darwin
            platform: ios
            os: macos-latest
          - target: aarch64-linux-android
            platform: android
            os: ubuntu-latest
          - target: armv7-linux-androideabi
            platform: android
            os: ubuntu-latest
          - target: i686-linux-android
            platform: android
            os: ubuntu-latest
          - target: x86_64-linux-android
            platform: android
            os: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          target: ${{ matrix.target }}
      - name: Install cargo-ndk
        if: ${{ matrix.platform == 'android' }}
        run: cargo install cargo-ndk
      - uses: actions-rs/cargo@v1
        if: ${{ matrix.platform == 'android' }}
        with:
          command: ndk
          args: --target ${{ matrix.target }} build -p solana-client
      - uses: actions-rs/cargo@v1
        if: ${{ matrix.platform == 'ios' }}
        with:
          command: build
          args: -p solana-client --target ${{ matrix.target }}


@@ -17,8 +17,7 @@ jobs:
vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
github-token: ${{ secrets.GITHUB_TOKEN }} #Optional
vercel-org-id: ${{ secrets.ORG_ID}} #Required
vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
working-directory: ./explorer
vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
scope: ${{ secrets.TEAM_ID }}
- name: vercel url
@@ -36,17 +35,24 @@ jobs:
#filtered_url=$(cat vercelfile2.txt )
#echo "$filtered_url" >> .env.preview1
- name: Run tests
- name: Fetching Vercel Preview Deployment Link
uses: mathiasvr/command-output@v1
id: tests2
id: test1
with:
run: |
echo "$(cat .env.preview1)"
- name: Fetching PR commit URL
uses: mathiasvr/command-output@v1
id: test2
with:
run: |
HEAD_SHA=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/solana-labs/solana/pulls | jq .[0] | jq -r .head.sha)
USER_NAME=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/solana-labs/solana/pulls | jq .[0] | jq -r .head.user.login)
echo "github.com/$USER_NAME/solana/commit/$HEAD_SHA"
- name: Slack Notification1
- name: Slack Notification4
uses: rtCamp/action-slack-notify@master
env:
SLACK_MESSAGE: ${{ steps.tests2.outputs.stdout }}
SLACK_TITLE: Vercel "Explorer" Preview Deployment Link
SLACK_MESSAGE: ' Vercel Link: ${{ steps.test1.outputs.stdout }} PR Commit: ${{steps.test2.outputs.stdout}}'
SLACK_TITLE: Vercel "Explorer" Preview Deployment Link , PR Commit
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}


@@ -30,7 +30,8 @@ jobs:
runs-on: ubuntu-latest
defaults:
run:
working-directory: explorer
working-directory: explorer
steps:
- uses: actions/checkout@v2
with:
@@ -38,9 +39,8 @@ jobs:
- uses: amondnet/vercel-action@v20
with:
vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
github-token: ${{ secrets.GITHUB_TOKEN }} #Optional
github-token: ${{ secrets.PAT }} #Optional
vercel-args: '--prod' #for production
vercel-org-id: ${{ secrets.ORG_ID}} #Required
vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
working-directory: ./explorer
scope: ${{ secrets.TEAM_ID }}


@@ -93,6 +93,7 @@ pull_request_rules:
- author=mergify[bot]
- head~=^mergify/bp/
- "#status-failure=0"
- "-merged"
actions:
label:
add:

Cargo.lock (generated, 1926 changes): file diff suppressed because it is too large.


@@ -1,63 +1,68 @@
[workspace]
members = [
"accountsdb-plugin-interface",
"accountsdb-plugin-manager",
"accounts-cluster-bench",
"bench-streamer",
"bench-tps",
"account-decoder",
"accounts-bench",
"accounts-cluster-bench",
"banking-bench",
"banks-client",
"banks-interface",
"banks-server",
"bucket_map",
"bench-streamer",
"bench-tps",
"bloom",
"bucket_map",
"clap-utils",
"cli",
"cli-config",
"cli-output",
"client",
"client-test",
"core",
"dos",
"download-utils",
"entry",
"faucet",
"frozen-abi",
"perf",
"validator",
"genesis",
"genesis-utils",
"geyser-plugin-interface",
"geyser-plugin-manager",
"gossip",
"install",
"keygen",
"ledger",
"ledger-tool",
"local-cluster",
"logger",
"log-analyzer",
"logger",
"measure",
"merkle-root-bench",
"merkle-tree",
"storage-bigtable",
"storage-proto",
"streamer",
"measure",
"metrics",
"net-shaper",
"net-utils",
"notifier",
"perf",
"poh",
"poh-bench",
"program-test",
"programs/address-lookup-table",
"programs/address-lookup-table-tests",
"programs/ed25519-tests",
"programs/bpf_loader",
"programs/bpf_loader/gen-syscall-list",
"programs/compute-budget",
"programs/config",
"programs/ed25519-tests",
"programs/stake",
"programs/vote",
"programs/zk-token-proof",
"rayon-threadlimit",
"rbpf-cli",
"remote-wallet",
"replica-lib",
"replica-node",
"rpc",
"rpc-test",
"runtime",
"runtime/store-tool",
"sdk",
@@ -65,24 +70,19 @@ members = [
"sdk/cargo-test-bpf",
"send-transaction-service",
"stake-accounts",
"storage-bigtable",
"storage-proto",
"streamer",
"sys-tuner",
"test-validator",
"tokens",
"transaction-dos",
"transaction-status",
"account-decoder",
"upload-perf",
"net-utils",
"validator",
"version",
"cli",
"rayon-threadlimit",
"watchtower",
"replica-node",
"replica-lib",
"test-validator",
"rpc-test",
"client-test",
"zk-token-sdk",
"programs/zk-token-proof",
]
exclude = [


@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.10.0"
version = "1.10.5"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,21 +10,21 @@ license = "Apache-2.0"
edition = "2021"
[dependencies]
base64 = "0.12.3"
Inflector = "0.11.4"
base64 = "0.13.0"
bincode = "1.3.3"
bs58 = "0.4.0"
bv = "0.11.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_json = "1.0.78"
solana-config-program = { path = "../programs/config", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
serde_json = "1.0.79"
solana-config-program = { path = "../programs/config", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.5" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.10.0"
zstd = "0.11.1"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -33,7 +33,7 @@ pub type StringDecimals = String;
pub const MAX_BASE58_BYTES: usize = 128;
/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiAccount {
pub lamports: u64,


@@ -2,21 +2,21 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-bench"
version = "1.10.0"
version = "1.10.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
clap = "2.33.1"
log = "0.4.14"
rayon = "1.5.1"
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
clap = "2.33.1"
solana-logger = { path = "../logger", version = "=1.10.5" }
solana-measure = { path = "../measure", version = "=1.10.5" }
solana-runtime = { path = "../runtime", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-version = { path = "../version", version = "=1.10.5" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-cluster-bench"
version = "1.10.0"
version = "1.10.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,25 +13,25 @@ clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-faucet = { path = "../faucet", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.10.5" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.5" }
solana-client = { path = "../client", version = "=1.10.5" }
solana-faucet = { path = "../faucet", version = "=1.10.5" }
solana-gossip = { path = "../gossip", version = "=1.10.5" }
solana-logger = { path = "../logger", version = "=1.10.5" }
solana-measure = { path = "../measure", version = "=1.10.5" }
solana-net-utils = { path = "../net-utils", version = "=1.10.5" }
solana-runtime = { path = "../runtime", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-streamer = { path = "../streamer", version = "=1.10.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.5" }
solana-version = { path = "../version", version = "=1.10.5" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
[dev-dependencies]
solana-core = { path = "../core", version = "=1.10.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.10.0" }
solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
solana-core = { path = "../core", version = "=1.10.5" }
solana-local-cluster = { path = "../local-cluster", version = "=1.10.5" }
solana-test-validator = { path = "../test-validator", version = "=1.10.5" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,20 +0,0 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
</a>
</p>
# Solana AccountsDb Plugin Interface
This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see the detail of the `accountsdb_plugin_interface.rs` for the interface definition.
The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
instantiates the implementation of the interface.
The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the accounts data into an
external PostgreSQL database.
More information about Solana is available in the [Solana documentation](https://docs.solana.com/).
Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)


@@ -1 +0,0 @@
pub mod accountsdb_plugin_interface;


@@ -1,29 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.10.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"
[dependencies]
bs58 = "0.4.0"
crossbeam-channel = "0.5"
json5 = "0.4.1"
libloading = "0.7.3"
log = "0.4.11"
serde_json = "1.0.78"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-metrics = { path = "../metrics", version = "=1.10.0" }
solana-rpc = { path = "../rpc", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
thiserror = "1.0.30"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-banking-bench"
version = "1.10.0"
version = "1.10.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-core = { path = "../core", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-ledger = { path = "../ledger", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-perf = { path = "../perf", version = "=1.10.0" }
solana-poh = { path = "../poh", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-core = { path = "../core", version = "=1.10.5" }
solana-gossip = { path = "../gossip", version = "=1.10.5" }
solana-ledger = { path = "../ledger", version = "=1.10.5" }
solana-logger = { path = "../logger", version = "=1.10.5" }
solana-measure = { path = "../measure", version = "=1.10.5" }
solana-perf = { path = "../perf", version = "=1.10.5" }
solana-poh = { path = "../poh", version = "=1.10.5" }
solana-runtime = { path = "../runtime", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-streamer = { path = "../streamer", version = "=1.10.5" }
solana-version = { path = "../version", version = "=1.10.5" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -175,6 +175,11 @@ fn main() {
let mut bank_forks = BankForks::new(bank0);
let mut bank = bank_forks.working_bank();
// set cost tracker limits to MAX so it will not filter out TXs
bank.write_cost_tracker()
.unwrap()
.set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);
info!("threads: {} txs: {}", num_threads, total_num_transactions);
let same_payer = matches.is_present("same_payer");
@@ -335,6 +340,13 @@ fn main() {
bank = bank_forks.working_bank();
insert_time.stop();
// set cost tracker limits to MAX so it will not filter out TXs
bank.write_cost_tracker().unwrap().set_limits(
std::u64::MAX,
std::u64::MAX,
std::u64::MAX,
);
poh_recorder.lock().unwrap().set_bank(&bank);
assert!(poh_recorder.lock().unwrap().bank().is_some());
if bank.slot() > 32 {


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.10.0"
version = "1.10.5"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,17 +12,17 @@ edition = "2021"
[dependencies]
borsh = "0.9.3"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.10.0" }
solana-program = { path = "../sdk/program", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-banks-interface = { path = "../banks-interface", version = "=1.10.5" }
solana-program = { path = "../sdk/program", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
tarpc = { version = "0.27.2", features = ["full"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-banks-server = { path = "../banks-server", version = "=1.10.0" }
solana-banks-server = { path = "../banks-server", version = "=1.10.5" }
solana-runtime = { path = "../runtime", version = "=1.10.5" }
[lib]
crate-type = ["lib"]


@@ -5,8 +5,10 @@
//! but they are undocumented, may change over time, and are generally more
//! cumbersome to use.
pub use crate::error::BanksClientError;
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
pub use {
crate::error::BanksClientError,
solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus},
};
use {
borsh::BorshDeserialize,
futures::{future::join_all, Future, FutureExt, TryFutureExt},


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.10.0"
version = "1.10.5"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,7 +11,7 @@ edition = "2021"
[dependencies]
serde = { version = "1.0.136", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
tarpc = { version = "0.27.2", features = ["full"] }
[lib]


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.10.0"
version = "1.10.5"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,10 +13,10 @@ edition = "2021"
bincode = "1.3.3"
crossbeam-channel = "0.5"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.0" }
solana-banks-interface = { path = "../banks-interface", version = "=1.10.5" }
solana-runtime = { path = "../runtime", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.5" }
tarpc = { version = "0.27.2", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }


@@ -24,7 +24,7 @@ use {
transaction::{self, SanitizedTransaction, Transaction},
},
solana_send_transaction_service::{
send_transaction_service::{SendTransactionService, TransactionInfo},
send_transaction_service::{SendTransactionService, TransactionInfo, DEFAULT_TPU_USE_QUIC},
tpu_info::NullTpuInfo,
},
std::{
@@ -399,6 +399,7 @@ pub async fn start_tcp_server(
receiver,
5_000,
0,
DEFAULT_TPU_USE_QUIC,
);
let server = BanksServer::new(


@@ -2,18 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-streamer"
version = "1.10.0"
version = "1.10.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
crossbeam-channel = "0.5"
clap = "2.33.1"
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
crossbeam-channel = "0.5"
solana-net-utils = { path = "../net-utils", version = "=1.10.5" }
solana-streamer = { path = "../streamer", version = "=1.10.5" }
solana-version = { path = "../version", version = "=1.10.5" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-bench-tps"
version = "1.10.0"
version = "1.10.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,25 +13,25 @@ clap = "2.33.1"
crossbeam-channel = "0.5"
log = "0.4.14"
rayon = "1.5.1"
serde_json = "1.0.78"
serde_json = "1.0.79"
serde_yaml = "0.8.23"
solana-core = { path = "../core", version = "=1.10.0" }
solana-genesis = { path = "../genesis", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-faucet = { path = "../faucet", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-metrics = { path = "../metrics", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.5" }
solana-core = { path = "../core", version = "=1.10.5" }
solana-faucet = { path = "../faucet", version = "=1.10.5" }
solana-genesis = { path = "../genesis", version = "=1.10.5" }
solana-gossip = { path = "../gossip", version = "=1.10.5" }
solana-logger = { path = "../logger", version = "=1.10.5" }
solana-measure = { path = "../measure", version = "=1.10.5" }
solana-metrics = { path = "../metrics", version = "=1.10.5" }
solana-net-utils = { path = "../net-utils", version = "=1.10.5" }
solana-runtime = { path = "../runtime", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-streamer = { path = "../streamer", version = "=1.10.5" }
solana-version = { path = "../version", version = "=1.10.5" }
[dev-dependencies]
serial_test = "0.5.1"
solana-local-cluster = { path = "../local-cluster", version = "=1.10.0" }
serial_test = "0.6.0"
solana-local-cluster = { path = "../local-cluster", version = "=1.10.5" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -110,7 +110,7 @@ fn generate_chunked_transfers(
shared_txs: &SharedTransactions,
shared_tx_active_thread_count: Arc<AtomicIsize>,
source_keypair_chunks: Vec<Vec<&Keypair>>,
dest_keypair_chunks: &mut Vec<VecDeque<&Keypair>>,
dest_keypair_chunks: &mut [VecDeque<&Keypair>],
threads: usize,
duration: Duration,
sustained: bool,
@@ -475,6 +475,7 @@ fn do_tx_transfers<T: Client>(
let tx_len = txs0.len();
let transfer_start = Instant::now();
let mut old_transactions = false;
let mut transactions = Vec::<_>::new();
for tx in txs0 {
let now = timestamp();
// Transactions that are too old will be rejected by the cluster. Don't bother
@@ -483,10 +484,13 @@ fn do_tx_transfers<T: Client>(
old_transactions = true;
continue;
}
client
.async_send_transaction(tx.0)
.expect("async_send_transaction in do_tx_transfers");
transactions.push(tx.0);
}
if let Err(error) = client.async_send_batch(transactions) {
warn!("send_batch_sync in do_tx_transfers failed: {}", error);
}
if old_transactions {
let mut shared_txs_wl = shared_txs.write().expect("write lock in do_tx_transfers");
shared_txs_wl.clear();

@@ -1,6 +1,6 @@
[package]
name = "solana-bloom"
version = "1.10.0"
version = "1.10.5"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,14 +12,14 @@ edition = "2021"
[dependencies]
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
rand = "0.7.0"
serde = { version = "1.0.136", features = ["rc"] }
rayon = "1.5.1"
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
serde = { version = "1.0.136", features = ["rc"] }
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.5" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
[lib]
crate-type = ["lib"]

@@ -1,6 +1,6 @@
[package]
name = "solana-bucket-map"
version = "1.10.0"
version = "1.10.5"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
@@ -11,18 +11,18 @@ license = "Apache-2.0"
edition = "2021"
[dependencies]
solana-sdk = { path = "../sdk", version = "=1.10.0" }
memmap2 = "0.5.2"
log = { version = "0.4.11" }
solana-measure = { path = "../measure", version = "=1.10.0" }
rand = "0.7.0"
tempfile = "3.3.0"
memmap2 = "0.5.3"
modular-bitfield = "0.11.2"
rand = "0.7.0"
solana-measure = { path = "../measure", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
tempfile = "3.3.0"
[dev-dependencies]
fs_extra = "1.2.0"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.5" }
[lib]
crate-type = ["lib"]

@@ -0,0 +1,365 @@
#!/usr/bin/env bash
#
# Builds a buildkite pipeline based on the environment variables
#
set -e
cd "$(dirname "$0")"/..
output_file=${1:-/dev/stderr}
if [[ -n $CI_PULL_REQUEST ]]; then
IFS=':' read -ra affected_files <<< "$(buildkite-agent meta-data get affected_files)"
if [[ ${#affected_files[*]} -eq 0 ]]; then
echo "Unable to determine the files affected by this PR"
exit 1
fi
else
affected_files=()
fi
annotate() {
if [[ -n $BUILDKITE ]]; then
buildkite-agent annotate "$@"
fi
}
# Checks if a CI pull request affects one or more path patterns. Each
# pattern argument is checked in series. If one of them is found to be affected,
# return immediately as such.
#
# Bash regular expressions are permitted in the pattern:
# affects .rs$ -- any file or directory ending in .rs
# affects .rs -- also matches foo.rs.bar
# affects ^snap/ -- anything under the snap/ subdirectory
# affects snap/ -- also matches foo/snap/
# Any pattern starting with the ! character will be negated:
# affects !^docs/ -- anything *not* under the docs/ subdirectory
#
affects() {
if [[ -z $CI_PULL_REQUEST ]]; then
# affected_files metadata is not currently available for non-PR builds so assume
# the worst (affected)
return 0
fi
# Assume everything needs to be tested when any Dockerfile changes
for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do
if [[ ${pattern:0:1} = "!" ]]; then
for file in "${affected_files[@]}"; do
if [[ ! $file =~ ${pattern:1} ]]; then
return 0 # affected
fi
done
else
for file in "${affected_files[@]}"; do
if [[ $file =~ $pattern ]]; then
return 0 # affected
fi
done
fi
done
return 1 # not affected
}
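# Illustrative usage only (not part of the original script): patterns are ORed
# together, and a leading ! matches any changed file that does NOT fit the regex,
# e.g.
#   if affects .rs$ Cargo.toml$ '!^docs/'; then
#     echo "Rust sources, Cargo.toml files, or something outside docs/ changed"
#   fi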
# Checks if a CI pull request affects anything other than the provided path patterns
#
# Syntax is the same as `affects()` except that the negation prefix is not
# supported
#
affects_other_than() {
if [[ -z $CI_PULL_REQUEST ]]; then
# affected_files metadata is not currently available for non-PR builds so assume
# the worst (affected)
return 0
fi
for file in "${affected_files[@]}"; do
declare matched=false
for pattern in "$@"; do
if [[ $file =~ $pattern ]]; then
matched=true
fi
done
if ! $matched; then
return 0 # affected
fi
done
return 1 # not affected
}
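# Illustrative usage only (not part of the original script): returns 0 ("affected")
# as soon as any changed file matches none of the given patterns, e.g.
#   if affects_other_than ^docs/ ^web3.js/ .md$; then
#     all_test_steps
#   fi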
start_pipeline() {
echo "# $*" > "$output_file"
echo "steps:" >> "$output_file"
}
command_step() {
cat >> "$output_file" <<EOF
- name: "$1"
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
EOF
}
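# Illustrative: a call such as `command_step sanity "ci/test-sanity.sh" 5` (see
# pull_or_push_steps below) appends a step of roughly this shape:
#   - name: "sanity"
#     command: "ci/test-sanity.sh"
#     timeout_in_minutes: 5
#     artifact_paths: "log-*.txt"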
trigger_secondary_step() {
cat >> "$output_file" <<"EOF"
- trigger: "solana-secondary"
branches: "!pull/*"
async: true
build:
message: "${BUILDKITE_MESSAGE}"
commit: "${BUILDKITE_COMMIT}"
branch: "${BUILDKITE_BRANCH}"
env:
TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
EOF
}
wait_step() {
echo " - wait" >> "$output_file"
}
all_test_steps() {
command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20
wait_step
# Coverage...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
"Coverage skipped as no .rs files were modified"
fi
# Coverage in disk...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^scripts/coverage-in-disk.sh \
; then
command_step coverage-in-disk ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
"Coverage skipped as no .rs files were modified"
fi
# Full test suite
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70
wait_step
# BPF test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-bpf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-bpf.sh"
name: "stable-bpf"
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=default"
EOF
else
annotate --style info \
"Stable-BPF skipped as no relevant files were modified"
fi
# Perf test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 20
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
EOF
else
annotate --style info \
"Stable-perf skipped as no relevant files were modified"
fi
# Downstream backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
EOF
else
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi
# Downstream Anchor projects backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-anchor-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-anchor-projects.sh"
name: "downstream-anchor-projects"
timeout_in_minutes: 10
EOF
else
annotate --style info \
"downstream-anchor-projects skipped as no relevant files were modified"
fi
# Wasm support
if affects \
^ci/test-wasm.sh \
^ci/test-stable.sh \
^sdk/ \
; then
command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
else
annotate --style info \
"wasm skipped as no relevant files were modified"
fi
# Benches...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^ci/test-bench.sh \
; then
command_step bench "ci/test-bench.sh" 30
else
annotate --style info --context test-bench \
"Bench skipped as no .rs files were modified"
fi
command_step "local-cluster" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
40
command_step "local-cluster-flakey" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
10
command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
}
pull_or_push_steps() {
command_step sanity "ci/test-sanity.sh" 5
wait_step
# Check for any .sh file changes
if affects .sh$; then
command_step shellcheck "ci/shellcheck.sh" 5
wait_step
fi
# Run the full test suite by default, skipping only if modifications are local
# to some particular areas of the tree
if affects_other_than ^.buildkite ^.mergify .md$ ^docs/ ^web3.js/ ^explorer/ ^.gitbook; then
all_test_steps
fi
# web3.js, explorer and docs changes run on Travis or Github actions...
}
if [[ -n $BUILDKITE_TAG ]]; then
start_pipeline "Tag pipeline for $BUILDKITE_TAG"
annotate --style info --context release-tag \
"https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
# Jump directly to the secondary build to publish release artifacts quickly
trigger_secondary_step
exit 0
fi
if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
echo "+++ Affected files in this PR"
for file in "${affected_files[@]}"; do
echo "- $file"
done
start_pipeline "Pull request pipeline for $BUILDKITE_BRANCH"
# Add helpful link back to the corresponding Github Pull Request
annotate --style info --context pr-backlink \
"Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
if [[ $GITHUB_USER = "dependabot[bot]" ]]; then
command_step dependabot "ci/dependabot-pr.sh" 5
wait_step
fi
pull_or_push_steps
exit 0
fi
start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}"
pull_or_push_steps
wait_step
trigger_secondary_step
exit 0

@@ -102,6 +102,8 @@ command_step() {
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
agents:
- "queue=solana"
EOF
}
@@ -137,7 +139,7 @@ all_test_steps() {
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
@@ -145,7 +147,7 @@ all_test_steps() {
fi
# Full test suite
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70
wait_step
# BPF test suite
@@ -168,7 +170,7 @@ all_test_steps() {
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=default"
- "queue=solana"
EOF
else
annotate --style info \
@@ -221,6 +223,8 @@ EOF
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
agents:
- "queue=solana"
EOF
else
annotate --style info \
@@ -246,6 +250,8 @@ EOF
- command: "scripts/build-downstream-anchor-projects.sh"
name: "downstream-anchor-projects"
timeout_in_minutes: 10
agents:
- "queue=solana"
EOF
else
annotate --style info \

@@ -8,11 +8,6 @@ src_root="$(readlink -f "${here}/..")"
cd "${src_root}"
cargo_audit_ignores=(
# failure is officially deprecated/unmaintained
#
# Blocked on multiple upstream crates removing their `failure` dependency.
--ignore RUSTSEC-2020-0036
# `net2` crate has been deprecated; use `socket2` instead
#
# Blocked on https://github.com/paritytech/jsonrpc/issues/575
@@ -30,22 +25,10 @@ cargo_audit_ignores=(
# generic-array: arr! macro erases lifetimes
#
# Blocked on libsecp256k1 releasing with upgraded dependencies
# https://github.com/paritytech/libsecp256k1/issues/66
# Blocked on new spl dependencies on solana-program v1.9
# due to curve25519-dalek dependency
--ignore RUSTSEC-2020-0146
# hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling
#
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
# https://github.com/paritytech/jsonrpc/issues/605
--ignore RUSTSEC-2021-0078
# hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss
#
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
# https://github.com/paritytech/jsonrpc/issues/605
--ignore RUSTSEC-2021-0079
# chrono: Potential segfault in `localtime_r` invocations
#
# Blocked due to no safe upgrade

@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.58.1
FROM solanalabs/rust:1.59.0
ARG date
RUN set -x \

@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.58.1
FROM rust:1.59.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0

@@ -70,7 +70,7 @@ for Cargo_toml in $Cargo_tomls; do
rm -rf crate-test
"$cargo" stable init crate-test
cd crate-test/
echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
echo "${crate_name} = \"=${expectedCrateVersion}\"" >> Cargo.toml
echo "[workspace]" >> Cargo.toml
"$cargo" stable check
) && really_uploaded=1

@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.58.1
stable_version=1.59.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2022-01-21
nightly_version=2022-02-24
fi

@@ -57,32 +57,20 @@ if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
exit "$check_status"
fi
# Ensure nightly and --benches
# Ensure nightly and --benches
_ scripts/cargo-for-all-lock-files.sh nightly check --locked --all-targets
else
echo "Note: cargo-for-all-lock-files.sh skipped because $CI_BASE_BRANCH != $EDGE_CHANNEL"
fi
_ ci/order-crates-for-publishing.py
_ ci/order-crates-for-publishing.py
# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic
_ scripts/cargo-for-all-lock-files.sh -- nightly clippy -Zunstable-options --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic
_ "$cargo" stable fmt --all -- --check
_ scripts/cargo-for-all-lock-files.sh -- nightly fmt --all -- --check
_ ci/do-audit.sh
{
cd programs/bpf
for project in rust/*/ ; do
echo "+++ do_bpf_checks $project"
(
cd "$project"
_ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
_ "$cargo" stable fmt -- --check
)
done
}
_ ci/do-audit.sh
echo --- ok

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.10.0"
version = "1.10.5"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,16 +10,16 @@ documentation = "https://docs.rs/solana-clap-utils"
edition = "2021"
[dependencies]
chrono = "0.4"
clap = "2.33.0"
rpassword = "5.0"
solana-perf = { path = "../perf", version = "=1.10.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.0", default-features = false}
solana-sdk = { path = "../sdk", version = "=1.10.0" }
rpassword = "6.0"
solana-perf = { path = "../perf", version = "=1.10.5" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.5", default-features = false }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
uriparse = "0.6.3"
url = "2.2.2"
chrono = "0.4"
[dev-dependencies]
tempfile = "3.3.0"

@@ -17,7 +17,7 @@ use {
},
bip39::{Language, Mnemonic, Seed},
clap::ArgMatches,
rpassword::prompt_password_stderr,
rpassword::prompt_password,
solana_remote_wallet::{
locator::{Locator as RemoteWalletLocator, LocatorError as RemoteWalletLocatorError},
remote_keypair::generate_remote_keypair,
@@ -945,9 +945,9 @@ pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {
/// Prompts user for a passphrase and then asks for confirmation to check for mistakes
pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>> {
let passphrase = prompt_password_stderr(prompt)?;
let passphrase = prompt_password(prompt)?;
if !passphrase.is_empty() {
let confirmed = rpassword::prompt_password_stderr("Enter same passphrase again: ")?;
let confirmed = rpassword::prompt_password("Enter same passphrase again: ")?;
if confirmed != passphrase {
return Err("Passphrases did not match".into());
}
@@ -1055,7 +1055,7 @@ pub fn keypair_from_seed_phrase(
derivation_path: Option<DerivationPath>,
legacy: bool,
) -> Result<Keypair, Box<dyn error::Error>> {
let seed_phrase = prompt_password_stderr(&format!("[{}] seed phrase: ", keypair_name))?;
let seed_phrase = prompt_password(&format!("[{}] seed phrase: ", keypair_name))?;
let seed_phrase = seed_phrase.trim();
let passphrase_prompt = format!(
"[{}] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue: ",

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.0"
version = "1.10.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,7 +18,7 @@ serde_yaml = "0.8.23"
url = "2.2.2"
[dev-dependencies]
anyhow = "1.0.53"
anyhow = "1.0.56"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -3,29 +3,32 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.0"
version = "1.10.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-cli-output"
[dependencies]
Inflector = "0.11.4"
base64 = "0.13.0"
chrono = { version = "0.4.11", features = ["serde"] }
clap = "2.33.0"
console = "0.15.0"
humantime = "2.0.1"
Inflector = "0.11.4"
indicatif = "0.16.2"
serde = "1.0.136"
serde_json = "1.0.78"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
serde_json = "1.0.79"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.5" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.5" }
solana-client = { path = "../client", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.5" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.5" }
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
[dev-dependencies]
ed25519-dalek = "=1.0.1"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -28,7 +28,7 @@ use {
signature::Signature,
stake::state::{Authorized, Lockup},
stake_history::StakeHistoryEntry,
transaction::{Transaction, TransactionError},
transaction::{Transaction, TransactionError, VersionedTransaction},
},
solana_transaction_status::{
EncodedConfirmedBlock, EncodedTransaction, TransactionConfirmationStatus,
@@ -393,19 +393,19 @@ impl fmt::Display for CliValidators {
) -> fmt::Result {
fn non_zero_or_dash(v: u64, max_v: u64) -> String {
if v == 0 {
"- ".into()
" - ".into()
} else if v == max_v {
format!("{:>8} ( 0)", v)
format!("{:>9} ( 0)", v)
} else if v > max_v.saturating_sub(100) {
format!("{:>8} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
format!("{:>9} ({:>3})", v, -(max_v.saturating_sub(v) as isize))
} else {
format!("{:>8} ", v)
format!("{:>9} ", v)
}
}
writeln!(
f,
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {}",
"{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {:>22} ({:.2}%)",
if validator.delinquent {
WARNING.to_string()
} else {
@@ -419,19 +419,19 @@ impl fmt::Display for CliValidators {
if let Some(skip_rate) = validator.skip_rate {
format!("{:.2}%", skip_rate)
} else {
"- ".to_string()
"- ".to_string()
},
validator.epoch_credits,
validator.version,
if validator.activated_stake > 0 {
format!(
"{} ({:.2}%)",
build_balance_message(validator.activated_stake, use_lamports_unit, true),
100. * validator.activated_stake as f64 / total_active_stake as f64,
)
} else {
"-".into()
},
build_balance_message_with_config(
validator.activated_stake,
&BuildBalanceMessageConfig {
use_lamports_unit,
trim_trailing_zeros: false,
..BuildBalanceMessageConfig::default()
}
),
100. * validator.activated_stake as f64 / total_active_stake as f64,
)
}
@@ -441,13 +441,13 @@ impl fmt::Display for CliValidators {
0
};
let header = style(format!(
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
"{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}",
" ",
"Identity",
"Vote Account",
"Commission",
"Last Vote ",
"Root Slot ",
"Last Vote ",
"Root Slot ",
"Skip Rate",
"Credits",
"Version",
@@ -2218,7 +2218,7 @@ pub enum CliSignatureVerificationStatus {
}
impl CliSignatureVerificationStatus {
pub fn verify_transaction(tx: &Transaction) -> Vec<Self> {
pub fn verify_transaction(tx: &VersionedTransaction) -> Vec<Self> {
tx.verify_with_results()
.iter()
.zip(&tx.signatures)
@@ -2335,7 +2335,7 @@ impl fmt::Display for CliBlock {
writeln_transaction(
f,
&transaction_with_meta.transaction.decode().unwrap(),
&transaction_with_meta.meta,
transaction_with_meta.meta.as_ref(),
" ",
None,
None,
@@ -2354,7 +2354,7 @@ pub struct CliTransaction {
#[serde(skip_serializing)]
pub slot: Option<Slot>,
#[serde(skip_serializing)]
pub decoded_transaction: Transaction,
pub decoded_transaction: VersionedTransaction,
#[serde(skip_serializing)]
pub prefix: String,
#[serde(skip_serializing_if = "Vec::is_empty")]
@@ -2369,7 +2369,7 @@ impl fmt::Display for CliTransaction {
writeln_transaction(
f,
&self.decoded_transaction,
&self.meta,
self.meta.as_ref(),
&self.prefix,
if !self.sigverify_status.is_empty() {
Some(&self.sigverify_status)
@@ -2451,6 +2451,8 @@ pub struct CliGossipNode {
pub rpc_host: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub feature_set: Option<u32>,
}
impl CliGossipNode {
@@ -2463,6 +2465,7 @@ impl CliGossipNode {
tpu_port: info.tpu.map(|addr| addr.port()),
rpc_host: info.rpc.map(|addr| addr.to_string()),
version: info.version,
feature_set: info.feature_set,
}
}
}
@@ -2488,7 +2491,7 @@ impl fmt::Display for CliGossipNode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{:15} | {:44} | {:6} | {:5} | {:21} | {}",
"{:15} | {:44} | {:6} | {:5} | {:21} | {:8}| {}",
unwrap_to_string_or_none(self.ip_address.as_ref()),
self.identity_label
.as_ref()
@@ -2497,6 +2500,7 @@ impl fmt::Display for CliGossipNode {
unwrap_to_string_or_none(self.tpu_port.as_ref()),
unwrap_to_string_or_none(self.rpc_host.as_ref()),
unwrap_to_string_or_default(self.version.as_ref(), "unknown"),
unwrap_to_string_or_default(self.feature_set.as_ref(), "unknown"),
)
}
}
@@ -2511,10 +2515,10 @@ impl fmt::Display for CliGossipNodes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"IP Address | Node identifier \
| Gossip | TPU | RPC Address | Version\n\
"IP Address | Identity \
| Gossip | TPU | RPC Address | Version | Feature Set\n\
----------------+----------------------------------------------+\
--------+-------+-----------------------+----------------",
--------+-------+-----------------------+---------+----------------",
)?;
for node in self.0.iter() {
writeln!(f, "{}", node)?;
@@ -2772,10 +2776,10 @@ mod tests {
let expected_msg = "AwECBwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDgTl3Dqh9\
F19Wo1Rmw0x+zMuNipG07jeiXfYPW4/Js5QEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE\
BAQEBAYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBQUFBQUFBQUFBQUFBQUFBQUF\
BQUFBQUFBQUFBQUFBQUGp9UXGSxWjuCKhF9z0peIzwNcMUWyGrNE2AYuqUAAAAAAAAAAAAAA\
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcH\
BwcCBgMDBQIEBAAAAAYCAQQMAgAAACoAAAAAAAAA"
BAQEBAUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBgYGBgYGBgYGBgYGBgYGBgYG\
BgYGBgYGBgYGBgYGBgYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAan1RcZLFaO\
4IqEX3PSl4jPA1wxRbIas0TYBi6pQAAABwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcH\
BwcCBQMEBgIEBAAAAAUCAQMMAgAAACoAAAAAAAAA"
.to_string();
let config = ReturnSignersConfig {
dump_transaction_message: true,

@@ -4,10 +4,18 @@ use {
console::style,
indicatif::{ProgressBar, ProgressStyle},
solana_sdk::{
clock::UnixTimestamp, hash::Hash, message::Message, native_token::lamports_to_sol,
program_utils::limited_deserialize, pubkey::Pubkey, stake, transaction::Transaction,
clock::UnixTimestamp,
hash::Hash,
instruction::CompiledInstruction,
message::v0::MessageAddressTableLookup,
native_token::lamports_to_sol,
program_utils::limited_deserialize,
pubkey::Pubkey,
signature::Signature,
stake,
transaction::{TransactionError, TransactionVersion, VersionedTransaction},
},
solana_transaction_status::UiTransactionStatusMeta,
solana_transaction_status::{Rewards, UiTransactionStatusMeta},
spl_memo::{id as spl_memo_id, v1::id as spl_memo_v1_id},
std::{collections::HashMap, fmt, io},
};
@@ -131,22 +139,28 @@ pub fn println_signers(
println!();
}
fn format_account_mode(message: &Message, index: usize) -> String {
struct CliAccountMeta {
is_signer: bool,
is_writable: bool,
is_invoked: bool,
}
fn format_account_mode(meta: CliAccountMeta) -> String {
format!(
"{}r{}{}", // accounts are always readable...
if message.is_signer(index) {
if meta.is_signer {
"s" // stands for signer
} else {
"-"
},
if message.is_writable(index) {
if meta.is_writable {
"w" // comment for consistent rust fmt (no joking; lol)
} else {
"-"
},
// account may be executable on-chain while not being
// designated as a program-id in the message
if message.maybe_executable(index) {
if meta.is_invoked {
"x"
} else {
// programs to be executed via CPI cannot be identified as
@@ -156,202 +170,83 @@ fn format_account_mode(message: &Message, index: usize) -> String {
)
}
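// Illustrative only (not part of the diff): with the new CliAccountMeta form,
// format_account_mode(CliAccountMeta { is_signer: true, is_writable: true, is_invoked: false })
// renders "srw-" (signer, always readable, writable, not invoked as a program id),
// matching the mode column in the tests below, e.g. "Account 0: srw- ...".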
pub fn write_transaction<W: io::Write>(
fn write_transaction<W: io::Write>(
w: &mut W,
transaction: &Transaction,
transaction_status: &Option<UiTransactionStatusMeta>,
transaction: &VersionedTransaction,
transaction_status: Option<&UiTransactionStatusMeta>,
prefix: &str,
sigverify_status: Option<&[CliSignatureVerificationStatus]>,
block_time: Option<UnixTimestamp>,
timezone: CliTimezone,
) -> io::Result<()> {
write_block_time(w, block_time, timezone, prefix)?;
let message = &transaction.message;
if let Some(block_time) = block_time {
writeln!(
w,
"{}Block Time: {:?}",
prefix,
Local.timestamp(block_time, 0)
)?;
}
writeln!(
w,
"{}Recent Blockhash: {:?}",
prefix, message.recent_blockhash
)?;
let sigverify_statuses = if let Some(sigverify_status) = sigverify_status {
sigverify_status
let account_keys: Vec<AccountKeyType> = {
let static_keys_iter = message
.static_account_keys()
.iter()
.map(|s| format!(" ({})", s))
.collect()
} else {
vec!["".to_string(); transaction.signatures.len()]
.map(AccountKeyType::Known);
let dynamic_keys: Vec<AccountKeyType> = message
.address_table_lookups()
.map(transform_lookups_to_unknown_keys)
.unwrap_or_default();
static_keys_iter.chain(dynamic_keys).collect()
};
for (signature_index, (signature, sigverify_status)) in transaction
.signatures
.iter()
.zip(&sigverify_statuses)
.enumerate()
{
writeln!(
w,
"{}Signature {}: {:?}{}",
prefix, signature_index, signature, sigverify_status,
)?;
}
write_version(w, transaction.version(), prefix)?;
write_recent_blockhash(w, message.recent_blockhash(), prefix)?;
write_signatures(w, &transaction.signatures, sigverify_status, prefix)?;
let mut fee_payer_index = None;
for (account_index, account) in message.account_keys.iter().enumerate() {
for (account_index, account) in account_keys.iter().enumerate() {
if fee_payer_index.is_none() && message.is_non_loader_key(account_index) {
fee_payer_index = Some(account_index)
}
writeln!(
let account_meta = CliAccountMeta {
is_signer: message.is_signer(account_index),
is_writable: message.is_maybe_writable(account_index),
is_invoked: message.is_invoked(account_index),
};
write_account(
w,
"{}Account {}: {} {}{}",
prefix,
account_index,
format_account_mode(message, account_index),
account,
if Some(account_index) == fee_payer_index {
" (fee payer)"
} else {
""
},
*account,
format_account_mode(account_meta),
Some(account_index) == fee_payer_index,
prefix,
)?;
}
for (instruction_index, instruction) in message.instructions.iter().enumerate() {
let program_pubkey = message.account_keys[instruction.program_id_index as usize];
writeln!(w, "{}Instruction {}", prefix, instruction_index)?;
writeln!(
for (instruction_index, instruction) in message.instructions().iter().enumerate() {
let program_pubkey = account_keys[instruction.program_id_index as usize];
let instruction_accounts = instruction
.accounts
.iter()
.map(|account_index| (account_keys[*account_index as usize], *account_index));
write_instruction(
w,
"{} Program: {} ({})",
prefix, program_pubkey, instruction.program_id_index
instruction_index,
program_pubkey,
instruction,
instruction_accounts,
prefix,
)?;
for (account_index, account) in instruction.accounts.iter().enumerate() {
let account_pubkey = message.account_keys[*account as usize];
writeln!(
w,
"{} Account {}: {} ({})",
prefix, account_index, account_pubkey, account
)?;
}
}
let mut raw = true;
if program_pubkey == solana_vote_program::id() {
if let Ok(vote_instruction) = limited_deserialize::<
solana_vote_program::vote_instruction::VoteInstruction,
>(&instruction.data)
{
writeln!(w, "{} {:?}", prefix, vote_instruction)?;
raw = false;
}
} else if program_pubkey == stake::program::id() {
if let Ok(stake_instruction) =
limited_deserialize::<stake::instruction::StakeInstruction>(&instruction.data)
{
writeln!(w, "{} {:?}", prefix, stake_instruction)?;
raw = false;
}
} else if program_pubkey == solana_sdk::system_program::id() {
if let Ok(system_instruction) = limited_deserialize::<
solana_sdk::system_instruction::SystemInstruction,
>(&instruction.data)
{
writeln!(w, "{} {:?}", prefix, system_instruction)?;
raw = false;
}
} else if is_memo_program(&program_pubkey) {
if let Ok(s) = std::str::from_utf8(&instruction.data) {
writeln!(w, "{} Data: \"{}\"", prefix, s)?;
raw = false;
}
}
if raw {
writeln!(w, "{} Data: {:?}", prefix, instruction.data)?;
}
if let Some(address_table_lookups) = message.address_table_lookups() {
write_address_table_lookups(w, address_table_lookups, prefix)?;
}
if let Some(transaction_status) = transaction_status {
writeln!(
w,
"{}Status: {}",
prefix,
match &transaction_status.status {
Ok(_) => "Ok".into(),
Err(err) => err.to_string(),
}
)?;
writeln!(
w,
"{} Fee: ◎{}",
prefix,
lamports_to_sol(transaction_status.fee)
)?;
assert_eq!(
transaction_status.pre_balances.len(),
transaction_status.post_balances.len()
);
for (i, (pre, post)) in transaction_status
.pre_balances
.iter()
.zip(transaction_status.post_balances.iter())
.enumerate()
{
if pre == post {
writeln!(
w,
"{} Account {} balance: ◎{}",
prefix,
i,
lamports_to_sol(*pre)
)?;
} else {
writeln!(
w,
"{} Account {} balance: ◎{} -> ◎{}",
prefix,
i,
lamports_to_sol(*pre),
lamports_to_sol(*post)
)?;
}
}
if let Some(log_messages) = &transaction_status.log_messages {
if !log_messages.is_empty() {
writeln!(w, "{}Log Messages:", prefix,)?;
for log_message in log_messages {
writeln!(w, "{} {}", prefix, log_message)?;
}
}
}
if let Some(rewards) = &transaction_status.rewards {
if !rewards.is_empty() {
writeln!(w, "{}Rewards:", prefix,)?;
writeln!(
w,
"{} {:<44} {:^15} {:<15} {:<20}",
prefix, "Address", "Type", "Amount", "New Balance"
)?;
for reward in rewards {
let sign = if reward.lamports < 0 { "-" } else { "" };
writeln!(
w,
"{} {:<44} {:^15} {}◎{:<14.9} ◎{:<18.9}",
prefix,
reward.pubkey,
if let Some(reward_type) = reward.reward_type {
format!("{}", reward_type)
} else {
"-".to_string()
},
sign,
lamports_to_sol(reward.lamports.abs() as u64),
lamports_to_sol(reward.post_balance)
)?;
}
}
}
write_status(w, &transaction_status.status, prefix)?;
write_fees(w, transaction_status.fee, prefix)?;
write_balances(w, transaction_status, prefix)?;
write_log_messages(w, transaction_status.log_messages.as_ref(), prefix)?;
write_rewards(w, transaction_status.rewards.as_ref(), prefix)?;
} else {
writeln!(w, "{}Status: Unavailable", prefix)?;
}
@@ -359,9 +254,347 @@ pub fn write_transaction<W: io::Write>(
Ok(())
}
fn transform_lookups_to_unknown_keys(lookups: &[MessageAddressTableLookup]) -> Vec<AccountKeyType> {
let unknown_writable_keys = lookups
.iter()
.enumerate()
.flat_map(|(lookup_index, lookup)| {
lookup
.writable_indexes
.iter()
.map(move |table_index| AccountKeyType::Unknown {
lookup_index,
table_index: *table_index,
})
});
let unknown_readonly_keys = lookups
.iter()
.enumerate()
.flat_map(|(lookup_index, lookup)| {
lookup
.readonly_indexes
.iter()
.map(move |table_index| AccountKeyType::Unknown {
lookup_index,
table_index: *table_index,
})
});
unknown_writable_keys.chain(unknown_readonly_keys).collect()
}
enum CliTimezone {
Local,
#[allow(dead_code)]
Utc,
}
fn write_block_time<W: io::Write>(
w: &mut W,
block_time: Option<UnixTimestamp>,
timezone: CliTimezone,
prefix: &str,
) -> io::Result<()> {
if let Some(block_time) = block_time {
let block_time_output = match timezone {
CliTimezone::Local => format!("{:?}", Local.timestamp(block_time, 0)),
CliTimezone::Utc => format!("{:?}", Utc.timestamp(block_time, 0)),
};
writeln!(w, "{}Block Time: {}", prefix, block_time_output,)?;
}
Ok(())
}
fn write_version<W: io::Write>(
w: &mut W,
version: TransactionVersion,
prefix: &str,
) -> io::Result<()> {
let version = match version {
TransactionVersion::Legacy(_) => "legacy".to_string(),
TransactionVersion::Number(number) => number.to_string(),
};
writeln!(w, "{}Version: {}", prefix, version)
}
fn write_recent_blockhash<W: io::Write>(
w: &mut W,
recent_blockhash: &Hash,
prefix: &str,
) -> io::Result<()> {
writeln!(w, "{}Recent Blockhash: {:?}", prefix, recent_blockhash)
}
fn write_signatures<W: io::Write>(
w: &mut W,
signatures: &[Signature],
sigverify_status: Option<&[CliSignatureVerificationStatus]>,
prefix: &str,
) -> io::Result<()> {
let sigverify_statuses = if let Some(sigverify_status) = sigverify_status {
sigverify_status
.iter()
.map(|s| format!(" ({})", s))
.collect()
} else {
vec!["".to_string(); signatures.len()]
};
for (signature_index, (signature, sigverify_status)) in
signatures.iter().zip(&sigverify_statuses).enumerate()
{
writeln!(
w,
"{}Signature {}: {:?}{}",
prefix, signature_index, signature, sigverify_status,
)?;
}
Ok(())
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum AccountKeyType<'a> {
Known(&'a Pubkey),
Unknown {
lookup_index: usize,
table_index: u8,
},
}
impl fmt::Display for AccountKeyType<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::Known(address) => write!(f, "{}", address),
Self::Unknown {
lookup_index,
table_index,
} => {
write!(
f,
"Unknown Address (uses lookup {} and index {})",
lookup_index, table_index
)
}
}
}
}
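// Illustrative only (not part of the diff): AccountKeyType::Unknown { lookup_index: 0, table_index: 1 }
// displays as "Unknown Address (uses lookup 0 and index 1)", as seen in the v0
// transaction test output below.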
fn write_account<W: io::Write>(
w: &mut W,
account_index: usize,
account_address: AccountKeyType,
account_mode: String,
is_fee_payer: bool,
prefix: &str,
) -> io::Result<()> {
writeln!(
w,
"{}Account {}: {} {}{}",
prefix,
account_index,
account_mode,
account_address,
if is_fee_payer { " (fee payer)" } else { "" },
)
}
fn write_instruction<'a, W: io::Write>(
w: &mut W,
instruction_index: usize,
program_pubkey: AccountKeyType,
instruction: &CompiledInstruction,
instruction_accounts: impl Iterator<Item = (AccountKeyType<'a>, u8)>,
prefix: &str,
) -> io::Result<()> {
writeln!(w, "{}Instruction {}", prefix, instruction_index)?;
writeln!(
w,
"{} Program: {} ({})",
prefix, program_pubkey, instruction.program_id_index
)?;
for (index, (account_address, account_index)) in instruction_accounts.enumerate() {
writeln!(
w,
"{} Account {}: {} ({})",
prefix, index, account_address, account_index
)?;
}
let mut raw = true;
if let AccountKeyType::Known(program_pubkey) = program_pubkey {
if program_pubkey == &solana_vote_program::id() {
if let Ok(vote_instruction) = limited_deserialize::<
solana_vote_program::vote_instruction::VoteInstruction,
>(&instruction.data)
{
writeln!(w, "{} {:?}", prefix, vote_instruction)?;
raw = false;
}
} else if program_pubkey == &stake::program::id() {
if let Ok(stake_instruction) =
limited_deserialize::<stake::instruction::StakeInstruction>(&instruction.data)
{
writeln!(w, "{} {:?}", prefix, stake_instruction)?;
raw = false;
}
} else if program_pubkey == &solana_sdk::system_program::id() {
if let Ok(system_instruction) = limited_deserialize::<
solana_sdk::system_instruction::SystemInstruction,
>(&instruction.data)
{
writeln!(w, "{} {:?}", prefix, system_instruction)?;
raw = false;
}
} else if is_memo_program(program_pubkey) {
if let Ok(s) = std::str::from_utf8(&instruction.data) {
writeln!(w, "{} Data: \"{}\"", prefix, s)?;
raw = false;
}
}
}
if raw {
writeln!(w, "{} Data: {:?}", prefix, instruction.data)?;
}
Ok(())
}
fn write_address_table_lookups<W: io::Write>(
w: &mut W,
address_table_lookups: &[MessageAddressTableLookup],
prefix: &str,
) -> io::Result<()> {
for (lookup_index, lookup) in address_table_lookups.iter().enumerate() {
writeln!(w, "{}Address Table Lookup {}", prefix, lookup_index,)?;
writeln!(w, "{} Table Account: {}", prefix, lookup.account_key,)?;
writeln!(
w,
"{} Writable Indexes: {:?}",
prefix,
&lookup.writable_indexes[..],
)?;
writeln!(
w,
"{} Readonly Indexes: {:?}",
prefix,
&lookup.readonly_indexes[..],
)?;
}
Ok(())
}
fn write_rewards<W: io::Write>(
w: &mut W,
rewards: Option<&Rewards>,
prefix: &str,
) -> io::Result<()> {
if let Some(rewards) = rewards {
if !rewards.is_empty() {
writeln!(w, "{}Rewards:", prefix,)?;
writeln!(
w,
"{} {:<44} {:^15} {:<16} {:<20}",
prefix, "Address", "Type", "Amount", "New Balance"
)?;
for reward in rewards {
let sign = if reward.lamports < 0 { "-" } else { "" };
writeln!(
w,
"{} {:<44} {:^15} {}◎{:<14.9} ◎{:<18.9}",
prefix,
reward.pubkey,
if let Some(reward_type) = reward.reward_type {
format!("{}", reward_type)
} else {
"-".to_string()
},
sign,
lamports_to_sol(reward.lamports.abs() as u64),
lamports_to_sol(reward.post_balance)
)?;
}
}
}
Ok(())
}
fn write_status<W: io::Write>(
w: &mut W,
transaction_status: &Result<(), TransactionError>,
prefix: &str,
) -> io::Result<()> {
writeln!(
w,
"{}Status: {}",
prefix,
match transaction_status {
Ok(_) => "Ok".into(),
Err(err) => err.to_string(),
}
)
}
fn write_fees<W: io::Write>(w: &mut W, transaction_fee: u64, prefix: &str) -> io::Result<()> {
writeln!(w, "{} Fee: ◎{}", prefix, lamports_to_sol(transaction_fee))
}
fn write_balances<W: io::Write>(
w: &mut W,
transaction_status: &UiTransactionStatusMeta,
prefix: &str,
) -> io::Result<()> {
assert_eq!(
transaction_status.pre_balances.len(),
transaction_status.post_balances.len()
);
for (i, (pre, post)) in transaction_status
.pre_balances
.iter()
.zip(transaction_status.post_balances.iter())
.enumerate()
{
if pre == post {
writeln!(
w,
"{} Account {} balance: ◎{}",
prefix,
i,
lamports_to_sol(*pre)
)?;
} else {
writeln!(
w,
"{} Account {} balance: ◎{} -> ◎{}",
prefix,
i,
lamports_to_sol(*pre),
lamports_to_sol(*post)
)?;
}
}
Ok(())
}
fn write_log_messages<W: io::Write>(
w: &mut W,
log_messages: Option<&Vec<String>>,
prefix: &str,
) -> io::Result<()> {
if let Some(log_messages) = log_messages {
if !log_messages.is_empty() {
writeln!(w, "{}Log Messages:", prefix,)?;
for log_message in log_messages {
writeln!(w, "{} {}", prefix, log_message)?;
}
}
}
Ok(())
}
pub fn println_transaction(
transaction: &Transaction,
transaction_status: &Option<UiTransactionStatusMeta>,
transaction: &VersionedTransaction,
transaction_status: Option<&UiTransactionStatusMeta>,
prefix: &str,
sigverify_status: Option<&[CliSignatureVerificationStatus]>,
block_time: Option<UnixTimestamp>,
@@ -374,6 +607,7 @@ pub fn println_transaction(
prefix,
sigverify_status,
block_time,
CliTimezone::Local,
)
.is_ok()
{
@@ -385,23 +619,24 @@ pub fn println_transaction(
pub fn writeln_transaction(
f: &mut dyn fmt::Write,
transaction: &Transaction,
transaction_status: &Option<UiTransactionStatusMeta>,
transaction: &VersionedTransaction,
transaction_status: Option<&UiTransactionStatusMeta>,
prefix: &str,
sigverify_status: Option<&[CliSignatureVerificationStatus]>,
block_time: Option<UnixTimestamp>,
) -> fmt::Result {
let mut w = Vec::new();
if write_transaction(
let write_result = write_transaction(
&mut w,
transaction,
transaction_status,
prefix,
sigverify_status,
block_time,
)
.is_ok()
{
CliTimezone::Local,
);
if write_result.is_ok() {
if let Ok(s) = String::from_utf8(w) {
write!(f, "{}", s)?;
}
@@ -427,7 +662,215 @@ pub fn unix_timestamp_to_string(unix_timestamp: UnixTimestamp) -> String {
#[cfg(test)]
mod test {
use {super::*, solana_sdk::pubkey::Pubkey};
use {
super::*,
solana_sdk::{
message::{
v0::{self, LoadedAddresses},
Message as LegacyMessage, MessageHeader, VersionedMessage,
},
pubkey::Pubkey,
signature::{Keypair, Signer},
transaction::Transaction,
},
solana_transaction_status::{Reward, RewardType, TransactionStatusMeta},
std::io::BufWriter,
};
fn new_test_keypair() -> Keypair {
let secret = ed25519_dalek::SecretKey::from_bytes(&[0u8; 32]).unwrap();
let public = ed25519_dalek::PublicKey::from(&secret);
let keypair = ed25519_dalek::Keypair { secret, public };
Keypair::from_bytes(&keypair.to_bytes()).unwrap()
}
fn new_test_v0_transaction() -> VersionedTransaction {
let keypair = new_test_keypair();
let account_key = Pubkey::new_from_array([1u8; 32]);
let address_table_key = Pubkey::new_from_array([2u8; 32]);
VersionedTransaction::try_new(
VersionedMessage::V0(v0::Message {
header: MessageHeader {
num_required_signatures: 1,
num_readonly_signed_accounts: 0,
num_readonly_unsigned_accounts: 1,
},
recent_blockhash: Hash::default(),
account_keys: vec![keypair.pubkey(), account_key],
address_table_lookups: vec![MessageAddressTableLookup {
account_key: address_table_key,
writable_indexes: vec![0],
readonly_indexes: vec![1],
}],
instructions: vec![CompiledInstruction::new_from_raw_parts(
3,
vec![],
vec![1, 2],
)],
}),
&[&keypair],
)
.unwrap()
}
#[test]
fn test_write_legacy_transaction() {
let keypair = new_test_keypair();
let account_key = Pubkey::new_from_array([1u8; 32]);
let transaction = VersionedTransaction::from(Transaction::new(
&[&keypair],
LegacyMessage {
header: MessageHeader {
num_required_signatures: 1,
num_readonly_signed_accounts: 0,
num_readonly_unsigned_accounts: 1,
},
recent_blockhash: Hash::default(),
account_keys: vec![keypair.pubkey(), account_key],
instructions: vec![CompiledInstruction::new_from_raw_parts(1, vec![], vec![0])],
},
Hash::default(),
));
let sigverify_status = CliSignatureVerificationStatus::verify_transaction(&transaction);
let meta = TransactionStatusMeta {
status: Ok(()),
fee: 5000,
pre_balances: vec![5000, 10_000],
post_balances: vec![0, 9_900],
inner_instructions: None,
log_messages: Some(vec!["Test message".to_string()]),
pre_token_balances: None,
post_token_balances: None,
rewards: Some(vec![Reward {
pubkey: account_key.to_string(),
lamports: -100,
post_balance: 9_900,
reward_type: Some(RewardType::Rent),
commission: None,
}]),
loaded_addresses: LoadedAddresses::default(),
};
let output = {
let mut write_buffer = BufWriter::new(Vec::new());
write_transaction(
&mut write_buffer,
&transaction,
Some(&meta.into()),
"",
Some(&sigverify_status),
Some(1628633791),
CliTimezone::Utc,
)
.unwrap();
let bytes = write_buffer.into_inner().unwrap();
String::from_utf8(bytes).unwrap()
};
assert_eq!(
output,
r#"Block Time: 2021-08-10T22:16:31Z
Version: legacy
Recent Blockhash: 11111111111111111111111111111111
Signature 0: 5pkjrE4VBa3Bu9CMKXgh1U345cT1gGo8QBVRTzHAo6gHeiPae5BTbShP15g6NgqRMNqu8Qrhph1ATmrfC1Ley3rx (pass)
Account 0: srw- 4zvwRjXUKGfvwnParsHAS3HuSVzV5cA4McphgmoCtajS (fee payer)
Account 1: -r-x 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi
Instruction 0
Program: 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi (1)
Account 0: 4zvwRjXUKGfvwnParsHAS3HuSVzV5cA4McphgmoCtajS (0)
Data: []
Status: Ok
Fee: ◎0.000005
Account 0 balance: ◎0.000005 -> ◎0
Account 1 balance: ◎0.00001 -> ◎0.0000099
Log Messages:
Test message
Rewards:
Address Type Amount New Balance \0
4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi rent -◎0.000000100 ◎0.000009900 \0
"#.replace("\\0", "") // replace marker used to subvert trailing whitespace linter on CI
);
}
#[test]
fn test_write_v0_transaction() {
let versioned_tx = new_test_v0_transaction();
let sigverify_status = CliSignatureVerificationStatus::verify_transaction(&versioned_tx);
let address_table_entry1 = Pubkey::new_from_array([3u8; 32]);
let address_table_entry2 = Pubkey::new_from_array([4u8; 32]);
let loaded_addresses = LoadedAddresses {
writable: vec![address_table_entry1],
readonly: vec![address_table_entry2],
};
let meta = TransactionStatusMeta {
status: Ok(()),
fee: 5000,
pre_balances: vec![5000, 10_000, 15_000, 20_000],
post_balances: vec![0, 10_000, 14_900, 20_000],
inner_instructions: None,
log_messages: Some(vec!["Test message".to_string()]),
pre_token_balances: None,
post_token_balances: None,
rewards: Some(vec![Reward {
pubkey: address_table_entry1.to_string(),
lamports: -100,
post_balance: 14_900,
reward_type: Some(RewardType::Rent),
commission: None,
}]),
loaded_addresses,
};
let output = {
let mut write_buffer = BufWriter::new(Vec::new());
write_transaction(
&mut write_buffer,
&versioned_tx,
Some(&meta.into()),
"",
Some(&sigverify_status),
Some(1628633791),
CliTimezone::Utc,
)
.unwrap();
let bytes = write_buffer.into_inner().unwrap();
String::from_utf8(bytes).unwrap()
};
assert_eq!(
output,
r#"Block Time: 2021-08-10T22:16:31Z
Version: 0
Recent Blockhash: 11111111111111111111111111111111
Signature 0: 5iEy3TT3ZhTA1NkuCY8GrQGNVY8d5m1bpjdh5FT3Ca4Py81fMipAZjafDuKJKrkw5q5UAAd8oPcgZ4nyXpHt4Fp7 (pass)
Account 0: srw- 4zvwRjXUKGfvwnParsHAS3HuSVzV5cA4McphgmoCtajS (fee payer)
Account 1: -r-- 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi
Account 2: -rw- Unknown Address (uses lookup 0 and index 0)
Account 3: -r-x Unknown Address (uses lookup 0 and index 1)
Instruction 0
Program: Unknown Address (uses lookup 0 and index 1) (3)
Account 0: 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi (1)
Account 1: Unknown Address (uses lookup 0 and index 0) (2)
Data: []
Address Table Lookup 0
Table Account: 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR
Writable Indexes: [0]
Readonly Indexes: [1]
Status: Ok
Fee: ◎0.000005
Account 0 balance: ◎0.000005 -> ◎0
Account 1 balance: ◎0.00001
Account 2 balance: ◎0.000015 -> ◎0.0000149
Account 3 balance: ◎0.00002
Log Messages:
Test message
Rewards:
Address Type Amount New Balance \0
CktRuQ2mttgRGkXJtyksdKHjUdc2C4TgDzyB98oEzy8 rent -◎0.000000100 ◎0.000014900 \0
"#.replace("\\0", "") // replace marker used to subvert trailing whitespace linter on CI
);
}
#[test]
fn test_format_labeled_address() {

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.0"
version = "1.10.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,43 +13,43 @@ documentation = "https://docs.rs/solana-cli"
bincode = "1.3.3"
bs58 = "0.4.0"
clap = "2.33.1"
criterion-stats = "0.3.0"
ctrlc = { version = "3.2.1", features = ["termination"] }
console = "0.15.0"
const_format = "0.2.22"
criterion-stats = "0.3.0"
crossbeam-channel = "0.5"
log = "0.4.14"
ctrlc = { version = "3.2.1", features = ["termination"] }
humantime = "2.0.1"
log = "0.4.14"
num-traits = "0.2"
pretty-hex = "0.2.1"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "1.0.5"
reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "1.0.6"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_json = "1.0.78"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
solana-cli-config = { path = "../cli-config", version = "=1.10.0" }
solana-cli-output = { path = "../cli-output", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-config-program = { path = "../programs/config", version = "=1.10.0" }
solana-faucet = { path = "../faucet", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" }
solana_rbpf = "=0.2.23"
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
serde_json = "1.0.79"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.5" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.5" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.5" }
solana-cli-config = { path = "../cli-config", version = "=1.10.5" }
solana-cli-output = { path = "../cli-output", version = "=1.10.5" }
solana-client = { path = "../client", version = "=1.10.5" }
solana-config-program = { path = "../programs/config", version = "=1.10.5" }
solana-faucet = { path = "../faucet", version = "=1.10.5" }
solana-logger = { path = "../logger", version = "=1.10.5" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.5" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.5" }
solana-version = { path = "../version", version = "=1.10.5" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.5" }
solana_rbpf = "=0.2.24"
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0.30"
tiny-bip39 = "0.8.2"
[dev-dependencies]
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.5" }
solana-test-validator = { path = "../test-validator", version = "=1.10.5" }
tempfile = "3.3.0"
[[bin]]

@@ -30,7 +30,7 @@ use {
pubkey::Pubkey,
signature::{Signature, Signer, SignerError},
stake::{instruction::LockupArgs, state::Lockup},
transaction::{Transaction, TransactionError},
transaction::{TransactionError, VersionedTransaction},
},
solana_vote_program::vote_state::VoteAuthorize,
std::{collections::HashMap, error, io::stdout, str::FromStr, sync::Arc, time::Duration},
@@ -88,6 +88,7 @@ pub enum CliCommand {
timeout: Duration,
blockhash: Option<Hash>,
print_timestamp: bool,
additional_fee: Option<u32>,
},
Rent {
data_length: usize,
@@ -161,6 +162,7 @@ pub enum CliCommand {
address: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
skip_fee_check: bool,
},
Program(ProgramCliCommand),
// Stake Commands
@@ -384,7 +386,7 @@ pub enum CliCommand {
seed: String,
program_id: Pubkey,
},
DecodeTransaction(Transaction),
DecodeTransaction(VersionedTransaction),
ResolveSigner(Option<String>),
ShowAccount {
pubkey: Pubkey,
@@ -743,6 +745,7 @@ pub fn parse_command(
signers.push(signer);
1
});
let skip_fee_check = matches.is_present("skip_fee_check");
Ok(CliCommandInfo {
command: CliCommand::Deploy {
@@ -750,6 +753,7 @@ pub fn parse_command(
address,
use_deprecated_loader: matches.is_present("use_deprecated_loader"),
allow_excessive_balance: matches.is_present("allow_excessive_balance"),
skip_fee_check,
},
signers,
})
@@ -977,6 +981,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
timeout,
blockhash,
print_timestamp,
additional_fee,
} => process_ping(
&rpc_client,
config,
@@ -985,6 +990,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
timeout,
blockhash,
*print_timestamp,
additional_fee,
),
CliCommand::Rent {
data_length,
@@ -1126,6 +1132,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
address,
use_deprecated_loader,
allow_excessive_balance,
skip_fee_check,
} => process_deploy(
rpc_client,
config,
@@ -1133,6 +1140,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*address,
*use_deprecated_loader,
*allow_excessive_balance,
*skip_fee_check,
),
CliCommand::Program(program_subcommand) => {
process_program_subcommand(rpc_client, config, program_subcommand)
@@ -1964,6 +1972,7 @@ mod tests {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
},
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -1986,6 +1995,7 @@ mod tests {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
},
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2379,6 +2389,7 @@ mod tests {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
config.output_format = OutputFormat::JsonCompact;
let result = process_command(&config);
@@ -2399,6 +2410,7 @@ mod tests {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
assert!(process_command(&config).is_err());
}


@@ -33,12 +33,14 @@ use {
rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE,
rpc_response::SlotInfo,
},
solana_program_runtime::compute_budget,
solana_remote_wallet::remote_wallet::RemoteWalletManager,
solana_sdk::{
account::from_account,
account_utils::StateMut,
clock::{self, Clock, Slot},
commitment_config::CommitmentConfig,
compute_budget::ComputeBudgetInstruction,
epoch_schedule::Epoch,
hash::Hash,
message::Message,
@@ -269,6 +271,13 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.default_value("15")
.help("Wait up to timeout seconds for transaction confirmation"),
)
.arg(
Arg::with_name("additional_fee")
.long("additional-fee")
.value_name("NUMBER")
.takes_value(true)
.help("Request additional-fee for transaction"),
)
.arg(blockhash_arg()),
)
.subcommand(
@@ -513,6 +522,7 @@ pub fn parse_cluster_ping(
let timeout = Duration::from_secs(value_t_or_exit!(matches, "timeout", u64));
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
let print_timestamp = matches.is_present("print_timestamp");
let additional_fee = value_of(matches, "additional_fee");
Ok(CliCommandInfo {
command: CliCommand::Ping {
interval,
@@ -520,6 +530,7 @@ pub fn parse_cluster_ping(
timeout,
blockhash,
print_timestamp,
additional_fee,
},
signers: vec![default_signer.signer_from_path(matches, wallet_manager)?],
})
@@ -1047,6 +1058,7 @@ pub fn process_get_block(
RpcBlockConfig {
encoding: Some(UiTransactionEncoding::Base64),
commitment: Some(CommitmentConfig::confirmed()),
max_supported_transaction_version: Some(0),
..RpcBlockConfig::default()
},
)?
@@ -1350,6 +1362,7 @@ pub fn process_ping(
timeout: &Duration,
fixed_blockhash: &Option<Hash>,
print_timestamp: bool,
additional_fee: &Option<u32>,
) -> ProcessResult {
let (signal_sender, signal_receiver) = unbounded();
ctrlc::set_handler(move || {
@@ -1374,6 +1387,7 @@ pub fn process_ping(
blockhash_from_cluster = true;
}
}
'mainloop: for seq in 0..count.unwrap_or(std::u64::MAX) {
let now = Instant::now();
if fixed_blockhash.is_none() && now.duration_since(blockhash_acquired).as_secs() > 60 {
@@ -1388,8 +1402,18 @@ pub fn process_ping(
lamports += 1;
let build_message = |lamports| {
let ix = system_instruction::transfer(&config.signers[0].pubkey(), &to, lamports);
Message::new(&[ix], Some(&config.signers[0].pubkey()))
let mut ixs = vec![system_instruction::transfer(
&config.signers[0].pubkey(),
&to,
lamports,
)];
if let Some(additional_fee) = additional_fee {
ixs.push(ComputeBudgetInstruction::request_units(
compute_budget::DEFAULT_UNITS,
*additional_fee,
));
}
Message::new(&ixs, Some(&config.signers[0].pubkey()))
};
let (message, _) = resolve_spend_tx_and_check_account_balance(
rpc_client,
@@ -2019,6 +2043,7 @@ pub fn process_transaction_history(
RpcTransactionConfig {
encoding: Some(UiTransactionEncoding::Base64),
commitment: Some(CommitmentConfig::confirmed()),
max_supported_transaction_version: Some(0),
},
) {
Ok(confirmed_transaction) => {
@@ -2028,7 +2053,7 @@ pub fn process_transaction_history(
.transaction
.decode()
.expect("Successful decode"),
&confirmed_transaction.transaction.meta,
confirmed_transaction.transaction.meta.as_ref(),
" ",
None,
None,
@@ -2312,6 +2337,7 @@ mod tests {
Hash::from_str("4CCNp28j6AhGq7PkjPDP4wbQWBS8LLbQin2xV5n8frKX").unwrap()
),
print_timestamp: true,
additional_fee: None,
},
signers: vec![default_keypair.into()],
}

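For reference, the new `--additional-fee` flag threads through `parse_cluster_ping` into `process_ping`, which appends a compute-budget instruction to the ping transfer when the flag is present. A minimal sketch of that message construction, based on the two-argument `ComputeBudgetInstruction::request_units` and `compute_budget::DEFAULT_UNITS` used in the hunk above; the `build_ping_message` helper name and its inputs are illustrative only:

use {
    solana_program_runtime::compute_budget,
    solana_sdk::{
        compute_budget::ComputeBudgetInstruction, message::Message, pubkey::Pubkey,
        system_instruction,
    },
};

// Build the ping transfer, optionally requesting extra compute units with an additional fee.
fn build_ping_message(
    payer: &Pubkey,
    to: &Pubkey,
    lamports: u64,
    additional_fee: Option<u32>,
) -> Message {
    let mut ixs = vec![system_instruction::transfer(payer, to, lamports)];
    if let Some(additional_fee) = additional_fee {
        ixs.push(ComputeBudgetInstruction::request_units(
            compute_budget::DEFAULT_UNITS,
            additional_fee,
        ));
    }
    Message::new(&ixs, Some(payer))
}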

@@ -66,6 +66,7 @@ pub enum ProgramCliCommand {
is_final: bool,
max_len: Option<usize>,
allow_excessive_balance: bool,
skip_fee_check: bool,
},
WriteBuffer {
program_location: String,
@@ -73,6 +74,7 @@ pub enum ProgramCliCommand {
buffer_pubkey: Option<Pubkey>,
buffer_authority_signer_index: Option<SignerIndex>,
max_len: Option<usize>,
skip_fee_check: bool,
},
SetBufferAuthority {
buffer_pubkey: Pubkey,
@@ -114,6 +116,13 @@ impl ProgramSubCommands for App<'_, '_> {
SubCommand::with_name("program")
.about("Program management")
.setting(AppSettings::SubcommandRequiredElseHelp)
.arg(
Arg::with_name("skip_fee_check")
.long("skip-fee-check")
.hidden(true)
.takes_value(false)
.global(true)
)
.subcommand(
SubCommand::with_name("deploy")
.about("Deploy a program")
@@ -406,6 +415,12 @@ impl ProgramSubCommands for App<'_, '_> {
.long("allow-excessive-deploy-account-balance")
.takes_value(false)
.help("Use the designated program id, even if the account already holds a large balance of SOL")
)
.arg(
Arg::with_name("skip_fee_check")
.long("skip-fee-check")
.hidden(true)
.takes_value(false)
),
)
}
@@ -416,7 +431,14 @@ pub fn parse_program_subcommand(
default_signer: &DefaultSigner,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let response = match matches.subcommand() {
let (subcommand, sub_matches) = matches.subcommand();
let matches_skip_fee_check = matches.is_present("skip_fee_check");
let sub_matches_skip_fee_check = sub_matches
.map(|m| m.is_present("skip_fee_check"))
.unwrap_or(false);
let skip_fee_check = matches_skip_fee_check || sub_matches_skip_fee_check;
let response = match (subcommand, sub_matches) {
("deploy", Some(matches)) => {
let mut bulk_signers = vec![Some(
default_signer.signer_from_path(matches, wallet_manager)?,
@@ -476,6 +498,7 @@ pub fn parse_program_subcommand(
is_final: matches.is_present("final"),
max_len,
allow_excessive_balance: matches.is_present("allow_excessive_balance"),
skip_fee_check,
}),
signers: signer_info.signers,
}
@@ -521,6 +544,7 @@ pub fn parse_program_subcommand(
buffer_authority_signer_index: signer_info
.index_of_or_none(buffer_authority_pubkey),
max_len,
skip_fee_check,
}),
signers: signer_info.signers,
}
@@ -669,6 +693,7 @@ pub fn process_program_subcommand(
is_final,
max_len,
allow_excessive_balance,
skip_fee_check,
} => process_program_deploy(
rpc_client,
config,
@@ -681,6 +706,7 @@ pub fn process_program_subcommand(
*is_final,
*max_len,
*allow_excessive_balance,
*skip_fee_check,
),
ProgramCliCommand::WriteBuffer {
program_location,
@@ -688,6 +714,7 @@ pub fn process_program_subcommand(
buffer_pubkey,
buffer_authority_signer_index,
max_len,
skip_fee_check,
} => process_write_buffer(
rpc_client,
config,
@@ -696,6 +723,7 @@ pub fn process_program_subcommand(
*buffer_pubkey,
*buffer_authority_signer_index,
*max_len,
*skip_fee_check,
),
ProgramCliCommand::SetBufferAuthority {
buffer_pubkey,
@@ -793,6 +821,7 @@ fn process_program_deploy(
is_final: bool,
max_len: Option<usize>,
allow_excessive_balance: bool,
skip_fee_check: bool,
) -> ProcessResult {
let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?;
let (buffer_provided, buffer_signer, buffer_pubkey) = if let Some(i) = buffer_signer_index {
@@ -947,6 +976,7 @@ fn process_program_deploy(
&buffer_pubkey,
Some(upgrade_authority_signer),
allow_excessive_balance,
skip_fee_check,
)
} else {
do_process_program_upgrade(
@@ -957,6 +987,7 @@ fn process_program_deploy(
config.signers[upgrade_authority_signer_index],
&buffer_pubkey,
buffer_signer,
skip_fee_check,
)
};
if result.is_ok() && is_final {
@@ -983,6 +1014,7 @@ fn process_write_buffer(
buffer_pubkey: Option<Pubkey>,
buffer_authority_signer_index: Option<SignerIndex>,
max_len: Option<usize>,
skip_fee_check: bool,
) -> ProcessResult {
// Create ephemeral keypair to use for Buffer account, if not provided
let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?;
@@ -1050,6 +1082,7 @@ fn process_write_buffer(
&buffer_pubkey,
Some(buffer_authority),
true,
skip_fee_check,
);
if result.is_err() && buffer_signer_index.is_none() && buffer_signer.is_some() {
@@ -1636,6 +1669,7 @@ pub fn process_deploy(
buffer_signer_index: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
skip_fee_check: bool,
) -> ProcessResult {
// Create ephemeral keypair to use for Buffer account, if not provided
let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?;
@@ -1666,6 +1700,7 @@ pub fn process_deploy(
&buffer_signer.pubkey(),
Some(buffer_signer),
allow_excessive_balance,
skip_fee_check,
);
if result.is_err() && buffer_signer_index.is_none() {
report_ephemeral_mnemonic(words, mnemonic);
@@ -1704,6 +1739,7 @@ fn do_process_program_write_and_deploy(
buffer_pubkey: &Pubkey,
buffer_authority_signer: Option<&dyn Signer>,
allow_excessive_balance: bool,
skip_fee_check: bool,
) -> ProcessResult {
// Build messages to calculate fees
let mut messages: Vec<&Message> = Vec::new();
@@ -1834,7 +1870,9 @@ fn do_process_program_write_and_deploy(
messages.push(message);
}
check_payer(&rpc_client, config, balance_needed, &messages)?;
if !skip_fee_check {
check_payer(&rpc_client, config, balance_needed, &messages)?;
}
send_deploy_messages(
rpc_client,
@@ -1868,6 +1906,7 @@ fn do_process_program_upgrade(
upgrade_authority: &dyn Signer,
buffer_pubkey: &Pubkey,
buffer_signer: Option<&dyn Signer>,
skip_fee_check: bool,
) -> ProcessResult {
let loader_id = bpf_loader_upgradeable::id();
let data_len = program_data.len();
@@ -1967,7 +2006,10 @@ fn do_process_program_upgrade(
);
messages.push(&final_message);
check_payer(&rpc_client, config, balance_needed, &messages)?;
if !skip_fee_check {
check_payer(&rpc_client, config, balance_needed, &messages)?;
}
send_deploy_messages(
rpc_client,
config,
@@ -2255,6 +2297,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2281,6 +2324,7 @@ mod tests {
is_final: false,
max_len: Some(42),
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2309,6 +2353,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2339,6 +2384,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2368,6 +2414,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2400,6 +2447,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2427,6 +2475,7 @@ mod tests {
upgrade_authority_signer_index: 0,
is_final: true,
max_len: None,
skip_fee_check: false,
allow_excessive_balance: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
@@ -2460,6 +2509,7 @@ mod tests {
buffer_pubkey: None,
buffer_authority_signer_index: Some(0),
max_len: None,
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2483,6 +2533,7 @@ mod tests {
buffer_pubkey: None,
buffer_authority_signer_index: Some(0),
max_len: Some(42),
skip_fee_check: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2509,6 +2560,7 @@ mod tests {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(0),
max_len: None,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2538,6 +2590,7 @@ mod tests {
buffer_pubkey: None,
buffer_authority_signer_index: Some(1),
max_len: None,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2572,6 +2625,7 @@ mod tests {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -3014,6 +3068,7 @@ mod tests {
is_final: false,
max_len: None,
allow_excessive_balance: false,
skip_fee_check: false,
}),
signers: vec![&default_keypair],
output_format: OutputFormat::JsonCompact,

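As a side note on the flag plumbing above: `--skip-fee-check` is registered both as a hidden global argument on the `program` subcommand tree and directly on the deploy subcommand, so `parse_program_subcommand` honors it in either position. A minimal sketch of that merge with clap 2.x; the free-standing `skip_fee_check` helper is hypothetical and only mirrors the lines shown above:

use clap::ArgMatches;

// True if --skip-fee-check was passed either globally or on the active subcommand.
fn skip_fee_check(matches: &ArgMatches) -> bool {
    let (_subcommand, sub_matches) = matches.subcommand();
    matches.is_present("skip_fee_check")
        || sub_matches
            .map(|m| m.is_present("skip_fee_check"))
            .unwrap_or(false)
}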

@@ -1383,17 +1383,12 @@ pub fn process_stake_authorize(
};
if let Some(authorized) = authorized {
match authorization_type {
StakeAuthorize::Staker => {
// first check authorized withdrawer
check_current_authority(&authorized.withdrawer, &authority.pubkey())
.or_else(|_| {
// ...then check authorized staker. If neither matches, error will
// print the stake key as `expected`
check_current_authority(&authorized.staker, &authority.pubkey())
})?;
}
StakeAuthorize::Staker => check_current_authority(
&[authorized.withdrawer, authorized.staker],
&authority.pubkey(),
)?,
StakeAuthorize::Withdrawer => {
check_current_authority(&authorized.withdrawer, &authority.pubkey())?;
check_current_authority(&[authorized.withdrawer], &authority.pubkey())?;
}
}
} else {
@@ -1935,7 +1930,7 @@ pub fn process_stake_set_lockup(
};
if let Some(lockup) = lockup {
if lockup.custodian != Pubkey::default() {
check_current_authority(&lockup.custodian, &custodian.pubkey())?;
check_current_authority(&[lockup.custodian], &custodian.pubkey())?;
}
} else {
return Err(CliError::RpcRequestError(format!(
@@ -2119,13 +2114,13 @@ fn get_stake_account_state(
}
pub(crate) fn check_current_authority(
account_current_authority: &Pubkey,
permitted_authorities: &[Pubkey],
provided_current_authority: &Pubkey,
) -> Result<(), CliError> {
if account_current_authority != provided_current_authority {
if !permitted_authorities.contains(provided_current_authority) {
Err(CliError::RpcRequestError(format!(
"Invalid current authority provided: {:?}, expected {:?}",
provided_current_authority, account_current_authority
"Invalid authority provided: {:?}, expected {:?}",
provided_current_authority, permitted_authorities
)))
} else {
Ok(())

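The reworked `check_current_authority` accepts a slice of permitted authorities, so one call can allow either the withdrawer or the staker (and, in the vote commands, either the authorized voter or the withdrawer). A standalone sketch of the new semantics, with the CLI error type simplified to a `String` for illustration:

use solana_sdk::pubkey::Pubkey;

// Passes if the provided authority matches any permitted authority.
fn check_current_authority(
    permitted_authorities: &[Pubkey],
    provided_current_authority: &Pubkey,
) -> Result<(), String> {
    if permitted_authorities.contains(provided_current_authority) {
        Ok(())
    } else {
        Err(format!(
            "Invalid authority provided: {:?}, expected {:?}",
            provided_current_authority, permitted_authorities
        ))
    }
}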

@@ -291,8 +291,12 @@ pub fn process_set_validator_info(
// Check existence of validator-info account
let balance = rpc_client.get_balance(&info_pubkey).unwrap_or(0);
let lamports =
rpc_client.get_minimum_balance_for_rent_exemption(ValidatorInfo::max_space() as usize)?;
let keys = vec![
(validator_info::id(), false),
(config.signers[0].pubkey(), true),
];
let data_len = ValidatorInfo::max_space() + ConfigKeys::serialized_size(keys.clone());
let lamports = rpc_client.get_minimum_balance_for_rent_exemption(data_len as usize)?;
let signers = if balance == 0 {
if info_pubkey != info_keypair.pubkey() {
@@ -308,10 +312,7 @@ pub fn process_set_validator_info(
};
let build_message = |lamports| {
let keys = vec![
(validator_info::id(), false),
(config.signers[0].pubkey(), true),
];
let keys = keys.clone();
if balance == 0 {
println!(
"Publishing info for Validator {:?}",


@@ -910,7 +910,10 @@ pub fn process_vote_authorize(
"Invalid vote account state; no authorized voters found".to_string(),
)
})?;
check_current_authority(&current_authorized_voter, &authorized.pubkey())?;
check_current_authority(
&[current_authorized_voter, vote_state.authorized_withdrawer],
&authorized.pubkey(),
)?;
if let Some(signer) = new_authorized_signer {
if signer.is_interactive() {
return Err(CliError::BadParameter(format!(
@@ -927,7 +930,7 @@ pub fn process_vote_authorize(
(new_authorized_pubkey, "new_authorized_pubkey".to_string()),
)?;
if let Some(vote_state) = vote_state {
check_current_authority(&vote_state.authorized_withdrawer, &authorized.pubkey())?
check_current_authority(&[vote_state.authorized_withdrawer], &authorized.pubkey())?
}
}
}


@@ -37,9 +37,12 @@ use {
stake,
system_instruction::{self, SystemError},
system_program,
transaction::Transaction,
transaction::{Transaction, VersionedTransaction},
},
solana_transaction_status::{
EncodableWithMeta, EncodedConfirmedTransactionWithStatusMeta, EncodedTransaction,
TransactionBinaryEncoding, UiTransactionEncoding,
},
solana_transaction_status::{Encodable, EncodedTransaction, UiTransactionEncoding},
std::{fmt::Write as FmtWrite, fs::File, io::Write, sync::Arc},
};
@@ -189,7 +192,7 @@ impl WalletSubCommands for App<'_, '_> {
Arg::with_name("encoding")
.index(2)
.value_name("ENCODING")
.possible_values(&["base58", "base64"]) // Subset of `UiTransactionEncoding` enum
.possible_values(&["base58", "base64"]) // Variants of `TransactionBinaryEncoding` enum
.default_value("base58")
.takes_value(true)
.required(true)
@@ -273,11 +276,14 @@ impl WalletSubCommands for App<'_, '_> {
}
fn resolve_derived_address_program_id(matches: &ArgMatches<'_>, arg_name: &str) -> Option<Pubkey> {
matches.value_of(arg_name).and_then(|v| match v {
"NONCE" => Some(system_program::id()),
"STAKE" => Some(stake::program::id()),
"VOTE" => Some(solana_vote_program::id()),
_ => pubkey_of(matches, arg_name),
matches.value_of(arg_name).and_then(|v| {
let upper = v.to_ascii_uppercase();
match upper.as_str() {
"NONCE" | "SYSTEM" => Some(system_program::id()),
"STAKE" => Some(stake::program::id()),
"VOTE" => Some(solana_vote_program::id()),
_ => pubkey_of(matches, arg_name),
}
})
}
@@ -338,13 +344,13 @@ pub fn parse_balance(
pub fn parse_decode_transaction(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let blob = value_t_or_exit!(matches, "transaction", String);
let encoding = match matches.value_of("encoding").unwrap() {
"base58" => UiTransactionEncoding::Base58,
"base64" => UiTransactionEncoding::Base64,
let binary_encoding = match matches.value_of("encoding").unwrap() {
"base58" => TransactionBinaryEncoding::Base58,
"base64" => TransactionBinaryEncoding::Base64,
_ => unreachable!(),
};
let encoded_transaction = EncodedTransaction::Binary(blob, encoding);
let encoded_transaction = EncodedTransaction::Binary(blob, binary_encoding);
if let Some(transaction) = encoded_transaction.decode() {
Ok(CliCommandInfo {
command: CliCommand::DecodeTransaction(transaction),
@@ -556,22 +562,25 @@ pub fn process_confirm(
RpcTransactionConfig {
encoding: Some(UiTransactionEncoding::Base64),
commitment: Some(CommitmentConfig::confirmed()),
max_supported_transaction_version: Some(0),
},
) {
Ok(confirmed_transaction) => {
let decoded_transaction = confirmed_transaction
.transaction
.transaction
.decode()
.expect("Successful decode");
let json_transaction =
decoded_transaction.encode(UiTransactionEncoding::Json);
let EncodedConfirmedTransactionWithStatusMeta {
block_time,
slot,
transaction: transaction_with_meta,
} = confirmed_transaction;
let decoded_transaction =
transaction_with_meta.transaction.decode().unwrap();
let json_transaction = decoded_transaction.json_encode();
transaction = Some(CliTransaction {
transaction: json_transaction,
meta: confirmed_transaction.transaction.meta,
block_time: confirmed_transaction.block_time,
slot: Some(confirmed_transaction.slot),
meta: transaction_with_meta.meta,
block_time,
slot: Some(slot),
decoded_transaction,
prefix: " ".to_string(),
sigverify_status: vec![],
@@ -603,11 +612,14 @@ pub fn process_confirm(
}
#[allow(clippy::unnecessary_wraps)]
pub fn process_decode_transaction(config: &CliConfig, transaction: &Transaction) -> ProcessResult {
pub fn process_decode_transaction(
config: &CliConfig,
transaction: &VersionedTransaction,
) -> ProcessResult {
let sigverify_status = CliSignatureVerificationStatus::verify_transaction(transaction);
let decode_transaction = CliTransaction {
decoded_transaction: transaction.clone(),
transaction: transaction.encode(UiTransactionEncoding::Json),
transaction: transaction.json_encode(),
meta: None,
block_time: None,
slot: None,

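With the move to `TransactionBinaryEncoding` and `VersionedTransaction`, decoding a pasted blob in `parse_decode_transaction` reduces to the pattern below. A minimal sketch; the `decode_blob` helper is illustrative and error handling is collapsed to `Option`:

use {
    solana_sdk::transaction::VersionedTransaction,
    solana_transaction_status::{EncodedTransaction, TransactionBinaryEncoding},
};

// Decode a base58-encoded wire transaction into a VersionedTransaction.
fn decode_blob(blob: String) -> Option<VersionedTransaction> {
    EncodedTransaction::Binary(blob, TransactionBinaryEncoding::Base58).decode()
}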

@@ -62,6 +62,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@@ -91,6 +92,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
process_command(&config).unwrap();
let account1 = rpc_client
@@ -118,6 +120,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
skip_fee_check: false,
};
process_command(&config).unwrap_err();
@@ -127,6 +130,7 @@ fn test_cli_program_deploy_non_upgradeable() {
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: true,
skip_fee_check: false,
};
process_command(&config).unwrap();
let account2 = rpc_client
@@ -193,6 +197,7 @@ fn test_cli_program_deploy_no_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@@ -218,6 +223,7 @@ fn test_cli_program_deploy_no_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap_err();
}
@@ -278,6 +284,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@@ -325,6 +332,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -366,6 +374,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
process_command(&config).unwrap();
let program_account = rpc_client.get_account(&program_pubkey).unwrap();
@@ -420,6 +429,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
let program_account = rpc_client.get_account(&program_pubkey).unwrap();
@@ -494,6 +504,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap_err();
@@ -509,6 +520,7 @@ fn test_cli_program_deploy_with_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -611,6 +623,7 @@ fn test_cli_program_close_program() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
process_command(&config).unwrap();
@@ -695,6 +708,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: None,
buffer_authority_signer_index: None,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@@ -729,6 +743,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: None,
max_len: Some(max_len),
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -790,6 +805,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -827,6 +843,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: None,
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -899,6 +916,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: None,
buffer_authority_signer_index: None,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
@@ -938,6 +956,7 @@ fn test_cli_program_write_buffer() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: None,
max_len: None, //Some(max_len),
skip_fee_check: false,
});
process_command(&config).unwrap();
config.signers = vec![&keypair, &buffer_keypair];
@@ -951,6 +970,7 @@ fn test_cli_program_write_buffer() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let error = process_command(&config).unwrap_err();
@@ -1008,6 +1028,7 @@ fn test_cli_program_set_buffer_authority() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: None,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
@@ -1123,6 +1144,7 @@ fn test_cli_program_mismatch_buffer_authority() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
@@ -1145,6 +1167,7 @@ fn test_cli_program_mismatch_buffer_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap_err();
@@ -1160,6 +1183,7 @@ fn test_cli_program_mismatch_buffer_authority() {
upgrade_authority_signer_index: 1,
is_final: true,
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
}
@@ -1216,6 +1240,7 @@ fn test_cli_program_show() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();
@@ -1275,6 +1300,7 @@ fn test_cli_program_show() {
upgrade_authority_signer_index: 1,
is_final: false,
max_len: Some(max_len),
skip_fee_check: false,
});
config.output_format = OutputFormat::JsonCompact;
let min_slot = rpc_client.get_slot().unwrap();
@@ -1401,6 +1427,7 @@ fn test_cli_program_dump() {
buffer_pubkey: Some(buffer_keypair.pubkey()),
buffer_authority_signer_index: Some(2),
max_len: None,
skip_fee_check: false,
});
process_command(&config).unwrap();


@@ -1,6 +1,6 @@
[package]
name = "solana-client-test"
version = "1.10.0"
version = "1.10.5"
description = "Solana RPC Test"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -8,30 +8,31 @@ license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-client-test"
edition = "2021"
publish = false
[dependencies]
futures-util = "0.3.19"
serde_json = "1.0.78"
serial_test = "0.5.1"
solana-client = { path = "../client", version = "=1.10.0" }
solana-ledger = { path = "../ledger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.10.0" }
solana-metrics = { path = "../metrics", version = "=1.10.0" }
solana-perf = { path = "../perf", version = "=1.10.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.0" }
solana-rpc = { path = "../rpc", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
futures-util = "0.3.21"
serde_json = "1.0.79"
serial_test = "0.6.0"
solana-client = { path = "../client", version = "=1.10.5" }
solana-ledger = { path = "../ledger", version = "=1.10.5" }
solana-measure = { path = "../measure", version = "=1.10.5" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.10.5" }
solana-metrics = { path = "../metrics", version = "=1.10.5" }
solana-perf = { path = "../perf", version = "=1.10.5" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.5" }
solana-rpc = { path = "../rpc", version = "=1.10.5" }
solana-runtime = { path = "../runtime", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-streamer = { path = "../streamer", version = "=1.10.5" }
solana-test-validator = { path = "../test-validator", version = "=1.10.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.5" }
solana-version = { path = "../version", version = "=1.10.5" }
systemstat = "0.1.10"
tokio = { version = "1", features = ["full"] }
[dev-dependencies]
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.5" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -15,7 +15,7 @@ use {
solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
solana_rpc::{
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
rpc::create_test_transactions_and_populate_blockstore,
rpc::{create_test_transaction_entries, populate_blockstore_for_tests},
rpc_pubsub_service::{PubSubConfig, PubSubService},
rpc_subscriptions::RpcSubscriptions,
},
@@ -36,7 +36,9 @@ use {
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidator,
solana_transaction_status::{ConfirmedBlock, TransactionDetails, UiTransactionEncoding},
solana_transaction_status::{
BlockEncodingOptions, ConfirmedBlock, TransactionDetails, UiTransactionEncoding,
},
std::{
collections::HashSet,
net::{IpAddr, SocketAddr},
@@ -230,9 +232,12 @@ fn test_block_subscription() {
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
bank.transfer(rent_exempt_amount, &alice, &keypair2.pubkey())
.unwrap();
let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
vec![&alice, &keypair1, &keypair2, &keypair3],
0,
populate_blockstore_for_tests(
create_test_transaction_entries(
vec![&alice, &keypair1, &keypair2, &keypair3],
bank.clone(),
)
.0,
bank,
blockstore.clone(),
max_complete_transaction_status_slot,
@@ -270,6 +275,7 @@ fn test_block_subscription() {
encoding: Some(UiTransactionEncoding::Json),
transaction_details: Some(TransactionDetails::Signatures),
show_rewards: None,
max_supported_transaction_version: None,
}),
)
.unwrap();
@@ -281,14 +287,17 @@ fn test_block_subscription() {
match maybe_actual {
Ok(actual) => {
let versioned_block = blockstore.get_complete_block(slot, false).unwrap();
let legacy_block = ConfirmedBlock::from(versioned_block)
.into_legacy_block()
let confirmed_block = ConfirmedBlock::from(versioned_block);
let block = confirmed_block
.encode_with_options(
UiTransactionEncoding::Json,
BlockEncodingOptions {
transaction_details: TransactionDetails::Signatures,
show_rewards: false,
max_supported_transaction_version: None,
},
)
.unwrap();
let block = legacy_block.configure(
UiTransactionEncoding::Json,
TransactionDetails::Signatures,
false,
);
assert_eq!(actual.value.slot, slot);
assert!(block.eq(&actual.value.block.unwrap()));
}


@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.10.0"
version = "1.10.5"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,42 +10,52 @@ license = "Apache-2.0"
edition = "2021"
[dependencies]
async-mutex = "1.4.0"
async-trait = "0.1.52"
base64 = "0.13.0"
bincode = "1.3.3"
bs58 = "0.4.0"
bytes = "1.1.0"
clap = "2.33.0"
crossbeam-channel = "0.5"
futures-util = "0.3.19"
futures = "0.3"
futures-util = "0.3.21"
indicatif = "0.16.2"
itertools = "0.10.2"
jsonrpc-core = "18.0.0"
lazy_static = "1.4.0"
log = "0.4.14"
quinn = "0.8.0"
rand = "0.7.0"
rand_chacha = "0.2.2"
rayon = "1.5.1"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "1.0.5"
reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "rustls-tls", "json"] }
rustls = { version = "0.20.2", features = ["dangerous_configuration"] }
semver = "1.0.6"
serde = "1.0.136"
serde_derive = "1.0.103"
serde_json = "1.0.78"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
solana-faucet = { path = "../faucet", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
serde_json = "1.0.79"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.5" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.5" }
solana-faucet = { path = "../faucet", version = "=1.10.5" }
solana-measure = { path = "../measure", version = "=1.10.5" }
solana-net-utils = { path = "../net-utils", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-streamer = { path = "../streamer", version = "=1.10.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.5" }
solana-version = { path = "../version", version = "=1.10.5" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.5" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-stream = "0.1.8"
tokio-tungstenite = { version = "0.16.0", features = ["rustls-tls-webpki-roots"] }
tungstenite = { version = "0.16.0", features = ["rustls-tls-webpki-roots"] }
tokio-tungstenite = { version = "0.17.1", features = ["rustls-tls-webpki-roots"] }
tungstenite = { version = "0.17.2", features = ["rustls-tls-webpki-roots"] }
url = "2.2.2"
[dev-dependencies]
assert_matches = "1.5.0"
jsonrpc-http-server = "18.0.0"
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.5" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,7 @@
pub use reqwest;
use {
crate::{rpc_request, rpc_response},
quinn::{ConnectError, WriteError},
solana_faucet::faucet::FaucetError,
solana_sdk::{
signature::SignerError, transaction::TransactionError, transport::TransportError,
@@ -72,6 +73,18 @@ impl From<ClientErrorKind> for TransportError {
}
}
impl From<WriteError> for ClientErrorKind {
fn from(write_error: WriteError) -> Self {
Self::Custom(format!("{:?}", write_error))
}
}
impl From<ConnectError> for ClientErrorKind {
fn from(connect_error: ConnectError) -> Self {
Self::Custom(format!("{:?}", connect_error))
}
}
#[derive(Error, Debug)]
#[error("{kind}")]
pub struct ClientError {


@@ -0,0 +1,241 @@
use {
crate::{
quic_client::QuicTpuConnection, tpu_connection::TpuConnection, udp_client::UdpTpuConnection,
},
lazy_static::lazy_static,
solana_sdk::{transaction::VersionedTransaction, transport::TransportError},
std::{
collections::{hash_map::Entry, BTreeMap, HashMap},
net::{SocketAddr, UdpSocket},
sync::{Arc, Mutex},
},
};
// Should be non-zero
static MAX_CONNECTIONS: usize = 64;
#[derive(Clone)]
enum Connection {
Udp(Arc<UdpTpuConnection>),
Quic(Arc<QuicTpuConnection>),
}
struct ConnMap {
// Keeps track of the connection associated with an addr and the last time it was used
map: HashMap<SocketAddr, (Connection, u64)>,
// Helps to find the least recently used connection. The search and inserts are O(log(n))
// but since we're bounding the size of the collections, this should be constant
// (and hopefully negligible) time. In theory, we can do this in constant time
// with a queue implemented as a doubly-linked list (and all the
// HashMap entries holding a "pointer" to the corresponding linked-list node),
// so we can push, pop and bump a used connection back to the end of the queue in O(1) time, but
// that seems non-"Rust-y" and low bang/buck. This is still pretty terrible though...
last_used_times: BTreeMap<u64, SocketAddr>,
ticks: u64,
use_quic: bool,
}
impl ConnMap {
pub fn new() -> Self {
Self {
map: HashMap::new(),
last_used_times: BTreeMap::new(),
ticks: 0,
use_quic: false,
}
}
pub fn set_use_quic(&mut self, use_quic: bool) {
self.use_quic = use_quic;
}
}
lazy_static! {
static ref CONNECTION_MAP: Mutex<ConnMap> = Mutex::new(ConnMap::new());
}
pub fn set_use_quic(use_quic: bool) {
let mut map = (*CONNECTION_MAP).lock().unwrap();
map.set_use_quic(use_quic);
}
#[allow(dead_code)]
// TODO: see https://github.com/solana-labs/solana/issues/23661
// remove lazy_static and optimize and refactor this
fn get_connection(addr: &SocketAddr) -> Connection {
let mut map = (*CONNECTION_MAP).lock().unwrap();
let ticks = map.ticks;
let use_quic = map.use_quic;
let (conn, target_ticks) = match map.map.entry(*addr) {
Entry::Occupied(mut entry) => {
let mut pair = entry.get_mut();
let old_ticks = pair.1;
pair.1 = ticks;
(pair.0.clone(), old_ticks)
}
Entry::Vacant(entry) => {
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
// TODO: see https://github.com/solana-labs/solana/issues/23659
// make it configurable (e.g. via the command line) whether to use UDP or Quic
let conn = if use_quic {
Connection::Quic(Arc::new(QuicTpuConnection::new(send_socket, *addr)))
} else {
Connection::Udp(Arc::new(UdpTpuConnection::new(send_socket, *addr)))
};
entry.insert((conn.clone(), ticks));
(conn, ticks)
}
};
let num_connections = map.map.len();
if num_connections > MAX_CONNECTIONS {
let (old_ticks, target_addr) = {
let (old_ticks, target_addr) = map.last_used_times.iter().next().unwrap();
(*old_ticks, *target_addr)
};
map.map.remove(&target_addr);
map.last_used_times.remove(&old_ticks);
}
if target_ticks != ticks {
map.last_used_times.remove(&target_ticks);
}
map.last_used_times.insert(ticks, *addr);
map.ticks += 1;
conn
}
// TODO: see https://github.com/solana-labs/solana/issues/23851
// use enum_dispatch and get rid of this tedious code.
// The main blocker to using enum_dispatch right now is that
// it doesn't work with static methods like TpuConnection::new
// which is used by thin_client. This will be eliminated soon
// once thin_client is moved to using this connection cache.
// Once that is done, we will migrate to using enum_dispatch
// This will be done in a followup to
// https://github.com/solana-labs/solana/pull/23817
pub fn send_wire_transaction_batch(
packets: &[&[u8]],
addr: &SocketAddr,
) -> Result<(), TransportError> {
let conn = get_connection(addr);
match conn {
Connection::Udp(conn) => conn.send_wire_transaction_batch(packets),
Connection::Quic(conn) => conn.send_wire_transaction_batch(packets),
}
}
pub fn send_wire_transaction(
wire_transaction: &[u8],
addr: &SocketAddr,
) -> Result<(), TransportError> {
let conn = get_connection(addr);
match conn {
Connection::Udp(conn) => conn.send_wire_transaction(wire_transaction),
Connection::Quic(conn) => conn.send_wire_transaction(wire_transaction),
}
}
pub fn serialize_and_send_transaction(
transaction: &VersionedTransaction,
addr: &SocketAddr,
) -> Result<(), TransportError> {
let conn = get_connection(addr);
match conn {
Connection::Udp(conn) => conn.serialize_and_send_transaction(transaction),
Connection::Quic(conn) => conn.serialize_and_send_transaction(transaction),
}
}
pub fn par_serialize_and_send_transaction_batch(
transactions: &[VersionedTransaction],
addr: &SocketAddr,
) -> Result<(), TransportError> {
let conn = get_connection(addr);
match conn {
Connection::Udp(conn) => conn.par_serialize_and_send_transaction_batch(transactions),
Connection::Quic(conn) => conn.par_serialize_and_send_transaction_batch(transactions),
}
}
#[cfg(test)]
mod tests {
use {
crate::{
connection_cache::{get_connection, Connection, CONNECTION_MAP, MAX_CONNECTIONS},
tpu_connection::TpuConnection,
},
rand::{Rng, SeedableRng},
rand_chacha::ChaChaRng,
std::net::{IpAddr, SocketAddr},
};
fn get_addr(rng: &mut ChaChaRng) -> SocketAddr {
let a = rng.gen_range(1, 255);
let b = rng.gen_range(1, 255);
let c = rng.gen_range(1, 255);
let d = rng.gen_range(1, 255);
let addr_str = format!("{}.{}.{}.{}:80", a, b, c, d);
addr_str.parse().expect("Invalid address")
}
fn ip(conn: Connection) -> IpAddr {
match conn {
Connection::Udp(conn) => conn.tpu_addr().ip(),
Connection::Quic(conn) => conn.tpu_addr().ip(),
}
}
#[test]
fn test_connection_cache() {
// Allow the test to run deterministically
// with the same pseudorandom sequence between runs
// and on different platforms - the cryptographic security
// property isn't important here but ChaChaRng provides a way
// to get the same pseudorandom sequence on different platforms
let mut rng = ChaChaRng::seed_from_u64(42);
// Generate a bunch of random addresses and create TPUConnections to them
// Since TPUConnection::new is infallible, it shouldn't matter whether or not
// we can actually connect to those addresses - TPUConnection implementations should either
// be lazy and not connect until first use or handle connection errors somehow
// (without crashing, as would be required in a real practical validator)
let first_addr = get_addr(&mut rng);
assert!(ip(get_connection(&first_addr)) == first_addr.ip());
let addrs = (0..MAX_CONNECTIONS)
.into_iter()
.map(|_| {
let addr = get_addr(&mut rng);
get_connection(&addr);
addr
})
.collect::<Vec<_>>();
{
let map = (*CONNECTION_MAP).lock().unwrap();
addrs.iter().for_each(|a| {
let conn = map.map.get(a).expect("Address not found");
assert!(a.ip() == ip(conn.0.clone()));
});
assert!(map.map.get(&first_addr).is_none());
}
// Test that get_connection updates which connection is next up for eviction
// when an existing connection is used. Initially, addrs[0] should be next up for eviction, since
// it was the earliest added. But we do get_connection(&addrs[0]), thereby using
// that connection, and bumping it back to the end of the queue. So addrs[1] should be
// the next up for eviction. So we add a new connection, and test that addrs[0] is not
// evicted but addrs[1] is.
get_connection(&addrs[0]);
get_connection(&get_addr(&mut rng));
let map = (*CONNECTION_MAP).lock().unwrap();
assert!(map.map.get(&addrs[0]).is_some());
assert!(map.map.get(&addrs[1]).is_none());
}
}

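Because the cache exposes free functions rather than a client object, callers opt into QUIC once and then send to a TPU address; the least recently used connection is evicted once MAX_CONNECTIONS is exceeded. A minimal usage sketch of the functions defined above; the address and payload are placeholders:

use {
    solana_client::connection_cache::{send_wire_transaction, set_use_quic},
    solana_sdk::transport::TransportError,
    std::net::SocketAddr,
};

fn send_example() -> Result<(), TransportError> {
    // Opt in to the QUIC client; the default is UDP.
    set_use_quic(true);

    // Placeholder TPU address and already-serialized transaction bytes.
    let tpu_addr: SocketAddr = "127.0.0.1:8003".parse().unwrap();
    let wire_transaction: Vec<u8> = vec![];

    // The cache creates (or reuses) a connection for this address.
    send_wire_transaction(&wire_transaction, &tpu_addr)
}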

@@ -4,12 +4,14 @@ extern crate serde_derive;
pub mod blockhash_query;
pub mod client_error;
pub mod connection_cache;
pub(crate) mod http_sender;
pub(crate) mod mock_sender;
pub mod nonblocking;
pub mod nonce_utils;
pub mod perf_utils;
pub mod pubsub_client;
pub mod quic_client;
pub mod rpc_cache;
pub mod rpc_client;
pub mod rpc_config;
@@ -18,11 +20,13 @@ pub mod rpc_deprecated_config;
pub mod rpc_filter;
pub mod rpc_request;
pub mod rpc_response;
pub(crate) mod rpc_sender;
pub mod rpc_sender;
pub mod spinner;
pub mod thin_client;
pub mod tpu_client;
pub mod tpu_connection;
pub mod transaction_executor;
pub mod udp_client;
pub mod mock_sender_for_cli {
/// Magic `SIGNATURE` value used by `solana-cli` unit tests.


@@ -28,13 +28,13 @@ use {
pubkey::Pubkey,
signature::Signature,
sysvar::epoch_schedule::EpochSchedule,
transaction::{self, Transaction, TransactionError},
transaction::{self, Transaction, TransactionError, TransactionVersion},
},
solana_transaction_status::{
EncodedConfirmedBlock, EncodedConfirmedTransactionWithStatusMeta, EncodedTransaction,
EncodedTransactionWithStatusMeta, Rewards, TransactionConfirmationStatus,
TransactionStatus, UiCompiledInstruction, UiMessage, UiRawMessage, UiTransaction,
UiTransactionEncoding, UiTransactionStatusMeta,
EncodedTransactionWithStatusMeta, Rewards, TransactionBinaryEncoding,
TransactionConfirmationStatus, TransactionStatus, UiCompiledInstruction, UiMessage,
UiRawMessage, UiTransaction, UiTransactionStatusMeta,
},
solana_version::Version,
std::{collections::HashMap, net::SocketAddr, str::FromStr, sync::RwLock},
@@ -192,6 +192,7 @@ impl RpcSender for MockSender {
"getTransaction" => serde_json::to_value(EncodedConfirmedTransactionWithStatusMeta {
slot: 2,
transaction: EncodedTransactionWithStatusMeta {
version: Some(TransactionVersion::LEGACY),
transaction: EncodedTransaction::Json(
UiTransaction {
signatures: vec!["3AsdoALgZFuq2oUVWrDYhg2pNeaLJKPLf8hU2mQ6U8qJxeJ6hsrPVpMn9ma39DtfYCrDQSvngWRP8NnTpEhezJpE".to_string()],
@@ -213,6 +214,7 @@ impl RpcSender for MockSender {
accounts: vec![0, 1],
data: "3Bxs49DitAvXtoDR".to_string(),
}],
address_table_lookups: None,
})
}),
meta: Some(UiTransactionStatusMeta {
@@ -226,6 +228,7 @@ impl RpcSender for MockSender {
pre_token_balances: None,
post_token_balances: None,
rewards: None,
loaded_addresses: None,
}),
},
block_time: Some(1628633791),
@@ -378,9 +381,10 @@ impl RpcSender for MockSender {
pLHxcaShD81xBNaFDgnA2nkkdHnKtZt4hVSfKAmw3VRZbjrZ7L2fKZBx21CwsG\
hD6onjM2M3qZW5C8J6d1pj41MxKmZgPBSha3MyKkNLkAGFASK"
.to_string(),
UiTransactionEncoding::Base58,
TransactionBinaryEncoding::Base58,
),
meta: None,
version: Some(TransactionVersion::LEGACY),
}],
rewards: Rewards::new(),
block_time: None,


@@ -238,6 +238,7 @@ impl PubsubClient {
},
Message::Pong(_data) => continue,
Message::Close(_frame) => break,
Message::Frame(_frame) => continue,
};


@@ -6,6 +6,7 @@
//!
//! [JSON-RPC]: https://www.jsonrpc.org/specification
pub use crate::mock_sender::Mocks;
#[allow(deprecated)]
use crate::rpc_deprecated_config::{
RpcConfirmedBlockConfig, RpcConfirmedTransactionConfig,
@@ -15,7 +16,7 @@ use {
crate::{
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
http_sender::HttpSender,
mock_sender::{MockSender, Mocks},
mock_sender::MockSender,
rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClientConfig},
rpc_config::{RpcAccountInfoConfig, *},
rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter},
@@ -52,10 +53,9 @@ use {
cmp::min,
net::SocketAddr,
str::FromStr,
sync::RwLock,
time::{Duration, Instant},
},
tokio::time::sleep,
tokio::{sync::RwLock, time::sleep},
};
/// A client of a remote Solana node.
@@ -147,9 +147,9 @@ impl RpcClient {
///
/// This is the basic constructor, allowing construction with any type of
/// `RpcSender`. Most applications should use one of the other constructors,
/// such as [`new`] and [`new_mock`], which create an `RpcClient`
/// encapsulating an [`HttpSender`] and [`MockSender`] respectively.
pub(crate) fn new_sender<T: RpcSender + Send + Sync + 'static>(
/// such as [`RpcClient::new`], [`RpcClient::new_with_commitment`] or
/// [`RpcClient::new_with_timeout`].
pub fn new_sender<T: RpcSender + Send + Sync + 'static>(
sender: T,
config: RpcClientConfig,
) -> Self {
@@ -315,8 +315,34 @@ impl RpcClient {
/// Create a mock `RpcClient`.
///
/// See the [`MockSender`] documentation for an explanation of
/// how it treats the `url` argument.
/// A mock `RpcClient` contains an implementation of [`RpcSender`] that does
/// not use the network, and instead returns synthetic responses, for use in
/// tests.
///
/// It is primarily for internal use, with limited customizability, and
/// behaviors determined by internal Solana test cases. New users should
/// consider implementing `RpcSender` themselves and constructing
/// `RpcClient` with [`RpcClient::new_sender`] to get mock behavior.
///
/// Unless directed otherwise, a mock `RpcClient` will generally return a
/// reasonable default response to any request, at least for [`RpcRequest`]
/// values for which responses have been implemented.
///
/// This mock can be customized by changing the `url` argument, which is not
/// actually a URL, but a simple string directive that changes the mock
/// behavior in specific scenarios:
///
/// - It is customary to set the `url` to "succeeds" for mocks that should
/// return successfully, though this value is not actually interpreted.
///
/// - If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
///
/// - Other possible values of `url` are specific to different `RpcRequest`
/// values. Read the implementation of (non-public) `MockSender` for
/// details.
///
/// The [`RpcClient::new_mock_with_mocks`] function offers further
/// customization options.
///
/// # Examples
///
@@ -342,8 +368,43 @@ impl RpcClient {
/// Create a mock `RpcClient`.
///
/// See the [`MockSender`] documentation for an explanation of how it treats
/// the `url` argument.
/// A mock `RpcClient` contains an implementation of [`RpcSender`] that does
/// not use the network, and instead returns synthetic responses, for use in
/// tests.
///
/// It is primarily for internal use, with limited customizability, and
/// behaviors determined by internal Solana test cases. New users should
/// consider implementing `RpcSender` themselves and constructing
/// `RpcClient` with [`RpcClient::new_sender`] to get mock behavior.
///
/// Unless directed otherwise, a mock `RpcClient` will generally return a
/// reasonable default response to any request, at least for [`RpcRequest`]
/// values for which responses have been implemented.
///
/// This mock can be customized in two ways:
///
/// 1) By changing the `url` argument, which is not actually a URL, but a
/// simple string directive that changes the mock behavior in specific
/// scenarios.
///
/// It is customary to set the `url` to "succeeds" for mocks that should
/// return successfully, though this value is not actually interpreted.
///
/// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
///
/// Other possible values of `url` are specific to different `RpcRequest`
/// values. Read the implementation of `MockSender` (which is non-public)
/// for details.
///
/// 2) Custom responses can be configured by providing [`Mocks`]. This type
/// is a [`HashMap`] from [`RpcRequest`] to a JSON [`Value`] response.
/// Any entries in this map override the default behavior for the given
/// request.
///
/// The [`RpcClient::new_mock_with_mocks`] function offers further
/// customization options.
///
/// [`HashMap`]: std::collections::HashMap
///
/// # Examples
///
@@ -442,12 +503,12 @@ impl RpcClient {
}
async fn get_node_version(&self) -> Result<semver::Version, RpcError> {
let r_node_version = self.node_version.read().unwrap();
let r_node_version = self.node_version.read().await;
if let Some(version) = &*r_node_version {
Ok(version.clone())
} else {
drop(r_node_version);
let mut w_node_version = self.node_version.write().unwrap();
let mut w_node_version = self.node_version.write().await;
let node_version = self.get_version().await.map_err(|e| {
RpcError::RpcRequestError(format!("cluster version query failed: {}", e))
})?;
@@ -2412,6 +2473,7 @@ impl RpcClient {
/// transaction_details: Some(TransactionDetails::None),
/// rewards: Some(true),
/// commitment: None,
/// max_supported_transaction_version: Some(0),
/// };
/// let block = rpc_client.get_block_with_config(
/// slot,
@@ -3052,6 +3114,7 @@ impl RpcClient {
/// let config = RpcTransactionConfig {
/// encoding: Some(UiTransactionEncoding::Json),
/// commitment: Some(CommitmentConfig::confirmed()),
/// max_supported_transaction_version: Some(0),
/// };
/// let transaction = rpc_client.get_transaction_with_config(
/// &signature,

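Tying the expanded mock documentation together: the `url` directive steers the built-in responses, while a `Mocks` map overrides individual requests. A brief sketch, using the blocking wrapper for simplicity; the injected balance value is arbitrary:

use {
    serde_json::{json, Value},
    solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest},
    std::collections::HashMap,
};

fn mock_clients() {
    // Directive-based mock: "succeeds" is customary but not actually interpreted.
    let _client = RpcClient::new_mock("succeeds".to_string());

    // Mocks-based mock: override the response for one request, defaults elsewhere.
    let mut mocks: HashMap<RpcRequest, Value> = HashMap::new();
    mocks.insert(
        RpcRequest::GetBalance,
        json!({ "context": { "slot": 1 }, "value": 50 }),
    );
    let _client = RpcClient::new_mock_with_mocks("succeeds".to_string(), mocks);
}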
client/src/quic_client.rs

@@ -0,0 +1,213 @@
//! Simple client that connects to a given UDP port with the QUIC protocol and provides
//! an interface for sending transactions which is restricted by the server's flow control.
use {
crate::{client_error::ClientErrorKind, tpu_connection::TpuConnection},
async_mutex::Mutex,
futures::future::join_all,
itertools::Itertools,
quinn::{ClientConfig, Endpoint, EndpointConfig, NewConnection, WriteError},
solana_sdk::{
quic::{QUIC_MAX_CONCURRENT_STREAMS, QUIC_PORT_OFFSET},
transport::Result as TransportResult,
},
std::{
net::{SocketAddr, UdpSocket},
sync::Arc,
},
tokio::runtime::Runtime,
};
struct SkipServerVerification;
impl SkipServerVerification {
pub fn new() -> Arc<Self> {
Arc::new(Self)
}
}
impl rustls::client::ServerCertVerifier for SkipServerVerification {
fn verify_server_cert(
&self,
_end_entity: &rustls::Certificate,
_intermediates: &[rustls::Certificate],
_server_name: &rustls::ServerName,
_scts: &mut dyn Iterator<Item = &[u8]>,
_ocsp_response: &[u8],
_now: std::time::SystemTime,
) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
Ok(rustls::client::ServerCertVerified::assertion())
}
}
struct QuicClient {
runtime: Runtime,
endpoint: Endpoint,
connection: Arc<Mutex<Option<Arc<NewConnection>>>>,
addr: SocketAddr,
}
pub struct QuicTpuConnection {
client: Arc<QuicClient>,
}
impl TpuConnection for QuicTpuConnection {
fn new(client_socket: UdpSocket, tpu_addr: SocketAddr) -> Self {
let tpu_addr = SocketAddr::new(tpu_addr.ip(), tpu_addr.port() + QUIC_PORT_OFFSET);
let client = Arc::new(QuicClient::new(client_socket, tpu_addr));
Self { client }
}
fn tpu_addr(&self) -> &SocketAddr {
&self.client.addr
}
fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]>,
{
let _guard = self.client.runtime.enter();
let send_buffer = self.client.send_buffer(wire_transaction);
self.client.runtime.block_on(send_buffer)?;
Ok(())
}
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]>,
{
let _guard = self.client.runtime.enter();
let send_batch = self.client.send_batch(buffers);
self.client.runtime.block_on(send_batch)?;
Ok(())
}
}
impl QuicClient {
pub fn new(client_socket: UdpSocket, addr: SocketAddr) -> Self {
let runtime = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap();
let _guard = runtime.enter();
let crypto = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_custom_certificate_verifier(SkipServerVerification::new())
.with_no_client_auth();
let create_endpoint = QuicClient::create_endpoint(EndpointConfig::default(), client_socket);
let mut endpoint = runtime.block_on(create_endpoint);
endpoint.set_default_client_config(ClientConfig::new(Arc::new(crypto)));
Self {
runtime,
endpoint,
connection: Arc::new(Mutex::new(None)),
addr,
}
}
// If this function becomes public, it should be changed to
// not expose details of the specific Quic implementation we're using
async fn create_endpoint(config: EndpointConfig, client_socket: UdpSocket) -> Endpoint {
quinn::Endpoint::new(config, None, client_socket).unwrap().0
}
async fn _send_buffer_using_conn(
data: &[u8],
connection: &NewConnection,
) -> Result<(), WriteError> {
let mut send_stream = connection.connection.open_uni().await?;
send_stream.write_all(data).await?;
send_stream.finish().await?;
Ok(())
}
// Attempts to send data, connecting/reconnecting as necessary
// On success, returns the connection used to successfully send the data
async fn _send_buffer(&self, data: &[u8]) -> Result<Arc<NewConnection>, WriteError> {
let connection = {
let mut conn_guard = self.connection.lock().await;
let maybe_conn = (*conn_guard).clone();
match maybe_conn {
Some(conn) => conn.clone(),
None => {
let connecting = self.endpoint.connect(self.addr, "connect").unwrap();
let connection = Arc::new(connecting.await?);
*conn_guard = Some(connection.clone());
connection
}
}
};
match Self::_send_buffer_using_conn(data, &connection).await {
Ok(()) => Ok(connection),
_ => {
let connection = {
let connecting = self.endpoint.connect(self.addr, "connect").unwrap();
let connection = Arc::new(connecting.await?);
let mut conn_guard = self.connection.lock().await;
*conn_guard = Some(connection.clone());
connection
};
Self::_send_buffer_using_conn(data, &connection).await?;
Ok(connection)
}
}
}
pub async fn send_buffer<T>(&self, data: T) -> Result<(), ClientErrorKind>
where
T: AsRef<[u8]>,
{
self._send_buffer(data.as_ref()).await?;
Ok(())
}
pub async fn send_batch<T>(&self, buffers: &[T]) -> Result<(), ClientErrorKind>
where
T: AsRef<[u8]>,
{
// Start off by "testing" the connection by sending the first transaction
// This will also connect to the server if not already connected
// and reconnect and retry if the first send attempt failed
// (for example due to a timed out connection), returning an error
// or the connection that was used to successfully send the transaction.
// We will use the returned connection to send the rest of the transactions in the batch
// to avoid touching the mutex in self, and not bother reconnecting if we fail along the way
// since testing even in the ideal GCE environment has found no cases
// where reconnecting and retrying in the middle of a batch send
// (i.e. we encounter a connection error in the middle of a batch send, which presumably cannot
// be due to a timed out connection) has succeeded
if buffers.is_empty() {
return Ok(());
}
let connection = self._send_buffer(buffers[0].as_ref()).await?;
// Used to avoid dereferencing the Arc multiple times below
// by just getting a reference to the NewConnection once
let connection_ref: &NewConnection = &connection;
let chunks = buffers[1..buffers.len()]
.iter()
.chunks(QUIC_MAX_CONCURRENT_STREAMS);
let futures = chunks.into_iter().map(|buffs| {
join_all(
buffs
.into_iter()
.map(|buf| Self::_send_buffer_using_conn(buf.as_ref(), connection_ref)),
)
});
for f in futures {
f.await.into_iter().try_for_each(|res| res)?;
}
Ok(())
}
}
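A minimal sketch of the chunked fan-out pattern used by `send_batch` above, with placeholder names (`MAX_CONCURRENT` standing in for QUIC_MAX_CONCURRENT_STREAMS, `send_one` for `_send_buffer_using_conn`): each chunk's sends are awaited together, so concurrency never exceeds the chunk size.
use {futures::future::join_all, itertools::Itertools};
const MAX_CONCURRENT: usize = 128; // placeholder for QUIC_MAX_CONCURRENT_STREAMS
async fn send_one(buf: &[u8]) -> Result<(), std::io::Error> {
    // Placeholder for the real per-stream send (_send_buffer_using_conn).
    let _ = buf;
    Ok(())
}
async fn send_all(buffers: &[Vec<u8>]) -> Result<(), std::io::Error> {
    for chunk in &buffers.iter().chunks(MAX_CONCURRENT) {
        // All sends in this chunk run concurrently; the next chunk starts only
        // after every send in the current chunk has resolved.
        join_all(chunk.map(|buf| send_one(buf)))
            .await
            .into_iter()
            .try_for_each(|res| res)?;
    }
    Ok(())
}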

View File

@@ -6,13 +6,14 @@
//!
//! [JSON-RPC]: https://www.jsonrpc.org/specification
pub use crate::mock_sender::Mocks;
#[allow(deprecated)]
use crate::rpc_deprecated_config::{RpcConfirmedBlockConfig, RpcConfirmedTransactionConfig};
use {
crate::{
client_error::Result as ClientResult,
http_sender::HttpSender,
mock_sender::{MockSender, Mocks},
mock_sender::MockSender,
nonblocking::{self, rpc_client::get_rpc_request_str},
rpc_config::{RpcAccountInfoConfig, *},
rpc_request::{RpcRequest, TokenAccountsFilter},
@@ -103,8 +104,8 @@ pub struct GetConfirmedSignaturesForAddress2Config {
/// [`Processed`] commitment level. These exceptions are noted in the method
/// documentation.
///
/// [`Finalized`]: CommitmentLevel::Finalized
/// [`Processed`]: CommitmentLevel::Processed
/// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized
/// [`Processed`]: solana_sdk::commitment_config::CommitmentLevel::Processed
/// [jsonprot]: https://docs.solana.com/developing/clients/jsonrpc-api
/// [JSON-RPC]: https://www.jsonrpc.org/specification
/// [slots]: https://docs.solana.com/terminology#slot
@@ -145,6 +146,10 @@ pub struct GetConfirmedSignaturesForAddress2Config {
/// [`is_timeout`](crate::client_error::reqwest::Error::is_timeout) method
/// returns `true`. The default timeout is 30 seconds, and may be changed by
/// calling an appropriate constructor with a `timeout` parameter.
///
/// [`ClientError`]: crate::client_error::ClientError
/// [`ClientErrorKind`]: crate::client_error::ClientErrorKind
/// [`ClientErrorKind::Reqwest`]: crate::client_error::ClientErrorKind::Reqwest
pub struct RpcClient {
rpc_client: nonblocking::rpc_client::RpcClient,
runtime: Option<tokio::runtime::Runtime>,
@@ -161,9 +166,9 @@ impl RpcClient {
///
/// This is the basic constructor, allowing construction with any type of
/// `RpcSender`. Most applications should use one of the other constructors,
/// such as [`new`] and [`new_mock`], which create an `RpcClient`
/// encapsulating an [`HttpSender`] and [`MockSender`] respectively.
fn new_sender<T: RpcSender + Send + Sync + 'static>(
/// such as [`RpcClient::new`], [`RpcClient::new_with_commitment`] or
/// [`RpcClient::new_with_timeout`].
pub fn new_sender<T: RpcSender + Send + Sync + 'static>(
sender: T,
config: RpcClientConfig,
) -> Self {
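With `new_sender` now public, a client can be assembled from an explicit sender and config. A hedged sketch, assuming `HttpSender::new` and `RpcClientConfig::with_commitment` keep the shapes they have in this crate; a custom `RpcSender` implementation could be passed in the same way:
use {
    solana_client::{
        http_sender::HttpSender,
        rpc_client::{RpcClient, RpcClientConfig},
    },
    solana_sdk::commitment_config::CommitmentConfig,
};
fn confirmed_client(url: &str) -> RpcClient {
    // Roughly what RpcClient::new_with_commitment does internally, spelled out
    // through the now-public constructor.
    let sender = HttpSender::new(url.to_string());
    let config = RpcClientConfig::with_commitment(CommitmentConfig::confirmed());
    RpcClient::new_sender(sender, config)
}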
@@ -186,9 +191,10 @@ impl RpcClient {
/// "http://localhost:8899".
///
/// The client has a default timeout of 30 seconds, and a default [commitment
/// level][cl] of [`Finalized`](CommitmentLevel::Finalized).
/// level][cl] of [`Finalized`].
///
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
/// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized
///
/// # Examples
///
@@ -211,6 +217,8 @@ impl RpcClient {
/// The client has a default timeout of 30 seconds, and a user-specified
/// [`CommitmentLevel`] via [`CommitmentConfig`].
///
/// [`CommitmentLevel`]: solana_sdk::commitment_config::CommitmentLevel
///
/// # Examples
///
/// ```
@@ -233,9 +241,10 @@ impl RpcClient {
/// "http://localhost:8899".
///
/// The client has a default [commitment level][cl] of
/// [`Finalized`](CommitmentLevel::Finalized).
/// [`Finalized`].
///
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
/// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized
///
/// # Examples
///
@@ -335,8 +344,34 @@ impl RpcClient {
/// Create a mock `RpcClient`.
///
/// See the [`MockSender`] documentation for an explanation of
/// how it treats the `url` argument.
/// A mock `RpcClient` contains an implementation of [`RpcSender`] that does
/// not use the network, and instead returns synthetic responses, for use in
/// tests.
///
/// It is primarily for internal use, with limited customizability, and
/// behaviors determined by internal Solana test cases. New users should
/// consider implementing `RpcSender` themselves and constructing
/// `RpcClient` with [`RpcClient::new_sender`] to get mock behavior.
///
/// Unless directed otherwise, a mock `RpcClient` will generally return a
/// reasonable default response to any request, at least for [`RpcRequest`]
/// values for which responses have been implemented.
///
/// This mock can be customized by changing the `url` argument, which is not
/// actually a URL, but a simple string directive that changes the mock
/// behavior in specific scenarios:
///
/// - It is customary to set the `url` to "succeeds" for mocks that should
/// return successfully, though this value is not actually interpreted.
///
/// - If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
///
/// - Other possible values of `url` are specific to different `RpcRequest`
/// values. Read the implementation of (non-public) `MockSender` for
/// details.
///
/// The [`RpcClient::new_mock_with_mocks`] function offers further
/// customization options.
///
/// # Examples
///
@@ -362,8 +397,43 @@ impl RpcClient {
/// Create a mock `RpcClient`.
///
/// See the [`MockSender`] documentation for an explanation of how it treats
/// the `url` argument.
/// A mock `RpcClient` contains an implementation of [`RpcSender`] that does
/// not use the network, and instead returns synthetic responses, for use in
/// tests.
///
/// It is primarily for internal use, with limited customizability, and
/// behaviors determined by internal Solana test cases. New users should
/// consider implementing `RpcSender` themselves and constructing
/// `RpcClient` with [`RpcClient::new_sender`] to get mock behavior.
///
/// Unless directed otherwise, a mock `RpcClient` will generally return a
/// reasonable default response to any request, at least for [`RpcRequest`]
/// values for which responses have been implemented.
///
/// This mock can be customized in two ways:
///
/// 1) By changing the `url` argument, which is not actually a URL, but a
/// simple string directive that changes the mock behavior in specific
/// scenarios.
///
/// It is customary to set the `url` to "succeeds" for mocks that should
/// return successfully, though this value is not actually interpreted.
///
/// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
///
/// Other possible values of `url` are specific to different `RpcRequest`
/// values. Read the implementation of `MockSender` (which is non-public)
/// for details.
///
/// 2) Custom responses can be configured by providing [`Mocks`]. This type
/// is a [`HashMap`] from [`RpcRequest`] to a JSON [`Value`] response.
/// Any entries in this map override the default behavior for the given
/// request.
///
/// The [`RpcClient::new_mock_with_mocks`] function offers further
/// customization options.
///
/// [`HashMap`]: std::collections::HashMap
///
/// # Examples
///
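A short sketch of the `Mocks` customization described above, assuming the response JSON for `RpcRequest::GetBalance` is shaped like the real server's result (the function name and values are illustrative only):
use {
    serde_json::json,
    solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest},
    std::collections::HashMap,
};
fn mock_with_fixed_balance() -> RpcClient {
    let mut mocks = HashMap::new();
    // Override the default getBalance response; other requests keep their defaults.
    mocks.insert(
        RpcRequest::GetBalance,
        json!({ "context": { "slot": 1 }, "value": 50 }),
    );
    RpcClient::new_mock_with_mocks("succeeds".to_string(), mocks)
}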
@@ -397,9 +467,10 @@ impl RpcClient {
/// Create an HTTP `RpcClient` from a [`SocketAddr`].
///
/// The client has a default timeout of 30 seconds, and a default [commitment
/// level][cl] of [`Finalized`](CommitmentLevel::Finalized).
/// level][cl] of [`Finalized`].
///
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
/// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized
///
/// # Examples
///
@@ -420,6 +491,8 @@ impl RpcClient {
/// The client has a default timeout of 30 seconds, and a user-specified
/// [`CommitmentLevel`] via [`CommitmentConfig`].
///
/// [`CommitmentLevel`]: solana_sdk::commitment_config::CommitmentLevel
///
/// # Examples
///
/// ```
@@ -442,9 +515,10 @@ impl RpcClient {
/// Create an HTTP `RpcClient` from a [`SocketAddr`] with specified timeout.
///
/// The client has a default [commitment level][cl] of [`Finalized`](CommitmentLevel::Finalized).
/// The client has a default [commitment level][cl] of [`Finalized`].
///
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
/// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized
///
/// # Examples
///
@@ -469,7 +543,9 @@ impl RpcClient {
/// determines how thoroughly committed a transaction must be when waiting
/// for its confirmation or otherwise checking for confirmation. If not
/// specified, the default commitment level is
/// [`Finalized`](CommitmentLevel::Finalized).
/// [`Finalized`].
///
/// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized
///
/// The default commitment level is overridden when calling methods that
/// explicitly provide a [`CommitmentConfig`], like
@@ -503,7 +579,8 @@ impl RpcClient {
/// containing an [`RpcResponseError`] with `code` set to
/// [`JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY`].
///
/// [`RpcResponseError`]: RpcError::RpcResponseError
/// [`RpcError`]: crate::rpc_request::RpcError
/// [`RpcResponseError`]: crate::rpc_request::RpcError::RpcResponseError
/// [`JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE
/// [`JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE
/// [`JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY
@@ -616,7 +693,8 @@ impl RpcClient {
/// containing an [`RpcResponseError`] with `code` set to
/// [`JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY`].
///
/// [`RpcResponseError`]: RpcError::RpcResponseError
/// [`RpcError`]: crate::rpc_request::RpcError
/// [`RpcResponseError`]: crate::rpc_request::RpcError::RpcResponseError
/// [`JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE
/// [`JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE
/// [`JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY
@@ -690,7 +768,8 @@ impl RpcClient {
/// containing an [`RpcResponseError`] with `code` set to
/// [`JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY`].
///
/// [`RpcResponseError`]: RpcError::RpcResponseError
/// [`RpcError`]: crate::rpc_request::RpcError
/// [`RpcResponseError`]: crate::rpc_request::RpcError::RpcResponseError
/// [`JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE
/// [`JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE
/// [`JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY
@@ -2034,6 +2113,7 @@ impl RpcClient {
/// transaction_details: Some(TransactionDetails::None),
/// rewards: Some(true),
/// commitment: None,
/// max_supported_transaction_version: Some(0),
/// };
/// let block = rpc_client.get_block_with_config(
/// slot,
@@ -2101,7 +2181,7 @@ impl RpcClient {
///
/// This method uses the [`Finalized`] [commitment level][cl].
///
/// [`Finalized`]: CommitmentLevel::Finalized
/// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized
/// [`get_blocks_with_limit`]: RpcClient::get_blocks_with_limit.
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
///
@@ -2160,7 +2240,7 @@ impl RpcClient {
/// This method returns an error if the given commitment level is below
/// [`Confirmed`].
///
/// [`Confirmed`]: CommitmentLevel::Confirmed
/// [`Confirmed`]: solana_sdk::commitment_config::CommitmentLevel::Confirmed
///
/// # RPC Reference
///
@@ -2253,7 +2333,7 @@ impl RpcClient {
/// [`Confirmed`].
///
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
/// [`Confirmed`]: CommitmentLevel::Confirmed
/// [`Confirmed`]: solana_sdk::commitment_config::CommitmentLevel::Confirmed
///
/// # RPC Reference
///
@@ -2414,7 +2494,7 @@ impl RpcClient {
/// [`Confirmed`].
///
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
/// [`Confirmed`]: CommitmentLevel::Confirmed
/// [`Confirmed`]: solana_sdk::commitment_config::CommitmentLevel::Confirmed
///
/// # RPC Reference
///
@@ -2504,7 +2584,7 @@ impl RpcClient {
///
/// This method uses the [`Finalized`] [commitment level][cl].
///
/// [`Finalized`]: CommitmentLevel::Finalized
/// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
///
/// # RPC Reference
@@ -2559,7 +2639,7 @@ impl RpcClient {
/// [`Confirmed`].
///
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
/// [`Confirmed`]: CommitmentLevel::Confirmed
/// [`Confirmed`]: solana_sdk::commitment_config::CommitmentLevel::Confirmed
///
/// # RPC Reference
///
@@ -2596,6 +2676,7 @@ impl RpcClient {
/// let config = RpcTransactionConfig {
/// encoding: Some(UiTransactionEncoding::Json),
/// commitment: Some(CommitmentConfig::confirmed()),
/// max_supported_transaction_version: Some(0),
/// };
/// let transaction = rpc_client.get_transaction_with_config(
/// &signature,
@@ -2924,7 +3005,7 @@ impl RpcClient {
///
/// This method uses the [`Finalized`] [commitment level][cl].
///
/// [`Finalized`]: CommitmentLevel::Finalized
/// [`Finalized`]: solana_sdk::commitment_config::CommitmentLevel::Finalized
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
///
/// # RPC Reference
@@ -3082,6 +3163,7 @@ impl RpcClient {
/// [`RpcError::ForUser`]. This is unlike [`get_account_with_commitment`],
/// which returns `Ok(None)` if the account does not exist.
///
/// [`RpcError::ForUser`]: crate::rpc_request::RpcError::ForUser
/// [`get_account_with_commitment`]: RpcClient::get_account_with_commitment
///
/// # RPC Reference
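A hedged sketch of the distinction called out above: `get_account` reports a missing account as an error, while `get_account_with_commitment` returns `Ok` with a `None` value (the helper name is illustrative):
use {
    solana_client::{client_error::Result as ClientResult, rpc_client::RpcClient},
    solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey},
};
fn account_exists(rpc_client: &RpcClient, pubkey: &Pubkey) -> ClientResult<bool> {
    let response =
        rpc_client.get_account_with_commitment(pubkey, CommitmentConfig::confirmed())?;
    // `response.value` is Option<Account>; None means the account is absent,
    // which get_account would instead surface as RpcError::ForUser.
    Ok(response.value.is_some())
}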

View File

@@ -197,6 +197,7 @@ pub struct RpcBlockSubscribeConfig {
pub encoding: Option<UiTransactionEncoding>,
pub transaction_details: Option<TransactionDetails>,
pub show_rewards: Option<bool>,
pub max_supported_transaction_version: Option<u8>,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
@@ -248,6 +249,7 @@ pub struct RpcBlockConfig {
pub rewards: Option<bool>,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub max_supported_transaction_version: Option<u8>,
}
impl EncodingConfig for RpcBlockConfig {
@@ -288,6 +290,7 @@ pub struct RpcTransactionConfig {
pub encoding: Option<UiTransactionEncoding>,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub max_supported_transaction_version: Option<u8>,
}
impl EncodingConfig for RpcTransactionConfig {

View File

@@ -3,6 +3,7 @@ use {
crate::rpc_response::RpcSimulateTransactionResult,
jsonrpc_core::{Error, ErrorCode},
solana_sdk::clock::Slot,
solana_transaction_status::EncodeError,
thiserror::Error,
};
@@ -59,7 +60,7 @@ pub enum RpcCustomError {
#[error("BlockStatusNotAvailableYet")]
BlockStatusNotAvailableYet { slot: Slot },
#[error("UnsupportedTransactionVersion")]
UnsupportedTransactionVersion,
UnsupportedTransactionVersion(u8),
}
#[derive(Debug, Serialize, Deserialize)]
@@ -68,6 +69,16 @@ pub struct NodeUnhealthyErrorData {
pub num_slots_behind: Option<Slot>,
}
impl From<EncodeError> for RpcCustomError {
fn from(err: EncodeError) -> Self {
match err {
EncodeError::UnsupportedTransactionVersion(version) => {
Self::UnsupportedTransactionVersion(version)
}
}
}
}
impl From<RpcCustomError> for Error {
fn from(e: RpcCustomError) -> Self {
match e {
@@ -172,9 +183,9 @@ impl From<RpcCustomError> for Error {
message: format!("Block status not yet available for slot {}", slot),
data: None,
},
RpcCustomError::UnsupportedTransactionVersion => Self {
RpcCustomError::UnsupportedTransactionVersion(version) => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION),
message: "Versioned transactions are not supported".to_string(),
message: format!("Transaction version ({}) is not supported", version),
data: None,
},
}

View File

@@ -71,6 +71,7 @@ impl From<RpcConfirmedBlockConfig> for RpcBlockConfig {
transaction_details: config.transaction_details,
rewards: config.rewards,
commitment: config.commitment,
max_supported_transaction_version: None,
}
}
}
@@ -98,6 +99,7 @@ impl From<RpcConfirmedTransactionConfig> for RpcTransactionConfig {
Self {
encoding: config.encoding,
commitment: config.commitment,
max_supported_transaction_version: None,
}
}
}

View File

@@ -117,7 +117,7 @@ pub struct RpcInflationRate {
pub epoch: Epoch,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct RpcKeyedAccount {
pub pubkey: String,
@@ -246,7 +246,7 @@ pub struct RpcBlockProductionRange {
pub last_slot: Slot,
}
#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockProduction {
/// Map of leader base58 identity pubkeys to a tuple of `(number of leader slots, number of blocks produced)`
@@ -363,7 +363,7 @@ pub struct RpcAccountBalance {
pub lamports: u64,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct RpcSupply {
pub total: u64,
@@ -432,8 +432,8 @@ pub enum RpcBlockUpdateError {
#[error("block store error")]
BlockStoreError,
#[error("unsupported transaction version")]
UnsupportedTransactionVersion,
#[error("unsupported transaction version ({0})")]
UnsupportedTransactionVersion(u8),
}
#[derive(Serialize, Deserialize, Debug)]

View File

@@ -23,13 +23,9 @@ pub struct RpcTransportStats {
/// `RpcSender` implements the underlying transport of requests to, and
/// responses from, a Solana node, and is used primarily by [`RpcClient`].
///
/// It is typically implemented by [`HttpSender`] in production, and
/// [`MockSender`] in unit tests.
///
/// [`HttpSender`]: crate::http_sender::HttpSender
/// [`MockSender`]: crate::mock_sender::MockSender
/// [`RpcClient`]: crate::rpc_client::RpcClient
#[async_trait]
pub(crate) trait RpcSender {
pub trait RpcSender {
async fn send(
&self,
request: RpcRequest,

View File

@@ -4,8 +4,10 @@
//! unstable and may change in future releases.
use {
crate::{rpc_client::RpcClient, rpc_config::RpcProgramAccountsConfig, rpc_response::Response},
bincode::{serialize_into, serialized_size},
crate::{
rpc_client::RpcClient, rpc_config::RpcProgramAccountsConfig, rpc_response::Response,
tpu_connection::TpuConnection, udp_client::UdpTpuConnection,
},
log::*,
solana_sdk::{
account::Account,
@@ -17,13 +19,12 @@ use {
hash::Hash,
instruction::Instruction,
message::Message,
packet::PACKET_DATA_SIZE,
pubkey::Pubkey,
signature::{Keypair, Signature, Signer},
signers::Signers,
system_instruction,
timing::duration_as_ms,
transaction::{self, Transaction},
transaction::{self, Transaction, VersionedTransaction},
transport::Result as TransportResult,
},
std::{
@@ -117,22 +118,20 @@ impl ClientOptimizer {
}
/// An object for querying and sending transactions to the network.
pub struct ThinClient {
transactions_socket: UdpSocket,
tpu_addrs: Vec<SocketAddr>,
pub struct ThinClient<C: 'static + TpuConnection> {
rpc_clients: Vec<RpcClient>,
tpu_connections: Vec<C>,
optimizer: ClientOptimizer,
}
impl ThinClient {
impl<C: 'static + TpuConnection> ThinClient<C> {
/// Create a new ThinClient that will interface with the Rpc at `rpc_addr` using TCP
/// and the Tpu at `tpu_addr` over `transactions_socket` using UDP.
/// and the Tpu at `tpu_addr` over `transactions_socket` using Quic or UDP
/// (currently hardcoded to UDP)
pub fn new(rpc_addr: SocketAddr, tpu_addr: SocketAddr, transactions_socket: UdpSocket) -> Self {
Self::new_from_client(
tpu_addr,
transactions_socket,
RpcClient::new_socket(rpc_addr),
)
let tpu_connection = C::new(transactions_socket, tpu_addr);
Self::new_from_client(RpcClient::new_socket(rpc_addr), tpu_connection)
}
pub fn new_socket_with_timeout(
@@ -142,18 +141,14 @@ impl ThinClient {
timeout: Duration,
) -> Self {
let rpc_client = RpcClient::new_socket_with_timeout(rpc_addr, timeout);
Self::new_from_client(tpu_addr, transactions_socket, rpc_client)
let tpu_connection = C::new(transactions_socket, tpu_addr);
Self::new_from_client(rpc_client, tpu_connection)
}
fn new_from_client(
tpu_addr: SocketAddr,
transactions_socket: UdpSocket,
rpc_client: RpcClient,
) -> Self {
fn new_from_client(rpc_client: RpcClient, tpu_connection: C) -> Self {
Self {
transactions_socket,
tpu_addrs: vec![tpu_addr],
rpc_clients: vec![rpc_client],
tpu_connections: vec![tpu_connection],
optimizer: ClientOptimizer::new(0),
}
}
@@ -168,16 +163,19 @@ impl ThinClient {
let rpc_clients: Vec<_> = rpc_addrs.into_iter().map(RpcClient::new_socket).collect();
let optimizer = ClientOptimizer::new(rpc_clients.len());
let tpu_connections: Vec<_> = tpu_addrs
.into_iter()
.map(|tpu_addr| C::new(transactions_socket.try_clone().unwrap(), tpu_addr))
.collect();
Self {
transactions_socket,
tpu_addrs,
rpc_clients,
tpu_connections,
optimizer,
}
}
fn tpu_addr(&self) -> &SocketAddr {
&self.tpu_addrs[self.optimizer.best()]
fn tpu_connection(&self) -> &C {
&self.tpu_connections[self.optimizer.best()]
}
fn rpc_client(&self) -> &RpcClient {
@@ -205,7 +203,6 @@ impl ThinClient {
self.send_and_confirm_transaction(&[keypair], transaction, tries, 0)
}
/// Retry sending a signed Transaction to the server for processing
pub fn send_and_confirm_transaction<T: Signers>(
&self,
keypairs: &T,
@@ -215,18 +212,16 @@ impl ThinClient {
) -> TransportResult<Signature> {
for x in 0..tries {
let now = Instant::now();
let mut buf = vec![0; serialized_size(&transaction).unwrap() as usize];
let mut wr = std::io::Cursor::new(&mut buf[..]);
let mut num_confirmed = 0;
let mut wait_time = MAX_PROCESSING_AGE;
serialize_into(&mut wr, &transaction)
.expect("serialize Transaction in pub fn transfer_signed");
// resend the same transaction until the transaction has no chance of succeeding
let wire_transaction =
bincode::serialize(&transaction).expect("transaction serialization failed");
while now.elapsed().as_secs() < wait_time as u64 {
if num_confirmed == 0 {
// Send the transaction if there has been no confirmation (e.g. the first time)
self.transactions_socket
.send_to(&buf[..], &self.tpu_addr())?;
self.tpu_connection()
.send_wire_transaction(&wire_transaction)?;
}
if let Ok(confirmed_blocks) = self.poll_for_signature_confirmation(
@@ -321,13 +316,13 @@ impl ThinClient {
}
}
impl Client for ThinClient {
impl<C: 'static + TpuConnection> Client for ThinClient<C> {
fn tpu_addr(&self) -> String {
self.tpu_addr().to_string()
self.tpu_connection().tpu_addr().to_string()
}
}
impl SyncClient for ThinClient {
impl<C: 'static + TpuConnection> SyncClient for ThinClient<C> {
fn send_and_confirm_message<T: Signers>(
&self,
keypairs: &T,
@@ -607,17 +602,21 @@ impl SyncClient for ThinClient {
}
}
impl AsyncClient for ThinClient {
impl<C: 'static + TpuConnection> AsyncClient for ThinClient<C> {
fn async_send_transaction(&self, transaction: Transaction) -> TransportResult<Signature> {
let mut buf = vec![0; serialized_size(&transaction).unwrap() as usize];
let mut wr = std::io::Cursor::new(&mut buf[..]);
serialize_into(&mut wr, &transaction)
.expect("serialize Transaction in pub fn transfer_signed");
assert!(buf.len() < PACKET_DATA_SIZE);
self.transactions_socket
.send_to(&buf[..], &self.tpu_addr())?;
let transaction = VersionedTransaction::from(transaction);
self.tpu_connection()
.serialize_and_send_transaction(&transaction)?;
Ok(transaction.signatures[0])
}
fn async_send_batch(&self, transactions: Vec<Transaction>) -> TransportResult<()> {
let batch: Vec<VersionedTransaction> = transactions.into_iter().map(Into::into).collect();
self.tpu_connection()
.par_serialize_and_send_transaction_batch(&batch[..])?;
Ok(())
}
fn async_send_message<T: Signers>(
&self,
keypairs: &T,
@@ -649,20 +648,23 @@ impl AsyncClient for ThinClient {
}
}
pub fn create_client((rpc, tpu): (SocketAddr, SocketAddr), range: (u16, u16)) -> ThinClient {
pub fn create_client(
(rpc, tpu): (SocketAddr, SocketAddr),
range: (u16, u16),
) -> ThinClient<UdpTpuConnection> {
let (_, transactions_socket) =
solana_net_utils::bind_in_range(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), range).unwrap();
ThinClient::new(rpc, tpu, transactions_socket)
ThinClient::<UdpTpuConnection>::new(rpc, tpu, transactions_socket)
}
pub fn create_client_with_timeout(
(rpc, tpu): (SocketAddr, SocketAddr),
range: (u16, u16),
timeout: Duration,
) -> ThinClient {
) -> ThinClient<UdpTpuConnection> {
let (_, transactions_socket) =
solana_net_utils::bind_in_range(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), range).unwrap();
ThinClient::new_socket_with_timeout(rpc, tpu, transactions_socket, timeout)
ThinClient::<UdpTpuConnection>::new_socket_with_timeout(rpc, tpu, transactions_socket, timeout)
}
#[cfg(test)]
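A hedged usage sketch of the generic `ThinClient`: `create_client` now returns `ThinClient<UdpTpuConnection>`, but callers drive it exactly as before (the addresses, port range, and default blockhash below are placeholders):
use {
    solana_client::thin_client::create_client,
    solana_sdk::{
        client::AsyncClient,
        hash::Hash,
        signature::{Keypair, Signature, Signer},
        system_transaction,
        transport::Result as TransportResult,
    },
    std::net::SocketAddr,
};
fn send_transfer(rpc: SocketAddr, tpu: SocketAddr) -> TransportResult<Signature> {
    let client = create_client((rpc, tpu), (8000, 10_000));
    let from = Keypair::new();
    let to = Keypair::new().pubkey();
    // A real caller would fetch a recent blockhash; Hash::default() only keeps
    // the sketch self-contained.
    let tx = system_transaction::transfer(&from, &to, 1, Hash::default());
    // Serialized and forwarded through the client's TpuConnection (UDP here).
    client.async_send_transaction(tx)
}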

View File

@@ -0,0 +1,40 @@
use {
rayon::iter::{IntoParallelIterator, ParallelIterator},
solana_sdk::{transaction::VersionedTransaction, transport::Result as TransportResult},
std::net::{SocketAddr, UdpSocket},
};
pub trait TpuConnection {
fn new(client_socket: UdpSocket, tpu_addr: SocketAddr) -> Self;
fn tpu_addr(&self) -> &SocketAddr;
fn serialize_and_send_transaction(
&self,
transaction: &VersionedTransaction,
) -> TransportResult<()> {
let wire_transaction =
bincode::serialize(transaction).expect("serialize Transaction in send_batch");
self.send_wire_transaction(&wire_transaction)
}
fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]>;
fn par_serialize_and_send_transaction_batch(
&self,
transactions: &[VersionedTransaction],
) -> TransportResult<()> {
let buffers = transactions
.into_par_iter()
.map(|tx| bincode::serialize(&tx).expect("serialize Transaction in send_batch"))
.collect::<Vec<_>>();
self.send_wire_transaction_batch(&buffers)
}
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]>;
}
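A hedged sketch of calling code that is generic over any `TpuConnection`, relying only on the trait's default serialization helpers (module paths are assumed from the new files' locations; `UdpTpuConnection` is the implementation introduced in the next file):
use {
    solana_client::{tpu_connection::TpuConnection, udp_client::UdpTpuConnection},
    solana_sdk::{transaction::VersionedTransaction, transport::Result as TransportResult},
    std::net::{SocketAddr, UdpSocket},
};
fn forward_batch<C: TpuConnection>(
    conn: &C,
    transactions: &[VersionedTransaction],
) -> TransportResult<()> {
    // Serialization and fan-out come from the trait's default methods.
    conn.par_serialize_and_send_transaction_batch(transactions)
}
fn forward_over_udp(
    socket: UdpSocket,
    tpu_addr: SocketAddr,
    transactions: &[VersionedTransaction],
) -> TransportResult<()> {
    let conn = UdpTpuConnection::new(socket, tpu_addr);
    forward_batch(&conn, transactions)
}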

client/src/udp_client.rs (new file, 45 lines)
View File

@@ -0,0 +1,45 @@
//! Simple TPU client that communicates with the given UDP port with UDP and provides
//! an interface for sending transactions
use {
crate::tpu_connection::TpuConnection,
core::iter::repeat,
solana_sdk::transport::Result as TransportResult,
solana_streamer::sendmmsg::batch_send,
std::net::{SocketAddr, UdpSocket},
};
pub struct UdpTpuConnection {
socket: UdpSocket,
addr: SocketAddr,
}
impl TpuConnection for UdpTpuConnection {
fn new(client_socket: UdpSocket, tpu_addr: SocketAddr) -> Self {
Self {
socket: client_socket,
addr: tpu_addr,
}
}
fn tpu_addr(&self) -> &SocketAddr {
&self.addr
}
fn send_wire_transaction<T>(&self, wire_transaction: T) -> TransportResult<()>
where
T: AsRef<[u8]>,
{
self.socket.send_to(wire_transaction.as_ref(), self.addr)?;
Ok(())
}
fn send_wire_transaction_batch<T>(&self, buffers: &[T]) -> TransportResult<()>
where
T: AsRef<[u8]>,
{
let pkts: Vec<_> = buffers.iter().zip(repeat(self.tpu_addr())).collect();
batch_send(&self.socket, &pkts)?;
Ok(())
}
}

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.0"
version = "1.10.5"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"
@@ -15,73 +15,69 @@ codecov = { repository = "solana-labs/solana", branch = "master", service = "git
[dependencies]
ahash = "0.7.6"
base64 = "0.12.3"
base64 = "0.13.0"
bincode = "1.3.3"
bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] }
etcd-client = { version = "0.8.3", features = ["tls"]}
etcd-client = { version = "0.8.4", features = ["tls"] }
fs_extra = "1.2.0"
histogram = "0.6.9"
itertools = "0.10.3"
log = "0.4.14"
lru = "0.7.2"
lru = "0.7.3"
rand = "0.7.0"
rand_chacha = "0.2.2"
rayon = "1.5.1"
retain_mut = "0.1.5"
retain_mut = "0.1.7"
serde = "1.0.136"
serde_derive = "1.0.103"
solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.10.0" }
solana-bloom = { path = "../bloom", version = "=1.10.0" }
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-entry = { path = "../entry", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-ledger = { path = "../ledger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-metrics = { path = "../metrics", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-perf = { path = "../perf", version = "=1.10.0" }
solana-poh = { path = "../poh", version = "=1.10.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" }
solana-rpc = { path = "../rpc", version = "=1.10.0" }
solana-replica-lib = { path = "../replica-lib", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.10.5" }
solana-bloom = { path = "../bloom", version = "=1.10.5" }
solana-client = { path = "../client", version = "=1.10.5" }
solana-entry = { path = "../entry", version = "=1.10.5" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.5" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.5" }
solana-geyser-plugin-manager = { path = "../geyser-plugin-manager", version = "=1.10.5" }
solana-gossip = { path = "../gossip", version = "=1.10.5" }
solana-ledger = { path = "../ledger", version = "=1.10.5" }
solana-measure = { path = "../measure", version = "=1.10.5" }
solana-metrics = { path = "../metrics", version = "=1.10.5" }
solana-net-utils = { path = "../net-utils", version = "=1.10.5" }
solana-perf = { path = "../perf", version = "=1.10.5" }
solana-poh = { path = "../poh", version = "=1.10.5" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.5" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.5" }
solana-replica-lib = { path = "../replica-lib", version = "=1.10.5" }
solana-rpc = { path = "../rpc", version = "=1.10.5" }
solana-runtime = { path = "../runtime", version = "=1.10.5" }
solana-sdk = { path = "../sdk", version = "=1.10.5" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.5" }
solana-streamer = { path = "../streamer", version = "=1.10.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.5" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.5" }
sys-info = "0.9.1"
tempfile = "3.3.0"
thiserror = "1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.0" }
sys-info = "0.9.1"
tokio = { version = "1", features = ["full"] }
trees = "0.4.2"
[dev-dependencies]
jsonrpc-core = "18.0.0"
jsonrpc-core-client = { version = "18.0.0", features = ["ipc", "ws"] }
jsonrpc-derive = "18.0.0"
jsonrpc-pubsub = "18.0.0"
matches = "0.1.9"
raptorq = "1.6.5"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde_json = "1.0.78"
serial_test = "0.5.1"
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
reqwest = { version = "0.11.10", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde_json = "1.0.79"
serial_test = "0.6.0"
solana-logger = { path = "../logger", version = "=1.10.5" }
solana-program-runtime = { path = "../program-runtime", version = "=1.10.5" }
solana-stake-program = { path = "../programs/stake", version = "=1.10.5" }
solana-version = { path = "../version", version = "=1.10.5" }
static_assertions = "1.1.0"
systemstat = "0.1.10"
[target."cfg(unix)".dependencies]
sysctl = "0.4.3"
sysctl = "0.4.4"
[build-dependencies]
rustc_version = "0.4"

View File

@@ -12,6 +12,7 @@ use {
banking_stage::{BankingStage, BankingStageStats},
leader_slot_banking_stage_metrics::LeaderSlotMetricsTracker,
qos_service::QosService,
unprocessed_packet_batches::*,
},
solana_entry::entry::{next_hash, Entry},
solana_gossip::cluster_info::{ClusterInfo, Node},
@@ -36,7 +37,6 @@ use {
},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::VecDeque,
sync::{atomic::Ordering, Arc, RwLock},
time::{Duration, Instant},
},
@@ -79,10 +79,14 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let len = 4096;
let chunk_size = 1024;
let batches = to_packet_batches(&vec![tx; len], chunk_size);
let mut packet_batches = VecDeque::new();
let mut packet_batches = UnprocessedPacketBatches::new();
for batch in batches {
let batch_len = batch.packets.len();
packet_batches.push_back((batch, vec![0usize; batch_len], false));
packet_batches.push_back(DeserializedPacketBatch::new(
batch,
vec![0usize; batch_len],
false,
));
}
let (s, _r) = unbounded();
// This tests the performance of buffering packets.

View File

@@ -0,0 +1,112 @@
#![feature(test)]
extern crate test;
use {
rand::{seq::SliceRandom, Rng},
solana_core::{
cluster_nodes::{make_test_cluster, new_cluster_nodes, ClusterNodes},
retransmit_stage::RetransmitStage,
},
solana_gossip::contact_info::ContactInfo,
solana_sdk::{clock::Slot, hash::hashv, pubkey::Pubkey, signature::Signature},
test::Bencher,
};
const NUM_SIMULATED_SHREDS: usize = 4;
fn make_cluster_nodes<R: Rng>(
rng: &mut R,
unstaked_ratio: Option<(u32, u32)>,
) -> (Vec<ContactInfo>, ClusterNodes<RetransmitStage>) {
let (nodes, stakes, cluster_info) = make_test_cluster(rng, 5_000, unstaked_ratio);
let cluster_nodes = new_cluster_nodes::<RetransmitStage>(&cluster_info, &stakes);
(nodes, cluster_nodes)
}
fn get_retransmit_peers_deterministic(
cluster_nodes: &ClusterNodes<RetransmitStage>,
slot: &Slot,
slot_leader: &Pubkey,
num_simulated_shreds: usize,
) {
for i in 0..num_simulated_shreds {
// see Shred::seed
let shred_seed = hashv(&[
&slot.to_le_bytes(),
&(i as u32).to_le_bytes(),
&slot_leader.to_bytes(),
])
.to_bytes();
let (_neighbors, _children) = cluster_nodes.get_retransmit_peers_deterministic(
shred_seed,
solana_gossip::cluster_info::DATA_PLANE_FANOUT,
*slot_leader,
);
}
}
fn get_retransmit_peers_compat(
cluster_nodes: &ClusterNodes<RetransmitStage>,
slot_leader: &Pubkey,
signatures: &[Signature],
) {
for signature in signatures.iter() {
// see Shred::seed
let signature = signature.as_ref();
let offset = signature.len().checked_sub(32).unwrap();
let shred_seed = signature[offset..].try_into().unwrap();
let (_neighbors, _children) = cluster_nodes.get_retransmit_peers_compat(
shred_seed,
solana_gossip::cluster_info::DATA_PLANE_FANOUT,
*slot_leader,
);
}
}
fn get_retransmit_peers_deterministic_wrapper(b: &mut Bencher, unstaked_ratio: Option<(u32, u32)>) {
let mut rng = rand::thread_rng();
let (nodes, cluster_nodes) = make_cluster_nodes(&mut rng, unstaked_ratio);
let slot_leader = nodes[1..].choose(&mut rng).unwrap().id;
let slot = rand::random::<u64>();
b.iter(|| {
get_retransmit_peers_deterministic(
&cluster_nodes,
&slot,
&slot_leader,
NUM_SIMULATED_SHREDS,
)
});
}
fn get_retransmit_peers_compat_wrapper(b: &mut Bencher, unstaked_ratio: Option<(u32, u32)>) {
let mut rng = rand::thread_rng();
let (nodes, cluster_nodes) = make_cluster_nodes(&mut rng, unstaked_ratio);
let slot_leader = nodes[1..].choose(&mut rng).unwrap().id;
let signatures: Vec<_> = std::iter::repeat_with(Signature::new_unique)
.take(NUM_SIMULATED_SHREDS)
.collect();
b.iter(|| get_retransmit_peers_compat(&cluster_nodes, &slot_leader, &signatures));
}
#[bench]
fn bench_get_retransmit_peers_deterministic_unstaked_ratio_1_2(b: &mut Bencher) {
get_retransmit_peers_deterministic_wrapper(b, Some((1, 2)));
}
#[bench]
fn bench_get_retransmit_peers_compat_unstaked_ratio_1_2(b: &mut Bencher) {
get_retransmit_peers_compat_wrapper(b, Some((1, 2)));
}
#[bench]
fn bench_get_retransmit_peers_deterministic_unstaked_ratio_1_32(b: &mut Bencher) {
get_retransmit_peers_deterministic_wrapper(b, Some((1, 32)));
}
#[bench]
fn bench_get_retransmit_peers_compat_unstaked_ratio_1_32(b: &mut Bencher) {
get_retransmit_peers_compat_wrapper(b, Some((1, 32)));
}

View File

@@ -9,7 +9,10 @@ use {
log::*,
rand::{thread_rng, Rng},
solana_core::{sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage},
solana_perf::{packet::to_packet_batches, packet::PacketBatch, test_tx::test_tx},
solana_perf::{
packet::{to_packet_batches, PacketBatch},
test_tx::test_tx,
},
solana_sdk::{
hash::Hash,
signature::{Keypair, Signer},
@@ -156,7 +159,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
for _ in 0..batches.len() {
if let Some(batch) = batches.pop() {
sent_len += batch.packets.len();
packet_s.send(batch).unwrap();
packet_s.send(vec![batch]).unwrap();
}
}
let mut received = 0;

View File

@@ -3,6 +3,7 @@ use {
cluster_slots::ClusterSlots,
duplicate_repair_status::{DeadSlotAncestorRequestStatus, DuplicateAncestorDecision},
outstanding_requests::OutstandingRequests,
packet_threshold::DynamicPacketToProcessThreshold,
repair_response::{self},
repair_service::{DuplicateSlotsResetSender, RepairInfo, RepairStatsGroup},
replay_stage::DUPLICATE_THRESHOLD,
@@ -12,7 +13,6 @@ use {
crossbeam_channel::{unbounded, Receiver, Sender},
dashmap::{mapref::entry::Entry::Occupied, DashMap},
solana_ledger::{blockstore::Blockstore, shred::SIZE_OF_NONCE},
solana_measure::measure::Measure,
solana_perf::{
packet::{limited_deserialize, Packet, PacketBatch},
recycler::Recycler,
@@ -208,7 +208,7 @@ impl AncestorHashesService {
.spawn(move || {
let mut last_stats_report = Instant::now();
let mut stats = AncestorHashesResponsesStats::default();
let mut max_packets = 1024;
let mut packet_threshold = DynamicPacketToProcessThreshold::default();
loop {
let result = Self::process_new_packets_from_channel(
&ancestor_hashes_request_statuses,
@@ -216,13 +216,13 @@ impl AncestorHashesService {
&blockstore,
&outstanding_requests,
&mut stats,
&mut max_packets,
&mut packet_threshold,
&duplicate_slots_reset_sender,
&retryable_slots_sender,
);
match result {
Err(Error::RecvTimeout(_)) | Ok(_) => {}
Err(err) => info!("ancestors hashes reponses listener error: {:?}", err),
Err(err) => info!("ancestors hashes responses listener error: {:?}", err),
};
if exit.load(Ordering::Relaxed) {
return;
@@ -243,7 +243,7 @@ impl AncestorHashesService {
blockstore: &Blockstore,
outstanding_requests: &RwLock<OutstandingAncestorHashesRepairs>,
stats: &mut AncestorHashesResponsesStats,
max_packets: &mut usize,
packet_threshold: &mut DynamicPacketToProcessThreshold,
duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
retryable_slots_sender: &RetryableSlotsSender,
) -> Result<()> {
@@ -254,18 +254,17 @@ impl AncestorHashesService {
let mut dropped_packets = 0;
while let Ok(batch) = response_receiver.try_recv() {
total_packets += batch.packets.len();
if total_packets < *max_packets {
// Drop the rest in the channel in case of DOS
packet_batches.push(batch);
} else {
if packet_threshold.should_drop(total_packets) {
dropped_packets += batch.packets.len();
} else {
packet_batches.push(batch);
}
}
stats.dropped_packets += dropped_packets;
stats.total_packets += total_packets;
let mut time = Measure::start("ancestor_hashes::handle_packets");
let timer = Instant::now();
for packet_batch in packet_batches {
Self::process_packet_batch(
ancestor_hashes_request_statuses,
@@ -277,14 +276,7 @@ impl AncestorHashesService {
retryable_slots_sender,
);
}
time.stop();
if total_packets >= *max_packets {
if time.as_ms() > 1000 {
*max_packets = (*max_packets * 9) / 10;
} else {
*max_packets = (*max_packets * 10) / 9;
}
}
packet_threshold.update(total_packets, timer.elapsed());
Ok(())
}
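A hedged sketch of an adaptive drop threshold along the lines of the logic this hunk replaces; the real `DynamicPacketToProcessThreshold` lives in solana-core and may differ in constants and shape:
use std::time::Duration;
struct AdaptiveThreshold {
    max_packets: usize,
}
impl Default for AdaptiveThreshold {
    fn default() -> Self {
        Self { max_packets: 1024 }
    }
}
impl AdaptiveThreshold {
    const TARGET: Duration = Duration::from_millis(1000);
    // Packets beyond the current budget are dropped to bound per-iteration work.
    fn should_drop(&self, total_packets: usize) -> bool {
        total_packets >= self.max_packets
    }
    // Shrink the budget when processing ran long, grow it when there was headroom,
    // mirroring the 9/10 and 10/9 scaling of the replaced code.
    fn update(&mut self, total_packets: usize, elapsed: Duration) {
        if total_packets >= self.max_packets {
            if elapsed > Self::TARGET {
                self.max_packets = self.max_packets * 9 / 10;
            } else {
                self.max_packets = self.max_packets * 10 / 9;
            }
        }
    }
}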

File diff suppressed because it is too large

View File

@@ -2,12 +2,14 @@ use {
crate::{broadcast_stage::BroadcastStage, retransmit_stage::RetransmitStage},
itertools::Itertools,
lru::LruCache,
rand::SeedableRng,
rand::{seq::SliceRandom, Rng, SeedableRng},
rand_chacha::ChaChaRng,
solana_gossip::{
cluster_info::{compute_retransmit_peers, ClusterInfo},
contact_info::ContactInfo,
crds::GossipRoute,
crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
crds_value::{CrdsData, CrdsValue},
weighted_shuffle::{weighted_best, weighted_shuffle, WeightedShuffle},
},
solana_ledger::shred::Shred,
@@ -16,6 +18,7 @@ use {
clock::{Epoch, Slot},
feature_set,
pubkey::Pubkey,
signature::Keypair,
timing::timestamp,
},
solana_streamer::socket::SocketAddrSpace,
@@ -23,6 +26,7 @@ use {
any::TypeId,
cmp::Reverse,
collections::HashMap,
iter::repeat_with,
marker::PhantomData,
net::SocketAddr,
ops::Deref,
@@ -39,7 +43,7 @@ enum NodeId {
Pubkey(Pubkey),
}
struct Node {
pub struct Node {
node: NodeId,
stake: u64,
}
@@ -233,6 +237,18 @@ impl ClusterNodes<RetransmitStage> {
if !enable_turbine_peers_shuffle_patch(shred.slot(), root_bank) {
return self.get_retransmit_peers_compat(shred_seed, fanout, slot_leader);
}
self.get_retransmit_peers_deterministic(shred_seed, fanout, slot_leader)
}
pub fn get_retransmit_peers_deterministic(
&self,
shred_seed: [u8; 32],
fanout: usize,
slot_leader: Pubkey,
) -> (
Vec<&Node>, // neighbors
Vec<&Node>, // children
) {
let mut weighted_shuffle = self.weighted_shuffle.clone();
// Exclude slot leader from list of nodes.
if slot_leader == self.pubkey {
@@ -256,7 +272,7 @@ impl ClusterNodes<RetransmitStage> {
(neighbors, children)
}
fn get_retransmit_peers_compat(
pub fn get_retransmit_peers_compat(
&self,
shred_seed: [u8; 32],
fanout: usize,
@@ -297,7 +313,7 @@ impl ClusterNodes<RetransmitStage> {
}
}
fn new_cluster_nodes<T: 'static>(
pub fn new_cluster_nodes<T: 'static>(
cluster_info: &ClusterInfo,
stakes: &HashMap<Pubkey, u64>,
) -> ClusterNodes<T> {
@@ -462,22 +478,61 @@ impl From<Pubkey> for NodeId {
}
}
pub fn make_test_cluster<R: Rng>(
rng: &mut R,
num_nodes: usize,
unstaked_ratio: Option<(u32, u32)>,
) -> (
Vec<ContactInfo>,
HashMap<Pubkey, u64>, // stakes
ClusterInfo,
) {
let (unstaked_numerator, unstaked_denominator) = unstaked_ratio.unwrap_or((1, 7));
let mut nodes: Vec<_> = repeat_with(|| ContactInfo::new_rand(rng, None))
.take(num_nodes)
.collect();
nodes.shuffle(rng);
let this_node = nodes[0].clone();
let mut stakes: HashMap<Pubkey, u64> = nodes
.iter()
.filter_map(|node| {
if rng.gen_ratio(unstaked_numerator, unstaked_denominator) {
None // No stake for some of the nodes.
} else {
Some((node.id, rng.gen_range(0, 20)))
}
})
.collect();
// Add some staked nodes with no contact-info.
stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100));
let cluster_info = ClusterInfo::new(
this_node,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
{
let now = timestamp();
let mut gossip_crds = cluster_info.gossip.crds.write().unwrap();
// First node is pushed to crds table by ClusterInfo constructor.
for node in nodes.iter().skip(1) {
let node = CrdsData::ContactInfo(node.clone());
let node = CrdsValue::new_unsigned(node);
assert_eq!(
gossip_crds.insert(node, now, GossipRoute::LocalMessage),
Ok(())
);
}
}
(nodes, stakes, cluster_info)
}
#[cfg(test)]
mod tests {
use {
super::*,
rand::{seq::SliceRandom, Rng},
solana_gossip::{
crds::GossipRoute,
crds_value::{CrdsData, CrdsValue},
deprecated::{
shuffle_peers_and_index, sorted_retransmit_peers_and_stakes,
sorted_stakes_with_index,
},
solana_gossip::deprecated::{
shuffle_peers_and_index, sorted_retransmit_peers_and_stakes, sorted_stakes_with_index,
},
solana_sdk::{signature::Keypair, timing::timestamp},
solana_streamer::socket::SocketAddrSpace,
std::{iter::repeat_with, sync::Arc},
};
// Legacy methods copied for testing backward compatibility.
@@ -499,55 +554,10 @@ mod tests {
sorted_stakes_with_index(peers, stakes)
}
fn make_cluster<R: Rng>(
rng: &mut R,
) -> (
Vec<ContactInfo>,
HashMap<Pubkey, u64>, // stakes
ClusterInfo,
) {
let mut nodes: Vec<_> = repeat_with(|| ContactInfo::new_rand(rng, None))
.take(1000)
.collect();
nodes.shuffle(rng);
let this_node = nodes[0].clone();
let mut stakes: HashMap<Pubkey, u64> = nodes
.iter()
.filter_map(|node| {
if rng.gen_ratio(1, 7) {
None // No stake for some of the nodes.
} else {
Some((node.id, rng.gen_range(0, 20)))
}
})
.collect();
// Add some staked nodes with no contact-info.
stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100));
let cluster_info = ClusterInfo::new(
this_node,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
{
let now = timestamp();
let mut gossip_crds = cluster_info.gossip.crds.write().unwrap();
// First node is pushed to crds table by ClusterInfo constructor.
for node in nodes.iter().skip(1) {
let node = CrdsData::ContactInfo(node.clone());
let node = CrdsValue::new_unsigned(node);
assert_eq!(
gossip_crds.insert(node, now, GossipRoute::LocalMessage),
Ok(())
);
}
}
(nodes, stakes, cluster_info)
}
#[test]
fn test_cluster_nodes_retransmit() {
let mut rng = rand::thread_rng();
let (nodes, stakes, cluster_info) = make_cluster(&mut rng);
let (nodes, stakes, cluster_info) = make_test_cluster(&mut rng, 1_000, None);
let this_node = cluster_info.my_contact_info();
// ClusterInfo::tvu_peers excludes the node itself.
assert_eq!(cluster_info.tvu_peers().len(), nodes.len() - 1);
@@ -628,7 +638,7 @@ mod tests {
#[test]
fn test_cluster_nodes_broadcast() {
let mut rng = rand::thread_rng();
let (nodes, stakes, cluster_info) = make_cluster(&mut rng);
let (nodes, stakes, cluster_info) = make_test_cluster(&mut rng, 1_000, None);
// ClusterInfo::tvu_peers excludes the node itself.
assert_eq!(cluster_info.tvu_peers().len(), nodes.len() - 1);
let cluster_nodes = ClusterNodes::<BroadcastStage>::new(&cluster_info, &stakes);

View File

@@ -1,9 +1,9 @@
use crate::tower1_7_14::Tower1_7_14;
use {
crate::{
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
progress_map::{LockoutIntervals, ProgressMap},
tower1_7_14::Tower1_7_14,
tower_storage::{SavedTower, SavedTowerVersions, TowerStorage},
},
chrono::prelude::*,

View File

@@ -16,39 +16,24 @@ use {
},
};
// Update blockstore persistence storage when accumulated cost_table updates count exceeds the threshold
const PERSIST_THRESHOLD: u64 = 1_000;
#[derive(Default)]
pub struct CostUpdateServiceTiming {
last_print: u64,
update_cost_model_count: u64,
update_cost_model_elapsed: u64,
persist_cost_table_elapsed: u64,
}
impl CostUpdateServiceTiming {
fn update(
&mut self,
update_cost_model_count: Option<u64>,
update_cost_model_elapsed: Option<u64>,
persist_cost_table_elapsed: Option<u64>,
) {
if let Some(update_cost_model_count) = update_cost_model_count {
self.update_cost_model_count += update_cost_model_count;
}
if let Some(update_cost_model_elapsed) = update_cost_model_elapsed {
self.update_cost_model_elapsed += update_cost_model_elapsed;
}
if let Some(persist_cost_table_elapsed) = persist_cost_table_elapsed {
self.persist_cost_table_elapsed += persist_cost_table_elapsed;
}
fn update(&mut self, update_cost_model_count: u64, update_cost_model_elapsed: u64) {
self.update_cost_model_count += update_cost_model_count;
self.update_cost_model_elapsed += update_cost_model_elapsed;
let now = timestamp();
let elapsed_ms = now - self.last_print;
if elapsed_ms > 1000 {
datapoint_info!(
"cost-update-service-stats",
("total_elapsed_us", elapsed_ms * 1000, i64),
(
"update_cost_model_count",
self.update_cost_model_count as i64,
@@ -59,11 +44,6 @@ impl CostUpdateServiceTiming {
self.update_cost_model_elapsed as i64,
i64
),
(
"persist_cost_table_elapsed",
self.persist_cost_table_elapsed as i64,
i64
),
);
*self = CostUpdateServiceTiming::default();
@@ -109,13 +89,11 @@ impl CostUpdateService {
}
fn service_loop(
blockstore: Arc<Blockstore>,
_blockstore: Arc<Blockstore>,
cost_model: Arc<RwLock<CostModel>>,
cost_update_receiver: CostUpdateReceiver,
) {
let mut cost_update_service_timing = CostUpdateServiceTiming::default();
let mut update_count = 0_u64;
for cost_update in cost_update_receiver.iter() {
match cost_update {
CostUpdate::FrozenBank { bank } => {
@@ -124,33 +102,17 @@ impl CostUpdateService {
CostUpdate::ExecuteTiming {
mut execute_timings,
} => {
let mut update_cost_model_time = Measure::start("update_cost_model_time");
update_count += Self::update_cost_model(&cost_model, &mut execute_timings);
update_cost_model_time.stop();
cost_update_service_timing.update(
Some(update_count),
Some(update_cost_model_time.as_us()),
None,
let (update_count, update_cost_model_time) = Measure::this(
|_| Self::update_cost_model(&cost_model, &mut execute_timings),
(),
"update_cost_model_time",
);
if update_count > PERSIST_THRESHOLD {
let mut persist_cost_table_time = Measure::start("persist_cost_table_time");
Self::persist_cost_table(&blockstore, &cost_model);
update_count = 0_u64;
persist_cost_table_time.stop();
cost_update_service_timing.update(
None,
None,
Some(persist_cost_table_time.as_us()),
);
}
cost_update_service_timing.update(update_count, update_cost_model_time.as_us());
}
}
}
}
// Normalize `program_timings` with current estimated cost, update instruction_cost table
// Returns number of updates applied
fn update_cost_model(
cost_model: &RwLock<CostModel>,
execute_timings: &mut ExecuteTimings,
@@ -171,38 +133,9 @@ impl CostUpdateService {
.unwrap()
.upsert_instruction_cost(program_id, units);
update_count += 1;
debug!(
"After replayed into bank, updated cost for instruction {:?}, update_value {}, pre_aggregated_value {}",
program_id, units, current_estimated_program_cost
);
}
update_count
}
// 1. Remove obsolete program entries from persisted table to limit its size
// 2. Update persisted program cost. This involves EMA cost calculation at
// execute_cost_table.get_cost()
fn persist_cost_table(blockstore: &Blockstore, cost_model: &RwLock<CostModel>) {
let db_records = blockstore.read_program_costs().expect("read programs");
let cost_model = cost_model.read().unwrap();
let active_program_keys = cost_model.get_program_keys();
// delete records from blockstore if they are no longer in cost_table
db_records.iter().for_each(|(pubkey, _)| {
if !active_program_keys.contains(&pubkey) {
blockstore
.delete_program_cost(pubkey)
.expect("delete old program");
}
});
active_program_keys.iter().for_each(|program_id| {
let cost = cost_model.find_instruction_cost(program_id);
blockstore
.write_program_cost(program_id, &cost)
.expect("persist program costs to blockstore");
});
}
}
#[cfg(test)]
@@ -213,9 +146,10 @@ mod tests {
fn test_update_cost_model_with_empty_execute_timings() {
let cost_model = Arc::new(RwLock::new(CostModel::default()));
let mut empty_execute_timings = ExecuteTimings::default();
assert_eq!(
0,
CostUpdateService::update_cost_model(&cost_model, &mut empty_execute_timings),
0
);
}
@@ -233,7 +167,7 @@ mod tests {
let accumulated_units: u64 = 100;
let total_errored_units = 0;
let count: u32 = 10;
expected_cost = accumulated_units / count as u64; // = 10
expected_cost = accumulated_units / count as u64;
execute_timings.details.per_program_timings.insert(
program_key_1,
@@ -245,9 +179,10 @@ mod tests {
total_errored_units,
},
);
let update_count =
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(1, update_count);
assert_eq!(
1,
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
);
assert_eq!(
expected_cost,
cost_model
@@ -262,8 +197,8 @@ mod tests {
let accumulated_us: u64 = 2000;
let accumulated_units: u64 = 200;
let count: u32 = 10;
// to expect new cost = (mean + 2 * std) of [10, 20]
expected_cost = 13;
// to expect new cost is Average(new_value, existing_value)
expected_cost = ((accumulated_units / count as u64) + expected_cost) / 2;
execute_timings.details.per_program_timings.insert(
program_key_1,
@@ -275,9 +210,10 @@ mod tests {
total_errored_units: 0,
},
);
let update_count =
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(1, update_count);
assert_eq!(
1,
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
);
assert_eq!(
expected_cost,
cost_model
@@ -328,9 +264,10 @@ mod tests {
total_errored_units: 0,
},
);
let update_count =
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(1, update_count);
assert_eq!(
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
1
);
assert_eq!(
current_program_cost,
cost_model
@@ -344,12 +281,8 @@ mod tests {
// greater than the current instruction cost for the program. Should update with the
// new erroring compute costs
let cost_per_error = 1000;
// expected_cost = (mean + 2*std) of data points:
// [
// 100, // original program_cost
// 1000, // cost_per_error
// ]
let expected_cost = 289u64;
// the expected cost is (previous_cost + new_cost)/2 = (100 + 1000)/2 = 550
let expected_units = 550;
{
let errored_txs_compute_consumed = vec![cost_per_error; 3];
let total_errored_units = errored_txs_compute_consumed.iter().sum();
@@ -363,12 +296,12 @@ mod tests {
total_errored_units,
},
);
let update_count =
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
assert_eq!(1, update_count);
assert_eq!(
expected_cost,
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
1
);
assert_eq!(
expected_units,
cost_model
.read()
.unwrap()
@@ -379,7 +312,7 @@ mod tests {
// Test updating cost model with only erroring compute costs where the error cost is
// `smaller_cost_per_error`, less than the current instruction cost for the program.
// The cost should not decrease for these new lesser errors
let smaller_cost_per_error = expected_cost - 10;
let smaller_cost_per_error = expected_units - 10;
{
let errored_txs_compute_consumed = vec![smaller_cost_per_error; 3];
let total_errored_units = errored_txs_compute_consumed.iter().sum();
@@ -393,19 +326,12 @@ mod tests {
total_errored_units,
},
);
let update_count =
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
// expected_cost = (mean = 2*std) of data points:
// [
// 100, // original program cost,
// 1000, // cost_per_error from above test
// 289, // the smaller_cost_per_error will be coalesced to prev cost
// ]
let expected_cost = 293u64;
assert_eq!(1, update_count);
assert_eq!(
expected_cost,
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings),
1
);
assert_eq!(
expected_units,
cost_model
.read()
.unwrap()
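A hedged sketch of the `Measure::this` pattern adopted above: it runs a closure and returns the closure's result alongside the stopped measure (`workload` and its argument are placeholders):
use solana_measure::measure::Measure;
fn workload(n: u64) -> u64 {
    (0..n).sum()
}
fn timed_workload() -> (u64, u64) {
    // Measure::this(f, arg, name) evaluates f(arg) and times it.
    let (result, measure) = Measure::this(|n| workload(n), 1_000u64, "workload_time");
    (result, measure.as_us())
}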

View File

@@ -0,0 +1,185 @@
use {
crossbeam_channel::{Receiver, RecvTimeoutError, Sender},
rayon::{prelude::*, ThreadPool},
solana_gossip::cluster_info::ClusterInfo,
solana_measure::measure::Measure,
solana_perf::packet::PacketBatch,
solana_rayon_threadlimit::get_thread_count,
solana_runtime::bank_forks::BankForks,
solana_sdk::timing::timestamp,
solana_streamer::streamer::{self, StreamerError},
std::{
cell::RefCell,
collections::HashMap,
net::IpAddr,
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
},
};
const IP_TO_STAKE_REFRESH_DURATION: Duration = Duration::from_secs(5);
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("transaction_sender_stake_stage_{}", ix))
.build()
.unwrap()));
pub type FindPacketSenderStakeSender = Sender<Vec<PacketBatch>>;
pub type FindPacketSenderStakeReceiver = Receiver<Vec<PacketBatch>>;
#[derive(Debug, Default)]
struct FindPacketSenderStakeStats {
last_print: u64,
refresh_ip_to_stake_time: u64,
apply_sender_stakes_time: u64,
send_batches_time: u64,
receive_batches_time: u64,
total_batches: u64,
total_packets: u64,
}
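// Reported to the "find_packet_sender_stake-services_stats" datapoint at most once every 2 seconds, then reset.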
impl FindPacketSenderStakeStats {
fn report(&mut self) {
let now = timestamp();
let elapsed_ms = now - self.last_print;
if elapsed_ms > 2000 {
datapoint_info!(
"find_packet_sender_stake-services_stats",
(
"refresh_ip_to_stake_time",
self.refresh_ip_to_stake_time as i64,
i64
),
(
"apply_sender_stakes_time",
self.apply_sender_stakes_time as i64,
i64
),
("send_batches_time", self.send_batches_time as i64, i64),
(
"receive_batches_time",
self.receive_batches_time as i64,
i64
),
("total_batches", self.total_batches as i64, i64),
("total_packets", self.total_packets as i64, i64),
);
*self = FindPacketSenderStakeStats::default();
self.last_print = now;
}
}
}
pub struct FindPacketSenderStakeStage {
thread_hdl: JoinHandle<()>,
}
impl FindPacketSenderStakeStage {
pub fn new(
packet_receiver: streamer::PacketBatchReceiver,
sender: FindPacketSenderStakeSender,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<ClusterInfo>,
) -> Self {
let mut stats = FindPacketSenderStakeStats::default();
let thread_hdl = Builder::new()
.name("find-packet-sender-stake".to_string())
.spawn(move || {
let mut last_stakes = Instant::now();
let mut ip_to_stake: HashMap<IpAddr, u64> = HashMap::new();
loop {
let mut refresh_ip_to_stake_time = Measure::start("refresh_ip_to_stake_time");
Self::try_refresh_ip_to_stake(
&mut last_stakes,
&mut ip_to_stake,
bank_forks.clone(),
cluster_info.clone(),
);
refresh_ip_to_stake_time.stop();
stats.refresh_ip_to_stake_time = stats
.refresh_ip_to_stake_time
.saturating_add(refresh_ip_to_stake_time.as_us());
match streamer::recv_packet_batches(&packet_receiver) {
Ok((mut batches, num_packets, recv_duration)) => {
let num_batches = batches.len();
let mut apply_sender_stakes_time =
Measure::start("apply_sender_stakes_time");
Self::apply_sender_stakes(&mut batches, &ip_to_stake);
apply_sender_stakes_time.stop();
let mut send_batches_time = Measure::start("send_batches_time");
if let Err(e) = sender.send(batches) {
info!("Sender error: {:?}", e);
}
send_batches_time.stop();
stats.apply_sender_stakes_time = stats
.apply_sender_stakes_time
.saturating_add(apply_sender_stakes_time.as_us());
stats.send_batches_time = stats
.send_batches_time
.saturating_add(send_batches_time.as_us());
stats.receive_batches_time = stats
.receive_batches_time
.saturating_add(recv_duration.as_nanos() as u64);
stats.total_batches =
stats.total_batches.saturating_add(num_batches as u64);
stats.total_packets =
stats.total_packets.saturating_add(num_packets as u64);
}
Err(e) => match e {
StreamerError::RecvTimeout(RecvTimeoutError::Disconnected) => break,
StreamerError::RecvTimeout(RecvTimeoutError::Timeout) => (),
_ => error!("error: {:?}", e),
},
}
stats.report();
}
})
.unwrap();
Self { thread_hdl }
}
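// Rebuilds the IP -> stake map from the root bank's staked nodes, at most once every IP_TO_STAKE_REFRESH_DURATION (5s).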
fn try_refresh_ip_to_stake(
last_stakes: &mut Instant,
ip_to_stake: &mut HashMap<IpAddr, u64>,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<ClusterInfo>,
) {
if last_stakes.elapsed() > IP_TO_STAKE_REFRESH_DURATION {
let root_bank = bank_forks.read().unwrap().root_bank();
let staked_nodes = root_bank.staked_nodes();
*ip_to_stake = cluster_info
.tvu_peers()
.into_iter()
.filter_map(|node| {
let stake = staked_nodes.get(&node.id)?;
Some((node.tvu.ip(), *stake))
})
.collect();
*last_stakes = Instant::now();
}
}
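// Stamps each packet's meta.sender_stake with the stake of its source IP address (0 for unknown or unstaked senders), processing batches in parallel on the dedicated rayon pool.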
fn apply_sender_stakes(batches: &mut [PacketBatch], ip_to_stake: &HashMap<IpAddr, u64>) {
PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
batches
.into_par_iter()
.flat_map(|batch| batch.packets.par_iter_mut())
.for_each(|packet| {
packet.meta.sender_stake =
*ip_to_stake.get(&packet.meta.addr().ip()).unwrap_or(&0);
});
})
});
}
pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}

View File

@@ -2715,7 +2715,8 @@ mod test {
stake
);
}
for slot in &[17] {
{
let slot = &17;
assert_eq!(
tree1
.stake_voted_subtree(&(*slot, Hash::default()))

View File

@@ -1,4 +1,5 @@
use {
crate::leader_slot_banking_stage_timing_metrics::*,
solana_poh::poh_recorder::BankStart,
solana_sdk::{clock::Slot, saturating_add_assign},
std::time::Instant,
@@ -38,41 +39,12 @@ pub(crate) struct ProcessTransactionsSummary {
// The number of transactions filtered out by the cost model
pub cost_model_throttled_transactions_count: usize,
}
// Metrics capturing wallclock time spent in various parts of BankingStage during this
// validator's leader slot
#[derive(Debug)]
struct LeaderSlotTimingMetrics {
bank_detected_time: Instant,
// Total amount of time spent running the cost model
pub cost_model_us: u64,
// Delay from when the bank was created to when this thread detected it
bank_detected_delay_us: u64,
}
impl LeaderSlotTimingMetrics {
fn new(bank_creation_time: &Instant) -> Self {
Self {
bank_detected_time: Instant::now(),
bank_detected_delay_us: bank_creation_time.elapsed().as_micros() as u64,
}
}
fn report(&self, id: u32, slot: Slot) {
let bank_detected_to_now = self.bank_detected_time.elapsed().as_micros() as u64;
datapoint_info!(
"banking_stage-leader_slot_loop_timings",
("id", id as i64, i64),
("slot", slot as i64, i64),
("bank_detected_to_now_us", bank_detected_to_now, i64),
(
"bank_creation_to_now_us",
bank_detected_to_now + self.bank_detected_delay_us,
i64
),
("bank_detected_delay_us", self.bank_detected_delay_us, i64),
);
}
// Breakdown of time spent executing and committing transactions
pub execute_and_commit_timings: LeaderExecuteAndCommitTimings,
}
// Metrics describing packets ingested/processed in various parts of BankingStage during this
@@ -362,6 +334,8 @@ impl LeaderSlotMetricsTracker {
failed_commit_count,
ref retryable_transaction_indexes,
cost_model_throttled_transactions_count,
cost_model_us,
ref execute_and_commit_timings,
..
} = process_transactions_summary;
@@ -415,9 +389,23 @@ impl LeaderSlotMetricsTracker {
.cost_model_throttled_transactions_count,
*cost_model_throttled_transactions_count as u64
);
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.cost_model_us,
*cost_model_us as u64
);
leader_slot_metrics
.timing_metrics
.execute_and_commit_timings
.accumulate(execute_and_commit_timings);
}
}
// Packet inflow/outflow/processing metrics
pub(crate) fn increment_total_new_valid_packets(&mut self, count: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
@@ -527,6 +515,166 @@ impl LeaderSlotMetricsTracker {
);
}
}
// Outermost banking thread's loop timing metrics
pub(crate) fn increment_process_buffered_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.outer_loop_timings
.process_buffered_packets_us,
us
);
}
}
pub(crate) fn increment_slot_metrics_check_slot_boundary_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.outer_loop_timings
.slot_metrics_check_slot_boundary_us,
us
);
}
}
pub(crate) fn increment_receive_and_buffer_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.outer_loop_timings
.receive_and_buffer_packets_us,
us
);
}
}
// Processing buffer timing metrics
pub(crate) fn increment_make_decision_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.make_decision_us,
us
);
}
}
pub(crate) fn increment_consume_buffered_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.consume_buffered_packets_us,
us
);
}
}
pub(crate) fn increment_forward_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.forward_us,
us
);
}
}
pub(crate) fn increment_forward_and_hold_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_buffered_packets_timings
.forward_and_hold_us,
us
);
}
}
// Consuming buffered packets timing metrics
pub(crate) fn increment_end_of_slot_filtering_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.consume_buffered_packets_timings
.end_of_slot_filtering_us,
us
);
}
}
pub(crate) fn increment_consume_buffered_packets_poh_recorder_lock_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.consume_buffered_packets_timings
.poh_recorder_lock_us,
us
);
}
}
pub(crate) fn increment_process_packets_transactions_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.consume_buffered_packets_timings
.process_packets_transactions_us,
us
);
}
}
// Processing packets timing metrics
pub(crate) fn increment_transactions_from_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.transactions_from_packets_us,
us
);
}
}
pub(crate) fn increment_process_transactions_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.process_transactions_us,
us
);
}
}
pub(crate) fn increment_filter_retryable_packets_us(&mut self, us: u64) {
if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics {
saturating_add_assign!(
leader_slot_metrics
.timing_metrics
.process_packets_timings
.filter_retryable_packets_us,
us
);
}
}
}
#[cfg(test)]

View File

@@ -0,0 +1,286 @@
use {
solana_program_runtime::timings::ExecuteTimings,
solana_sdk::{clock::Slot, saturating_add_assign},
std::time::Instant,
};
#[derive(Default, Debug)]
pub struct LeaderExecuteAndCommitTimings {
pub collect_balances_us: u64,
pub load_execute_us: u64,
pub freeze_lock_us: u64,
pub record_us: u64,
pub commit_us: u64,
pub find_and_send_votes_us: u64,
pub record_transactions_timings: RecordTransactionsTimings,
pub execute_timings: ExecuteTimings,
}
impl LeaderExecuteAndCommitTimings {
pub fn accumulate(&mut self, other: &LeaderExecuteAndCommitTimings) {
saturating_add_assign!(self.collect_balances_us, other.collect_balances_us);
saturating_add_assign!(self.load_execute_us, other.load_execute_us);
saturating_add_assign!(self.freeze_lock_us, other.freeze_lock_us);
saturating_add_assign!(self.record_us, other.record_us);
saturating_add_assign!(self.commit_us, other.commit_us);
saturating_add_assign!(self.find_and_send_votes_us, other.find_and_send_votes_us);
self.record_transactions_timings
.accumulate(&other.record_transactions_timings);
self.execute_timings.accumulate(&other.execute_timings);
}
pub fn report(&self, id: u32, slot: Slot) {
datapoint_info!(
"banking_stage-leader_slot_execute_and_commit_timings",
("id", id as i64, i64),
("slot", slot as i64, i64),
("collect_balances_us", self.collect_balances_us as i64, i64),
("load_execute_us", self.load_execute_us as i64, i64),
("freeze_lock_us", self.freeze_lock_us as i64, i64),
("record_us", self.record_us as i64, i64),
("commit_us", self.commit_us as i64, i64),
(
"find_and_send_votes_us",
self.find_and_send_votes_us as i64,
i64
),
);
datapoint_info!(
"banking_stage-leader_slot_record_timings",
("id", id as i64, i64),
("slot", slot as i64, i64),
(
"execution_results_to_transactions_us",
self.record_transactions_timings
.execution_results_to_transactions_us as i64,
i64
),
(
"hash_us",
self.record_transactions_timings.hash_us as i64,
i64
),
(
"poh_record_us",
self.record_transactions_timings.poh_record_us as i64,
i64
),
);
}
}
#[derive(Default, Debug)]
pub struct RecordTransactionsTimings {
pub execution_results_to_transactions_us: u64,
pub hash_us: u64,
pub poh_record_us: u64,
}
impl RecordTransactionsTimings {
pub fn accumulate(&mut self, other: &RecordTransactionsTimings) {
saturating_add_assign!(
self.execution_results_to_transactions_us,
other.execution_results_to_transactions_us
);
saturating_add_assign!(self.hash_us, other.hash_us);
saturating_add_assign!(self.poh_record_us, other.poh_record_us);
}
}
// Metrics capturing wallclock time spent in various parts of BankingStage during this
// validator's leader slot
#[derive(Debug)]
pub(crate) struct LeaderSlotTimingMetrics {
pub outer_loop_timings: OuterLoopTimings,
pub process_buffered_packets_timings: ProcessBufferedPacketsTimings,
pub consume_buffered_packets_timings: ConsumeBufferedPacketsTimings,
pub process_packets_timings: ProcessPacketsTimings,
pub execute_and_commit_timings: LeaderExecuteAndCommitTimings,
}
impl LeaderSlotTimingMetrics {
pub(crate) fn new(bank_creation_time: &Instant) -> Self {
Self {
outer_loop_timings: OuterLoopTimings::new(bank_creation_time),
process_buffered_packets_timings: ProcessBufferedPacketsTimings::default(),
consume_buffered_packets_timings: ConsumeBufferedPacketsTimings::default(),
process_packets_timings: ProcessPacketsTimings::default(),
execute_and_commit_timings: LeaderExecuteAndCommitTimings::default(),
}
}
pub(crate) fn report(&self, id: u32, slot: Slot) {
self.outer_loop_timings.report(id, slot);
self.process_buffered_packets_timings.report(id, slot);
self.consume_buffered_packets_timings.report(id, slot);
self.process_packets_timings.report(id, slot);
self.execute_and_commit_timings.report(id, slot);
}
}
#[derive(Debug)]
pub(crate) struct OuterLoopTimings {
pub bank_detected_time: Instant,
// Delay from when the bank was created to when this thread detected it
pub bank_detected_delay_us: u64,
// Time spent processing buffered packets
pub process_buffered_packets_us: u64,
// Time spent checking for slot boundary and reporting leader slot metrics
pub slot_metrics_check_slot_boundary_us: u64,
// Time spent processing new incoming packets to the banking thread
pub receive_and_buffer_packets_us: u64,
}
impl OuterLoopTimings {
fn new(bank_creation_time: &Instant) -> Self {
Self {
bank_detected_time: Instant::now(),
bank_detected_delay_us: bank_creation_time.elapsed().as_micros() as u64,
process_buffered_packets_us: 0,
slot_metrics_check_slot_boundary_us: 0,
receive_and_buffer_packets_us: 0,
}
}
fn report(&self, id: u32, slot: Slot) {
let bank_detected_to_now_us = self.bank_detected_time.elapsed().as_micros() as u64;
datapoint_info!(
"banking_stage-leader_slot_loop_timings",
("id", id as i64, i64),
("slot", slot as i64, i64),
(
"bank_detected_to_slot_end_detected_us",
bank_detected_to_now_us,
i64
),
(
"bank_creation_to_slot_end_detected_us",
bank_detected_to_now_us + self.bank_detected_delay_us,
i64
),
("bank_detected_delay_us", self.bank_detected_delay_us, i64),
(
"process_buffered_packets_us",
self.process_buffered_packets_us,
i64
),
(
"slot_metrics_check_slot_boundary_us",
self.slot_metrics_check_slot_boundary_us,
i64
),
(
"receive_and_buffer_packets_us",
self.receive_and_buffer_packets_us,
i64
),
);
}
}
#[derive(Debug, Default)]
pub(crate) struct ProcessBufferedPacketsTimings {
pub make_decision_us: u64,
pub consume_buffered_packets_us: u64,
pub forward_us: u64,
pub forward_and_hold_us: u64,
}
impl ProcessBufferedPacketsTimings {
fn report(&self, id: u32, slot: Slot) {
datapoint_info!(
"banking_stage-leader_slot_process_buffered_packets_timings",
("id", id as i64, i64),
("slot", slot as i64, i64),
("make_decision_us", self.make_decision_us as i64, i64),
(
"consume_buffered_packets_us",
self.consume_buffered_packets_us as i64,
i64
),
("forward_us", self.forward_us as i64, i64),
("forward_and_hold_us", self.forward_and_hold_us as i64, i64),
);
}
}
#[derive(Debug, Default)]
pub(crate) struct ConsumeBufferedPacketsTimings {
// Time spent grabbing poh recorder lock
pub poh_recorder_lock_us: u64,
// Time spent filtering invalid packets after leader slot has ended
pub end_of_slot_filtering_us: u64,
// Time spent processing transactions
pub process_packets_transactions_us: u64,
}
impl ConsumeBufferedPacketsTimings {
fn report(&self, id: u32, slot: Slot) {
datapoint_info!(
"banking_stage-leader_slot_consume_buffered_packets_timings",
("id", id as i64, i64),
("slot", slot as i64, i64),
(
"poh_recorder_lock_us",
self.poh_recorder_lock_us as i64,
i64
),
(
"end_of_slot_filtering_us",
self.end_of_slot_filtering_us as i64,
i64
),
(
"process_packets_transactions_us",
self.process_packets_transactions_us as i64,
i64
),
);
}
}
#[derive(Debug, Default)]
pub(crate) struct ProcessPacketsTimings {
// Time spent converting packets to transactions
pub transactions_from_packets_us: u64,
// Time spent processing transactions
pub process_transactions_us: u64,
// Time spent filtering retryable packets that were returned after transaction
// processing
pub filter_retryable_packets_us: u64,
// Time spent running the cost model in processing transactions before executing
// transactions
pub cost_model_us: u64,
}
impl ProcessPacketsTimings {
fn report(&self, id: u32, slot: Slot) {
datapoint_info!(
"banking_stage-leader_slot_process_packets_timings",
("id", id as i64, i64),
("slot", slot as i64, i64),
(
"transactions_from_packets_us",
self.transactions_from_packets_us,
i64
),
("process_transactions_us", self.process_transactions_us, i64),
(
"filter_retryable_packets_us",
self.filter_retryable_packets_us,
i64
),
("cost_model_us", self.cost_model_us, i64),
);
}
}

View File

@@ -0,0 +1,44 @@
//! The `ledger_metric_report_service` periodically reports ledger store metrics.
use {
solana_ledger::blockstore::Blockstore,
std::{
string::ToString,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{self, Builder, JoinHandle},
time::Duration,
},
};
// Determines how often we report blockstore metrics.
const BLOCKSTORE_METRICS_REPORT_PERIOD_MILLIS: u64 = 10000;
pub struct LedgerMetricReportService {
t_cf_metric: JoinHandle<()>,
}
impl LedgerMetricReportService {
pub fn new(blockstore: Arc<Blockstore>, exit: &Arc<AtomicBool>) -> Self {
let exit_signal = exit.clone();
let t_cf_metric = Builder::new()
.name("metric_report_rocksdb_cf_metrics".to_string())
.spawn(move || loop {
if exit_signal.load(Ordering::Relaxed) {
break;
}
thread::sleep(Duration::from_millis(
BLOCKSTORE_METRICS_REPORT_PERIOD_MILLIS,
));
blockstore.submit_rocksdb_cf_metrics_for_all_cfs();
})
.unwrap();
Self { t_cf_metric }
}
pub fn join(self) -> thread::Result<()> {
self.t_cf_metric.join()
}
}

View File

@@ -24,15 +24,19 @@ pub mod cost_update_service;
pub mod drop_bank_service;
pub mod duplicate_repair_status;
pub mod fetch_stage;
pub mod find_packet_sender_stake_stage;
pub mod fork_choice;
pub mod gen_keys;
pub mod heaviest_subtree_fork_choice;
pub mod latest_validator_votes_for_frozen_banks;
pub mod leader_slot_banking_stage_metrics;
pub mod leader_slot_banking_stage_timing_metrics;
pub mod ledger_cleanup_service;
pub mod ledger_metric_report_service;
pub mod optimistic_confirmation_verifier;
pub mod outstanding_requests;
pub mod packet_hasher;
pub mod packet_threshold;
pub mod progress_map;
pub mod qos_service;
pub mod repair_generic_traversal;
@@ -61,6 +65,7 @@ pub mod tpu;
pub mod tree_diff;
pub mod tvu;
pub mod unfrozen_gossip_verified_vote_hashes;
pub mod unprocessed_packet_batches;
pub mod validator;
pub mod verified_vote_packets;
pub mod vote_simulator;

View File

@@ -0,0 +1,84 @@
use std::time::Duration;
enum PacketThresholdUpdate {
Increase,
Decrease,
}
impl PacketThresholdUpdate {
const PERCENTAGE: usize = 90;
fn calculate(&self, current: usize) -> usize {
match *self {
PacketThresholdUpdate::Increase => {
current.saturating_mul(100).saturating_div(Self::PERCENTAGE)
}
PacketThresholdUpdate::Decrease => {
current.saturating_mul(Self::PERCENTAGE).saturating_div(100)
}
}
}
}
#[derive(Debug)]
pub struct DynamicPacketToProcessThreshold {
max_packets: usize,
}
impl Default for DynamicPacketToProcessThreshold {
fn default() -> Self {
Self {
max_packets: Self::DEFAULT_MAX_PACKETS,
}
}
}
impl DynamicPacketToProcessThreshold {
const DEFAULT_MAX_PACKETS: usize = 1024;
const TIME_THRESHOLD: Duration = Duration::from_secs(1);
pub fn update(&mut self, total_packets: usize, compute_time: Duration) {
if total_packets >= self.max_packets {
let threshold_update = if compute_time > Self::TIME_THRESHOLD {
PacketThresholdUpdate::Decrease
} else {
PacketThresholdUpdate::Increase
};
self.max_packets = threshold_update.calculate(self.max_packets);
}
}
pub fn should_drop(&self, total: usize) -> bool {
total >= self.max_packets
}
}
#[cfg(test)]
mod test {
use {super::DynamicPacketToProcessThreshold, std::time::Duration};
#[test]
fn test_dynamic_packet_threshold() {
let mut threshold = DynamicPacketToProcessThreshold::default();
assert_eq!(
threshold.max_packets,
DynamicPacketToProcessThreshold::DEFAULT_MAX_PACKETS
);
assert!(!threshold.should_drop(10));
assert!(threshold.should_drop(2000));
let old = threshold.max_packets;
// Increase
let total = 2000;
let compute_time = Duration::from_millis(500);
threshold.update(total, compute_time);
assert!(threshold.max_packets > old);
// Decrease
let compute_time = Duration::from_millis(2000);
threshold.update(total, compute_time);
assert_eq!(threshold.max_packets, old - 1); // due to rounding error, there is a difference of 1
}
}
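Worked example of the 90% adjustment above (derived from the constants in this file): starting from DEFAULT_MAX_PACKETS = 1024, a fast pass (compute time at or under the 1s TIME_THRESHOLD) raises the limit to 1024 * 100 / 90 = 1137, and a subsequent slow pass lowers it to 1137 * 90 / 100 = 1023, which is why the test accepts old - 1 rather than exactly 1024.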

View File

@@ -6,10 +6,12 @@ use {
replay_stage::SUPERMINORITY_THRESHOLD,
},
solana_ledger::blockstore_processor::{ConfirmationProgress, ConfirmationTiming},
solana_program_runtime::timings::ExecuteTimingType,
solana_runtime::{bank::Bank, bank_forks::BankForks, vote_account::VoteAccount},
solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey},
std::{
collections::{BTreeMap, HashMap, HashSet},
ops::Index,
sync::{Arc, RwLock},
time::Instant,
},
@@ -62,23 +64,60 @@ impl ReplaySlotStats {
),
("total_entries", num_entries as i64, i64),
("total_shreds", num_shreds as i64, i64),
("check_us", self.execute_timings.check_us, i64),
("load_us", self.execute_timings.load_us, i64),
("execute_us", self.execute_timings.execute_us, i64),
("store_us", self.execute_timings.store_us, i64),
(
"check_us",
*self
.execute_timings
.metrics
.index(ExecuteTimingType::CheckUs),
i64
),
(
"load_us",
*self
.execute_timings
.metrics
.index(ExecuteTimingType::LoadUs),
i64
),
(
"execute_us",
*self
.execute_timings
.metrics
.index(ExecuteTimingType::ExecuteUs),
i64
),
(
"store_us",
*self
.execute_timings
.metrics
.index(ExecuteTimingType::StoreUs),
i64
),
(
"update_stakes_cache_us",
self.execute_timings.update_stakes_cache_us,
*self
.execute_timings
.metrics
.index(ExecuteTimingType::UpdateStakesCacheUs),
i64
),
(
"total_batches_len",
self.execute_timings.total_batches_len,
*self
.execute_timings
.metrics
.index(ExecuteTimingType::TotalBatchesLen),
i64
),
(
"num_execute_batches",
self.execute_timings.num_execute_batches,
*self
.execute_timings
.metrics
.index(ExecuteTimingType::NumExecuteBatches),
i64
),
(
@@ -300,7 +339,7 @@ pub struct RetransmitInfo {
impl RetransmitInfo {
pub fn reached_retransmit_threshold(&self) -> bool {
let backoff = std::cmp::min(self.retry_iteration, RETRANSMIT_BACKOFF_CAP);
let backoff_duration_ms = 2_u64.pow(backoff) * RETRANSMIT_BASE_DELAY_MS;
let backoff_duration_ms = (1_u64 << backoff) * RETRANSMIT_BASE_DELAY_MS;
self.retry_time
.map(|time| time.elapsed().as_millis() > backoff_duration_ms.into())
.unwrap_or(true)

View File

@@ -3,7 +3,7 @@
//! how transactions are included in blocks, and optimize those blocks.
//!
use {
crate::banking_stage::BatchedTransactionCostDetails,
crate::banking_stage::BatchedTransactionDetails,
crossbeam_channel::{unbounded, Receiver, Sender},
solana_measure::measure::Measure,
solana_runtime::{
@@ -68,8 +68,8 @@ impl QosService {
let running_flag = Arc::new(AtomicBool::new(true));
let metrics = Arc::new(QosServiceMetrics::new(id));
let running_flag_clone = running_flag.clone();
let metrics_clone = metrics.clone();
let running_flag_clone = Arc::clone(&running_flag);
let metrics_clone = Arc::clone(&metrics);
let reporting_thread = Some(
Builder::new()
.name("solana-qos-service-metrics-repoting".to_string())
@@ -109,9 +109,11 @@ impl QosService {
.collect();
compute_cost_time.stop();
self.metrics
.stats
.compute_cost_time
.fetch_add(compute_cost_time.as_us(), Ordering::Relaxed);
self.metrics
.stats
.compute_cost_count
.fetch_add(txs_costs.len() as u64, Ordering::Relaxed);
txs_costs
@@ -134,7 +136,7 @@ impl QosService {
.map(|(tx, cost)| match cost_tracker.try_add(tx, cost) {
Ok(current_block_cost) => {
debug!("slot {:?}, transaction {:?}, cost {:?}, fit into current block, current block cost {}", bank.slot(), tx, cost, current_block_cost);
self.metrics.selected_txs_count.fetch_add(1, Ordering::Relaxed);
self.metrics.stats.selected_txs_count.fetch_add(1, Ordering::Relaxed);
num_included += 1;
Ok(())
},
@@ -142,20 +144,19 @@ impl QosService {
debug!("slot {:?}, transaction {:?}, cost {:?}, not fit into current block, '{:?}'", bank.slot(), tx, cost, e);
match e {
CostTrackerError::WouldExceedBlockMaxLimit => {
self.metrics.retried_txs_per_block_limit_count.fetch_add(1, Ordering::Relaxed);
Err(TransactionError::WouldExceedMaxBlockCostLimit)
}
CostTrackerError::WouldExceedVoteMaxLimit => {
self.metrics.retried_txs_per_vote_limit_count.fetch_add(1, Ordering::Relaxed);
Err(TransactionError::WouldExceedMaxVoteCostLimit)
}
CostTrackerError::WouldExceedAccountMaxLimit => {
self.metrics.retried_txs_per_account_limit_count.fetch_add(1, Ordering::Relaxed);
Err(TransactionError::WouldExceedMaxAccountCostLimit)
}
CostTrackerError::WouldExceedAccountDataMaxLimit => {
self.metrics.retried_txs_per_account_data_limit_count.fetch_add(1, Ordering::Relaxed);
Err(TransactionError::WouldExceedMaxAccountDataCostLimit)
CostTrackerError::WouldExceedAccountDataBlockLimit => {
Err(TransactionError::WouldExceedAccountDataBlockLimit)
}
CostTrackerError::WouldExceedAccountDataTotalLimit => {
Err(TransactionError::WouldExceedAccountDataTotalLimit)
}
}
}
@@ -163,6 +164,7 @@ impl QosService {
.collect();
cost_tracking_time.stop();
self.metrics
.stats
.cost_tracking_time
.fetch_add(cost_tracking_time.as_us(), Ordering::Relaxed);
(select_results, num_included)
@@ -177,30 +179,82 @@ impl QosService {
pub fn accumulate_estimated_transaction_costs(
&self,
cost_details: &BatchedTransactionCostDetails,
batched_transaction_details: &BatchedTransactionDetails,
) {
self.metrics.stats.estimated_signature_cu.fetch_add(
batched_transaction_details.costs.batched_signature_cost,
Ordering::Relaxed,
);
self.metrics.stats.estimated_write_lock_cu.fetch_add(
batched_transaction_details.costs.batched_write_lock_cost,
Ordering::Relaxed,
);
self.metrics.stats.estimated_data_bytes_cu.fetch_add(
batched_transaction_details.costs.batched_data_bytes_cost,
Ordering::Relaxed,
);
self.metrics.stats.estimated_execute_cu.fetch_add(
batched_transaction_details.costs.batched_execute_cost,
Ordering::Relaxed,
);
self.metrics
.estimated_signature_cu
.fetch_add(cost_details.batched_signature_cost, Ordering::Relaxed);
.errors
.retried_txs_per_block_limit_count
.fetch_add(
batched_transaction_details
.errors
.batched_retried_txs_per_block_limit_count,
Ordering::Relaxed,
);
self.metrics
.estimated_write_lock_cu
.fetch_add(cost_details.batched_write_lock_cost, Ordering::Relaxed);
.errors
.retried_txs_per_vote_limit_count
.fetch_add(
batched_transaction_details
.errors
.batched_retried_txs_per_vote_limit_count,
Ordering::Relaxed,
);
self.metrics
.estimated_data_bytes_cu
.fetch_add(cost_details.batched_data_bytes_cost, Ordering::Relaxed);
.errors
.retried_txs_per_account_limit_count
.fetch_add(
batched_transaction_details
.errors
.batched_retried_txs_per_account_limit_count,
Ordering::Relaxed,
);
self.metrics
.estimated_execute_cu
.fetch_add(cost_details.batched_execute_cost, Ordering::Relaxed);
.errors
.retried_txs_per_account_data_block_limit_count
.fetch_add(
batched_transaction_details
.errors
.batched_retried_txs_per_account_data_block_limit_count,
Ordering::Relaxed,
);
self.metrics
.errors
.dropped_txs_per_account_data_total_limit_count
.fetch_add(
batched_transaction_details
.errors
.batched_dropped_txs_per_account_data_total_limit_count,
Ordering::Relaxed,
);
}
pub fn accumulate_actual_execute_cu(&self, units: u64) {
self.metrics
.stats
.actual_execute_cu
.fetch_add(units, Ordering::Relaxed);
}
pub fn accumulate_actual_execute_time(&self, micro_sec: u64) {
self.metrics
.stats
.actual_execute_time_us
.fetch_add(micro_sec, Ordering::Relaxed);
}
@@ -223,63 +277,77 @@ impl QosService {
}
}
#[derive(Default)]
#[derive(Debug, Default)]
struct QosServiceMetrics {
// banking_stage creates one QosService instance per working thread, each uniquely
// identified by id. This field allows categorizing metrics for gossip votes, TPU votes
// and other transactions.
/// banking_stage creates one QosService instance per working thread, each uniquely
/// identified by id. This field allows categorizing metrics for gossip votes, TPU votes
/// and other transactions.
id: u32,
// aggregate metrics per slot
/// aggregate metrics per slot
slot: AtomicU64,
// accumulated time in micro-sec spent in computing transaction cost. It is the main performance
// overhead introduced by cost_model
stats: QosServiceMetricsStats,
errors: QosServiceMetricsErrors,
}
#[derive(Debug, Default)]
struct QosServiceMetricsStats {
/// accumulated time in micro-sec spent in computing transaction cost. It is the main performance
/// overhead introduced by cost_model
compute_cost_time: AtomicU64,
// total number of transactions in the reporting period to be computed for their cost. It is
// usually the number of sanitized transactions the leader receives.
/// total number of transactions in the reporting period to be computed for their cost. It is
/// usually the number of sanitized transactions the leader receives.
compute_cost_count: AtomicU64,
// accumulated time in micro-sec spent tracking each bank's cost. It is the second part of the
// overhead introduced
/// accumulated time in micro-sec spent tracking each bank's cost. It is the second part of the
/// overhead introduced
cost_tracking_time: AtomicU64,
// number of transactions to be included in blocks
/// number of transactions to be included in blocks
selected_txs_count: AtomicU64,
// number of transactions to be queued for retry due to their potential to breach block limit
retried_txs_per_block_limit_count: AtomicU64,
// number of transactions to be queued for retry due to their potential to breach vote limit
retried_txs_per_vote_limit_count: AtomicU64,
// number of transactions to be queued for retry due to their potential to breach writable
// account limit
retried_txs_per_account_limit_count: AtomicU64,
// number of transactions to be queued for retry due to their account data limits
retried_txs_per_account_data_limit_count: AtomicU64,
// accumulated estimated signature Compute Units to be packed into block
/// accumulated estimated signature Compute Units to be packed into block
estimated_signature_cu: AtomicU64,
// accumulated estimated write locks Compute Units to be packed into block
/// accumulated estimated write locks Compute Units to be packed into block
estimated_write_lock_cu: AtomicU64,
// accumulated estimated instruction data Compute Units to be packed into block
/// accumulated estimated instruction data Compute Units to be packed into block
estimated_data_bytes_cu: AtomicU64,
// accumulated estimated program Compute Units to be packed into block
/// accumulated estimated program Compute Units to be packed into block
estimated_execute_cu: AtomicU64,
// accumulated actual program Compute Units that have been packed into block
/// accumulated actual program Compute Units that have been packed into block
actual_execute_cu: AtomicU64,
// accumulated actual program execute micro-sec that have been packed into block
/// accumulated actual program execute micro-sec that have been packed into block
actual_execute_time_us: AtomicU64,
}
#[derive(Debug, Default)]
struct QosServiceMetricsErrors {
/// number of transactions to be queued for retry due to their potential to breach block limit
retried_txs_per_block_limit_count: AtomicU64,
/// number of transactions to be queued for retry due to their potential to breach vote limit
retried_txs_per_vote_limit_count: AtomicU64,
/// number of transactions to be queued for retry due to their potential to breach writable
/// account limit
retried_txs_per_account_limit_count: AtomicU64,
/// number of transactions to be queued for retry due to their potential to breach account data
/// block limits
retried_txs_per_account_data_block_limit_count: AtomicU64,
/// number of transactions to be dropped due to their potential to breach account data total
/// limits
dropped_txs_per_account_data_total_limit_count: AtomicU64,
}
impl QosServiceMetrics {
pub fn new(id: u32) -> Self {
QosServiceMetrics {
@@ -296,76 +364,96 @@ impl QosServiceMetrics {
("bank_slot", bank_slot as i64, i64),
(
"compute_cost_time",
self.compute_cost_time.swap(0, Ordering::Relaxed) as i64,
self.stats.compute_cost_time.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"compute_cost_count",
self.compute_cost_count.swap(0, Ordering::Relaxed) as i64,
self.stats.compute_cost_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"cost_tracking_time",
self.cost_tracking_time.swap(0, Ordering::Relaxed) as i64,
self.stats.cost_tracking_time.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"selected_txs_count",
self.selected_txs_count.swap(0, Ordering::Relaxed) as i64,
self.stats.selected_txs_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"estimated_signature_cu",
self.stats.estimated_signature_cu.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"estimated_write_lock_cu",
self.stats
.estimated_write_lock_cu
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"estimated_data_bytes_cu",
self.stats
.estimated_data_bytes_cu
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"estimated_execute_cu",
self.stats.estimated_execute_cu.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"actual_execute_cu",
self.stats.actual_execute_cu.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"actual_execute_time_us",
self.stats.actual_execute_time_us.swap(0, Ordering::Relaxed) as i64,
i64
),
);
datapoint_info!(
"qos-service-errors",
("id", self.id as i64, i64),
("bank_slot", bank_slot as i64, i64),
(
"retried_txs_per_block_limit_count",
self.retried_txs_per_block_limit_count
self.errors
.retried_txs_per_block_limit_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"retried_txs_per_vote_limit_count",
self.retried_txs_per_vote_limit_count
self.errors
.retried_txs_per_vote_limit_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"retried_txs_per_account_limit_count",
self.retried_txs_per_account_limit_count
self.errors
.retried_txs_per_account_limit_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"retried_txs_per_account_data_limit_count",
self.retried_txs_per_account_data_limit_count
"retried_txs_per_account_data_block_limit_count",
self.errors
.retried_txs_per_account_data_block_limit_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"estimated_signature_cu",
self.estimated_signature_cu.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"estimated_write_lock_cu",
self.estimated_write_lock_cu.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"estimated_data_bytes_cu",
self.estimated_data_bytes_cu.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"estimated_execute_cu",
self.estimated_execute_cu.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"actual_execute_cu",
self.actual_execute_cu.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"actual_execute_time_us",
self.actual_execute_time_us.swap(0, Ordering::Relaxed) as i64,
"dropped_txs_per_account_data_total_limit_count",
self.errors
.dropped_txs_per_account_data_total_limit_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
);

View File

@@ -642,7 +642,7 @@ impl RepairWeight {
}
// Heavier, smaller slots come first
fn sort_by_stake_weight_slot(slot_stake_voted: &mut Vec<(Slot, u64)>) {
fn sort_by_stake_weight_slot(slot_stake_voted: &mut [(Slot, u64)]) {
slot_stake_voted.sort_by(|(slot, stake_voted), (slot_, stake_voted_)| {
if stake_voted == stake_voted_ {
slot.cmp(slot_)
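For example, given the comment above, [(3, 10), (1, 10), (2, 50)] sorts to [(2, 50), (1, 10), (3, 10)]: larger stake first, and slots with equal stake ordered by ascending slot number.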

View File

@@ -27,9 +27,9 @@ use {
window_service::DuplicateSlotReceiver,
},
crossbeam_channel::{Receiver, RecvTimeoutError, Sender},
solana_accountsdb_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
solana_client::rpc_response::SlotUpdate,
solana_entry::entry::VerifyRecyclers,
solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
solana_gossip::cluster_info::ClusterInfo,
solana_ledger::{
block_error::BlockError,
@@ -735,11 +735,16 @@ impl ReplayStage {
restored_tower.adjust_lockouts_after_replay(root_bank.slot(), &slot_history)
}).
unwrap_or_else(|err| {
// It's a fatal error if the tower is not present. This is
// necessary to prevent the validator from violating
// lockouts for its new identity
error!("Failed to load tower for {}: {}", my_pubkey, err);
std::process::exit(1);
if err.is_file_missing() {
Tower::new_from_bankforks(
&bank_forks.read().unwrap(),
&my_pubkey,
&vote_account,
)
} else {
error!("Failed to load tower for {}: {}", my_pubkey, err);
std::process::exit(1);
}
});
// Ensure the validator can land votes with the new identity before
@@ -1593,10 +1598,7 @@ impl ReplayStage {
root_slot,
my_pubkey,
rpc_subscriptions,
NewBankOptions {
vote_only_bank,
simulation_bank: false,
},
NewBankOptions { vote_only_bank },
);
let tpu_bank = bank_forks.write().unwrap().insert(tpu_bank);
@@ -1616,7 +1618,10 @@ impl ReplayStage {
verify_recyclers: &VerifyRecyclers,
) -> result::Result<usize, BlockstoreProcessorError> {
let tx_count_before = bank_progress.replay_progress.num_txs;
let confirm_result = blockstore_processor::confirm_slot(
// All errors must lead to marking the slot as dead, otherwise,
// the `check_slot_agrees_with_cluster()` called by `replay_active_banks()`
// will break!
blockstore_processor::confirm_slot(
blockstore,
bank,
&mut bank_progress.replay_stats,
@@ -1628,16 +1633,9 @@ impl ReplayStage {
None,
verify_recyclers,
false,
);
)?;
let tx_count_after = bank_progress.replay_progress.num_txs;
let tx_count = tx_count_after - tx_count_before;
confirm_result.map_err(|err| {
// All errors must lead to marking the slot as dead, otherwise,
// the `check_slot_agrees_with_cluster()` called by `replay_active_banks()`
// will break!
err
})?;
Ok(tx_count)
}
@@ -1736,7 +1734,7 @@ impl ReplayStage {
replay_timing: &mut ReplayTiming,
voting_sender: &Sender<VoteOp>,
epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots,
bank_drop_sender: &Sender<Vec<Arc<Bank>>>,
drop_bank_sender: &Sender<Vec<Arc<Bank>>>,
wait_to_vote_slot: Option<Slot>,
) {
if bank.is_empty() {
@@ -1788,7 +1786,7 @@ impl ReplayStage {
has_new_vote_been_rooted,
vote_signatures,
epoch_slots_frozen_slots,
bank_drop_sender,
drop_bank_sender,
);
rpc_subscriptions.notify_roots(rooted_slots);
if let Some(sender) = bank_notification_sender {
@@ -2926,14 +2924,14 @@ impl ReplayStage {
has_new_vote_been_rooted: &mut bool,
voted_signatures: &mut Vec<Signature>,
epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots,
bank_drop_sender: &Sender<Vec<Arc<Bank>>>,
drop_bank_sender: &Sender<Vec<Arc<Bank>>>,
) {
let removed_banks = bank_forks.write().unwrap().set_root(
new_root,
accounts_background_request_sender,
highest_confirmed_root,
);
bank_drop_sender
drop_bank_sender
.send(removed_banks)
.unwrap_or_else(|err| warn!("bank drop failed: {:?}", err));
@@ -3136,7 +3134,7 @@ pub mod tests {
},
solana_rpc::{
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
rpc::create_test_transactions_and_populate_blockstore,
rpc::{create_test_transaction_entries, populate_blockstore_for_tests},
},
solana_runtime::{
accounts_background_service::AbsRequestSender,
@@ -3444,7 +3442,7 @@ pub mod tests {
.into_iter()
.map(|slot| (slot, Hash::default()))
.collect();
let (bank_drop_sender, _bank_drop_receiver) = unbounded();
let (drop_bank_sender, _drop_bank_receiver) = unbounded();
ReplayStage::handle_new_root(
root,
&bank_forks,
@@ -3458,7 +3456,7 @@ pub mod tests {
&mut true,
&mut Vec::new(),
&mut epoch_slots_frozen_slots,
&bank_drop_sender,
&drop_bank_sender,
);
assert_eq!(bank_forks.read().unwrap().root(), root);
assert_eq!(progress.len(), 1);
@@ -3525,7 +3523,7 @@ pub mod tests {
for i in 0..=root {
progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0));
}
let (bank_drop_sender, _bank_drop_receiver) = unbounded();
let (drop_bank_sender, _drop_bank_receiver) = unbounded();
ReplayStage::handle_new_root(
root,
&bank_forks,
@@ -3539,7 +3537,7 @@ pub mod tests {
&mut true,
&mut Vec::new(),
&mut EpochSlotsFrozenSlots::default(),
&bank_drop_sender,
&drop_bank_sender,
);
assert_eq!(bank_forks.read().unwrap().root(), root);
assert!(bank_forks.read().unwrap().get(confirmed_root).is_some());
@@ -4005,15 +4003,18 @@ pub mod tests {
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
let slot = bank1.slot();
let mut test_signatures_iter = create_test_transactions_and_populate_blockstore(
let (entries, test_signatures) = create_test_transaction_entries(
vec![&mint_keypair, &keypair1, &keypair2, &keypair3],
bank0.slot(),
bank1.clone(),
);
populate_blockstore_for_tests(
entries,
bank1,
blockstore.clone(),
Arc::new(AtomicU64::default()),
)
.into_iter();
);
let mut test_signatures_iter = test_signatures.into_iter();
let confirmed_block = blockstore.get_rooted_block(slot, false).unwrap();
let actual_tx_results: Vec<_> = confirmed_block
.transactions

View File

@@ -416,7 +416,7 @@ pub fn retransmitter(
.unwrap()
}
pub(crate) struct RetransmitStage {
pub struct RetransmitStage {
retransmit_thread_handle: JoinHandle<()>,
window_service: WindowService,
cluster_slots_service: ClusterSlotsService,
@@ -529,7 +529,7 @@ mod tests {
super::*,
solana_gossip::contact_info::ContactInfo,
solana_ledger::{
blockstore_processor::{process_blockstore, ProcessOptions},
blockstore_processor::{test_process_blockstore, ProcessOptions},
create_new_tmp_ledger,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
},
@@ -550,19 +550,9 @@ mod tests {
full_leader_cache: true,
..ProcessOptions::default()
};
let (accounts_package_sender, _) = unbounded();
let (bank_forks, cached_leader_schedule, _) = process_blockstore(
&genesis_config,
&blockstore,
Vec::new(),
opts,
None,
None,
accounts_package_sender,
None,
)
.unwrap();
let leader_schedule_cache = Arc::new(cached_leader_schedule);
let (bank_forks, leader_schedule_cache) =
test_process_blockstore(&genesis_config, &blockstore, opts);
let leader_schedule_cache = Arc::new(leader_schedule_cache);
let bank_forks = Arc::new(RwLock::new(bank_forks));
let mut me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);

View File

@@ -2,6 +2,7 @@ use {
crate::{
cluster_slots::ClusterSlots,
duplicate_repair_status::ANCESTOR_HASH_REPAIR_SAMPLE_SIZE,
packet_threshold::DynamicPacketToProcessThreshold,
repair_response,
repair_service::{OutstandingShredRepairs, RepairStats},
request_response::RequestResponse,
@@ -23,7 +24,6 @@ use {
blockstore::Blockstore,
shred::{Nonce, Shred, SIZE_OF_NONCE},
},
solana_measure::measure::Measure,
solana_metrics::inc_new_counter_debug,
solana_perf::packet::{limited_deserialize, PacketBatch, PacketBatchRecycler},
solana_sdk::{
@@ -322,7 +322,7 @@ impl ServeRepair {
requests_receiver: &PacketBatchReceiver,
response_sender: &PacketBatchSender,
stats: &mut ServeRepairStats,
max_packets: &mut usize,
packet_threshold: &mut DynamicPacketToProcessThreshold,
) -> Result<()> {
//TODO cache connections
let timeout = Duration::new(1, 0);
@@ -332,29 +332,21 @@ impl ServeRepair {
let mut dropped_packets = 0;
while let Ok(more) = requests_receiver.try_recv() {
total_packets += more.packets.len();
if total_packets < *max_packets {
// Drop the rest in the channel in case of dos
reqs_v.push(more);
} else {
if packet_threshold.should_drop(total_packets) {
dropped_packets += more.packets.len();
} else {
reqs_v.push(more);
}
}
stats.dropped_packets += dropped_packets;
stats.total_packets += total_packets;
let mut time = Measure::start("repair::handle_packets");
let timer = Instant::now();
for reqs in reqs_v {
Self::handle_packets(obj, recycler, blockstore, reqs, response_sender, stats);
}
time.stop();
if total_packets >= *max_packets {
if time.as_ms() > 1000 {
*max_packets = (*max_packets * 9) / 10;
} else {
*max_packets = (*max_packets * 10) / 9;
}
}
packet_threshold.update(total_packets, timer.elapsed());
Ok(())
}
@@ -403,7 +395,7 @@ impl ServeRepair {
.spawn(move || {
let mut last_print = Instant::now();
let mut stats = ServeRepairStats::default();
let mut max_packets = 1024;
let mut packet_threshold = DynamicPacketToProcessThreshold::default();
loop {
let result = Self::run_listen(
&me,
@@ -412,7 +404,7 @@ impl ServeRepair {
&requests_receiver,
&response_sender,
&mut stats,
&mut max_packets,
&mut packet_threshold,
);
match result {
Err(Error::RecvTimeout(_)) | Ok(_) => {}

View File

@@ -2,17 +2,17 @@
use {
crate::packet_hasher::PacketHasher,
crossbeam_channel::unbounded,
crossbeam_channel::{unbounded, Sender},
lru::LruCache,
solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats},
solana_perf::{
cuda_runtime::PinnedVec,
packet::{Packet, PacketBatchRecycler, PacketFlags},
packet::{Packet, PacketBatch, PacketBatchRecycler, PacketFlags},
recycler::Recycler,
},
solana_runtime::bank_forks::BankForks,
solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT},
solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender},
solana_streamer::streamer::{self, PacketBatchReceiver},
std::{
net::UdpSocket,
sync::{atomic::AtomicBool, Arc, RwLock},
@@ -65,7 +65,7 @@ impl ShredFetchStage {
// updates packets received on a channel and sends them on another channel
fn modify_packets<F>(
recvr: PacketBatchReceiver,
sendr: PacketBatchSender,
sendr: Sender<Vec<PacketBatch>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
modify: F,
@@ -125,7 +125,7 @@ impl ShredFetchStage {
stats = ShredFetchStats::default();
last_stats = Instant::now();
}
if sendr.send(packet_batch).is_err() {
if sendr.send(vec![packet_batch]).is_err() {
break;
}
}
@@ -134,7 +134,7 @@ impl ShredFetchStage {
fn packet_modifier<F>(
sockets: Vec<Arc<UdpSocket>>,
exit: &Arc<AtomicBool>,
sender: PacketBatchSender,
sender: Sender<Vec<PacketBatch>>,
recycler: Recycler<PinnedVec<Packet>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
@@ -170,7 +170,7 @@ impl ShredFetchStage {
sockets: Vec<Arc<UdpSocket>>,
forward_sockets: Vec<Arc<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
sender: &PacketBatchSender,
sender: &Sender<Vec<PacketBatch>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
exit: &Arc<AtomicBool>,
) -> Self {

View File

@@ -6,15 +6,17 @@
//! if perf-libs are available
use {
crate::sigverify,
crate::{find_packet_sender_stake_stage, sigverify},
core::time::Duration,
crossbeam_channel::{Receiver, RecvTimeoutError, SendError, Sender},
crossbeam_channel::{RecvTimeoutError, SendError, Sender},
itertools::Itertools,
solana_measure::measure::Measure,
solana_perf::packet::PacketBatch,
solana_perf::sigverify::{count_valid_packets, shrink_batches, Deduper},
solana_perf::{
packet::PacketBatch,
sigverify::{count_valid_packets, shrink_batches, Deduper},
},
solana_sdk::timing,
solana_streamer::streamer::{self, PacketBatchReceiver, StreamerError},
solana_streamer::streamer::{self, StreamerError},
std::{
thread::{self, Builder, JoinHandle},
time::Instant,
@@ -190,7 +192,7 @@ impl SigVerifier for DisabledSigVerifier {
impl SigVerifyStage {
#[allow(clippy::new_ret_no_self)]
pub fn new<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: Receiver<PacketBatch>,
packet_receiver: find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
verified_sender: Sender<Vec<PacketBatch>>,
verifier: T,
) -> Self {
@@ -225,12 +227,12 @@ impl SigVerifyStage {
fn verifier<T: SigVerifier>(
deduper: &Deduper,
recvr: &PacketBatchReceiver,
recvr: &find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
sendr: &Sender<Vec<PacketBatch>>,
verifier: &T,
stats: &mut SigVerifierStats,
) -> Result<()> {
let (mut batches, num_packets, recv_duration) = streamer::recv_packet_batches(recvr)?;
let (mut batches, num_packets, recv_duration) = streamer::recv_vec_packet_batches(recvr)?;
let batches_len = batches.len();
debug!(
@@ -310,7 +312,7 @@ impl SigVerifyStage {
}
fn verifier_service<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: PacketBatchReceiver,
packet_receiver: find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
verified_sender: Sender<Vec<PacketBatch>>,
verifier: &T,
) -> JoinHandle<()> {
@@ -356,7 +358,7 @@ impl SigVerifyStage {
}
fn verifier_services<T: SigVerifier + 'static + Send + Clone>(
packet_receiver: PacketBatchReceiver,
packet_receiver: find_packet_sender_stake_stage::FindPacketSenderStakeReceiver,
verified_sender: Sender<Vec<PacketBatch>>,
verifier: T,
) -> JoinHandle<()> {
@@ -370,12 +372,15 @@ impl SigVerifyStage {
#[cfg(test)]
mod tests {
use crate::sigverify::TransactionSigVerifier;
use crate::sigverify_stage::timing::duration_as_ms;
use crossbeam_channel::unbounded;
use solana_perf::packet::to_packet_batches;
use solana_perf::test_tx::test_tx;
use {super::*, solana_perf::packet::Packet};
use {
super::*,
crate::{sigverify::TransactionSigVerifier, sigverify_stage::timing::duration_as_ms},
crossbeam_channel::unbounded,
solana_perf::{
packet::{to_packet_batches, Packet},
test_tx::test_tx,
},
};
fn count_non_discard(packet_batches: &[PacketBatch]) -> usize {
packet_batches
@@ -440,7 +445,7 @@ mod tests {
for _ in 0..batches.len() {
if let Some(batch) = batches.pop() {
sent_len += batch.packets.len();
packet_s.send(batch).unwrap();
packet_s.send(vec![batch]).unwrap();
}
}
let mut received = 0;

View File

@@ -219,7 +219,7 @@ mod tests {
snapshot_archive_info::SnapshotArchiveInfo,
snapshot_package::{SnapshotPackage, SnapshotType},
snapshot_utils::{
self, ArchiveFormat, SnapshotVersion, SNAPSHOT_STATUS_CACHE_FILE_NAME,
self, ArchiveFormat, SnapshotVersion, SNAPSHOT_STATUS_CACHE_FILENAME,
},
},
solana_sdk::hash::Hash,
@@ -335,7 +335,7 @@ mod tests {
// the source dir for snapshots
let dummy_slot_deltas: Vec<BankSlotDelta> = vec![];
snapshot_utils::serialize_snapshot_data_file(
&snapshots_dir.join(SNAPSHOT_STATUS_CACHE_FILE_NAME),
&snapshots_dir.join(SNAPSHOT_STATUS_CACHE_FILENAME),
|stream| {
serialize_into(stream, &dummy_slot_deltas)?;
Ok(())

View File

@@ -117,12 +117,16 @@ pub fn verify_udp_stats_access() -> Result<(), String> {
}
impl SystemMonitorService {
pub fn new(exit: Arc<AtomicBool>, report_os_network_stats: bool) -> Self {
pub fn new(
exit: Arc<AtomicBool>,
report_os_memory_stats: bool,
report_os_network_stats: bool,
) -> Self {
info!("Starting SystemMonitorService");
let thread_hdl = Builder::new()
.name("system-monitor".to_string())
.spawn(move || {
Self::run(exit, report_os_network_stats);
Self::run(exit, report_os_memory_stats, report_os_network_stats);
})
.unwrap();
@@ -331,7 +335,7 @@ impl SystemMonitorService {
}
}
pub fn run(exit: Arc<AtomicBool>, report_os_network_stats: bool) {
pub fn run(exit: Arc<AtomicBool>, report_os_memory_stats: bool, report_os_network_stats: bool) {
let mut udp_stats = None;
let network_limits_timer = AtomicInterval::default();
let udp_timer = AtomicInterval::default();
@@ -349,7 +353,7 @@ impl SystemMonitorService {
Self::process_udp_stats(&mut udp_stats);
}
}
if mem_timer.should_update(SAMPLE_INTERVAL_MEM_MS) {
if report_os_memory_stats && mem_timer.should_update(SAMPLE_INTERVAL_MEM_MS) {
Self::report_mem_stats();
}
sleep(SLEEP_INTERVAL);

View File

@@ -1,11 +1,13 @@
use crate::consensus::{SwitchForkDecision, TowerError};
use solana_sdk::{
clock::Slot,
hash::Hash,
pubkey::Pubkey,
signature::{Signature, Signer},
use {
crate::consensus::{SwitchForkDecision, TowerError},
solana_sdk::{
clock::Slot,
hash::Hash,
pubkey::Pubkey,
signature::{Signature, Signer},
},
solana_vote_program::vote_state::{BlockTimestamp, Vote, VoteState},
};
use solana_vote_program::vote_state::{BlockTimestamp, Vote, VoteState};
#[frozen_abi(digest = "7phMrqmBo2D3rXPdhBj8CpjRvvmx9qgpcU4cDGkL3W9q")]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)]

View File

@@ -1,6 +1,8 @@
use {
crate::consensus::{Result, Tower, TowerError, TowerVersions},
crate::tower1_7_14::SavedTower1_7_14,
crate::{
consensus::{Result, Tower, TowerError, TowerVersions},
tower1_7_14::SavedTower1_7_14,
},
solana_sdk::{
pubkey::Pubkey,
signature::{Signature, Signer},

View File

@@ -10,6 +10,7 @@ use {
GossipVerifiedVoteHashSender, VerifiedVoteSender, VoteTracker,
},
fetch_stage::FetchStage,
find_packet_sender_stake_stage::FindPacketSenderStakeStage,
sigverify::TransactionSigVerifier,
sigverify_stage::SigVerifyStage,
},
@@ -36,6 +37,9 @@ use {
pub const DEFAULT_TPU_COALESCE_MS: u64 = 5;
// allow multiple connections for NAT and any open/close overlap
pub const MAX_QUIC_CONNECTIONS_PER_IP: usize = 8;
pub struct TpuSockets {
pub transactions: Vec<UdpSocket>,
pub transaction_forwards: Vec<UdpSocket>,
@@ -52,6 +56,8 @@ pub struct Tpu {
cluster_info_vote_listener: ClusterInfoVoteListener,
broadcast_stage: BroadcastStage,
tpu_quic_t: thread::JoinHandle<()>,
find_packet_sender_stake_stage: FindPacketSenderStakeStage,
vote_find_packet_sender_stake_stage: FindPacketSenderStakeStage,
}
impl Tpu {
@@ -100,6 +106,26 @@ impl Tpu {
poh_recorder,
tpu_coalesce_ms,
);
let (find_packet_sender_stake_sender, find_packet_sender_stake_receiver) = unbounded();
let find_packet_sender_stake_stage = FindPacketSenderStakeStage::new(
packet_receiver,
find_packet_sender_stake_sender,
bank_forks.clone(),
cluster_info.clone(),
);
let (vote_find_packet_sender_stake_sender, vote_find_packet_sender_stake_receiver) =
unbounded();
let vote_find_packet_sender_stake_stage = FindPacketSenderStakeStage::new(
vote_packet_receiver,
vote_find_packet_sender_stake_sender,
bank_forks.clone(),
cluster_info.clone(),
);
let (verified_sender, verified_receiver) = unbounded();
let tpu_quic_t = solana_streamer::quic::spawn_server(
@@ -108,12 +134,13 @@ impl Tpu {
cluster_info.my_contact_info().tpu.ip(),
packet_sender,
exit.clone(),
MAX_QUIC_CONNECTIONS_PER_IP,
)
.unwrap();
let sigverify_stage = {
let verifier = TransactionSigVerifier::default();
SigVerifyStage::new(packet_receiver, verified_sender, verifier)
SigVerifyStage::new(find_packet_sender_stake_receiver, verified_sender, verifier)
};
let (verified_tpu_vote_packets_sender, verified_tpu_vote_packets_receiver) = unbounded();
@@ -121,7 +148,7 @@ impl Tpu {
let vote_sigverify_stage = {
let verifier = TransactionSigVerifier::new_reject_non_vote();
SigVerifyStage::new(
vote_packet_receiver,
vote_find_packet_sender_stake_receiver,
verified_tpu_vote_packets_sender,
verifier,
)
@@ -175,6 +202,8 @@ impl Tpu {
cluster_info_vote_listener,
broadcast_stage,
tpu_quic_t,
find_packet_sender_stake_stage,
vote_find_packet_sender_stake_stage,
}
}
@@ -185,6 +214,8 @@ impl Tpu {
self.vote_sigverify_stage.join(),
self.cluster_info_vote_listener.join(),
self.banking_stage.join(),
self.find_packet_sender_stake_stage.join(),
self.vote_find_packet_sender_stake_stage.join(),
];
self.tpu_quic_t.join()?;
let broadcast_result = self.broadcast_stage.join();

View File

@@ -16,6 +16,7 @@ use {
cost_update_service::CostUpdateService,
drop_bank_service::DropBankService,
ledger_cleanup_service::LedgerCleanupService,
ledger_metric_report_service::LedgerMetricReportService,
replay_stage::{ReplayStage, ReplayStageConfig},
retransmit_stage::RetransmitStage,
rewards_recorder_service::RewardsRecorderSender,
@@ -26,7 +27,7 @@ use {
voting_service::VotingService,
},
crossbeam_channel::{unbounded, Receiver},
solana_accountsdb_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
solana_gossip::cluster_info::ClusterInfo,
solana_ledger::{
blockstore::Blockstore, blockstore_processor::TransactionStatusSender,
@@ -70,6 +71,7 @@ pub struct Tvu {
retransmit_stage: RetransmitStage,
replay_stage: ReplayStage,
ledger_cleanup_service: Option<LedgerCleanupService>,
ledger_metric_report_service: LedgerMetricReportService,
accounts_background_service: AccountsBackgroundService,
accounts_hash_verifier: AccountsHashVerifier,
cost_update_service: CostUpdateService,
@@ -213,14 +215,6 @@ impl Tvu {
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = unbounded();
let snapshot_interval_slots = {
if let Some(config) = bank_forks.read().unwrap().snapshot_config() {
config.full_snapshot_archive_interval_slots
} else {
std::u64::MAX
}
};
info!("snapshot_interval_slots: {}", snapshot_interval_slots);
let (snapshot_config, pending_snapshot_package) = snapshot_config_and_pending_package
.map(|(snapshot_config, pending_snapshot_package)| {
(Some(snapshot_config), Some(pending_snapshot_package))
@@ -357,6 +351,8 @@ impl Tvu {
)
});
let ledger_metric_report_service = LedgerMetricReportService::new(blockstore, exit);
let accounts_background_service = AccountsBackgroundService::new(
bank_forks.clone(),
exit,
@@ -373,6 +369,7 @@ impl Tvu {
retransmit_stage,
replay_stage,
ledger_cleanup_service,
ledger_metric_report_service,
accounts_background_service,
accounts_hash_verifier,
cost_update_service,
@@ -389,6 +386,7 @@ impl Tvu {
if self.ledger_cleanup_service.is_some() {
self.ledger_cleanup_service.unwrap().join()?;
}
self.ledger_metric_report_service.join()?;
self.accounts_background_service.join()?;
self.replay_stage.join()?;
self.accounts_hash_verifier.join()?;
@@ -417,8 +415,7 @@ pub mod tests {
solana_runtime::bank::Bank,
solana_sdk::signature::{Keypair, Signer},
solana_streamer::socket::SocketAddrSpace,
std::sync::atomic::AtomicU64,
std::sync::atomic::Ordering,
std::sync::atomic::{AtomicU64, Ordering},
};
#[ignore]

Some files were not shown because too many files have changed in this diff.