Compare commits

...

88 Commits

Author SHA1 Message Date
Trent Nelson
03b21f2e9d Bump version to v1.6.2 2021-03-30 00:06:01 -06:00
carllin
cc5565b17e Setup ReplayStage confirmation scaffolding for duplicate slots (#9698)
(cherry picked from commit 52703badfa)
2021-03-29 22:07:14 -06:00
mergify[bot]
50beef0b15 Allow incomplete features in frozen-abi (#16205)
(cherry picked from commit 9ba9d2a8ae)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-30 03:46:10 +00:00
mergify[bot]
06a54e1423 remove old code (#15988) (#15993)
(cherry picked from commit 9760fded2d)

Co-authored-by: Jeff Washington (jwash) <75863576+jeffwashington@users.noreply.github.com>
2021-03-30 00:50:27 +00:00
mergify[bot]
4d731ecd08 eliminate lock on record (#15929) (#16073)
* eliminate lock on record

* use same error as MaxHeightReached

* clippy

* review feedback

* refactor should_tick code

* pr feedback

(cherry picked from commit 57ba86c821)

Co-authored-by: Jeff Washington (jwash) <75863576+jeffwashington@users.noreply.github.com>
2021-03-30 00:46:13 +00:00
mergify[bot]
ee06789a66 sdk: Add try_from_slice_unchecked for Borsh (#16098) (#16158)
* sdk: Add try_from_slice_unchecked for Borsh

* Add tests

* Rename + clarify comment

* Rename back to unchecked

(cherry picked from commit cffa851e0f)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-03-29 23:15:34 +00:00
mergify[bot]
2dabe1d706 Add handling to close accounts to many-accounts bench (#16199) (#16201)
* gitignore farf

* Improve cli args

* Use derived addresses for accounts

* Add parameter to close every nth account created

(cherry picked from commit 1d145e1fc2)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-29 22:54:09 +00:00
Tyera Eulberg
3b1279a005 Future-aware enum name 2021-03-29 14:58:35 -06:00
mergify[bot]
5c9f85f28d Rpc: enable getConfirmedBlocks and getConfirmedBlocksWithLimit to return confirmed (not yet finalized) data (#16161) (#16198)
* Add commitment config capabilities

* Use rpc limit if no end_slot provided

* Limit to actually finalized blocks

* Support confirmed blocks in getConfirmedBlocks and getConfirmedBlocksWithLimit

* Update docs

* Add client plumbing

* Rename config enum

(cherry picked from commit 60ed8e2892)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-29 19:53:17 +00:00
mergify[bot]
e12dd46ef3 Derive PartialEq for StakeActivationState (#16196)
(cherry picked from commit 4e7bd45d4c)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-29 18:16:44 +00:00
mergify[bot]
c4fa03b478 Status cache improvements (#16174) (#16178)
(cherry picked from commit 5e5b63712b)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-03-29 10:11:16 -07:00
mergify[bot]
9fb749deb7 Print the rust version when building bpf programs (#16181) (#16183)
(cherry picked from commit abada56ba1)

Co-authored-by: Justin Starry <justin@solana.com>
2021-03-29 07:18:55 +00:00
mergify[bot]
bd48344de2 Fix handling of invoked ix accounts in program-test (#16170) (#16176)
(cherry picked from commit 27ab415ecc)

Co-authored-by: Justin Starry <justin@solana.com>
2021-03-29 01:55:11 +00:00
mergify[bot]
78e54f1d2c Implement mnemonic support for solana-keygen grind (solana-labs#9325) (#16108) (#16173)
* Implement mnemonic support for solana-keygen grind (solana-labs#9325)

* Updated to include feedback from review.

* Renaming as per review feedback

* Fixed an incorrectly transcribed underscore

* Properly re-use string constants.

(cherry picked from commit e50f598449)

Co-authored-by: bji <bryan@ischo.com>
2021-03-28 07:05:17 +00:00
mergify[bot]
76a6576976 sdk: Use u32::MAX from std to unbreak BPF builds (#16171) (#16172)
(cherry picked from commit aabe186e3f)

Co-authored-by: Justin Starry <justin@solana.com>
2021-03-27 17:05:53 +00:00
mergify[bot]
92ec1ae255 Switch to a single use (#16169)
(cherry picked from commit 16e4ccca13)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-27 06:58:31 +00:00
Michael Vines
0d203728cc Add RpcClient::get_stake_activation() 2021-03-26 22:33:06 -07:00
mergify[bot]
625773e5b8 Rpc: enable getConfirmedBlock and getConfirmedTransaction to return confirmed (not yet finalized) data (bp #16142) (#16160)
* Rpc: enable getConfirmedBlock and getConfirmedTransaction to return confirmed (not yet finalized) data (#16142)

* Add Blockstore block and tx apis that allow unrooted responses

* Add TransactionStatusMessage, and send on bank freeze; also refactor TransactionStatusSender

* Track highest slot with tx-status writes complete

* Rename and unpub fn

* Add commitment to GetConfirmed input configs

* Support confirmed blocks in getConfirmedBlock

* Support confirmed txs in getConfirmedTransaction

* Update sigs-for-addr2 comment

* Enable confirmed block in cli

* Enable confirmed transaction in cli

* Review comments

* Rename blockstore method

(cherry picked from commit 433f1ead1c)

# Conflicts:
#	core/src/replay_stage.rs

* Fix conflict

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-03-27 04:51:53 +00:00
mergify[bot]
a4cb1e45ae Only print skipped leader slot message when the node is actually leader (#16156) (#16164)
Also, check vote signature after the vote is signed

(cherry picked from commit 60b4771fc6)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-03-27 02:03:10 +00:00
mergify[bot]
8aded2778e Bump bpf-tools to version v1.4 (#16152) (#16154)
(cherry picked from commit 658ddd1c9c)

Co-authored-by: Dmitri Makarov <dmakarov@users.noreply.github.com>
2021-03-26 20:51:25 +00:00
mergify[bot]
d940c5b1a3 Skip leader slots until a vote lands (#15607) (#16147)
(cherry picked from commit b99ae8f334)

Co-authored-by: sakridge <sakridge@gmail.com>
2021-03-26 19:07:24 +00:00
Trent Nelson
1be045df94 sq: optimize
(cherry picked from commit 482c027d3b)
2021-03-25 21:31:52 -06:00
Trent Nelson
86191911c7 perf: use saturating/checked integer arithmetic
(cherry picked from commit 834fae684b)
2021-03-25 21:31:52 -06:00
mergify[bot]
8f852d8a6b makes test_pull_request_time_pruning smaller (#16128) (#16144)
(cherry picked from commit b041b55028)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-03-26 01:20:26 +00:00
Kristofer Peterson
68a439f8da Refactored ShortU16Visitor::visit_seq() to reject overflows, extra leading zeros and ensure one-to-one encoding. 2021-03-26 01:20:22 +00:00
Trent Nelson
e021832708 sdk: ShortU16 - rename variables for clarity
ShortU16's implementation embeds its usage as the length of a
ShortVec, confusingly referring to both a 'len' and a 'size'
at the same time.
2021-03-26 01:20:22 +00:00
Trent Nelson
87b11aa187 sdk: Add ShortU16 deser test 2021-03-26 01:20:22 +00:00
mergify[bot]
7475a6f444 makes turbine peer computation consistent between broadcast and retransmit (#14910) (#16143)
get_broadcast_peers is using tvu_peers:
https://github.com/solana-labs/solana/blob/84e52b606/core/src/broadcast_stage.rs#L362-L370
which is potentially inconsistent with retransmit_peers:
https://github.com/solana-labs/solana/blob/84e52b606/core/src/cluster_info.rs#L1332-L1345

Also, the leader does not include its own contact-info when broadcasting
shreds:
https://github.com/solana-labs/solana/blob/84e52b606/core/src/cluster_info.rs#L1324
but on the retransmit side, slot leader is removed only _after_ neighbors and
children are computed:
https://github.com/solana-labs/solana/blob/84e52b606/core/src/retransmit_stage.rs#L383-L384
So the turbine broadcast tree is different between the two stages.

This commit:
* Removes retransmit_peers. Broadcast and retransmit stages will use tvu_peers
  consistently.
* Retransmit stage removes slot leader _before_ computing children and
  neighbors.

(cherry picked from commit 570fd3f810)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-03-26 00:16:48 +00:00
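The fix above is easiest to see with a toy model: if the broadcast side never includes the leader in its peer list but the retransmit side strips the leader only after children are computed, every node's index shifts by one and the two stages derive different trees. The sketch below is only an illustration under that assumption; `turbine_children` is a hypothetical helper, not the actual cluster_info/retransmit_stage code.

```rust
// Toy model: a fanout-ary tree over an index-ordered peer list.
// `turbine_children` is a hypothetical stand-in, not a Solana API.
fn turbine_children(peers: &[u64], fanout: usize, my_index: usize) -> Vec<u64> {
    (1..=fanout)
        .map(|i| my_index * fanout + i)
        .filter_map(|child| peers.get(child).copied())
        .collect()
}

fn main() {
    let leader = 99u64;
    let broadcast_peers: Vec<u64> = (0..8).collect(); // leader never included here

    // Node 0's children as computed by the broadcast side.
    let broadcast_children = turbine_children(&broadcast_peers, 2, 0);

    // Pre-fix retransmit side: leader still present when children are computed,
    // so node 0 sits at a shifted index and derives different children.
    let mut retransmit_peers = broadcast_peers.clone();
    retransmit_peers.insert(0, leader);
    let shifted = turbine_children(&retransmit_peers, 2, 1);
    assert_ne!(broadcast_children, shifted); // the two trees diverge

    // Post-fix: remove the leader *before* computing children; the trees agree.
    retransmit_peers.retain(|p| *p != leader);
    assert_eq!(turbine_children(&retransmit_peers, 2, 0), broadcast_children);
}
```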
mergify[bot]
86ce650661 Add timeout for local cluster partition tests (bp #16123) (#16137)
* Add timeout for local cluster partition tests (#16123)

* Add timeout for local cluster partition tests

* fix optimistic conf test logs

* Bump instruction count assertions

(cherry picked from commit e817a6db00)

# Conflicts:
#	local-cluster/Cargo.toml

* Fix conflict

Co-authored-by: Justin Starry <justin@solana.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2021-03-25 22:56:05 +00:00
mergify[bot]
4dc5a53014 Show bpf-tools download progress (#16135)
(cherry picked from commit 07273bfa9e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-25 20:55:11 +00:00
mergify[bot]
5e35cf3536 program: Correct clamp in Message::signer_keys() (#16114)
(cherry picked from commit 8b3de72e2a)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-25 17:53:34 +00:00
Trent Nelson
e8a8d1efb3 clap-utils: Allow NullSigners outside sign-only mode
(cherry picked from commit 7f0ac6a67c)
2021-03-25 11:10:53 -06:00
mergify[bot]
defd9238fa Simplify account.rent_epoch handling for sysvar rent (bp #16049) (#16118)
* Simplify account.rent_epoch handling for sysvar rent (#16049)

* Add some code for special local testing

* Add comment to store_account_and_update_capitalization

* Simplify account.rent_epoch handling for sysvar rent

* Introduce *_for_test functions

* Add deprecation messages to existing api

(cherry picked from commit 6d5c6c17c5)

# Conflicts:
#	sdk/src/native_loader.rs

* Fix conflicts

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2021-03-25 17:17:43 +09:00
mergify[bot]
5f061dcea1 Support getBlockTime for unfinalized blocks (#16103) (#16110)
(cherry picked from commit a8ef29df27)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-25 04:18:00 +00:00
mergify[bot]
e6ee27a738 Add Exodus as Solana Mobile app option (#16100) (#16101)
* Add Exodus as Solana Mobile app option

* Update docs/src/wallet-guide/apps.md

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit ad47c63f27)

Co-authored-by: Davey <35187388+davidzelaya@users.noreply.github.com>
2021-03-24 21:34:58 +00:00
mergify[bot]
dd2d25d698 limits CrdsGossipPull::pull_request_time size (#15793) (#16097)
There is no pruning logic on CrdsGossipPull::pull_request_time
https://github.com/solana-labs/solana/blob/79ac1997d/core/src/crds_gossip_pull.rs#L172-L174
potentially allowing this to take too much memory.

Additionally, CrdsGossipPush::last_pushed_to is pruning recent push
timestamps:
https://github.com/solana-labs/solana/blob/79ac1997d/core/src/crds_gossip_push.rs#L275-L279
instead of the older ones.

Co-authored-by: Nathan Hawkins <utsl@utsl.org>
(cherry picked from commit a6c23648cb)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-03-24 20:05:04 +00:00
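To illustrate the pruning direction this commit calls out (evict the oldest request times, keep the most recent), here is a minimal sketch over a plain HashMap with an assumed capacity bound; it is not the CrdsGossip data structure, just the eviction policy in isolation.

```rust
use std::collections::HashMap;

/// Evict the oldest entries once the table exceeds `cap`.
/// Hypothetical stand-in for a pull_request_time-style table:
/// key = node id, value = last-request timestamp (ms).
fn prune_oldest(request_times: &mut HashMap<u64, u64>, cap: usize) {
    if request_times.len() <= cap {
        return;
    }
    let mut by_age: Vec<(u64, u64)> = request_times.iter().map(|(k, v)| (*k, *v)).collect();
    by_age.sort_by_key(|&(_, ts)| ts); // oldest first
    let excess = request_times.len() - cap;
    for (node, _) in by_age.into_iter().take(excess) {
        request_times.remove(&node); // drop the oldest, not the most recent
    }
}

fn main() {
    let mut table: HashMap<u64, u64> = (0..10u64).map(|i| (i, 1_000 + i)).collect();
    prune_oldest(&mut table, 8);
    assert_eq!(table.len(), 8);
    assert!(!table.contains_key(&0) && !table.contains_key(&1)); // two oldest evicted
}
```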
Dmitri Makarov
9096c3df02 Adjust BPF test programs instruction counts 2021-03-24 11:59:59 +01:00
Dmitri Makarov
9f94c2a9a0 Bump bpf-tools to version v1.3
This brings in the fix for increased compute budget that wasn't caught
when bpf-tools v1.2 were released.
2021-03-24 11:59:59 +01:00
Dmitri Makarov
34213da9f4 Bump bpf-tools to v1.2 and get rid of xargo 2021-03-24 11:59:59 +01:00
mergify[bot]
c3c4991c44 rpc: add getSlotLeaders method (#16057) (#16079)
(cherry picked from commit e7fd7d46cf)

Co-authored-by: Justin Starry <justin@solana.com>
2021-03-23 19:27:18 +00:00
mergify[bot]
9d37a33dcd buffers data shreds to make larger erasure coded sets (bp #15849) (#16074)
* buffers data shreds to make larger erasure coded sets (#15849)

Broadcast stage batches up to 8 entries:
https://github.com/solana-labs/solana/blob/79280b304/core/src/broadcast_stage/broadcast_utils.rs#L26-L29
which will be serialized into some number of shreds and chunked into FEC
sets of at most 32 shreds each:
https://github.com/solana-labs/solana/blob/79280b304/ledger/src/shred.rs#L576-L597
So depending on the size of entries, FEC sets can be small, which may
aggravate loss rate.
For example, 16 FEC sets of 2:2 data/code shreds each have a higher loss
rate than one 32:32 set.

This commit broadcasts data shreds immediately, but also buffers them
until it has a batch of 32 data shreds, at which point 32 coding shreds
are generated and broadcasted.

(cherry picked from commit 4f82b897bc)

# Conflicts:
#	ledger/src/shred.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-03-23 18:23:09 +00:00
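To put numbers on the loss-rate claim: a k:k FEC set is recoverable as long as at most k of its 2k shreds are lost. Assuming independent per-shred loss with probability p (a simplification for illustration, not a model of the real network), the failure odds of small versus large sets can be computed directly:

```rust
// Back-of-the-envelope arithmetic only; not Solana code.
fn binomial(n: u64, k: u64) -> f64 {
    (0..k).fold(1.0, |acc, i| acc * (n - i) as f64 / (i + 1) as f64)
}

/// Probability that a k-data/k-coding FEC set cannot be recovered, assuming
/// each of its 2k shreds is lost independently with probability `p`.
fn set_failure_prob(k: u64, p: f64) -> f64 {
    let n = 2 * k;
    (k + 1..=n)
        .map(|lost| binomial(n, lost) * p.powi(lost as i32) * (1.0 - p).powi((n - lost) as i32))
        .sum()
}

fn main() {
    let p = 0.10; // assumed 10% shred loss
    let small = set_failure_prob(2, p);  // one 2:2 set
    let large = set_failure_prob(32, p); // one 32:32 set
    // Chance that at least one of sixteen 2:2 sets is unrecoverable, vs. one 32:32 set.
    println!("16 x 2:2  -> {:.4}", 1.0 - (1.0 - small).powi(16)); // ~0.058
    println!("1 x 32:32 -> {:.1e}", large); // effectively zero (~1e-16)
}
```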
mergify[bot]
a04ca03fee renames is_last_in_fec_set back to is_last_data (#15848) (#16075)
https://github.com/solana-labs/solana/pull/10095
renamed is_last_data to is_last_in_fec_set. However, the code shows that
this is actually meant to indicate where the serialized data is
complete:
https://github.com/solana-labs/solana/blob/420174d3d/ledger/src/shred.rs#L599-L600
https://github.com/solana-labs/solana/blob/420174d3d/ledger/src/shred.rs#L229-L231

There are multiple FEC sets for each `&[Entry]` serialized and this flag
does not represent shreds last in FEC sets (only the very last one by
overlap). So the name is wrong and confusing.

(cherry picked from commit 3b85cbc504)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2021-03-23 16:59:47 +00:00
mergify[bot]
64ce4a6203 solana transfer now requires --allow-unfunded-recipient if the recipient doesn't exist (bp #16060) (#16067)
* transfer now requires --allow-unfunded-recipient if the recipient doesn't exist

(cherry picked from commit 3dff5c9dee)

* Avoid RPC in `--sign-only` mode

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit 6271665ba6)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-23 03:54:42 +00:00
mergify[bot]
7ac3c9ec76 Handle blockstore insert dup checks (#16051) (#16066)
(cherry picked from commit d76ad33597)

Co-authored-by: carllin <carl@solana.com>
2021-03-23 00:49:10 +00:00
mergify[bot]
7d91515e8d Make getStakeActivation response consistent for undelegated accounts (#16038) (#16040)
(cherry picked from commit 2ec24d438f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-19 22:07:30 +00:00
mergify[bot]
4e3f2c3d2d program-test: Fix warp and staking issue (#16002) (#16031)
Since program-test creates a test genesis and then adds fees and rent,
some of the genesis accounts get rent-collected after warping.  Most
notably, `StakeConfig` gets rent-collected, causing any stake operations
to fail after warp.  This fix creates genesis with the `Rent` and
`FeeRateGovernor` actually used by the bank.

(cherry picked from commit 6cc22e62d4)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2021-03-19 14:54:58 +00:00
mergify[bot]
8b67ba6d3d docs: SIGUSR1 killing wrapper shell scripts (#16009)
(cherry picked from commit 07dc522981)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-19 07:45:34 +00:00
mergify[bot]
c2ce68ab90 Sanitize instruction index when loading instruction from sysvar (#15942) (#16004)
(cherry picked from commit 4c5660ba7a)

Co-authored-by: Justin Starry <justin@solana.com>
2021-03-19 02:48:41 +00:00
mergify[bot]
fe87cb1cd1 Update to reqwest 0.11.2 (#16000)
(cherry picked from commit 02b81dd05d)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-18 22:12:01 +00:00
mergify[bot]
1c8f6a836a cli cleanup (#15990) (#15997)
(cherry picked from commit 067b390194)

Co-authored-by: Jack May <jack@solana.com>
2021-03-18 20:03:04 +00:00
mergify[bot]
3d5ff7968e rpc: Add config options limiting getConfirmedBlock response data (#15970) (#15995)
* Add new confirmed block struct

* Add RpcConfirmedBlockConfig options

* Configure block response based on new options

* Add client api, use in cli fetch_epoch_rewards

* Update docs

* Apply review suggestions

(cherry picked from commit aa54c468ea)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-18 19:33:01 +00:00
Tyera Eulberg
d6160f7744 Avoid panic when validator doesn't have performance samples (#15976)
(cherry picked from commit ba33c9e18e)
2021-03-18 08:28:31 -07:00
mergify[bot]
5e9ce99abf remote-wallet: Expose Ledger app settings (#15978)
(cherry picked from commit 2dabcac0da)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-18 09:13:24 +00:00
mergify[bot]
ebd6fe7acb Avoid a panic when --slots-per-epoch is less than MINIMUM_SLOTS_PER_EPOCH (#15975)
(cherry picked from commit 4ab98fff02)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-18 07:17:50 +00:00
mergify[bot]
9e91a2c2fd Add Close instruction and tooling to upgradeable loader (#15887) (#15972)
(cherry picked from commit 7f500d610c)

Co-authored-by: Jack May <jack@solana.com>
2021-03-18 06:02:57 +00:00
Michael Vines
899f57962a Add --slots-per-epoch argument
(cherry picked from commit 04c99cf7ea)
2021-03-17 17:25:51 -07:00
Michael Vines
3176b00e57 Add --slots-per-epoch validator
(cherry picked from commit c06ff47a90)
2021-03-17 17:25:51 -07:00
Jeff Washington (jwash)
08b9da8397 drop poh lock after record (#15930)
(cherry picked from commit 5460fb10a2)
2021-03-17 17:24:53 -07:00
mergify[bot]
2bc21ecba2 Allow unbounded wallclock processing time in tests (#15961) (#15966)
(cherry picked from commit f548a04fae)

Co-authored-by: carllin <carl@solana.com>
2021-03-18 00:22:06 +00:00
Jeff Washington (jwash)
5b2a65fab3 add metrics for tick producer and poh_recorder (#15931)
(cherry picked from commit 40997d0aef)
2021-03-17 16:36:50 -07:00
mergify[bot]
f5d56eabf3 Build full SPL in CI (bp #15886) (#15964)
* Build full SPL in CI

(cherry picked from commit 82269f1351)

* Avoid changing signature of ProgramTest::add_account

(cherry picked from commit 03180b502d)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-17 22:46:55 +00:00
Michael Vines
af45efb62c Notify the user when the --mint, --bpf-program, or --clone arguments are ignored
(cherry picked from commit 59c19d9fbf)
2021-03-17 14:10:14 -07:00
mergify[bot]
f528cda832 Ignore flaky test_banking_stage_entries_only and test_banking_stage_entryfication (#15959)
(cherry picked from commit 8a9b51952e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-17 20:34:30 +00:00
mergify[bot]
eeef9f4e59 Separate snapshot location (bp #15840) (#15956)
* Add option for separate snapshot location

(cherry picked from commit 6126878f509c69e23480a5ec22b3271e2b16e072)
(cherry picked from commit 0209d334bd)

* Apply suggestions from code review

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit cfb01e26dd)

* add missed suggestion

(cherry picked from commit a43b3674c7)

* Revert to snapshots

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 0b42379ed7)

* Revert to snapshots 2

(cherry picked from commit 20b53eb4b4)

* Revert to removing only tmp-

(cherry picked from commit a5d144b00f)

Co-authored-by: DimAn <diman@diman.io>
Co-authored-by: DimAn <andiman7000@gmail.com>
2021-03-17 20:25:18 +00:00
Michael Vines
32124b59e9 Download snapshot files with a tmp- prefix so they'll automatically be cleaned up if interrupted
(cherry picked from commit 58b980f9cd)
2021-03-17 10:18:18 -07:00
Michael Vines
aa9772f9c0 Replace solana-program-test when building example-helloworld 2021-03-17 09:08:41 -07:00
mergify[bot]
5f183bd773 Add helper for paring down signers to those required by a tx message (bp #15899) (#15938)
* sdk: Add accessor for signer pubkeys of a tx message

(cherry picked from commit bf33ce8906)

* clap-utils: Add helper to `CliSignerInfo` for getting signers for a message

(cherry picked from commit 4e99f1e634)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-17 07:48:47 +00:00
mergify[bot]
2238e5001b solana-install init can now select a pre-release from Github (#15936)
(cherry picked from commit d9176c1903)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-17 04:31:55 +00:00
mergify[bot]
79fa7ef55c CLI: Support dumping the TX message in sign-only mode (#15933)
(cherry picked from commit 672e9c640f)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-17 04:13:21 +00:00
mergify[bot]
07df827411 Bump tokio to 1.1 (#15926) (#15928)
(cherry picked from commit 654449ce91)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-16 23:29:55 +00:00
mergify[bot]
a259ff0e72 Wallclock BankingStage Throttle (#15731) (#15890)
(cherry picked from commit c1ba265dd9)

Co-authored-by: carllin <carl@solana.com>
2021-03-16 21:12:59 +00:00
mergify[bot]
d7d3e767e7 fix: compute pre/post token balances on all accounts if token program present (#15900) (#15923)
* fix: compute pre/post token balances on all accounts if token program present

* fix: skip token program in balance query

* fix: prevent program ids from being collected

(cherry picked from commit 61112d4826)

Co-authored-by: Josh <josh.hundley@gmail.com>
2021-03-16 18:23:29 +00:00
mergify[bot]
6e8aa9af17 nit: fix spelling (#15908) (#15911)
(cherry picked from commit 5760cf0f41)

# Conflicts:
#	sdk/src/feature_set.rs

Co-authored-by: Jack May <jack@solana.com>
2021-03-16 10:58:39 -07:00
mergify[bot]
0236de7bc8 Encourage use of the default --ledger location (#15921)
(cherry picked from commit 1c261d293f)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-16 16:58:14 +00:00
mergify[bot]
899bd1572a Show flags for accounts in tx by solana confirm (#15804) (#15906)
* Show flags for accounts in tx by solana confirm

* Address review comments

* Improve comment a bit

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Further apply review suggestions

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit 74aa32175b)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2021-03-16 10:43:06 +00:00
mergify[bot]
97ec4cd44e Cli: better estimate of epoch time elapsed/remaining (#15893) (#15918)
* Add rpc_client api for getRecentPerformanceSamples

* Prep fn for variable avg slot time

* Use recent-perf-samples to more-accurately estimate epoch completed times

* Spell out average

(cherry picked from commit 3726358f51)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-16 09:58:51 +00:00
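The estimate reduces to simple arithmetic: derive an average slot time from the recent performance samples, then multiply by the slots left in the epoch. A hedged sketch follows; `PerfSample` is a hypothetical struct standing in for the RPC sample type, and the 400 ms fallback is the nominal slot time used here only as a default.

```rust
/// Hypothetical performance sample: slots produced over a sampling window.
struct PerfSample {
    num_slots: u64,
    sample_period_secs: u64,
}

/// Average slot time in milliseconds over the samples, falling back to a
/// nominal 400 ms when no samples are available.
fn average_slot_time_ms(samples: &[PerfSample]) -> u64 {
    let (slots, secs) = samples
        .iter()
        .fold((0u64, 0u64), |(s, t), x| (s + x.num_slots, t + x.sample_period_secs));
    if slots == 0 {
        400
    } else {
        secs * 1_000 / slots
    }
}

fn main() {
    let samples = vec![
        PerfSample { num_slots: 150, sample_period_secs: 60 },
        PerfSample { num_slots: 140, sample_period_secs: 60 },
    ];
    let avg_ms = average_slot_time_ms(&samples);
    let (slot_index, slots_in_epoch) = (200_000u64, 432_000u64);
    let remaining_ms = (slots_in_epoch - slot_index) * avg_ms;
    println!("~{} ms per slot, ~{} min remaining in the epoch", avg_ms, remaining_ms / 60_000);
}
```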
mergify[bot]
5500970a7e Add cargo-bpf-test --no-run flag, matching cargo-test (#15916)
(cherry picked from commit eb19e11688)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-16 09:45:35 +00:00
Michael Vines
caea04d8d5 Pin solana crate versions to prevent downstream users from accidentally mixing crate versions 2021-03-16 08:41:28 +00:00
Michael Vines
b1a90c3580 =1.6.1 2021-03-16 08:41:28 +00:00
mergify[bot]
5bd4e38345 Charge compute budget for bytes passed via cpi (#15874) (#15905)
(cherry picked from commit ad9901d7c6)

Co-authored-by: Jack May <jack@solana.com>
2021-03-16 07:57:32 +00:00
mergify[bot]
fddba08571 Improve Instruction::new deprecation warning (#15896)
(cherry picked from commit 8567b41d5f)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-16 05:18:31 +00:00
Michael Vines
87963764fa Export tokio for program-test clients
(cherry picked from commit 430ed6d774)
2021-03-15 22:14:17 -07:00
mergify[bot]
b691a159dd increment_cargo_version.sh tune ups (bp #15880) (#15892)
* Disallow version bump with dirty working tree

(cherry picked from commit 853e735edf)

* Ignore `not_paths` for `*.md` files when bumping version

(cherry picked from commit 510760d81b)

* Also ignore `*/node_modules/*` paths when bumping version

(cherry picked from commit 2bf46b789f)

Co-authored-by: Trent Nelson <trent@solana.com>
2021-03-16 02:07:46 +00:00
mergify[bot]
5af1d48be8 Display actual account length (#15875) (#15884)
(cherry picked from commit 60e5fd11c9)

Co-authored-by: Jack May <jack@solana.com>
2021-03-16 01:01:25 +00:00
mergify[bot]
3b3ec3313f Fix real_number_string_trimmed zero-decimal behavior (#15873) (#15877)
* Add failing test

* Don't strip zeroes from zero-decimal amounts

* Add zero-case test

(cherry picked from commit c40bd5f394)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2021-03-15 21:33:01 +00:00
Michael Vines
be00246fb5 Bump version to v1.6.1 2021-03-15 14:47:58 -06:00
Michael Vines
1d80ba9edf Update cargo lock files on version bump 2021-03-15 14:47:58 -06:00
mergify[bot]
4bcf976ecd Fix delinquent stake display (#15839)
(cherry picked from commit eab182188a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2021-03-13 20:27:25 +00:00
302 changed files with 9522 additions and 3357 deletions

Cargo.lock (generated): 488 changed lines

File diff suppressed because it is too large

@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.6.0"
version = "1.6.2"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ lazy_static = "1.4.0"
serde = "1.0.122"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-stake-program = { path = "../programs/stake", version = "1.6.0" }
solana-vote-program = { path = "../programs/vote", version = "1.6.0" }
solana-config-program = { path = "../programs/config", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-stake-program = { path = "../programs/stake", version = "=1.6.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.6.2" }
spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.5.1"


@@ -214,13 +214,13 @@ pub struct UiStakeHistoryEntry {
mod test {
use super::*;
use solana_sdk::{
account::create_account, fee_calculator::FeeCalculator, hash::Hash,
account::create_account_for_test, fee_calculator::FeeCalculator, hash::Hash,
sysvar::recent_blockhashes::IterItem,
};
#[test]
fn test_parse_sysvars() {
let clock_sysvar = create_account(&Clock::default(), 1);
let clock_sysvar = create_account_for_test(&Clock::default());
assert_eq!(
parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
SysvarAccountType::Clock(UiClock::default()),
@@ -233,13 +233,13 @@ mod test {
first_normal_epoch: 1,
first_normal_slot: 12,
};
let epoch_schedule_sysvar = create_account(&epoch_schedule, 1);
let epoch_schedule_sysvar = create_account_for_test(&epoch_schedule);
assert_eq!(
parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
SysvarAccountType::EpochSchedule(epoch_schedule),
);
let fees_sysvar = create_account(&Fees::default(), 1);
let fees_sysvar = create_account_for_test(&Fees::default());
assert_eq!(
parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
SysvarAccountType::Fees(UiFees::default()),
@@ -252,7 +252,7 @@ mod test {
let recent_blockhashes: RecentBlockhashes = vec![IterItem(0, &hash, &fee_calculator)]
.into_iter()
.collect();
let recent_blockhashes_sysvar = create_account(&recent_blockhashes, 1);
let recent_blockhashes_sysvar = create_account_for_test(&recent_blockhashes);
assert_eq!(
parse_sysvar(
&recent_blockhashes_sysvar.data,
@@ -270,13 +270,13 @@ mod test {
exemption_threshold: 2.0,
burn_percent: 5,
};
let rent_sysvar = create_account(&rent, 1);
let rent_sysvar = create_account_for_test(&rent);
assert_eq!(
parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
SysvarAccountType::Rent(rent.into()),
);
let rewards_sysvar = create_account(&Rewards::default(), 1);
let rewards_sysvar = create_account_for_test(&Rewards::default());
assert_eq!(
parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
SysvarAccountType::Rewards(UiRewards::default()),
@@ -284,7 +284,7 @@ mod test {
let mut slot_hashes = SlotHashes::default();
slot_hashes.add(1, hash);
let slot_hashes_sysvar = create_account(&slot_hashes, 1);
let slot_hashes_sysvar = create_account_for_test(&slot_hashes);
assert_eq!(
parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
@@ -295,7 +295,7 @@ mod test {
let mut slot_history = SlotHistory::default();
slot_history.add(42);
let slot_history_sysvar = create_account(&slot_history, 1);
let slot_history_sysvar = create_account_for_test(&slot_history);
assert_eq!(
parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
SysvarAccountType::SlotHistory(UiSlotHistory {
@@ -311,7 +311,7 @@ mod test {
deactivating: 3,
};
stake_history.add(1, stake_history_entry.clone());
let stake_history_sysvar = create_account(&stake_history, 1);
let stake_history_sysvar = create_account_for_test(&stake_history);
assert_eq!(
parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {


@@ -172,10 +172,12 @@ pub fn real_number_string(amount: u64, decimals: u8) -> StringDecimals {
}
pub fn real_number_string_trimmed(amount: u64, decimals: u8) -> StringDecimals {
let s = real_number_string(amount, decimals);
let zeros_trimmed = s.trim_end_matches('0');
let decimal_trimmed = zeros_trimmed.trim_end_matches('.');
decimal_trimmed.to_string()
let mut s = real_number_string(amount, decimals);
if decimals > 0 {
let zeros_trimmed = s.trim_end_matches('0');
s = zeros_trimmed.trim_end_matches('.').to_string();
}
s
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@@ -363,6 +365,14 @@ mod test {
real_number_string_trimmed(1, 0)
);
assert_eq!(token_amount.ui_amount, Some(1.0));
assert_eq!(&real_number_string(10, 0), "10");
assert_eq!(&real_number_string_trimmed(10, 0), "10");
let token_amount = token_amount_to_ui_amount(10, 0);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(10, 0)
);
assert_eq!(token_amount.ui_amount, Some(10.0));
assert_eq!(&real_number_string(1, 9), "0.000000001");
assert_eq!(&real_number_string_trimmed(1, 9), "0.000000001");
let token_amount = token_amount_to_ui_amount(1, 9);
@@ -402,4 +412,32 @@ mod test {
);
assert_eq!(token_amount.ui_amount, None);
}
#[test]
fn test_ui_token_amount_real_string_zero() {
assert_eq!(&real_number_string(0, 0), "0");
assert_eq!(&real_number_string_trimmed(0, 0), "0");
let token_amount = token_amount_to_ui_amount(0, 0);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(0, 0)
);
assert_eq!(token_amount.ui_amount, Some(0.0));
assert_eq!(&real_number_string(0, 9), "0.000000000");
assert_eq!(&real_number_string_trimmed(0, 9), "0");
let token_amount = token_amount_to_ui_amount(0, 9);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(0, 9)
);
assert_eq!(token_amount.ui_amount, Some(0.0));
assert_eq!(&real_number_string(0, 25), "0.0000000000000000000000000");
assert_eq!(&real_number_string_trimmed(0, 25), "0");
let token_amount = token_amount_to_ui_amount(0, 20);
assert_eq!(
token_amount.ui_amount_string,
real_number_string_trimmed(0, 20)
);
assert_eq!(token_amount.ui_amount, None);
}
}


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.6.0"
version = "1.6.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
[dependencies]
log = "0.4.11"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "1.6.0" }
solana-runtime = { path = "../runtime", version = "1.6.0" }
solana-measure = { path = "../measure", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-logger = { path = "../logger", version = "=1.6.2" }
solana-runtime = { path = "../runtime", version = "=1.6.2" }
solana-measure = { path = "../measure", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"

accounts-cluster-bench/.gitignore (vendored, new file): 1 addition

@@ -0,0 +1 @@
/farf/


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-cluster-bench"
version = "1.6.0"
version = "1.6.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,22 +13,22 @@ clap = "2.33.1"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.4.1"
solana-account-decoder = { path = "../account-decoder", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-client = { path = "../client", version = "1.6.0" }
solana-core = { path = "../core", version = "1.6.0" }
solana-measure = { path = "../measure", version = "1.6.0" }
solana-logger = { path = "../logger", version = "1.6.0" }
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
solana-faucet = { path = "../faucet", version = "1.6.0" }
solana-runtime = { path = "../runtime", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.6.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-client = { path = "../client", version = "=1.6.2" }
solana-core = { path = "../core", version = "=1.6.2" }
solana-measure = { path = "../measure", version = "=1.6.2" }
solana-logger = { path = "../logger", version = "=1.6.2" }
solana-net-utils = { path = "../net-utils", version = "=1.6.2" }
solana-faucet = { path = "../faucet", version = "=1.6.2" }
solana-runtime = { path = "../runtime", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.6.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -21,7 +21,6 @@ use solana_sdk::{
transaction::Transaction,
};
use solana_transaction_status::parse_token::spl_token_v2_0_instruction;
use spl_token_v2_0::solana_program::pubkey::Pubkey as SplPubkey;
use std::{
net::SocketAddr,
process::exit,
@@ -251,28 +250,38 @@ impl TransactionExecutor {
}
}
fn make_message(
struct SeedTracker {
max_created: Arc<AtomicU64>,
max_closed: Arc<AtomicU64>,
}
fn make_create_message(
keypair: &Keypair,
base_keypair: &Keypair,
max_created_seed: Arc<AtomicU64>,
num_instructions: usize,
balance: u64,
maybe_space: Option<u64>,
mint: Option<Pubkey>,
) -> (Message, Vec<Keypair>) {
) -> Message {
let space = maybe_space.unwrap_or_else(|| thread_rng().gen_range(0, 1000));
let (instructions, new_keypairs): (Vec<_>, Vec<_>) = (0..num_instructions)
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.map(|_| {
let new_keypair = Keypair::new();
let program_id = if mint.is_some() {
inline_spl_token_v2_0::id()
} else {
system_program::id()
};
let mut instructions = vec![system_instruction::create_account(
let seed = max_created_seed.fetch_add(1, Ordering::Relaxed).to_string();
let to_pubkey =
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
let mut instructions = vec![system_instruction::create_account_with_seed(
&keypair.pubkey(),
&new_keypair.pubkey(),
&to_pubkey,
&base_keypair.pubkey(),
&seed,
balance,
space,
&program_id,
@@ -281,25 +290,69 @@ fn make_message(
instructions.push(spl_token_v2_0_instruction(
spl_token_v2_0::instruction::initialize_account(
&spl_token_v2_0::id(),
&spl_token_v2_0_pubkey(&new_keypair.pubkey()),
&spl_token_v2_0_pubkey(&to_pubkey),
&spl_token_v2_0_pubkey(&mint_address),
&SplPubkey::new_unique(),
&spl_token_v2_0_pubkey(&base_keypair.pubkey()),
)
.unwrap(),
));
}
(instructions, new_keypair)
instructions
})
.unzip();
.collect();
let instructions: Vec<_> = instructions.into_iter().flatten().collect();
(
Message::new(&instructions, Some(&keypair.pubkey())),
new_keypairs,
)
Message::new(&instructions, Some(&keypair.pubkey()))
}
fn make_close_message(
keypair: &Keypair,
base_keypair: &Keypair,
max_closed_seed: Arc<AtomicU64>,
num_instructions: usize,
balance: u64,
spl_token: bool,
) -> Message {
let instructions: Vec<_> = (0..num_instructions)
.into_iter()
.map(|_| {
let program_id = if spl_token {
inline_spl_token_v2_0::id()
} else {
system_program::id()
};
let seed = max_closed_seed.fetch_add(1, Ordering::Relaxed).to_string();
let address =
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
if spl_token {
spl_token_v2_0_instruction(
spl_token_v2_0::instruction::close_account(
&spl_token_v2_0::id(),
&spl_token_v2_0_pubkey(&address),
&spl_token_v2_0_pubkey(&keypair.pubkey()),
&spl_token_v2_0_pubkey(&base_keypair.pubkey()),
&[],
)
.unwrap(),
)
} else {
system_instruction::transfer_with_seed(
&address,
&base_keypair.pubkey(),
seed,
&program_id,
&keypair.pubkey(),
balance,
)
}
})
.collect();
Message::new(&instructions, Some(&keypair.pubkey()))
}
#[allow(clippy::too_many_arguments)]
fn run_accounts_bench(
entrypoint_addr: SocketAddr,
faucet_addr: SocketAddr,
@@ -307,6 +360,7 @@ fn run_accounts_bench(
iterations: usize,
maybe_space: Option<u64>,
batch_size: usize,
close_nth: u64,
maybe_lamports: Option<u64>,
num_instructions: usize,
mint: Option<Pubkey>,
@@ -322,7 +376,8 @@ fn run_accounts_bench(
let mut count = 0;
let mut recent_blockhash = client.get_recent_blockhash().expect("blockhash");
let mut tx_sent_count = 0;
let mut total_account_count = 0;
let mut total_accounts_created = 0;
let mut total_accounts_closed = 0;
let mut balance = client.get_balance(&keypair.pubkey()).unwrap_or(0);
let mut last_balance = Instant::now();
@@ -334,6 +389,12 @@ fn run_accounts_bench(
.expect("min balance")
});
let base_keypair = Keypair::new();
let seed_tracker = SeedTracker {
max_created: Arc::new(AtomicU64::default()),
max_closed: Arc::new(AtomicU64::default()),
};
info!("Starting balance: {}", balance);
let executor = TransactionExecutor::new(entrypoint_addr);
@@ -344,8 +405,15 @@ fn run_accounts_bench(
last_blockhash = Instant::now();
}
let (message, _keypairs) =
make_message(keypair, num_instructions, min_balance, maybe_space, mint);
let message = make_create_message(
keypair,
&base_keypair,
seed_tracker.max_created.clone(),
num_instructions,
min_balance,
maybe_space,
mint,
);
let fee = recent_blockhash.1.calculate_fee(&message);
let lamports = min_balance + fee;
@@ -370,27 +438,55 @@ fn run_accounts_bench(
if sigs_len < batch_size {
let num_to_create = batch_size - sigs_len;
info!("creating {} new", num_to_create);
let (txs, _new_keypairs): (Vec<_>, Vec<_>) = (0..num_to_create)
let txs: Vec<_> = (0..num_to_create)
.into_par_iter()
.map(|_| {
let (message, new_keypairs) =
make_message(keypair, num_instructions, min_balance, maybe_space, mint);
let signers: Vec<&Keypair> = new_keypairs
.iter()
.chain(std::iter::once(keypair))
.collect();
(
Transaction::new(&signers, message, recent_blockhash.0),
new_keypairs,
)
let message = make_create_message(
keypair,
&base_keypair,
seed_tracker.max_created.clone(),
num_instructions,
min_balance,
maybe_space,
mint,
);
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
Transaction::new(&signers, message, recent_blockhash.0)
})
.unzip();
.collect();
balance = balance.saturating_sub(lamports * txs.len() as u64);
info!("txs: {}", txs.len());
let new_ids = executor.push_transactions(txs);
info!("ids: {}", new_ids.len());
tx_sent_count += new_ids.len();
total_account_count += num_instructions * new_ids.len();
total_accounts_created += num_instructions * new_ids.len();
if close_nth > 0 {
let expected_closed = total_accounts_created as u64 / close_nth;
if expected_closed > total_accounts_closed {
let txs: Vec<_> = (0..expected_closed - total_accounts_closed)
.into_par_iter()
.map(|_| {
let message = make_close_message(
keypair,
&base_keypair,
seed_tracker.max_closed.clone(),
1,
min_balance,
mint.is_some(),
);
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
Transaction::new(&signers, message, recent_blockhash.0)
})
.collect();
balance = balance.saturating_sub(fee * txs.len() as u64);
info!("close txs: {}", txs.len());
let new_ids = executor.push_transactions(txs);
info!("close ids: {}", new_ids.len());
tx_sent_count += new_ids.len();
total_accounts_closed += new_ids.len() as u64;
}
}
} else {
let _ = executor.drain_cleared();
}
@@ -398,8 +494,8 @@ fn run_accounts_bench(
count += 1;
if last_log.elapsed().as_millis() > 3000 {
info!(
"total_accounts: {} tx_sent_count: {} loop_count: {} balance: {}",
total_account_count, tx_sent_count, count, balance
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance: {}",
total_accounts_created, total_accounts_closed, tx_sent_count, count, balance
);
last_log = Instant::now();
}
@@ -455,14 +551,21 @@ fn main() {
)
.arg(
Arg::with_name("batch_size")
.long("batch_size")
.long("batch-size")
.takes_value(true)
.value_name("BYTES")
.help("Size of accounts to create"),
.help("Number of transactions to send per batch"),
)
.arg(
Arg::with_name("close_nth")
.long("close-frequency")
.takes_value(true)
.value_name("BYTES")
.help("Send close transactions after this many accounts created"),
)
.arg(
Arg::with_name("num_instructions")
.long("num_instructions")
.long("num-instructions")
.takes_value(true)
.value_name("NUM")
.help("Number of accounts to create on each transaction"),
@@ -508,6 +611,7 @@ fn main() {
let space = value_t!(matches, "space", u64).ok();
let lamports = value_t!(matches, "lamports", u64).ok();
let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4);
let close_nth = value_t!(matches, "close_nth", u64).unwrap_or(0);
let iterations = value_t!(matches, "iterations", usize).unwrap_or(10);
let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1);
if num_instructions == 0 || num_instructions > 500 {
@@ -551,6 +655,7 @@ fn main() {
iterations,
space,
batch_size,
close_nth,
lamports,
num_instructions,
mint,
@@ -585,6 +690,7 @@ pub mod test {
let iterations = 10;
let maybe_space = None;
let batch_size = 100;
let close_nth = 100;
let maybe_lamports = None;
let num_instructions = 2;
let mut start = Measure::start("total accounts run");
@@ -595,6 +701,7 @@ pub mod test {
iterations,
maybe_space,
batch_size,
close_nth,
maybe_lamports,
num_instructions,
None,


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.6.0"
version = "1.6.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,16 +14,16 @@ crossbeam-channel = "0.4"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.0"
solana-core = { path = "../core", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-streamer = { path = "../streamer", version = "1.6.0" }
solana-perf = { path = "../perf", version = "1.6.0" }
solana-ledger = { path = "../ledger", version = "1.6.0" }
solana-logger = { path = "../logger", version = "1.6.0" }
solana-runtime = { path = "../runtime", version = "1.6.0" }
solana-measure = { path = "../measure", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-core = { path = "../core", version = "=1.6.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-streamer = { path = "../streamer", version = "=1.6.2" }
solana-perf = { path = "../perf", version = "=1.6.2" }
solana-ledger = { path = "../ledger", version = "=1.6.2" }
solana-logger = { path = "../logger", version = "=1.6.2" }
solana-runtime = { path = "../runtime", version = "=1.6.2" }
solana-measure = { path = "../measure", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.6.0"
version = "1.6.2"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,16 +15,16 @@ borsh = "0.8.1"
borsh-derive = "0.8.1"
futures = "0.3"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "1.6.0" }
solana-program = { path = "../sdk/program", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-banks-interface = { path = "../banks-interface", version = "=1.6.2" }
solana-program = { path = "../sdk/program", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1.1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "1.6.0" }
solana-banks-server = { path = "../banks-server", version = "1.6.0" }
solana-runtime = { path = "../runtime", version = "=1.6.2" }
solana-banks-server = { path = "../banks-server", version = "=1.6.2" }
[lib]
crate-type = ["lib"]


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.6.0"
version = "1.6.2"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ edition = "2018"
[dependencies]
mio = "0.7.6"
serde = { version = "1.0.122", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
tarpc = { version = "0.24.1", features = ["full"] }
[dev-dependencies]


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.6.0"
version = "1.6.2"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,10 +14,10 @@ bincode = "1.3.1"
futures = "0.3"
log = "0.4.11"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "1.6.0" }
solana-runtime = { path = "../runtime", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-metrics = { path = "../metrics", version = "1.6.0" }
solana-banks-interface = { path = "../banks-interface", version = "=1.6.2" }
solana-runtime = { path = "../runtime", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-metrics = { path = "../metrics", version = "=1.6.2" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1.1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.6.0"
version = "1.6.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,21 +18,21 @@ rand = "0.7.0"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-core = { path = "../core", version = "1.6.0" }
solana-genesis = { path = "../genesis", version = "1.6.0" }
solana-client = { path = "../client", version = "1.6.0" }
solana-faucet = { path = "../faucet", version = "1.6.0" }
solana-exchange-program = { path = "../programs/exchange", version = "1.6.0" }
solana-logger = { path = "../logger", version = "1.6.0" }
solana-metrics = { path = "../metrics", version = "1.6.0" }
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
solana-runtime = { path = "../runtime", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-core = { path = "../core", version = "=1.6.2" }
solana-genesis = { path = "../genesis", version = "=1.6.2" }
solana-client = { path = "../client", version = "=1.6.2" }
solana-faucet = { path = "../faucet", version = "=1.6.2" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.6.2" }
solana-logger = { path = "../logger", version = "=1.6.2" }
solana-metrics = { path = "../metrics", version = "=1.6.2" }
solana-net-utils = { path = "../net-utils", version = "=1.6.2" }
solana-runtime = { path = "../runtime", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.6.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.6.0"
version = "1.6.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-streamer = { path = "../streamer", version = "1.6.0" }
solana-logger = { path = "../logger", version = "1.6.0" }
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-streamer = { path = "../streamer", version = "=1.6.2" }
solana-logger = { path = "../logger", version = "=1.6.2" }
solana-net-utils = { path = "../net-utils", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.6.0"
version = "1.6.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,22 +15,22 @@ log = "0.4.11"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-core = { path = "../core", version = "1.6.0" }
solana-genesis = { path = "../genesis", version = "1.6.0" }
solana-client = { path = "../client", version = "1.6.0" }
solana-faucet = { path = "../faucet", version = "1.6.0" }
solana-logger = { path = "../logger", version = "1.6.0" }
solana-metrics = { path = "../metrics", version = "1.6.0" }
solana-measure = { path = "../measure", version = "1.6.0" }
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
solana-runtime = { path = "../runtime", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-core = { path = "../core", version = "=1.6.2" }
solana-genesis = { path = "../genesis", version = "=1.6.2" }
solana-client = { path = "../client", version = "=1.6.2" }
solana-faucet = { path = "../faucet", version = "=1.6.2" }
solana-logger = { path = "../logger", version = "=1.6.2" }
solana-metrics = { path = "../metrics", version = "=1.6.2" }
solana-measure = { path = "../measure", version = "=1.6.2" }
solana-net-utils = { path = "../net-utils", version = "=1.6.2" }
solana-runtime = { path = "../runtime", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
[dev-dependencies]
serial_test = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.6.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -25,9 +25,6 @@ source scripts/ulimit-n.sh
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105
# Limit compiler jobs to reduce memory usage
# on machines with 2gb/thread of memory
NPROC=$(nproc)


@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.6.0"
version = "1.6.2"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,8 +12,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
thiserror = "1.0.21"
tiny-bip39 = "0.8.0"
url = "2.1.0"


@@ -12,6 +12,7 @@ use solana_remote_wallet::{
};
use solana_sdk::{
hash::Hash,
message::Message,
pubkey::Pubkey,
signature::{
keypair_from_seed, keypair_from_seed_phrase_and_passphrase, read_keypair,
@@ -28,6 +29,7 @@ use std::{
pub struct SignOnly {
pub blockhash: Hash,
pub message: Option<String>,
pub present_signers: Vec<(Pubkey, Signature)>,
pub absent_signers: Vec<Pubkey>,
pub bad_signers: Vec<Pubkey>,
@@ -67,6 +69,18 @@ impl CliSignerInfo {
None
}
}
pub fn signers_for_message(&self, message: &Message) -> Vec<&dyn Signer> {
self.signers
.iter()
.filter_map(|k| {
if message.signer_keys().contains(&&k.pubkey()) {
Some(k.as_ref())
} else {
None
}
})
.collect()
}
}
pub struct DefaultSigner {
@@ -108,6 +122,15 @@ impl DefaultSigner {
) -> Result<Box<dyn Signer>, Box<dyn std::error::Error>> {
signer_from_path(matches, &self.path, &self.arg_name, wallet_manager)
}
pub fn signer_from_path_with_config(
&self,
matches: &ArgMatches,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
config: &SignerFromPathConfig,
) -> Result<Box<dyn Signer>, Box<dyn std::error::Error>> {
signer_from_path_with_config(matches, &self.path, &self.arg_name, wallet_manager, config)
}
}
pub enum KeypairUrl {
@@ -145,11 +168,35 @@ pub fn presigner_from_pubkey_sigs(
})
}
#[derive(Debug)]
pub struct SignerFromPathConfig {
pub allow_null_signer: bool,
}
impl Default for SignerFromPathConfig {
fn default() -> Self {
Self {
allow_null_signer: false,
}
}
}
pub fn signer_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Box<dyn Signer>, Box<dyn error::Error>> {
let config = SignerFromPathConfig::default();
signer_from_path_with_config(matches, path, keypair_name, wallet_manager, &config)
}
pub fn signer_from_path_with_config(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
config: &SignerFromPathConfig,
) -> Result<Box<dyn Signer>, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Ask => {
@@ -193,7 +240,7 @@ pub fn signer_from_path(
.and_then(|presigners| presigner_from_pubkey_sigs(&pubkey, presigners));
if let Some(presigner) = presigner {
Ok(Box::new(presigner))
} else if matches.is_present(SIGN_ONLY_ARG.name) {
} else if config.allow_null_signer || matches.is_present(SIGN_ONLY_ARG.name) {
Ok(Box::new(NullSigner::new(&pubkey)))
} else {
Err(std::io::Error::new(
@@ -355,6 +402,7 @@ fn sanitize_seed_phrase(seed_phrase: &str) -> String {
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::system_instruction;
#[test]
fn test_sanitize_seed_phrase() {
@@ -364,4 +412,35 @@ mod tests {
sanitize_seed_phrase(seed_phrase)
);
}
#[test]
fn test_signer_info_signers_for_message() {
let source = Keypair::new();
let fee_payer = Keypair::new();
let nonsigner1 = Keypair::new();
let nonsigner2 = Keypair::new();
let recipient = Pubkey::new_unique();
let message = Message::new(
&[system_instruction::transfer(
&source.pubkey(),
&recipient,
42,
)],
Some(&fee_payer.pubkey()),
);
let signers = vec![
Box::new(fee_payer) as Box<dyn Signer>,
Box::new(source) as Box<dyn Signer>,
Box::new(nonsigner1) as Box<dyn Signer>,
Box::new(nonsigner2) as Box<dyn Signer>,
];
let signer_info = CliSignerInfo { signers };
let msg_signers = signer_info.signers_for_message(&message);
let signer_pubkeys = msg_signers.iter().map(|s| s.pubkey()).collect::<Vec<_>>();
let expect = vec![
signer_info.signers[0].pubkey(),
signer_info.signers[1].pubkey(),
];
assert_eq!(signer_pubkeys, expect);
}
}


@@ -19,6 +19,12 @@ pub const SIGNER_ARG: ArgConstant<'static> = ArgConstant {
help: "Provide a public-key/signature pair for the transaction",
};
pub const DUMP_TRANSACTION_MESSAGE: ArgConstant<'static> = ArgConstant {
name: "dump_transaction_message",
long: "dump-transaction-message",
help: "Display the base64 encoded binary transaction message in sign-only mode",
};
pub fn blockhash_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(BLOCKHASH_ARG.name)
.long(BLOCKHASH_ARG.long)
@@ -47,6 +53,14 @@ fn signer_arg<'a, 'b>() -> Arg<'a, 'b> {
.help(SIGNER_ARG.help)
}
pub fn dump_transaction_message<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(DUMP_TRANSACTION_MESSAGE.name)
.long(DUMP_TRANSACTION_MESSAGE.long)
.takes_value(false)
.requires(SIGN_ONLY_ARG.name)
.help(DUMP_TRANSACTION_MESSAGE.help)
}
pub trait ArgsConfig {
fn blockhash_arg<'a, 'b>(&self, arg: Arg<'a, 'b>) -> Arg<'a, 'b> {
arg
@@ -57,6 +71,9 @@ pub trait ArgsConfig {
fn signer_arg<'a, 'b>(&self, arg: Arg<'a, 'b>) -> Arg<'a, 'b> {
arg
}
fn dump_transaction_message_arg<'a, 'b>(&self, arg: Arg<'a, 'b>) -> Arg<'a, 'b> {
arg
}
}
pub trait OfflineArgs {
@@ -69,6 +86,7 @@ impl OfflineArgs for App<'_, '_> {
self.arg(config.blockhash_arg(blockhash_arg()))
.arg(config.sign_only_arg(sign_only_arg()))
.arg(config.signer_arg(signer_arg()))
.arg(config.dump_transaction_message_arg(dump_transaction_message()))
}
fn offline_args(self) -> Self {
struct NullArgsConfig {}
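A small sketch of how a subcommand that already calls .offline_args() would read the new flag, assuming the constants are exported from solana_clap_utils::offline as in this file; the helper name is illustrative:

```rust
use clap::ArgMatches;
use solana_clap_utils::offline::{DUMP_TRANSACTION_MESSAGE, SIGN_ONLY_ARG};

// --dump-transaction-message is only accepted alongside --sign-only,
// per the .requires(SIGN_ONLY_ARG.name) constraint above.
fn parse_offline_flags(matches: &ArgMatches) -> (bool, bool) {
    let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
    let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
    (sign_only, dump_transaction_message)
}
```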


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.6.0"
version = "1.6.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"


@@ -3,13 +3,14 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.6.0"
version = "1.6.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-cli-output"
[dependencies]
base64 = "0.13.0"
chrono = { version = "0.4.11", features = ["serde"] }
console = "0.11.3"
humantime = "2.0.1"
@@ -18,13 +19,13 @@ indicatif = "0.15.0"
serde = "1.0.122"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-client = { path = "../client", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-stake-program = { path = "../programs/stake", version = "1.6.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.6.0" }
solana-vote-program = { path = "../programs/vote", version = "1.6.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.6.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-client = { path = "../client", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-stake-program = { path = "../programs/stake", version = "=1.6.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.6.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.6.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -19,7 +19,7 @@ use {
RpcVoteAccountInfo,
},
solana_sdk::{
clock::{self, Epoch, Slot, UnixTimestamp},
clock::{Epoch, Slot, UnixTimestamp},
epoch_info::EpochInfo,
hash::Hash,
native_token::lamports_to_sol,
@@ -231,12 +231,8 @@ pub struct CliSlotStatus {
pub struct CliEpochInfo {
#[serde(flatten)]
pub epoch_info: EpochInfo,
}
impl From<EpochInfo> for CliEpochInfo {
fn from(epoch_info: EpochInfo) -> Self {
Self { epoch_info }
}
#[serde(skip)]
pub average_slot_time_ms: u64,
}
impl QuietDisplay for CliEpochInfo {}
@@ -286,16 +282,16 @@ impl fmt::Display for CliEpochInfo {
"Epoch Completed Time:",
&format!(
"{}/{} ({} remaining)",
slot_to_human_time(self.epoch_info.slot_index),
slot_to_human_time(self.epoch_info.slots_in_epoch),
slot_to_human_time(remaining_slots_in_epoch)
slot_to_human_time(self.epoch_info.slot_index, self.average_slot_time_ms),
slot_to_human_time(self.epoch_info.slots_in_epoch, self.average_slot_time_ms),
slot_to_human_time(remaining_slots_in_epoch, self.average_slot_time_ms)
),
)
}
}
fn slot_to_human_time(slot: Slot) -> String {
humantime::format_duration(Duration::from_millis(slot * clock::DEFAULT_MS_PER_SLOT)).to_string()
fn slot_to_human_time(slot: Slot, slot_time_ms: u64) -> String {
humantime::format_duration(Duration::from_secs((slot * slot_time_ms) / 1000)).to_string()
}
#[derive(Serialize, Deserialize, Default)]
@@ -1323,6 +1319,8 @@ impl fmt::Display for CliInflation {
#[serde(rename_all = "camelCase")]
pub struct CliSignOnlyData {
pub blockhash: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub signers: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
@@ -1338,6 +1336,9 @@ impl fmt::Display for CliSignOnlyData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Blockhash:", &self.blockhash)?;
if let Some(message) = self.message.as_ref() {
writeln_name_value(f, "Transaction Message:", message)?;
}
if !self.signers.is_empty() {
writeln!(f, "{}", style("Signers (Pubkey=Signature):").bold())?;
for signer in self.signers.iter() {
@@ -1391,7 +1392,7 @@ impl fmt::Display for CliAccountBalances {
writeln!(
f,
"{}",
style(format!("{:<44} {}", "Address", "Balance",)).bold()
style(format!("{:<44} {}", "Address", "Balance")).bold()
)?;
for account in &self.accounts {
writeln!(
@@ -1683,6 +1684,9 @@ pub struct CliUpgradeableBuffer {
pub address: String,
pub authority: String,
pub data_len: usize,
pub lamports: u64,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl QuietDisplay for CliUpgradeableBuffer {}
impl VerboseDisplay for CliUpgradeableBuffer {}
@@ -1691,18 +1695,74 @@ impl fmt::Display for CliUpgradeableBuffer {
writeln!(f)?;
writeln_name_value(f, "Buffer Address:", &self.address)?;
writeln_name_value(f, "Authority:", &self.authority)?;
writeln_name_value(
f,
"Balance:",
&build_balance_message(self.lamports, self.use_lamports_unit, true),
)?;
writeln_name_value(
f,
"Data Length:",
&format!("{:?} ({:#x?}) bytes", self.data_len, self.data_len),
)?;
Ok(())
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliUpgradeableBuffers {
pub buffers: Vec<CliUpgradeableBuffer>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl QuietDisplay for CliUpgradeableBuffers {}
impl VerboseDisplay for CliUpgradeableBuffers {}
impl fmt::Display for CliUpgradeableBuffers {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln!(
f,
"{}",
style(format!(
"{:<44} | {:<44} | {}",
"Buffer Address", "Authority", "Balance"
))
.bold()
)?;
for buffer in self.buffers.iter() {
writeln!(
f,
"{}",
&format!(
"{:<44} | {:<44} | {}",
buffer.address,
buffer.authority,
build_balance_message(buffer.lamports, self.use_lamports_unit, true)
)
)?;
}
Ok(())
}
}
#[derive(Debug, Default)]
pub struct ReturnSignersConfig {
pub dump_transaction_message: bool,
}
pub fn return_signers(
tx: &Transaction,
output_format: &OutputFormat,
) -> Result<String, Box<dyn std::error::Error>> {
return_signers_with_config(tx, output_format, &ReturnSignersConfig::default())
}
pub fn return_signers_with_config(
tx: &Transaction,
output_format: &OutputFormat,
config: &ReturnSignersConfig,
) -> Result<String, Box<dyn std::error::Error>> {
let verify_results = tx.verify_with_results();
let mut signers = Vec::new();
@@ -1721,9 +1781,16 @@ pub fn return_signers(
bad_sig.push(key.to_string());
}
});
let message = if config.dump_transaction_message {
let message_data = tx.message_data();
Some(base64::encode(&message_data))
} else {
None
};
let cli_command = CliSignOnlyData {
blockhash: tx.message.recent_blockhash.to_string(),
message,
signers,
absent,
bad_sig,
@@ -1778,8 +1845,14 @@ pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly {
.collect();
}
let message = object
.get("message")
.and_then(|o| o.as_str())
.map(|m| m.to_string());
SignOnly {
blockhash,
message,
present_signers,
absent_signers,
bad_signers,
@@ -2058,6 +2131,25 @@ mod tests {
let res = return_signers(&tx, &OutputFormat::JsonCompact).unwrap();
let sign_only = parse_sign_only_reply_string(&res);
assert_eq!(sign_only.blockhash, blockhash);
assert_eq!(sign_only.message, None);
assert_eq!(sign_only.present_signers[0].0, present.pubkey());
assert_eq!(sign_only.absent_signers[0], absent.pubkey());
assert_eq!(sign_only.bad_signers[0], bad.pubkey());
let expected_msg = "AwECBwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDgTl3Dqh9\
F19Wo1Rmw0x+zMuNipG07jeiXfYPW4/Js5QEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE\
BAQEBAYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBQUFBQUFBQUFBQUFBQUFBQUF\
BQUFBQUFBQUFBQUFBQUGp9UXGSxWjuCKhF9z0peIzwNcMUWyGrNE2AYuqUAAAAAAAAAAAAAA\
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcH\
BwcCBgMDBQIEBAAAAAYCAQQMAgAAACoAAAAAAAAA"
.to_string();
let config = ReturnSignersConfig {
dump_transaction_message: true,
};
let res = return_signers_with_config(&tx, &OutputFormat::JsonCompact, &config).unwrap();
let sign_only = parse_sign_only_reply_string(&res);
assert_eq!(sign_only.blockhash, blockhash);
assert_eq!(sign_only.message, Some(expected_msg));
assert_eq!(sign_only.present_signers[0].0, present.pubkey());
assert_eq!(sign_only.absent_signers[0], absent.pubkey());
assert_eq!(sign_only.bad_signers[0], bad.pubkey());
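For reference, a self-contained sketch of the sign-only path exercised by the test above: partially sign a transaction, then ask return_signers_with_config to include the base64 transaction message. The keypair, recipient, and placeholder blockhash are made up for the example:

```rust
use solana_cli_output::{return_signers_with_config, OutputFormat, ReturnSignersConfig};
use solana_sdk::{
    hash::Hash,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let from = Keypair::new();
    let to = Pubkey::new_unique();
    let message = Message::new(
        &[system_instruction::transfer(&from.pubkey(), &to, 42)],
        Some(&from.pubkey()),
    );
    let mut tx = Transaction::new_unsigned(message);
    // Partially sign against a placeholder blockhash, as a sign-only flow would.
    tx.try_partial_sign(&[&from], Hash::default())?;

    // With dump_transaction_message set, the JSON output gains a base64
    // `message` field alongside the blockhash and signer lists.
    let out = return_signers_with_config(
        &tx,
        &OutputFormat::JsonCompact,
        &ReturnSignersConfig {
            dump_transaction_message: true,
        },
    )?;
    println!("{}", out);
    Ok(())
}
```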


@@ -4,7 +4,7 @@ use {
console::style,
indicatif::{ProgressBar, ProgressStyle},
solana_sdk::{
clock::UnixTimestamp, hash::Hash, native_token::lamports_to_sol,
clock::UnixTimestamp, hash::Hash, message::Message, native_token::lamports_to_sol,
program_utils::limited_deserialize, transaction::Transaction,
},
solana_transaction_status::UiTransactionStatusMeta,
@@ -125,6 +125,31 @@ pub fn println_signers(
println!();
}
fn format_account_mode(message: &Message, index: usize) -> String {
format!(
"{}r{}{}", // accounts are always readable...
if message.is_signer(index) {
"s" // stands for signer
} else {
"-"
},
if message.is_writable(index) {
"w" // comment for consistent rust fmt (no joking; lol)
} else {
"-"
},
// account may be executable on-chain while not being
// designated as a program-id in the message
if message.maybe_executable(index) {
"x"
} else {
// programs to be executed via CPI cannot be identified as
// executable from the message
"-"
},
)
}
pub fn write_transaction<W: io::Write>(
w: &mut W,
transaction: &Transaction,
@@ -167,16 +192,31 @@ pub fn write_transaction<W: io::Write>(
prefix, signature_index, signature, sigverify_status,
)?;
}
writeln!(w, "{}{:?}", prefix, message.header)?;
let mut fee_payer_index = None;
for (account_index, account) in message.account_keys.iter().enumerate() {
writeln!(w, "{}Account {}: {:?}", prefix, account_index, account)?;
if fee_payer_index.is_none() && message.is_non_loader_key(account, account_index) {
fee_payer_index = Some(account_index)
}
writeln!(
w,
"{}Account {}: {} {}{}",
prefix,
account_index,
format_account_mode(message, account_index),
account,
if Some(account_index) == fee_payer_index {
" (fee payer)"
} else {
""
},
)?;
}
for (instruction_index, instruction) in message.instructions.iter().enumerate() {
let program_pubkey = message.account_keys[instruction.program_id_index as usize];
writeln!(w, "{}Instruction {}", prefix, instruction_index)?;
writeln!(
w,
"{} Program: {} ({})",
"{} Program: {} ({})",
prefix, program_pubkey, instruction.program_id_index
)?;
for (account_index, account) in instruction.accounts.iter().enumerate() {
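A tiny check of the account-mode flags that format_account_mode (above) renders, assuming a plain system transfer; account 0 is the fee payer and would display as "srw-":

```rust
use solana_sdk::{message::Message, pubkey::Pubkey, system_instruction};

fn main() {
    let from = Pubkey::new_unique();
    let to = Pubkey::new_unique();
    let message = Message::new(
        &[system_instruction::transfer(&from, &to, 1)],
        Some(&from),
    );
    // Fee payer: signer, writable, not a program id in this message -> "srw-".
    assert!(message.is_signer(0));
    assert!(message.is_writable(0));
    assert!(!message.maybe_executable(0));
}
```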


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.6.0"
version = "1.6.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -24,33 +24,33 @@ indicatif = "0.15.0"
humantime = "2.0.1"
num-traits = "0.2"
pretty-hex = "0.2.1"
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde = "1.0.122"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.6.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-cli-config = { path = "../cli-config", version = "1.6.0" }
solana-cli-output = { path = "../cli-output", version = "1.6.0" }
solana-client = { path = "../client", version = "1.6.0" }
solana-config-program = { path = "../programs/config", version = "1.6.0" }
solana-faucet = { path = "../faucet", version = "1.6.0" }
solana-logger = { path = "../logger", version = "1.6.0" }
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.6.2" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.6.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-cli-config = { path = "../cli-config", version = "=1.6.2" }
solana-cli-output = { path = "../cli-output", version = "=1.6.2" }
solana-client = { path = "../client", version = "=1.6.2" }
solana-config-program = { path = "../programs/config", version = "=1.6.2" }
solana-faucet = { path = "../faucet", version = "=1.6.2" }
solana-logger = { path = "../logger", version = "=1.6.2" }
solana-net-utils = { path = "../net-utils", version = "=1.6.2" }
solana_rbpf = "=0.2.5"
solana-remote-wallet = { path = "../remote-wallet", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-stake-program = { path = "../programs/stake", version = "1.6.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-vote-program = { path = "../programs/vote", version = "1.6.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-stake-program = { path = "../programs/stake", version = "=1.6.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.6.2" }
thiserror = "1.0.21"
tiny-bip39 = "0.7.0"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.6.0" }
solana-core = { path = "../core", version = "=1.6.2" }
tempfile = "3.1.0"
[[bin]]


@@ -18,15 +18,18 @@ use solana_clap_utils::{
};
use solana_cli_output::{
display::{build_balance_message, println_name_value},
return_signers, CliAccount, CliSignature, CliSignatureVerificationStatus, CliTransaction,
CliTransactionConfirmation, OutputFormat,
return_signers_with_config, CliAccount, CliSignature, CliSignatureVerificationStatus,
CliTransaction, CliTransactionConfirmation, OutputFormat, ReturnSignersConfig,
};
use solana_client::{
blockhash_query::BlockhashQuery,
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
nonce_utils,
rpc_client::RpcClient,
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig, RpcTransactionLogsFilter},
rpc_config::{
RpcConfirmedTransactionConfig, RpcLargestAccountsFilter, RpcSendTransactionConfig,
RpcTransactionLogsFilter,
},
rpc_response::RpcKeyedAccount,
};
#[cfg(not(test))]
@@ -198,6 +201,7 @@ pub enum CliCommand {
lockup: Lockup,
amount: SpendAmount,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -208,6 +212,7 @@ pub enum CliCommand {
stake_account_pubkey: Pubkey,
stake_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -219,6 +224,7 @@ pub enum CliCommand {
stake_authority: SignerIndex,
force: bool,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -228,6 +234,7 @@ pub enum CliCommand {
stake_account_pubkey: Pubkey,
stake_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -241,6 +248,7 @@ pub enum CliCommand {
source_stake_account_pubkey: Pubkey,
stake_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -257,6 +265,7 @@ pub enum CliCommand {
stake_account_pubkey: Pubkey,
new_authorizations: Vec<(StakeAuthorize, Pubkey, SignerIndex)>,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -268,6 +277,7 @@ pub enum CliCommand {
lockup: LockupArgs,
custodian: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -280,6 +290,7 @@ pub enum CliCommand {
withdraw_authority: SignerIndex,
custodian: Option<SignerIndex>,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -351,6 +362,8 @@ pub enum CliCommand {
to: Pubkey,
from: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
allow_unfunded_recipient: bool,
no_wait: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
@@ -847,6 +860,7 @@ pub fn parse_command(
let amount = SpendAmount::new_from_matches(matches, "amount");
let to = pubkey_of_signer(matches, "to", wallet_manager)?.unwrap();
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
let no_wait = matches.is_present("no_wait");
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?;
@@ -855,6 +869,7 @@ pub fn parse_command(
let (fee_payer, fee_payer_pubkey) =
signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?;
let (from, from_pubkey) = signer_of(matches, "from", wallet_manager)?;
let allow_unfunded_recipient = matches.is_present("allow_unfunded_recipient");
let mut bulk_signers = vec![fee_payer, from];
if nonce_account.is_some() {
@@ -875,6 +890,8 @@ pub fn parse_command(
amount,
to,
sign_only,
dump_transaction_message,
allow_unfunded_recipient,
no_wait,
blockhash_query,
nonce_account,
@@ -1013,9 +1030,13 @@ fn process_confirm(
let mut transaction = None;
let mut get_transaction_error = None;
if config.verbose {
match rpc_client
.get_confirmed_transaction(signature, UiTransactionEncoding::Base64)
{
match rpc_client.get_confirmed_transaction_with_config(
signature,
RpcConfirmedTransactionConfig {
encoding: Some(UiTransactionEncoding::Base64),
commitment: Some(CommitmentConfig::confirmed()),
},
) {
Ok(confirmed_transaction) => {
let decoded_transaction = confirmed_transaction
.transaction
@@ -1127,6 +1148,8 @@ fn process_transfer(
to: &Pubkey,
from: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
allow_unfunded_recipient: bool,
no_wait: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<&Pubkey>,
@@ -1141,6 +1164,21 @@ fn process_transfer(
let (recent_blockhash, fee_calculator) =
blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?;
if !sign_only && !allow_unfunded_recipient {
let recipient_balance = rpc_client
.get_balance_with_commitment(to, config.commitment)?
.value;
if recipient_balance == 0 {
return Err(format!(
"The recipient address ({}) is not funded. \
Add `--allow-unfunded-recipient` to complete the transfer \
",
to
)
.into());
}
}
let nonce_authority = config.signers[nonce_authority];
let fee_payer = config.signers[fee_payer];
@@ -1193,7 +1231,13 @@ fn process_transfer(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config.output_format)
return_signers_with_config(
&tx,
&config.output_format,
&ReturnSignersConfig {
dump_transaction_message,
},
)
} else {
if let Some(nonce_account) = &nonce_account {
let nonce_account = nonce_utils::get_account_with_commitment(
@@ -1445,6 +1489,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
lockup,
amount,
sign_only,
dump_transaction_message,
blockhash_query,
ref nonce_account,
nonce_authority,
@@ -1460,6 +1505,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
lockup,
*amount,
*sign_only,
*dump_transaction_message,
blockhash_query,
nonce_account.as_ref(),
*nonce_authority,
@@ -1470,6 +1516,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
stake_account_pubkey,
stake_authority,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
@@ -1480,6 +1527,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&stake_account_pubkey,
*stake_authority,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
@@ -1491,6 +1539,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
stake_authority,
force,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
@@ -1503,6 +1552,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*stake_authority,
*force,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
@@ -1512,6 +1562,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
stake_account_pubkey,
stake_authority,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
@@ -1525,6 +1576,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&stake_account_pubkey,
*stake_authority,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
@@ -1538,6 +1590,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
source_stake_account_pubkey,
stake_authority,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
@@ -1549,6 +1602,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&source_stake_account_pubkey,
*stake_authority,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
@@ -1570,6 +1624,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
stake_account_pubkey,
ref new_authorizations,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
@@ -1582,6 +1637,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
new_authorizations,
*custodian,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
@@ -1592,6 +1648,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
mut lockup,
custodian,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority,
@@ -1603,6 +1660,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&mut lockup,
*custodian,
*sign_only,
*dump_transaction_message,
blockhash_query,
*nonce_account,
*nonce_authority,
@@ -1615,6 +1673,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
withdraw_authority,
custodian,
sign_only,
dump_transaction_message,
blockhash_query,
ref nonce_account,
nonce_authority,
@@ -1628,6 +1687,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*withdraw_authority,
*custodian,
*sign_only,
*dump_transaction_message,
blockhash_query,
nonce_account.as_ref(),
*nonce_authority,
@@ -1787,6 +1847,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
to,
from,
sign_only,
dump_transaction_message,
allow_unfunded_recipient,
no_wait,
ref blockhash_query,
ref nonce_account,
@@ -1801,6 +1863,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
to,
*from,
*sign_only,
*dump_transaction_message,
*allow_unfunded_recipient,
*no_wait,
blockhash_query,
nonce_account.as_ref(),
@@ -1987,6 +2051,17 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.takes_value(true)
.required(true)
.help("The transaction signature to confirm"),
)
.after_help(// Formatted specifically for the manually-indented heredoc string
"Note: This will show more detailed information for finalized transactions with verbose mode (-v/--verbose).\
\n\
\nAccount modes:\
\n |srwx|\
\n s: signed\
\n r: readable (always true)\
\n w: writable\
\n x: program account (inner instructions excluded)\
"
),
)
.subcommand(
@@ -2158,6 +2233,12 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.requires("derived_address_seed")
.hidden(true)
)
.arg(
Arg::with_name("allow_unfunded_recipient")
.long("allow-unfunded-recipient")
.takes_value(false)
.help("Complete the transfer even if the recipient address is not funded")
)
.offline_args()
.nonce_args(false)
.arg(fee_payer_arg()),
@@ -2601,6 +2682,7 @@ mod tests {
},
amount: SpendAmount::Some(30),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2620,6 +2702,7 @@ mod tests {
withdraw_authority: 0,
custodian: None,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2634,6 +2717,7 @@ mod tests {
stake_account_pubkey,
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -2648,6 +2732,7 @@ mod tests {
stake_account_pubkey,
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -2668,6 +2753,7 @@ mod tests {
source_stake_account_pubkey,
stake_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -2855,6 +2941,8 @@ mod tests {
to: to_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: false,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@@ -2879,6 +2967,8 @@ mod tests {
to: to_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: false,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@@ -2891,11 +2981,12 @@ mod tests {
}
);
// Test Transfer no-wait
// Test Transfer no-wait and --allow-unfunded-recipient
let test_transfer = test_commands.clone().get_matches_from(vec![
"test",
"transfer",
"--no-wait",
"--allow-unfunded-recipient",
&to_string,
"42",
]);
@@ -2907,6 +2998,8 @@ mod tests {
to: to_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: true,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@@ -2939,6 +3032,8 @@ mod tests {
to: to_pubkey,
from: 0,
sign_only: true,
dump_transaction_message: false,
allow_unfunded_recipient: false,
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
@@ -2976,6 +3071,8 @@ mod tests {
to: to_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::Cluster,
@@ -3017,6 +3114,8 @@ mod tests {
to: to_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_address),
@@ -3056,6 +3155,8 @@ mod tests {
to: to_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: false,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,


@@ -24,8 +24,9 @@ use solana_client::{
pubsub_client::PubsubClient,
rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient},
rpc_config::{
RpcAccountInfoConfig, RpcLargestAccountsConfig, RpcLargestAccountsFilter,
RpcProgramAccountsConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter,
RpcAccountInfoConfig, RpcConfirmedBlockConfig, RpcLargestAccountsConfig,
RpcLargestAccountsFilter, RpcProgramAccountsConfig, RpcTransactionLogsConfig,
RpcTransactionLogsFilter,
},
rpc_filter,
rpc_response::SlotInfo,
@@ -963,8 +964,16 @@ pub fn process_get_block(
rpc_client.get_slot_with_commitment(CommitmentConfig::finalized())?
};
let encoded_confirmed_block =
rpc_client.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Base64)?;
let encoded_confirmed_block = rpc_client
.get_confirmed_block_with_config(
slot,
RpcConfirmedBlockConfig {
encoding: Some(UiTransactionEncoding::Base64),
commitment: Some(CommitmentConfig::confirmed()),
..RpcConfirmedBlockConfig::default()
},
)?
.into();
let cli_block = CliBlock {
encoded_confirmed_block,
slot,
@@ -993,7 +1002,21 @@ pub fn process_get_epoch(rpc_client: &RpcClient, _config: &CliConfig) -> Process
}
pub fn process_get_epoch_info(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult {
let epoch_info: CliEpochInfo = rpc_client.get_epoch_info()?.into();
let epoch_info = rpc_client.get_epoch_info()?;
let average_slot_time_ms = rpc_client
.get_recent_performance_samples(Some(60))
.ok()
.and_then(|samples| {
let (slots, secs) = samples.iter().fold((0, 0), |(slots, secs), sample| {
(slots + sample.num_slots, secs + sample.sample_period_secs)
});
(secs as u64).saturating_mul(1000).checked_div(slots)
})
.unwrap_or(clock::DEFAULT_MS_PER_SLOT);
let epoch_info = CliEpochInfo {
epoch_info,
average_slot_time_ms,
};
Ok(config.output_format.formatted_string(&epoch_info))
}
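The average-slot-time fold above reduces up to 60 recent performance samples to milliseconds per slot; a standalone arithmetic sketch with made-up sample values:

```rust
fn main() {
    // (num_slots, sample_period_secs) pairs, one per performance sample.
    let samples: [(u64, u64); 2] = [(150, 60), (130, 60)];
    let (slots, secs) = samples
        .iter()
        .fold((0u64, 0u64), |(slots, secs), (num_slots, period)| {
            (slots + num_slots, secs + period)
        });
    let average_slot_time_ms = secs
        .saturating_mul(1000)
        .checked_div(slots)
        .unwrap_or(400); // fall back to clock::DEFAULT_MS_PER_SLOT (400ms)
    assert_eq!(average_slot_time_ms, 428); // 120_000 ms over 280 slots
}
```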
@@ -1008,8 +1031,8 @@ pub fn process_get_slot(rpc_client: &RpcClient, _config: &CliConfig) -> ProcessR
}
pub fn process_get_block_height(rpc_client: &RpcClient, _config: &CliConfig) -> ProcessResult {
let epoch_info: CliEpochInfo = rpc_client.get_epoch_info()?.into();
Ok(epoch_info.epoch_info.block_height.to_string())
let epoch_info = rpc_client.get_epoch_info()?;
Ok(epoch_info.block_height.to_string())
}
pub fn parse_show_block_production(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
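A sketch of fetching a confirmed (not yet finalized) block directly with the client API used above; the RPC endpoint is a placeholder and the target node must support the confirmed commitment for this call:

```rust
use solana_client::{rpc_client::RpcClient, rpc_config::RpcConfirmedBlockConfig};
use solana_sdk::commitment_config::CommitmentConfig;
use solana_transaction_status::UiTransactionEncoding;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let slot = rpc_client.get_slot_with_commitment(CommitmentConfig::confirmed())?;
    let block = rpc_client.get_confirmed_block_with_config(
        slot,
        RpcConfirmedBlockConfig {
            encoding: Some(UiTransactionEncoding::Base64),
            commitment: Some(CommitmentConfig::confirmed()),
            ..RpcConfirmedBlockConfig::default()
        },
    )?;
    println!("confirmed slot {} blockhash {}", slot, block.blockhash);
    Ok(())
}
```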


@@ -10,15 +10,22 @@ use bincode::serialize;
use bip39::{Language, Mnemonic, MnemonicType, Seed};
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
use log::*;
use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig};
use solana_bpf_loader_program::{bpf_verifier, BpfError, ThisInstructionMeter};
use solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*};
use solana_cli_output::{
display::new_spinner_progress_bar, CliProgram, CliProgramAccountType, CliProgramAuthority,
CliProgramBuffer, CliProgramId, CliUpgradeableBuffer, CliUpgradeableProgram,
CliProgramBuffer, CliProgramId, CliUpgradeableBuffer, CliUpgradeableBuffers,
CliUpgradeableProgram,
};
use solana_client::{
rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig,
rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, rpc_response::RpcLeaderSchedule,
client_error::ClientErrorKind,
rpc_client::RpcClient,
rpc_config::RpcSendTransactionConfig,
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType},
rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
rpc_response::RpcLeaderSchedule,
};
use solana_rbpf::vm::{Config, Executable};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
@@ -30,6 +37,7 @@ use solana_sdk::{
clock::Slot,
commitment_config::CommitmentConfig,
instruction::Instruction,
instruction::InstructionError,
loader_instruction,
message::Message,
native_token::Sol,
@@ -39,6 +47,7 @@ use solana_sdk::{
system_instruction::{self, SystemError},
system_program,
transaction::Transaction,
transaction::TransactionError,
};
use solana_transaction_status::TransactionConfirmationStatus;
use std::{
@@ -89,11 +98,20 @@ pub enum ProgramCliCommand {
},
Show {
account_pubkey: Option<Pubkey>,
authority_pubkey: Pubkey,
all: bool,
use_lamports_unit: bool,
},
Dump {
account_pubkey: Option<Pubkey>,
output_location: String,
},
Close {
account_pubkey: Option<Pubkey>,
recipient_pubkey: Pubkey,
authority_index: SignerIndex,
use_lamports_unit: bool,
},
}
pub trait ProgramSubCommands {
@@ -200,7 +218,7 @@ impl ProgramSubCommands for App<'_, '_> {
)
.subcommand(
SubCommand::with_name("set-buffer-authority")
.about("Set a new buffer authority") // TODO deploy with buffer and no file path?
.about("Set a new buffer authority")
.arg(
Arg::with_name("buffer")
.index(1)
@@ -266,9 +284,34 @@ impl ProgramSubCommands for App<'_, '_> {
.index(1)
.value_name("ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.help("Address of the buffer or program to show")
)
.arg(
Arg::with_name("buffers")
.long("buffers")
.conflicts_with("account")
.required_unless("account")
.help("Show every buffer account that matches the authority")
)
.arg(
Arg::with_name("all")
.long("all")
.conflicts_with("account")
.help("Show accounts for all authorities")
)
.arg(
pubkey!(Arg::with_name("buffer_authority")
.long("buffer-authority")
.value_name("AUTHORITY")
.conflicts_with("all"),
"Authority [default: the default configured keypair]"),
)
.arg(
Arg::with_name("lamports")
.long("lamports")
.takes_value(false)
.help("Display balance in lamports instead of SOL"),
),
)
.subcommand(
SubCommand::with_name("dump")
@@ -290,6 +333,44 @@ impl ProgramSubCommands for App<'_, '_> {
.help("/path/to/program.so"),
),
)
.subcommand(
SubCommand::with_name("close")
.about("Close an acount and withdraw all lamports")
.arg(
Arg::with_name("account")
.index(1)
.value_name("BUFFER_ACCOUNT_ADDRESS")
.takes_value(true)
.help("Address of the buffer account to close"),
)
.arg(
Arg::with_name("buffers")
.long("buffers")
.conflicts_with("account")
.required_unless("account")
.help("Close every buffer accounts that match the authority")
)
.arg(
Arg::with_name("buffer_authority")
.long("buffer-authority")
.value_name("AUTHORITY_SIGNER")
.takes_value(true)
.validator(is_valid_signer)
.help("Authority [default: the default configured keypair]")
)
.arg(
pubkey!(Arg::with_name("recipient_account")
.long("recipient")
.value_name("RECIPIENT_ADDRESS"),
"Address of the account to deposit the closed account's lamports [default: the default configured keypair]"),
)
.arg(
Arg::with_name("lamports")
.long("lamports")
.takes_value(false)
.help("Display balance in lamports instead of SOL"),
),
)
)
}
}
@@ -425,15 +506,8 @@ pub fn parse_program_subcommand(
let (buffer_authority_signer, buffer_authority_pubkey) =
signer_of(matches, "buffer_authority", wallet_manager)?;
let new_buffer_authority = if let Some(new_buffer_authority) =
pubkey_of_signer(matches, "new_buffer_authority", wallet_manager)?
{
new_buffer_authority
} else {
let (_, new_buffer_authority) =
signer_of(matches, "new_buffer_authority", wallet_manager)?;
new_buffer_authority.unwrap()
};
let new_buffer_authority =
pubkey_of_signer(matches, "new_buffer_authority", wallet_manager)?.unwrap();
let signer_info = default_signer.generate_unique_signers(
vec![
@@ -459,14 +533,8 @@ pub fn parse_program_subcommand(
let program_pubkey = pubkey_of(matches, "program_id").unwrap();
let new_upgrade_authority = if matches.is_present("final") {
None
} else if let Some(new_upgrade_authority) =
pubkey_of_signer(matches, "new_upgrade_authority", wallet_manager)?
{
Some(new_upgrade_authority)
} else {
let (_, new_upgrade_authority) =
signer_of(matches, "new_upgrade_authority", wallet_manager)?;
new_upgrade_authority
pubkey_of_signer(matches, "new_upgrade_authority", wallet_manager)?
};
let signer_info = default_signer.generate_unique_signers(
@@ -487,12 +555,33 @@ pub fn parse_program_subcommand(
signers: signer_info.signers,
}
}
("show", Some(matches)) => CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: pubkey_of(matches, "account"),
}),
signers: vec![],
},
("show", Some(matches)) => {
let account_pubkey = if matches.is_present("buffers") {
None
} else {
pubkey_of(matches, "account")
};
let authority_pubkey = if let Some(authority_pubkey) =
pubkey_of_signer(matches, "buffer_authority", wallet_manager)?
{
authority_pubkey
} else {
default_signer
.signer_from_path(matches, wallet_manager)?
.pubkey()
};
CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Show {
account_pubkey,
authority_pubkey,
all: matches.is_present("all"),
use_lamports_unit: matches.is_present("lamports"),
}),
signers: vec![],
}
}
("dump", Some(matches)) => CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Dump {
account_pubkey: pubkey_of(matches, "account"),
@@ -500,6 +589,45 @@ pub fn parse_program_subcommand(
}),
signers: vec![],
},
("close", Some(matches)) => {
let account_pubkey = if matches.is_present("buffers") {
None
} else {
pubkey_of(matches, "account")
};
let recipient_pubkey = if let Some(recipient_pubkey) =
pubkey_of_signer(matches, "recipient_account", wallet_manager)?
{
recipient_pubkey
} else {
default_signer
.signer_from_path(matches, wallet_manager)?
.pubkey()
};
let (authority_signer, authority_pubkey) =
signer_of(matches, "buffer_authority", wallet_manager)?;
let signer_info = default_signer.generate_unique_signers(
vec![
Some(default_signer.signer_from_path(matches, wallet_manager)?),
authority_signer,
],
matches,
wallet_manager,
)?;
CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Close {
account_pubkey,
recipient_pubkey,
authority_index: signer_info.index_of(authority_pubkey).unwrap(),
use_lamports_unit: matches.is_present("lamports"),
}),
signers: signer_info.signers,
}
}
_ => unreachable!(),
};
Ok(response)
@@ -573,13 +701,36 @@ pub fn process_program_subcommand(
*upgrade_authority_index,
*new_upgrade_authority,
),
ProgramCliCommand::Show { account_pubkey } => {
process_show(&rpc_client, config, *account_pubkey)
}
ProgramCliCommand::Show {
account_pubkey,
authority_pubkey,
all,
use_lamports_unit,
} => process_show(
&rpc_client,
config,
*account_pubkey,
*authority_pubkey,
*all,
*use_lamports_unit,
),
ProgramCliCommand::Dump {
account_pubkey,
output_location,
} => process_dump(&rpc_client, config, *account_pubkey, output_location),
ProgramCliCommand::Close {
account_pubkey,
recipient_pubkey,
authority_index,
use_lamports_unit,
} => process_close(
&rpc_client,
config,
*account_pubkey,
*recipient_pubkey,
*authority_index,
*use_lamports_unit,
),
}
}
@@ -945,10 +1096,41 @@ fn process_set_authority(
Ok(config.output_format.formatted_string(&authority))
}
fn get_buffers(
rpc_client: &RpcClient,
authority_pubkey: Option<Pubkey>,
) -> Result<Vec<(Pubkey, Account)>, Box<dyn std::error::Error>> {
let mut bytes = vec![1, 0, 0, 0, 1];
let length = bytes.len() + 32; // Pubkey length
if let Some(authority_pubkey) = authority_pubkey {
bytes.extend_from_slice(authority_pubkey.as_ref());
}
let results = rpc_client.get_program_accounts_with_config(
&bpf_loader_upgradeable::id(),
RpcProgramAccountsConfig {
filters: Some(vec![RpcFilterType::Memcmp(Memcmp {
offset: 0,
bytes: MemcmpEncodedBytes::Binary(bs58::encode(bytes).into_string()),
encoding: None,
})]),
account_config: RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
data_slice: Some(UiDataSliceConfig { offset: 0, length }),
..RpcAccountInfoConfig::default()
},
},
)?;
Ok(results)
}
fn process_show(
rpc_client: &RpcClient,
config: &CliConfig,
account_pubkey: Option<Pubkey>,
authority_pubkey: Pubkey,
all: bool,
use_lamports_unit: bool,
) -> ProcessResult {
if let Some(account_pubkey) = account_pubkey {
if let Some(account) = rpc_client
@@ -1013,6 +1195,8 @@ fn process_show(
.unwrap_or_else(|| "none".to_string()),
data_len: account.data.len()
- UpgradeableLoaderState::buffer_data_offset()?,
lamports: account.lamports,
use_lamports_unit,
}))
} else {
Err(format!(
@@ -1028,7 +1212,30 @@ fn process_show(
Err(format!("Unable to find the account {}", account_pubkey).into())
}
} else {
Err("No account specified".into())
let authority_pubkey = if all { None } else { Some(authority_pubkey) };
let mut buffers = vec![];
let results = get_buffers(rpc_client, authority_pubkey)?;
for (address, account) in results.iter() {
if let Ok(UpgradeableLoaderState::Buffer { authority_address }) = account.state() {
buffers.push(CliUpgradeableBuffer {
address: address.to_string(),
authority: authority_address
.map(|pubkey| pubkey.to_string())
.unwrap_or_else(|| "none".to_string()),
data_len: 0,
lamports: account.lamports,
use_lamports_unit,
});
} else {
return Err(format!("Error parsing account {}", address).into());
}
}
Ok(config
.output_format
.formatted_string(&CliUpgradeableBuffers {
buffers,
use_lamports_unit,
}))
}
}
@@ -1103,6 +1310,156 @@ fn process_dump(
}
}
fn close(
rpc_client: &RpcClient,
config: &CliConfig,
account_pubkey: &Pubkey,
recipient_pubkey: &Pubkey,
authority_signer: &dyn Signer,
) -> Result<(), Box<dyn std::error::Error>> {
let (blockhash, _) = rpc_client.get_recent_blockhash()?;
let mut tx = Transaction::new_unsigned(Message::new(
&[bpf_loader_upgradeable::close(
&account_pubkey,
&recipient_pubkey,
&authority_signer.pubkey(),
)],
Some(&config.signers[0].pubkey()),
));
tx.try_sign(&[config.signers[0], authority_signer], blockhash)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config(
&tx,
config.commitment,
RpcSendTransactionConfig {
skip_preflight: true,
preflight_commitment: Some(config.commitment.commitment),
..RpcSendTransactionConfig::default()
},
);
if let Err(err) = result {
if let ClientErrorKind::TransactionError(TransactionError::InstructionError(
_,
InstructionError::InvalidInstructionData,
)) = err.kind()
{
return Err("Closing a buffer account is not supported by the cluster".into());
} else {
return Err(format!("Close failed: {}", err).into());
}
}
Ok(())
}
fn process_close(
rpc_client: &RpcClient,
config: &CliConfig,
account_pubkey: Option<Pubkey>,
recipient_pubkey: Pubkey,
authority_index: SignerIndex,
use_lamports_unit: bool,
) -> ProcessResult {
let authority_signer = config.signers[authority_index];
let mut buffers = vec![];
if let Some(account_pubkey) = account_pubkey {
if let Some(account) = rpc_client
.get_account_with_commitment(&account_pubkey, config.commitment)?
.value
{
if let Ok(UpgradeableLoaderState::Buffer { authority_address }) = account.state() {
if authority_address != Some(authority_signer.pubkey()) {
return Err(format!(
"Buffer account authority {:?} does not match {:?}",
authority_address,
Some(authority_signer.pubkey())
)
.into());
} else {
close(
rpc_client,
config,
&account_pubkey,
&recipient_pubkey,
authority_signer,
)?;
buffers.push(CliUpgradeableBuffer {
address: account_pubkey.to_string(),
authority: authority_address
.map(|pubkey| pubkey.to_string())
.unwrap_or_else(|| "none".to_string()),
data_len: 0,
lamports: account.lamports,
use_lamports_unit,
});
}
} else {
return Err(format!(
"{} is not an upgradeble loader buffer account",
account_pubkey
)
.into());
}
} else {
return Err(format!("Unable to find the account {}", account_pubkey).into());
}
} else {
let mut bytes = vec![1, 0, 0, 0, 1];
bytes.extend_from_slice(authority_signer.pubkey().as_ref());
let length = bytes.len();
let results = rpc_client.get_program_accounts_with_config(
&bpf_loader_upgradeable::id(),
RpcProgramAccountsConfig {
filters: Some(vec![RpcFilterType::Memcmp(Memcmp {
offset: 0,
bytes: MemcmpEncodedBytes::Binary(bs58::encode(bytes).into_string()),
encoding: None,
})]),
account_config: RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
data_slice: Some(UiDataSliceConfig { offset: 0, length }),
..RpcAccountInfoConfig::default()
},
},
)?;
for (address, account) in results.iter() {
if close(
rpc_client,
config,
&address,
&recipient_pubkey,
authority_signer,
)
.is_ok()
{
if let Ok(UpgradeableLoaderState::Buffer { authority_address }) = account.state() {
buffers.push(CliUpgradeableBuffer {
address: address.to_string(),
authority: authority_address
.map(|address| address.to_string())
.unwrap_or_else(|| "none".to_string()),
data_len: 0,
lamports: account.lamports,
use_lamports_unit,
});
} else {
return Err(format!("Error parsing account {}", address).into());
}
}
}
}
Ok(config
.output_format
.formatted_string(&CliUpgradeableBuffers {
buffers,
use_lamports_unit,
}))
}
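For completeness, a sketch that sends the same bpf_loader_upgradeable::close instruction that process_close builds for a single buffer, driven from a standalone script; the RPC URL, keypair file name, and command-line argument are placeholders:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{
    bpf_loader_upgradeable,
    message::Message,
    pubkey::Pubkey,
    signature::{read_keypair_file, Signer},
    transaction::Transaction,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let authority = read_keypair_file("buffer-authority.json")?;
    // Buffer address passed as the first CLI argument.
    let buffer: Pubkey = std::env::args().nth(1).expect("buffer address").parse()?;
    let recipient = authority.pubkey(); // reclaim the lamports to the authority

    let (blockhash, _) = rpc_client.get_recent_blockhash()?;
    let message = Message::new(
        &[bpf_loader_upgradeable::close(
            &buffer,
            &recipient,
            &authority.pubkey(),
        )],
        Some(&authority.pubkey()),
    );
    let mut tx = Transaction::new_unsigned(message);
    tx.try_sign(&[&authority], blockhash)?;
    let signature = rpc_client.send_and_confirm_transaction(&tx)?;
    println!("Closed buffer {} in {}", buffer, signature);
    Ok(())
}
```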
/// Deploy using non-upgradeable loader
pub fn process_deploy(
rpc_client: &RpcClient,
@@ -1614,16 +1971,21 @@ fn report_ephemeral_mnemonic(words: usize, mnemonic: bip39::Mnemonic) {
let phrase: &str = mnemonic.phrase();
let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap();
eprintln!(
"{}\nTo resume a failed deploy, recover the ephemeral keypair file with",
"{}\nRecover the intermediate account's ephemeral keypair file with",
divider
);
eprintln!(
"`solana-keygen recover` and the following {}-word seed phrase,",
"`solana-keygen recover` and the following {}-word seed phrase:",
words
);
eprintln!("{}\n{}\n{}", divider, phrase, divider);
eprintln!("To resume a deploy, pass the recovered keypair as");
eprintln!("the [PROGRAM_ADDRESS_SIGNER] argument to `solana deploy` or");
eprintln!("as the [BUFFER_SIGNER] to `solana program deploy` or `solana write-buffer'.");
eprintln!("Or to recover the account's lamports, pass it as the");
eprintln!(
"then pass it as the [BUFFER_SIGNER] argument to `solana deploy` or `solana write-buffer`\n{}\n{}\n{}",
divider, phrase, divider
"[BUFFER_ACCOUNT_ADDRESS] argument to `solana program close`.\n{}",
divider
);
}
@@ -2267,9 +2629,6 @@ mod tests {
let authority = Keypair::new();
let authority_keypair_file = make_tmp_path("authority_keypair_file");
write_keypair_file(&authority, &authority_keypair_file).unwrap();
let new_authority_pubkey = Keypair::new();
let new_authority_pubkey_file = make_tmp_path("authority_keypair_file");
write_keypair_file(&new_authority_pubkey, &new_authority_pubkey_file).unwrap();
let test_deploy = test_commands.clone().get_matches_from(vec![
"test",
"program",
@@ -2331,16 +2690,16 @@ mod tests {
);
let buffer_pubkey = Pubkey::new_unique();
let new_authority_pubkey = Keypair::new();
let new_authority_pubkey_file = make_tmp_path("authority_keypair_file");
write_keypair_file(&new_authority_pubkey, &new_authority_pubkey_file).unwrap();
let new_authority_keypair = Keypair::new();
let new_authority_keypair_file = make_tmp_path("authority_keypair_file");
write_keypair_file(&new_authority_keypair, &new_authority_keypair_file).unwrap();
let test_deploy = test_commands.clone().get_matches_from(vec![
"test",
"program",
"set-buffer-authority",
&buffer_pubkey.to_string(),
"--new-buffer-authority",
&new_authority_pubkey_file,
&new_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_deploy, &default_signer, &mut None).unwrap(),
@@ -2348,13 +2707,223 @@ mod tests {
command: CliCommand::Program(ProgramCliCommand::SetBufferAuthority {
buffer_pubkey,
buffer_authority_index: Some(0),
new_buffer_authority: new_authority_pubkey.pubkey(),
new_buffer_authority: new_authority_keypair.pubkey(),
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
);
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_cli_parse_show() {
let test_commands = app("test", "desc", "version");
let default_keypair = Keypair::new();
let keypair_file = make_tmp_path("keypair_file");
write_keypair_file(&default_keypair, &keypair_file).unwrap();
let default_signer = DefaultSigner {
path: keypair_file,
arg_name: "".to_string(),
};
// defaults
let buffer_pubkey = Pubkey::new_unique();
let authority_keypair = Keypair::new();
let authority_keypair_file = make_tmp_path("authority_keypair_file");
write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap();
let test_command = test_commands.clone().get_matches_from(vec![
"test",
"program",
"show",
&buffer_pubkey.to_string(),
]);
assert_eq!(
parse_command(&test_command, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: Some(buffer_pubkey),
authority_pubkey: default_keypair.pubkey(),
all: false,
use_lamports_unit: false,
}),
signers: vec![],
}
);
let test_command = test_commands.clone().get_matches_from(vec![
"test",
"program",
"show",
"--buffers",
"--all",
"--lamports",
]);
assert_eq!(
parse_command(&test_command, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: None,
authority_pubkey: default_keypair.pubkey(),
all: true,
use_lamports_unit: true,
}),
signers: vec![],
}
);
let test_command = test_commands.clone().get_matches_from(vec![
"test",
"program",
"show",
"--buffers",
"--buffer-authority",
&authority_keypair.pubkey().to_string(),
]);
assert_eq!(
parse_command(&test_command, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: None,
authority_pubkey: authority_keypair.pubkey(),
all: false,
use_lamports_unit: false,
}),
signers: vec![],
}
);
let test_command = test_commands.clone().get_matches_from(vec![
"test",
"program",
"show",
"--buffers",
"--buffer-authority",
&authority_keypair_file,
]);
assert_eq!(
parse_command(&test_command, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: None,
authority_pubkey: authority_keypair.pubkey(),
all: false,
use_lamports_unit: false,
}),
signers: vec![],
}
);
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_cli_parse_close() {
let test_commands = app("test", "desc", "version");
let default_keypair = Keypair::new();
let keypair_file = make_tmp_path("keypair_file");
write_keypair_file(&default_keypair, &keypair_file).unwrap();
let default_signer = DefaultSigner {
path: keypair_file.clone(),
arg_name: "".to_string(),
};
// defaults
let buffer_pubkey = Pubkey::new_unique();
let recipient_pubkey = Pubkey::new_unique();
let authority_keypair = Keypair::new();
let authority_keypair_file = make_tmp_path("authority_keypair_file");
let test_command = test_commands.clone().get_matches_from(vec![
"test",
"program",
"close",
&buffer_pubkey.to_string(),
]);
assert_eq!(
parse_command(&test_command, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Close {
account_pubkey: Some(buffer_pubkey),
recipient_pubkey: default_keypair.pubkey(),
authority_index: 0,
use_lamports_unit: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
);
// with authority
write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap();
let test_command = test_commands.clone().get_matches_from(vec![
"test",
"program",
"close",
&buffer_pubkey.to_string(),
"--buffer-authority",
&authority_keypair_file,
]);
assert_eq!(
parse_command(&test_command, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Close {
account_pubkey: Some(buffer_pubkey),
recipient_pubkey: default_keypair.pubkey(),
authority_index: 1,
use_lamports_unit: false,
}),
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
read_keypair_file(&authority_keypair_file).unwrap().into(),
],
}
);
// with recipient
let test_command = test_commands.clone().get_matches_from(vec![
"test",
"program",
"close",
&buffer_pubkey.to_string(),
"--recipient",
&recipient_pubkey.to_string(),
]);
assert_eq!(
parse_command(&test_command, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Close {
account_pubkey: Some(buffer_pubkey),
recipient_pubkey,
authority_index: 0,
use_lamports_unit: false,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into(),],
}
);
// --buffers and lamports
let test_command = test_commands.clone().get_matches_from(vec![
"test",
"program",
"close",
"--buffers",
"--lamports",
]);
assert_eq!(
parse_command(&test_command, &default_signer, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Program(ProgramCliCommand::Close {
account_pubkey: None,
recipient_pubkey: default_keypair.pubkey(),
authority_index: 0,
use_lamports_unit: true,
}),
signers: vec![read_keypair_file(&keypair_file).unwrap().into(),],
}
);
}
#[test]
fn test_cli_keypair_file() {
solana_logger::setup();


@@ -19,14 +19,15 @@ use solana_clap_utils::{
ArgConstant,
};
use solana_cli_output::{
return_signers, CliEpochReward, CliStakeHistory, CliStakeHistoryEntry, CliStakeState,
CliStakeType,
return_signers_with_config, CliEpochReward, CliStakeHistory, CliStakeHistoryEntry,
CliStakeState, CliStakeType, ReturnSignersConfig,
};
use solana_client::{
blockhash_query::BlockhashQuery,
client_error::{ClientError, ClientErrorKind},
nonce_utils,
rpc_client::RpcClient,
rpc_config::RpcConfirmedBlockConfig,
rpc_custom_error,
rpc_request::{self, DELINQUENT_VALIDATOR_SLOT_DISTANCE},
};
@@ -441,6 +442,7 @@ pub fn parse_stake_create_account(
let withdrawer = pubkey_of_signer(matches, WITHDRAW_AUTHORITY_ARG.name, wallet_manager)?;
let amount = SpendAmount::new_from_matches(matches, "amount");
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?;
let (nonce_authority, nonce_authority_pubkey) =
@@ -470,6 +472,7 @@ pub fn parse_stake_create_account(
},
amount,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
@@ -491,6 +494,7 @@ pub fn parse_stake_delegate_stake(
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let force = matches.is_present("force");
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of(matches, NONCE_ARG.name);
let (stake_authority, stake_authority_pubkey) =
@@ -513,6 +517,7 @@ pub fn parse_stake_delegate_stake(
stake_authority: signer_info.index_of(stake_authority_pubkey).unwrap(),
force,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
@@ -565,6 +570,7 @@ pub fn parse_stake_authorize(
bulk_signers.push(authority);
};
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of(matches, NONCE_ARG.name);
let (nonce_authority, nonce_authority_pubkey) =
@@ -600,6 +606,7 @@ pub fn parse_stake_authorize(
stake_account_pubkey,
new_authorizations,
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
@@ -623,6 +630,7 @@ pub fn parse_split_stake(
let seed = matches.value_of("seed").map(|s| s.to_string());
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of(matches, NONCE_ARG.name);
let (stake_authority, stake_authority_pubkey) =
@@ -643,6 +651,7 @@ pub fn parse_split_stake(
stake_account_pubkey,
stake_authority: signer_info.index_of(stake_authority_pubkey).unwrap(),
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
@@ -666,6 +675,7 @@ pub fn parse_merge_stake(
let source_stake_account_pubkey = pubkey_of(matches, "source_stake_account_pubkey").unwrap();
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of(matches, NONCE_ARG.name);
let (stake_authority, stake_authority_pubkey) =
@@ -687,6 +697,7 @@ pub fn parse_merge_stake(
source_stake_account_pubkey,
stake_authority: signer_info.index_of(stake_authority_pubkey).unwrap(),
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
@@ -704,6 +715,7 @@ pub fn parse_stake_deactivate_stake(
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of(matches, NONCE_ARG.name);
let (stake_authority, stake_authority_pubkey) =
@@ -724,6 +736,7 @@ pub fn parse_stake_deactivate_stake(
stake_account_pubkey,
stake_authority: signer_info.index_of(stake_authority_pubkey).unwrap(),
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
@@ -744,6 +757,7 @@ pub fn parse_stake_withdraw_stake(
pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap();
let lamports = lamports_of_sol(matches, "amount").unwrap();
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of(matches, NONCE_ARG.name);
let (withdraw_authority, withdraw_authority_pubkey) =
@@ -770,6 +784,7 @@ pub fn parse_stake_withdraw_stake(
lamports,
withdraw_authority: signer_info.index_of(withdraw_authority_pubkey).unwrap(),
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
@@ -792,6 +807,7 @@ pub fn parse_stake_set_lockup(
let new_custodian = pubkey_of_signer(matches, "new_custodian", wallet_manager)?;
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name);
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of(matches, NONCE_ARG.name);
@@ -817,6 +833,7 @@ pub fn parse_stake_set_lockup(
},
custodian: signer_info.index_of(custodian_pubkey).unwrap(),
sign_only,
dump_transaction_message,
blockhash_query,
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
@@ -861,6 +878,7 @@ pub fn process_create_stake_account(
lockup: &Lockup,
amount: SpendAmount,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<&Pubkey>,
nonce_authority: SignerIndex,
@@ -970,7 +988,13 @@ pub fn process_create_stake_account(
let mut tx = Transaction::new_unsigned(message);
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config.output_format)
return_signers_with_config(
&tx,
&config.output_format,
&ReturnSignersConfig {
dump_transaction_message,
},
)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
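Editor's note: the sign-only branches above now route through return_signers_with_config with the new dump_transaction_message flag instead of return_signers. A minimal sketch of the idea behind the flag, assuming the base64 crate and an illustrative helper name that is not part of this diff: when the flag is set, the sign-only output can additionally carry the serialized message so an offline signer can inspect exactly what it is being asked to sign.

    use solana_sdk::transaction::Transaction;

    // Illustrative helper: encode only the message bytes (not the partial
    // signatures), which is the part an offline signer needs to verify.
    fn dump_message_base64(tx: &Transaction) -> String {
        base64::encode(tx.message_data())
    }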
@@ -986,6 +1010,7 @@ pub fn process_stake_authorize(
new_authorizations: &[(StakeAuthorize, Pubkey, SignerIndex)],
custodian: Option<SignerIndex>,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -1028,7 +1053,13 @@ pub fn process_stake_authorize(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config.output_format)
return_signers_with_config(
&tx,
&config.output_format,
&ReturnSignersConfig {
dump_transaction_message,
},
)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1058,6 +1089,7 @@ pub fn process_deactivate_stake_account(
stake_account_pubkey: &Pubkey,
stake_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -1087,7 +1119,13 @@ pub fn process_deactivate_stake_account(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config.output_format)
return_signers_with_config(
&tx,
&config.output_format,
&ReturnSignersConfig {
dump_transaction_message,
},
)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1120,6 +1158,7 @@ pub fn process_withdraw_stake(
withdraw_authority: SignerIndex,
custodian: Option<SignerIndex>,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<&Pubkey>,
nonce_authority: SignerIndex,
@@ -1155,7 +1194,13 @@ pub fn process_withdraw_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config.output_format)
return_signers_with_config(
&tx,
&config.output_format,
&ReturnSignersConfig {
dump_transaction_message,
},
)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1185,6 +1230,7 @@ pub fn process_split_stake(
stake_account_pubkey: &Pubkey,
stake_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -1294,7 +1340,13 @@ pub fn process_split_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config.output_format)
return_signers_with_config(
&tx,
&config.output_format,
&ReturnSignersConfig {
dump_transaction_message,
},
)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1325,6 +1377,7 @@ pub fn process_merge_stake(
source_stake_account_pubkey: &Pubkey,
stake_authority: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -1392,7 +1445,13 @@ pub fn process_merge_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config.output_format)
return_signers_with_config(
&tx,
&config.output_format,
&ReturnSignersConfig {
dump_transaction_message,
},
)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1427,6 +1486,7 @@ pub fn process_stake_set_lockup(
lockup: &mut LockupArgs,
custodian: SignerIndex,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -1458,7 +1518,13 @@ pub fn process_stake_set_lockup(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config.output_format)
return_signers_with_config(
&tx,
&config.output_format,
&ReturnSignersConfig {
dump_transaction_message,
},
)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1609,9 +1675,9 @@ pub(crate) fn fetch_epoch_rewards(
.get(0)
.ok_or_else(|| format!("Unable to fetch first confirmed block for epoch {}", epoch))?;
let first_confirmed_block = match rpc_client.get_confirmed_block_with_encoding(
let first_confirmed_block = match rpc_client.get_confirmed_block_with_config(
first_confirmed_block_in_epoch,
solana_transaction_status::UiTransactionEncoding::Base64,
RpcConfirmedBlockConfig::rewards_only(),
) {
Ok(first_confirmed_block) => first_confirmed_block,
Err(ClientError {
@@ -1637,7 +1703,7 @@ pub(crate) fn fetch_epoch_rewards(
};
// Rewards for the previous epoch are found in the first confirmed block of the current epoch
let previous_epoch_rewards = first_confirmed_block.rewards;
let previous_epoch_rewards = first_confirmed_block.rewards.unwrap_or_default();
if let Some((effective_slot, epoch_end_time, epoch_rewards)) = epoch_info {
let wallclock_epoch_duration = if epoch_end_time > epoch_start_time {
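Editor's note: a hedged caller-side sketch of the change above — fetch_epoch_rewards now requests rewards only via get_confirmed_block_with_config, and because rewards is an Option on the returned UiConfirmedBlock it is defaulted when absent. The function name and error handling here are illustrative, not taken from this diff.

    use solana_client::{rpc_client::RpcClient, rpc_config::RpcConfirmedBlockConfig};
    use solana_sdk::clock::Slot;

    fn epoch_boundary_rewards(
        rpc: &RpcClient,
        first_slot_in_epoch: Slot,
    ) -> Result<(), Box<dyn std::error::Error>> {
        let block = rpc.get_confirmed_block_with_config(
            first_slot_in_epoch,
            RpcConfirmedBlockConfig::rewards_only(), // skip transaction details entirely
        )?;
        for reward in block.rewards.unwrap_or_default() {
            println!("{}: {} lamports", reward.pubkey, reward.lamports);
        }
        Ok(())
    }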
@@ -1772,6 +1838,7 @@ pub fn process_delegate_stake(
stake_authority: SignerIndex,
force: bool,
sign_only: bool,
dump_transaction_message: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@@ -1856,7 +1923,13 @@ pub fn process_delegate_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config.output_format)
return_signers_with_config(
&tx,
&config.output_format,
&ReturnSignersConfig {
dump_transaction_message,
},
)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1953,6 +2026,7 @@ mod tests {
(StakeAuthorize::Withdrawer, new_withdraw_authority, 0,),
],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -1988,6 +2062,7 @@ mod tests {
(StakeAuthorize::Withdrawer, new_withdraw_authority, 2,),
],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2027,6 +2102,7 @@ mod tests {
(StakeAuthorize::Withdrawer, new_withdraw_authority, 1,),
],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2055,6 +2131,7 @@ mod tests {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, new_stake_authority, 0,),],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2080,6 +2157,7 @@ mod tests {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, new_stake_authority, 1,),],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2111,6 +2189,7 @@ mod tests {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, new_stake_authority, 1,),],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2143,6 +2222,7 @@ mod tests {
0,
),],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2172,6 +2252,7 @@ mod tests {
1,
),],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2207,6 +2288,7 @@ mod tests {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)],
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -2241,6 +2323,7 @@ mod tests {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::Cluster,
blockhash
@@ -2288,6 +2371,7 @@ mod tests {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account),
blockhash
@@ -2321,6 +2405,7 @@ mod tests {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::Cluster,
blockhash
@@ -2359,6 +2444,7 @@ mod tests {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account_pubkey),
blockhash
@@ -2396,6 +2482,7 @@ mod tests {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2431,6 +2518,7 @@ mod tests {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::Cluster,
blockhash
@@ -2481,6 +2569,7 @@ mod tests {
},
amount: SpendAmount::Some(50_000_000_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2518,6 +2607,7 @@ mod tests {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000_000_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2571,6 +2661,7 @@ mod tests {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000_000_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account),
nonce_hash
@@ -2605,6 +2696,7 @@ mod tests {
stake_authority: 0,
force: false,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -2634,6 +2726,7 @@ mod tests {
stake_authority: 1,
force: false,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -2665,6 +2758,7 @@ mod tests {
stake_authority: 0,
force: true,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -2694,6 +2788,7 @@ mod tests {
stake_authority: 0,
force: false,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::Cluster,
blockhash
@@ -2724,6 +2819,7 @@ mod tests {
stake_authority: 0,
force: false,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -2758,6 +2854,7 @@ mod tests {
stake_authority: 0,
force: false,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::Cluster,
blockhash
@@ -2804,6 +2901,7 @@ mod tests {
stake_authority: 0,
force: false,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account),
blockhash
@@ -2841,6 +2939,7 @@ mod tests {
stake_authority: 0,
force: false,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2872,6 +2971,7 @@ mod tests {
withdraw_authority: 0,
custodian: None,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2902,6 +3002,7 @@ mod tests {
withdraw_authority: 1,
custodian: None,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2937,6 +3038,7 @@ mod tests {
withdraw_authority: 0,
custodian: Some(1),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -2980,6 +3082,7 @@ mod tests {
withdraw_authority: 0,
custodian: None,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account),
nonce_hash
@@ -3010,6 +3113,7 @@ mod tests {
stake_account_pubkey,
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -3034,6 +3138,7 @@ mod tests {
stake_account_pubkey,
stake_authority: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -3065,6 +3170,7 @@ mod tests {
stake_account_pubkey,
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::Cluster,
blockhash
@@ -3092,6 +3198,7 @@ mod tests {
stake_account_pubkey,
stake_authority: 0,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -3123,6 +3230,7 @@ mod tests {
stake_account_pubkey,
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::Cluster,
blockhash
@@ -3166,6 +3274,7 @@ mod tests {
stake_account_pubkey,
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account),
blockhash
@@ -3197,6 +3306,7 @@ mod tests {
stake_account_pubkey,
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -3231,6 +3341,7 @@ mod tests {
stake_account_pubkey: stake_account_keypair.pubkey(),
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -3292,6 +3403,7 @@ mod tests {
stake_account_pubkey: stake_account_keypair.pubkey(),
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account),
nonce_hash
@@ -3333,6 +3445,7 @@ mod tests {
source_stake_account_pubkey,
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,

View File

@@ -293,6 +293,8 @@ fn test_create_account_with_seed() {
to: to_address,
from: 0,
sign_only: true,
dump_transaction_message: true,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_address),
@@ -316,6 +318,8 @@ fn test_create_account_with_seed() {
to: to_address,
from: 0,
sign_only: false,
dump_transaction_message: true,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_address),

View File

@@ -442,6 +442,9 @@ fn test_cli_program_deploy_with_authority() {
config.signers = vec![&keypair];
config.command = CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: Some(program_pubkey),
authority_pubkey: keypair.pubkey(),
all: false,
use_lamports_unit: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -530,6 +533,9 @@ fn test_cli_program_deploy_with_authority() {
config.signers = vec![&keypair];
config.command = CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: Some(program_pubkey),
authority_pubkey: keypair.pubkey(),
all: false,
use_lamports_unit: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -657,9 +663,12 @@ fn test_cli_program_write_buffer() {
);
// Get buffer authority
config.signers = vec![&keypair];
config.signers = vec![];
config.command = CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: Some(buffer_keypair.pubkey()),
authority_pubkey: keypair.pubkey(),
all: false,
use_lamports_unit: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -747,9 +756,12 @@ fn test_cli_program_write_buffer() {
);
// Get buffer authority
config.signers = vec![&keypair];
config.signers = vec![];
config.command = CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: Some(buffer_pubkey),
authority_pubkey: keypair.pubkey(),
all: false,
use_lamports_unit: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -764,6 +776,60 @@ fn test_cli_program_write_buffer() {
authority_keypair.pubkey(),
Pubkey::from_str(&authority_pubkey_str).unwrap()
);
// Close buffer
let close_account = rpc_client.get_account(&buffer_pubkey).unwrap();
assert_eq!(minimum_balance_for_buffer, close_account.lamports);
let recipient_pubkey = Pubkey::new_unique();
config.signers = vec![&keypair, &authority_keypair];
config.command = CliCommand::Program(ProgramCliCommand::Close {
account_pubkey: Some(buffer_pubkey),
recipient_pubkey,
authority_index: 1,
use_lamports_unit: false,
});
process_command(&config).unwrap();
rpc_client.get_account(&buffer_pubkey).unwrap_err();
let recipient_account = rpc_client.get_account(&recipient_pubkey).unwrap();
assert_eq!(minimum_balance_for_buffer, recipient_account.lamports);
// Write a buffer with default params
config.signers = vec![&keypair];
config.command = CliCommand::Program(ProgramCliCommand::WriteBuffer {
program_location: pathbuf.to_str().unwrap().to_string(),
buffer_signer_index: None,
buffer_pubkey: None,
buffer_authority_signer_index: None,
max_len: None,
});
config.output_format = OutputFormat::JsonCompact;
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
let buffer_pubkey_str = json
.as_object()
.unwrap()
.get("buffer")
.unwrap()
.as_str()
.unwrap();
let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap();
// Close buffers and deposit default keypair
let pre_lamports = rpc_client.get_account(&keypair.pubkey()).unwrap().lamports;
config.signers = vec![&keypair];
config.command = CliCommand::Program(ProgramCliCommand::Close {
account_pubkey: Some(new_buffer_pubkey),
recipient_pubkey: keypair.pubkey(),
authority_index: 0,
use_lamports_unit: false,
});
process_command(&config).unwrap();
rpc_client.get_account(&new_buffer_pubkey).unwrap_err();
let recipient_account = rpc_client.get_account(&keypair.pubkey()).unwrap();
assert_eq!(
pre_lamports + minimum_balance_for_buffer,
recipient_account.lamports
);
}
#[test]
@@ -1029,6 +1095,9 @@ fn test_cli_program_show() {
config.signers = vec![&keypair];
config.command = CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: Some(buffer_keypair.pubkey()),
authority_pubkey: keypair.pubkey(),
all: false,
use_lamports_unit: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
@@ -1086,6 +1155,9 @@ fn test_cli_program_show() {
config.signers = vec![&keypair];
config.command = CliCommand::Program(ProgramCliCommand::Show {
account_pubkey: Some(program_keypair.pubkey()),
authority_pubkey: keypair.pubkey(),
all: false,
use_lamports_unit: false,
});
let response = process_command(&config);
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();

View File

@@ -70,6 +70,7 @@ fn test_stake_delegation_force() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -86,6 +87,7 @@ fn test_stake_delegation_force() {
stake_authority: 0,
force: false,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -100,6 +102,7 @@ fn test_stake_delegation_force() {
stake_authority: 0,
force: true,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -151,6 +154,7 @@ fn test_seed_stake_delegation_and_deactivation() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -166,6 +170,7 @@ fn test_seed_stake_delegation_and_deactivation() {
stake_authority: 0,
force: true,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -178,6 +183,7 @@ fn test_seed_stake_delegation_and_deactivation() {
stake_account_pubkey: stake_address,
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -224,6 +230,7 @@ fn test_stake_delegation_and_deactivation() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -240,6 +247,7 @@ fn test_stake_delegation_and_deactivation() {
stake_authority: 0,
force: true,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -252,6 +260,7 @@ fn test_stake_delegation_and_deactivation() {
stake_account_pubkey: stake_keypair.pubkey(),
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -319,6 +328,7 @@ fn test_offline_stake_delegation_and_deactivation() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -335,6 +345,7 @@ fn test_offline_stake_delegation_and_deactivation() {
stake_authority: 0,
force: true,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -354,6 +365,7 @@ fn test_offline_stake_delegation_and_deactivation() {
stake_authority: 0,
force: true,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -367,6 +379,7 @@ fn test_offline_stake_delegation_and_deactivation() {
stake_account_pubkey: stake_keypair.pubkey(),
stake_authority: 0,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -383,6 +396,7 @@ fn test_offline_stake_delegation_and_deactivation() {
stake_account_pubkey: stake_keypair.pubkey(),
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -431,6 +445,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -468,6 +483,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
stake_authority: 0,
force: true,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
nonce_hash,
@@ -493,6 +509,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
stake_account_pubkey: stake_keypair.pubkey(),
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
nonce_hash,
@@ -559,6 +576,7 @@ fn test_stake_authorize() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -575,6 +593,7 @@ fn test_stake_authorize() {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 0)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -603,6 +622,7 @@ fn test_stake_authorize() {
(StakeAuthorize::Withdrawer, withdraw_authority_pubkey, 0),
],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -626,6 +646,7 @@ fn test_stake_authorize() {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, offline_authority_pubkey, 1)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -649,6 +670,7 @@ fn test_stake_authorize() {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)],
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -665,6 +687,7 @@ fn test_stake_authorize() {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -712,6 +735,7 @@ fn test_stake_authorize() {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)],
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
@@ -729,6 +753,7 @@ fn test_stake_authorize() {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
sign_only.blockhash,
@@ -817,6 +842,7 @@ fn test_stake_authorize_with_fee_payer() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -833,6 +859,7 @@ fn test_stake_authorize_with_fee_payer() {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, offline_pubkey, 0)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -852,6 +879,7 @@ fn test_stake_authorize_with_fee_payer() {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)],
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -868,6 +896,7 @@ fn test_stake_authorize_with_fee_payer() {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)],
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
@@ -936,6 +965,7 @@ fn test_stake_split() {
lockup: Lockup::default(),
amount: SpendAmount::Some(10 * minimum_stake_balance),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -982,6 +1012,7 @@ fn test_stake_split() {
stake_account_pubkey,
stake_authority: 0,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
@@ -1000,6 +1031,7 @@ fn test_stake_split() {
stake_account_pubkey,
stake_authority: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
sign_only.blockhash,
@@ -1085,6 +1117,7 @@ fn test_stake_set_lockup() {
lockup,
amount: SpendAmount::Some(10 * minimum_stake_balance),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@@ -1110,6 +1143,7 @@ fn test_stake_set_lockup() {
lockup,
custodian: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -1143,6 +1177,7 @@ fn test_stake_set_lockup() {
lockup,
custodian: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -1161,6 +1196,7 @@ fn test_stake_set_lockup() {
lockup,
custodian: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -1191,6 +1227,7 @@ fn test_stake_set_lockup() {
lockup,
custodian: 1,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
@@ -1235,6 +1272,7 @@ fn test_stake_set_lockup() {
lockup,
custodian: 0,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_account_pubkey),
nonce_authority: 0,
@@ -1251,6 +1289,7 @@ fn test_stake_set_lockup() {
lockup,
custodian: 0,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account_pubkey),
sign_only.blockhash,
@@ -1349,6 +1388,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_pubkey),
nonce_authority: 0,
@@ -1370,6 +1410,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_pubkey),
sign_only.blockhash,
@@ -1403,6 +1444,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
withdraw_authority: 0,
custodian: None,
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_pubkey),
nonce_authority: 0,
@@ -1419,6 +1461,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
withdraw_authority: 0,
custodian: None,
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_pubkey),
sign_only.blockhash,
@@ -1451,6 +1494,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: true,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_pubkey),
nonce_authority: 0,
@@ -1470,6 +1514,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
lockup: Lockup::default(),
amount: SpendAmount::Some(50_000),
sign_only: false,
dump_transaction_message: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_pubkey),
sign_only.blockhash,

View File

@@ -51,6 +51,8 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@@ -69,6 +71,8 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@@ -99,6 +103,8 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: true,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
@@ -118,6 +124,8 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
@@ -162,6 +170,8 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
@@ -213,6 +223,8 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: true,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
@@ -231,6 +243,8 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
@@ -299,6 +313,8 @@ fn test_transfer_multisession_signing() {
to: to_pubkey,
from: 1,
sign_only: true,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
@@ -327,6 +343,8 @@ fn test_transfer_multisession_signing() {
to: to_pubkey,
from: 1,
sign_only: true,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
@@ -352,6 +370,8 @@ fn test_transfer_multisession_signing() {
to: to_pubkey,
from: 1,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
@@ -399,6 +419,8 @@ fn test_transfer_all() {
to: recipient_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@@ -412,6 +434,53 @@ fn test_transfer_all() {
check_recent_balance(49_999, &rpc_client, &recipient_pubkey);
}
#[test]
fn test_transfer_unfunded_recipient() {
solana_logger::setup();
let mint_keypair = Keypair::new();
let test_validator = TestValidator::with_custom_fees(mint_keypair.pubkey(), 1);
let faucet_addr = run_local_faucet(mint_keypair, None);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
let default_signer = Keypair::new();
let mut config = CliConfig::recent_for_tests();
config.json_rpc_url = test_validator.rpc_url();
config.signers = vec![&default_signer];
let sender_pubkey = config.signers[0].pubkey();
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &sender_pubkey, 50_000, &config)
.unwrap();
check_recent_balance(50_000, &rpc_client, &sender_pubkey);
check_recent_balance(0, &rpc_client, &recipient_pubkey);
check_ready(&rpc_client);
// Plain ole transfer
config.command = CliCommand::Transfer {
amount: SpendAmount::All,
to: recipient_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: false,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
derived_address_seed: None,
derived_address_program_id: None,
};
// Expect failure due to unfunded recipient and the lack of the `allow_unfunded_recipient` flag
process_command(&config).unwrap_err();
}
#[test]
fn test_transfer_with_seed() {
solana_logger::setup();
@@ -454,6 +523,8 @@ fn test_transfer_with_seed() {
to: recipient_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,

View File

@@ -71,6 +71,8 @@ fn test_vote_authorize_and_withdraw() {
to: vote_account_pubkey,
from: 0,
sign_only: false,
dump_transaction_message: false,
allow_unfunded_recipient: true,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.6.0"
version = "1.6.2"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,18 +19,18 @@ jsonrpc-core = "17.0.0"
log = "0.4.11"
net2 = "0.2.37"
rayon = "1.5.0"
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "0.11.0"
serde = "1.0.122"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-vote-program = { path = "../programs/vote", version = "1.6.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.6.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-net-utils = { path = "../net-utils", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.6.2" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -38,7 +38,7 @@ url = "2.1.1"
[dev-dependencies]
assert_matches = "1.3.0"
jsonrpc-http-server = "17.0.0"
solana-logger = { path = "../logger", version = "1.6.0" }
solana-logger = { path = "../logger", version = "=1.6.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,12 +1,15 @@
use crate::{nonce_utils, rpc_client::RpcClient};
use clap::ArgMatches;
use solana_clap_utils::{
input_parsers::{pubkey_of, value_of},
nonce::*,
offline::*,
};
use solana_sdk::{
commitment_config::CommitmentConfig, fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey,
use {
crate::{nonce_utils, rpc_client::RpcClient},
clap::ArgMatches,
solana_clap_utils::{
input_parsers::{pubkey_of, value_of},
nonce::*,
offline::*,
},
solana_sdk::{
commitment_config::CommitmentConfig, fee_calculator::FeeCalculator, hash::Hash,
pubkey::Pubkey,
},
};
#[derive(Debug, PartialEq)]

View File

@@ -1,9 +1,11 @@
use crate::rpc_request;
use solana_sdk::{
signature::SignerError, transaction::TransactionError, transport::TransportError,
use {
crate::rpc_request,
solana_sdk::{
signature::SignerError, transaction::TransactionError, transport::TransportError,
},
std::io,
thiserror::Error,
};
use std::io;
use thiserror::Error;
pub use reqwest; // export `reqwest` for clients

View File

@@ -1,13 +1,15 @@
use crate::{
client_error::Result,
rpc_custom_error,
rpc_request::{RpcError, RpcRequest, RpcResponseErrorData},
rpc_response::RpcSimulateTransactionResult,
rpc_sender::RpcSender,
use {
crate::{
client_error::Result,
rpc_custom_error,
rpc_request::{RpcError, RpcRequest, RpcResponseErrorData},
rpc_response::RpcSimulateTransactionResult,
rpc_sender::RpcSender,
},
log::*,
reqwest::{self, header::CONTENT_TYPE, StatusCode},
std::{thread::sleep, time::Duration},
};
use log::*;
use reqwest::{self, header::CONTENT_TYPE, StatusCode};
use std::{thread::sleep, time::Duration};
pub struct HttpSender {
client: reqwest::blocking::Client,

View File

@@ -1,20 +1,22 @@
use crate::{
client_error::Result,
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext, RpcVersionInfo},
rpc_sender::RpcSender,
use {
crate::{
client_error::Result,
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext, RpcVersionInfo},
rpc_sender::RpcSender,
},
serde_json::{json, Number, Value},
solana_sdk::{
epoch_info::EpochInfo,
fee_calculator::{FeeCalculator, FeeRateGovernor},
instruction::InstructionError,
signature::Signature,
transaction::{self, Transaction, TransactionError},
},
solana_transaction_status::{TransactionConfirmationStatus, TransactionStatus},
solana_version::Version,
std::{collections::HashMap, sync::RwLock},
};
use serde_json::{json, Number, Value};
use solana_sdk::{
epoch_info::EpochInfo,
fee_calculator::{FeeCalculator, FeeRateGovernor},
instruction::InstructionError,
signature::Signature,
transaction::{self, Transaction, TransactionError},
};
use solana_transaction_status::{TransactionConfirmationStatus, TransactionStatus};
use solana_version::Version;
use std::{collections::HashMap, sync::RwLock};
pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
pub const SIGNATURE: &str =

View File

@@ -1,14 +1,16 @@
use crate::rpc_client::RpcClient;
use solana_sdk::{
account::{Account, ReadableAccount},
account_utils::StateMut,
commitment_config::CommitmentConfig,
nonce::{
state::{Data, Versions},
State,
use {
crate::rpc_client::RpcClient,
solana_sdk::{
account::{Account, ReadableAccount},
account_utils::StateMut,
commitment_config::CommitmentConfig,
nonce::{
state::{Data, Versions},
State,
},
pubkey::Pubkey,
system_program,
},
pubkey::Pubkey,
system_program,
};
#[derive(Debug, thiserror::Error, PartialEq)]

View File

@@ -1,12 +1,14 @@
use log::*;
use solana_sdk::{client::Client, commitment_config::CommitmentConfig, timing::duration_as_s};
use std::{
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
use {
log::*,
solana_sdk::{client::Client, commitment_config::CommitmentConfig, timing::duration_as_s},
std::{
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::sleep,
time::{Duration, Instant},
},
thread::sleep,
time::{Duration, Instant},
};
#[derive(Default)]

View File

@@ -1,27 +1,31 @@
use crate::{
rpc_config::{RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter},
rpc_response::{Response as RpcResponse, RpcLogsResponse, RpcSignatureResult, SlotInfo},
};
use log::*;
use serde::de::DeserializeOwned;
use serde_json::{
json,
value::Value::{Number, Object},
Map, Value,
};
use solana_sdk::signature::Signature;
use std::{
marker::PhantomData,
sync::{
atomic::{AtomicBool, Ordering},
mpsc::{channel, Receiver},
Arc, RwLock,
use {
crate::{
rpc_config::{
RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter,
},
rpc_response::{Response as RpcResponse, RpcLogsResponse, RpcSignatureResult, SlotInfo},
},
thread::JoinHandle,
log::*,
serde::de::DeserializeOwned,
serde_json::{
json,
value::Value::{Number, Object},
Map, Value,
},
solana_sdk::signature::Signature,
std::{
marker::PhantomData,
sync::{
atomic::{AtomicBool, Ordering},
mpsc::{channel, Receiver},
Arc, RwLock,
},
thread::JoinHandle,
},
thiserror::Error,
tungstenite::{client::AutoStream, connect, Message, WebSocket},
url::{ParseError, Url},
};
use thiserror::Error;
use tungstenite::{client::AutoStream, connect, Message, WebSocket};
use url::{ParseError, Url};
#[derive(Debug, Error)]
pub enum PubsubClientError {

View File

@@ -1,7 +1,9 @@
use crate::{rpc_config::RpcLargestAccountsFilter, rpc_response::RpcAccountBalance};
use std::{
collections::HashMap,
time::{Duration, SystemTime},
use {
crate::{rpc_config::RpcLargestAccountsFilter, rpc_response::RpcAccountBalance},
std::{
collections::HashMap,
time::{Duration, SystemTime},
},
};
#[derive(Debug, Clone)]

View File

@@ -1,47 +1,52 @@
use crate::{
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
http_sender::HttpSender,
mock_sender::{MockSender, Mocks},
rpc_config::RpcAccountInfoConfig,
rpc_config::{
RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig,
RpcProgramAccountsConfig, RpcSendTransactionConfig, RpcSimulateTransactionConfig,
RpcTokenAccountsFilter,
use {
crate::{
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
http_sender::HttpSender,
mock_sender::{MockSender, Mocks},
rpc_config::RpcAccountInfoConfig,
rpc_config::{
RpcConfirmedBlockConfig, RpcConfirmedTransactionConfig,
RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig,
RpcProgramAccountsConfig, RpcSendTransactionConfig, RpcSimulateTransactionConfig,
RpcStakeConfig, RpcTokenAccountsFilter,
},
rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter},
rpc_response::*,
rpc_sender::RpcSender,
},
bincode::serialize,
indicatif::{ProgressBar, ProgressStyle},
log::*,
serde_json::{json, Value},
solana_account_decoder::{
parse_token::{TokenAccountType, UiTokenAccount, UiTokenAmount},
UiAccount, UiAccountData, UiAccountEncoding,
},
solana_sdk::{
account::Account,
clock::{Epoch, Slot, UnixTimestamp, DEFAULT_MS_PER_SLOT, MAX_HASH_AGE_IN_SECONDS},
commitment_config::{CommitmentConfig, CommitmentLevel},
epoch_info::EpochInfo,
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, uses_durable_nonce, Transaction},
},
solana_transaction_status::{
EncodedConfirmedBlock, EncodedConfirmedTransaction, TransactionStatus, UiConfirmedBlock,
UiTransactionEncoding,
},
solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY,
std::{
cmp::min,
net::SocketAddr,
str::FromStr,
sync::RwLock,
thread::sleep,
time::{Duration, Instant},
},
rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter},
rpc_response::*,
rpc_sender::RpcSender,
};
use bincode::serialize;
use indicatif::{ProgressBar, ProgressStyle};
use log::*;
use serde_json::{json, Value};
use solana_account_decoder::{
parse_token::{TokenAccountType, UiTokenAccount, UiTokenAmount},
UiAccount, UiAccountData, UiAccountEncoding,
};
use solana_sdk::{
account::Account,
clock::{Slot, UnixTimestamp, DEFAULT_MS_PER_SLOT, MAX_HASH_AGE_IN_SECONDS},
commitment_config::{CommitmentConfig, CommitmentLevel},
epoch_info::EpochInfo,
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, uses_durable_nonce, Transaction},
};
use solana_transaction_status::{
EncodedConfirmedBlock, EncodedConfirmedTransaction, TransactionStatus, UiTransactionEncoding,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
cmp::min,
net::SocketAddr,
sync::RwLock,
thread::sleep,
time::{Duration, Instant},
};
pub struct RpcClient {
@@ -404,6 +409,41 @@ impl RpcClient {
)
}
pub fn get_slot_leaders(&self, start_slot: Slot, limit: u64) -> ClientResult<Vec<Pubkey>> {
self.send(RpcRequest::GetSlotLeaders, json!([start_slot, limit]))
.and_then(|slot_leaders: Vec<String>| {
slot_leaders
.iter()
.map(|slot_leader| {
Pubkey::from_str(slot_leader).map_err(|err| {
ClientErrorKind::Custom(format!(
"pubkey deserialization failed: {}",
err
))
.into()
})
})
.collect()
})
}
pub fn get_stake_activation(
&self,
stake_account: Pubkey,
epoch: Option<Epoch>,
) -> ClientResult<RpcStakeActivation> {
self.send(
RpcRequest::GetStakeActivation,
json!([
stake_account.to_string(),
RpcStakeConfig {
epoch,
commitment: Some(self.commitment_config),
}
]),
)
}
pub fn supply(&self) -> RpcResult<RpcSupply> {
self.supply_with_commitment(self.commitment_config)
}
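Editor's note: usage sketch for the two RpcClient helpers added above, get_slot_leaders and get_stake_activation; the wrapper function and its output format are illustrative only.

    use solana_client::rpc_client::RpcClient;
    use solana_sdk::pubkey::Pubkey;

    fn inspect(rpc: &RpcClient, stake_account: Pubkey) -> Result<(), Box<dyn std::error::Error>> {
        // Upcoming leaders, starting at the current slot
        let start = rpc.get_slot()?;
        for (offset, leader) in rpc.get_slot_leaders(start, 4)?.iter().enumerate() {
            println!("slot {} leader: {}", start + offset as u64, leader);
        }
        // Activation state of a stake account for the current epoch (epoch = None)
        let activation = rpc.get_stake_activation(stake_account, None)?;
        println!(
            "stake state: {:?}, active: {}, inactive: {}",
            activation.state, activation.active, activation.inactive
        );
        Ok(())
    }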
@@ -507,6 +547,14 @@ impl RpcClient {
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]))
}
pub fn get_confirmed_block_with_config(
&self,
slot: Slot,
config: RpcConfirmedBlockConfig,
) -> ClientResult<UiConfirmedBlock> {
self.send(RpcRequest::GetConfirmedBlock, json!([slot, config]))
}
pub fn get_confirmed_blocks(
&self,
start_slot: Slot,
@@ -518,6 +566,24 @@ impl RpcClient {
)
}
pub fn get_confirmed_blocks_with_commitment(
&self,
start_slot: Slot,
end_slot: Option<Slot>,
commitment_config: CommitmentConfig,
) -> ClientResult<Vec<Slot>> {
let json = if end_slot.is_some() {
json!([
start_slot,
end_slot,
self.maybe_map_commitment(commitment_config)?
])
} else {
json!([start_slot, self.maybe_map_commitment(commitment_config)?])
};
self.send(RpcRequest::GetConfirmedBlocks, json)
}
pub fn get_confirmed_blocks_with_limit(
&self,
start_slot: Slot,
@@ -529,6 +595,22 @@ impl RpcClient {
)
}
pub fn get_confirmed_blocks_with_limit_and_commitment(
&self,
start_slot: Slot,
limit: usize,
commitment_config: CommitmentConfig,
) -> ClientResult<Vec<Slot>> {
self.send(
RpcRequest::GetConfirmedBlocksWithLimit,
json!([
start_slot,
limit,
self.maybe_map_commitment(commitment_config)?
]),
)
}
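Editor's note: sketch showing both new commitment-aware block listings introduced above, used to enumerate recently confirmed (not yet finalized) slots; the function name and the 32-slot window are illustrative.

    use solana_client::rpc_client::RpcClient;
    use solana_sdk::commitment_config::CommitmentConfig;

    fn recent_confirmed_blocks(rpc: &RpcClient) -> Result<(), Box<dyn std::error::Error>> {
        let tip = rpc.get_slot_with_commitment(CommitmentConfig::confirmed())?;
        // Range form: from ~32 slots back, letting the server apply its own end limit
        let by_range = rpc.get_confirmed_blocks_with_commitment(
            tip.saturating_sub(32),
            None,
            CommitmentConfig::confirmed(),
        )?;
        // Limit form: at most 32 slots starting at the same point
        let by_limit = rpc.get_confirmed_blocks_with_limit_and_commitment(
            tip.saturating_sub(32),
            32,
            CommitmentConfig::confirmed(),
        )?;
        println!("by range: {:?}\nby limit: {:?}", by_range, by_limit);
        Ok(())
    }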
pub fn get_confirmed_signatures_for_address(
&self,
address: &Pubkey,
@@ -591,6 +673,17 @@ impl RpcClient {
)
}
pub fn get_confirmed_transaction_with_config(
&self,
signature: &Signature,
config: RpcConfirmedTransactionConfig,
) -> ClientResult<EncodedConfirmedTransaction> {
self.send(
RpcRequest::GetConfirmedTransaction,
json!([signature.to_string(), config]),
)
}
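Editor's note: sketch of the config-based transaction getter above, requesting base64 encoding at confirmed commitment; the wrapper function is illustrative.

    use solana_client::{rpc_client::RpcClient, rpc_config::RpcConfirmedTransactionConfig};
    use solana_sdk::{commitment_config::CommitmentConfig, signature::Signature};
    use solana_transaction_status::UiTransactionEncoding;

    fn fetch_confirmed_tx(
        rpc: &RpcClient,
        sig: &Signature,
    ) -> Result<(), Box<dyn std::error::Error>> {
        let tx = rpc.get_confirmed_transaction_with_config(
            sig,
            RpcConfirmedTransactionConfig {
                encoding: Some(UiTransactionEncoding::Base64),
                commitment: Some(CommitmentConfig::confirmed()),
            },
        )?;
        println!("confirmed in slot {}", tx.slot);
        Ok(())
    }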
pub fn get_block_time(&self, slot: Slot) -> ClientResult<UnixTimestamp> {
let request = RpcRequest::GetBlockTime;
let response = self.sender.send(request, json!([slot]));
@@ -644,6 +737,13 @@ impl RpcClient {
self.send(RpcRequest::GetEpochSchedule, Value::Null)
}
pub fn get_recent_performance_samples(
&self,
limit: Option<usize>,
) -> ClientResult<Vec<RpcPerfSample>> {
self.send(RpcRequest::GetRecentPerformanceSamples, json!([limit]))
}
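Editor's note: sketch of the new performance-samples getter above, used to approximate recent cluster TPS; the averaging is illustrative and assumes the RpcPerfSample fields num_transactions and sample_period_secs.

    use solana_client::rpc_client::RpcClient;

    fn recent_tps(rpc: &RpcClient) -> Result<f64, Box<dyn std::error::Error>> {
        let samples = rpc.get_recent_performance_samples(Some(5))?;
        let (txs, secs) = samples.iter().fold((0u64, 0u64), |(t, s), sample| {
            (t + sample.num_transactions, s + sample.sample_period_secs as u64)
        });
        Ok(txs as f64 / secs.max(1) as f64)
    }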
pub fn get_identity(&self) -> ClientResult<Pubkey> {
let rpc_identity: RpcIdentity = self.send(RpcRequest::GetIdentity, Value::Null)?;

View File

@@ -1,10 +1,12 @@
use crate::rpc_filter::RpcFilterType;
use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig};
use solana_sdk::{
clock::Epoch,
commitment_config::{CommitmentConfig, CommitmentLevel},
use {
crate::rpc_filter::RpcFilterType,
solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig},
solana_sdk::{
clock::{Epoch, Slot},
commitment_config::{CommitmentConfig, CommitmentLevel},
},
solana_transaction_status::{TransactionDetails, UiTransactionEncoding},
};
use solana_transaction_status::UiTransactionEncoding;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@@ -133,12 +135,26 @@ pub trait EncodingConfig {
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedBlockConfig {
pub encoding: Option<UiTransactionEncoding>,
pub transaction_details: Option<TransactionDetails>,
pub rewards: Option<bool>,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
impl EncodingConfig for RpcConfirmedBlockConfig {
fn new_with_encoding(encoding: &Option<UiTransactionEncoding>) -> Self {
Self {
encoding: *encoding,
..Self::default()
}
}
}
impl RpcConfirmedBlockConfig {
pub fn rewards_only() -> Self {
Self {
transaction_details: Some(TransactionDetails::None),
..Self::default()
}
}
}
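Editor's note: a small construction sketch for the config above — rewards_only() defaults everything except transaction_details, and struct-update syntax layers a commitment on top. Assumes CommitmentConfig::confirmed() from solana_sdk.

    use solana_client::rpc_config::RpcConfirmedBlockConfig;
    use solana_sdk::commitment_config::CommitmentConfig;

    // Rewards-only request evaluated at "confirmed" rather than the default commitment
    fn rewards_only_confirmed() -> RpcConfirmedBlockConfig {
        RpcConfirmedBlockConfig {
            commitment: Some(CommitmentConfig::confirmed()),
            ..RpcConfirmedBlockConfig::rewards_only()
        }
    }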
@@ -147,12 +163,31 @@ impl EncodingConfig for RpcConfirmedBlockConfig {
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedTransactionConfig {
pub encoding: Option<UiTransactionEncoding>,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
impl EncodingConfig for RpcConfirmedTransactionConfig {
fn new_with_encoding(encoding: &Option<UiTransactionEncoding>) -> Self {
Self {
encoding: *encoding,
..Self::default()
}
}
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum RpcConfirmedBlocksConfigWrapper {
EndSlotOnly(Option<Slot>),
CommitmentOnly(Option<CommitmentConfig>),
}
impl RpcConfirmedBlocksConfigWrapper {
pub fn unzip(&self) -> (Option<Slot>, Option<CommitmentConfig>) {
match &self {
RpcConfirmedBlocksConfigWrapper::EndSlotOnly(end_slot) => (*end_slot, None),
RpcConfirmedBlocksConfigWrapper::CommitmentOnly(commitment) => (None, *commitment),
}
}
}
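Editor's note: hedged sketch of why the wrapper above is untagged — the second positional parameter of getConfirmedBlocks may arrive as either an end slot or a commitment object, and unzip splits whichever form was sent. The demo function and the literal inputs are illustrative.

    use solana_client::rpc_config::RpcConfirmedBlocksConfigWrapper;

    fn demo() -> serde_json::Result<()> {
        // A bare number deserializes into the EndSlotOnly variant...
        let as_end_slot: RpcConfirmedBlocksConfigWrapper = serde_json::from_str("200")?;
        assert_eq!(as_end_slot.unzip().0, Some(200));

        // ...while a commitment object falls through to CommitmentOnly.
        let as_commitment: RpcConfirmedBlocksConfigWrapper =
            serde_json::from_str(r#"{"commitment":"confirmed"}"#)?;
        assert!(as_commitment.unzip().1.is_some());
        Ok(())
    }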

View File

@@ -1,8 +1,10 @@
//! Implementation defined RPC server errors
use crate::rpc_response::RpcSimulateTransactionResult;
use jsonrpc_core::{Error, ErrorCode};
use solana_sdk::clock::Slot;
use {
crate::rpc_response::RpcSimulateTransactionResult,
jsonrpc_core::{Error, ErrorCode},
solana_sdk::clock::Slot,
};
pub const JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP: i64 = -32001;
pub const JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE: i64 = -32002;

View File

@@ -1,8 +1,10 @@
use crate::rpc_response::RpcSimulateTransactionResult;
use serde_json::{json, Value};
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use std::fmt;
use thiserror::Error;
use {
crate::rpc_response::RpcSimulateTransactionResult,
serde_json::{json, Value},
solana_sdk::{clock::Slot, pubkey::Pubkey},
std::fmt,
thiserror::Error,
};
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub enum RpcRequest {
@@ -34,13 +36,16 @@ pub enum RpcRequest {
GetMultipleAccounts,
GetProgramAccounts,
GetRecentBlockhash,
GetRecentPerformanceSamples,
GetSnapshotSlot,
GetSignatureStatuses,
GetSlot,
GetSlotLeader,
GetSlotLeaders,
GetStorageTurn,
GetStorageTurnRate,
GetSlotsPerSegment,
GetStakeActivation,
GetStoragePubkeysForSlot,
GetSupply,
GetTokenAccountBalance,
@@ -90,10 +95,13 @@ impl fmt::Display for RpcRequest {
RpcRequest::GetMultipleAccounts => "getMultipleAccounts",
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetRecentPerformanceSamples => "getRecentPerformanceSamples",
RpcRequest::GetSnapshotSlot => "getSnapshotSlot",
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
RpcRequest::GetSlot => "getSlot",
RpcRequest::GetSlotLeader => "getSlotLeader",
RpcRequest::GetSlotLeaders => "getSlotLeaders",
RpcRequest::GetStakeActivation => "getStakeActivation",
RpcRequest::GetStorageTurn => "getStorageTurn",
RpcRequest::GetStorageTurnRate => "getStorageTurnRate",
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
@@ -126,6 +134,7 @@ pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT: usize = 1_000;
pub const MAX_MULTIPLE_ACCOUNTS: usize = 100;
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
pub const MAX_GET_PROGRAM_ACCOUNT_FILTERS: usize = 4;
pub const MAX_GET_SLOT_LEADERS: usize = 5000;
// Validators that are this number of slots behind are considered delinquent
pub const DELINQUENT_VALIDATOR_SLOT_DISTANCE: u64 = 128;
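A quick check, not part of the diff, that the Display impl above maps the added variants to their JSON-RPC method names:

use solana_client::rpc_request::RpcRequest;

fn main() {
    assert_eq!(RpcRequest::GetSlotLeaders.to_string(), "getSlotLeaders");
    assert_eq!(
        RpcRequest::GetRecentPerformanceSamples.to_string(),
        "getRecentPerformanceSamples"
    );
}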

View File

@@ -1,13 +1,15 @@
use crate::client_error;
use solana_account_decoder::{parse_token::UiTokenAmount, UiAccount};
use solana_sdk::{
clock::{Epoch, Slot, UnixTimestamp},
fee_calculator::{FeeCalculator, FeeRateGovernor},
inflation::Inflation,
transaction::{Result, TransactionError},
use {
crate::client_error,
solana_account_decoder::{parse_token::UiTokenAmount, UiAccount},
solana_sdk::{
clock::{Epoch, Slot, UnixTimestamp},
fee_calculator::{FeeCalculator, FeeRateGovernor},
inflation::Inflation,
transaction::{Result, TransactionError},
},
solana_transaction_status::ConfirmedTransactionStatusWithSignature,
std::{collections::HashMap, fmt, net::SocketAddr},
};
use solana_transaction_status::ConfirmedTransactionStatusWithSignature;
use std::{collections::HashMap, fmt, net::SocketAddr};
pub type RpcResult<T> = client_error::Result<Response<T>>;
@@ -313,7 +315,7 @@ pub struct RpcSupply {
pub non_circulating_accounts: Vec<String>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum StakeActivationState {
Activating,

View File

@@ -3,36 +3,38 @@
//! messages to the network directly. The binary encoding of its messages are
//! unstable and may change in future releases.
use crate::{rpc_client::RpcClient, rpc_config::RpcProgramAccountsConfig, rpc_response::Response};
use bincode::{serialize_into, serialized_size};
use log::*;
use solana_sdk::{
account::Account,
client::{AsyncClient, Client, SyncClient},
clock::{Slot, MAX_PROCESSING_AGE},
commitment_config::CommitmentConfig,
epoch_info::EpochInfo,
fee_calculator::{FeeCalculator, FeeRateGovernor},
hash::Hash,
instruction::Instruction,
message::Message,
packet::PACKET_DATA_SIZE,
pubkey::Pubkey,
signature::{Keypair, Signature, Signer},
signers::Signers,
system_instruction,
timing::duration_as_ms,
transaction::{self, Transaction},
transport::Result as TransportResult,
};
use std::{
io,
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
RwLock,
use {
crate::{rpc_client::RpcClient, rpc_config::RpcProgramAccountsConfig, rpc_response::Response},
bincode::{serialize_into, serialized_size},
log::*,
solana_sdk::{
account::Account,
client::{AsyncClient, Client, SyncClient},
clock::{Slot, MAX_PROCESSING_AGE},
commitment_config::CommitmentConfig,
epoch_info::EpochInfo,
fee_calculator::{FeeCalculator, FeeRateGovernor},
hash::Hash,
instruction::Instruction,
message::Message,
packet::PACKET_DATA_SIZE,
pubkey::Pubkey,
signature::{Keypair, Signature, Signer},
signers::Signers,
system_instruction,
timing::duration_as_ms,
transaction::{self, Transaction},
transport::Result as TransportResult,
},
std::{
io,
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
RwLock,
},
time::{Duration, Instant},
},
time::{Duration, Instant},
};
struct ClientOptimizer {

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.6.0"
version = "1.6.2"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"
@@ -52,43 +52,43 @@ serde = "1.0.122"
serde_bytes = "0.11"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.6.0" }
solana-banks-server = { path = "../banks-server", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-client = { path = "../client", version = "1.6.0" }
solana-faucet = { path = "../faucet", version = "1.6.0" }
solana-ledger = { path = "../ledger", version = "1.6.0" }
solana-logger = { path = "../logger", version = "1.6.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.6.0" }
solana-metrics = { path = "../metrics", version = "1.6.0" }
solana-measure = { path = "../measure", version = "1.6.0" }
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
solana-perf = { path = "../perf", version = "1.6.0" }
solana-program-test = { path = "../program-test", version = "1.6.0" }
solana-runtime = { path = "../runtime", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "1.6.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "1.6.0" }
solana-stake-program = { path = "../programs/stake", version = "1.6.0" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.6.0" }
solana-streamer = { path = "../streamer", version = "1.6.0" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.6.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-vote-program = { path = "../programs/vote", version = "1.6.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.6.2" }
solana-banks-server = { path = "../banks-server", version = "=1.6.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-client = { path = "../client", version = "=1.6.2" }
solana-faucet = { path = "../faucet", version = "=1.6.2" }
solana-ledger = { path = "../ledger", version = "=1.6.2" }
solana-logger = { path = "../logger", version = "=1.6.2" }
solana-merkle-tree = { path = "../merkle-tree", version = "=1.6.2" }
solana-metrics = { path = "../metrics", version = "=1.6.2" }
solana-measure = { path = "../measure", version = "=1.6.2" }
solana-net-utils = { path = "../net-utils", version = "=1.6.2" }
solana-perf = { path = "../perf", version = "=1.6.2" }
solana-program-test = { path = "../program-test", version = "=1.6.2" }
solana-runtime = { path = "../runtime", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.6.2" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.6.2" }
solana-stake-program = { path = "../programs/stake", version = "=1.6.2" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.6.2" }
solana-streamer = { path = "../streamer", version = "=1.6.2" }
solana-sys-tuner = { path = "../sys-tuner", version = "=1.6.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.6.2" }
spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = { version = "1.1", features = ["full"] }
tokio_02 = { version = "0.2", package = "tokio", features = ["full"] }
tokio-util = { version = "0.3", features = ["codec"] } # This crate needs to stay in sync with tokio_02, until that dependency can be removed
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.6.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.6.2" }
trees = "0.2.1"
[dev-dependencies]
matches = "0.1.6"
num_cpus = "1.13.0"
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serial_test = "0.4.0"
symlink = "0.1.0"
systemstat = "0.1.5"

View File

@@ -66,6 +66,8 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let (exit, poh_recorder, poh_service, _signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let recorder = poh_recorder.lock().unwrap().recorder();
let tx = test_tx();
let len = 4096;
let chunk_size = 1024;
@@ -81,12 +83,14 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
bencher.iter(move || {
let _ignored = BankingStage::consume_buffered_packets(
&my_pubkey,
std::u128::MAX,
&poh_recorder,
&mut packets,
None,
&s,
None::<Box<dyn Fn()>>,
None,
&recorder,
);
});
@@ -154,6 +158,9 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
let mut bank = Bank::new(&genesis_config);
// Allow arbitrary transaction processing time for the purposes of this bench
bank.ns_per_slot = std::u128::MAX;
let bank = Arc::new(Bank::new(&genesis_config));
debug!("threads: {} txs: {}", num_threads, txes);

View File

@@ -42,7 +42,13 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
let shredder =
Shredder::new(1, 0, RECOMMENDED_FEC_RATE, Arc::new(Keypair::new()), 0, 0).unwrap();
let data_shreds = shredder
.entries_to_data_shreds(&entries, true, 0, &mut ProcessShredsStats::default())
.entries_to_data_shreds(
&entries,
true, // is_last_in_slot
0, // next_shred_index
0, // fec_set_offset
&mut ProcessShredsStats::default(),
)
.0;
assert!(data_shreds.len() >= num_shreds);
data_shreds
@@ -127,10 +133,8 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
let data_shreds = make_shreds(symbol_count);
bencher.iter(|| {
Shredder::generate_coding_shreds(
0,
RECOMMENDED_FEC_RATE,
&data_shreds[..symbol_count],
0,
symbol_count,
)
.len();
@@ -142,10 +146,8 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
let data_shreds = make_shreds(symbol_count);
let coding_shreds = Shredder::generate_coding_shreds(
0,
RECOMMENDED_FEC_RATE,
&data_shreds[..symbol_count],
0,
symbol_count,
);
bencher.iter(|| {

View File

@@ -0,0 +1,154 @@
use crate::{
consensus::{ComputedBankState, Tower},
fork_choice::ForkChoice,
progress_map::{ForkStats, ProgressMap},
};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{clock::Slot, timing};
use std::time::Instant;
use std::{
collections::{HashMap, HashSet},
sync::{Arc, RwLock},
};
#[derive(Default)]
pub struct BankWeightForkChoice {}
impl ForkChoice for BankWeightForkChoice {
fn compute_bank_stats(
&mut self,
bank: &Bank,
_tower: &Tower,
progress: &mut ProgressMap,
computed_bank_state: &ComputedBankState,
) {
let bank_slot = bank.slot();
// The only time the progress map should be missing a bank slot
// is if this node was the leader for this slot, as those banks
// are not replayed in replay_active_banks()
let parent_weight = bank
.parent()
.and_then(|b| progress.get(&b.slot()))
.map(|x| x.fork_stats.fork_weight)
.unwrap_or(0);
let stats = progress
.get_fork_stats_mut(bank_slot)
.expect("All frozen banks must exist in the Progress map");
let ComputedBankState { bank_weight, .. } = computed_bank_state;
stats.weight = *bank_weight;
stats.fork_weight = stats.weight + parent_weight;
}
// Returns:
// 1) The heaviest overall bank
// 2) The heaviest bank on the same fork as the last vote (doesn't require a
// switching proof to vote for)
fn select_forks(
&self,
frozen_banks: &[Arc<Bank>],
tower: &Tower,
progress: &ProgressMap,
ancestors: &HashMap<u64, HashSet<u64>>,
_bank_forks: &RwLock<BankForks>,
) -> (Arc<Bank>, Option<Arc<Bank>>) {
let tower_start = Instant::now();
assert!(!frozen_banks.is_empty());
let num_frozen_banks = frozen_banks.len();
trace!("frozen_banks {}", frozen_banks.len());
let num_old_banks = frozen_banks
.iter()
.filter(|b| b.slot() < tower.root())
.count();
let last_voted_slot = tower.last_voted_slot();
let mut heaviest_bank_on_same_fork = None;
let mut heaviest_same_fork_weight = 0;
let stats: Vec<&ForkStats> = frozen_banks
.iter()
.map(|bank| {
// The only time the progress map should be missing a bank slot
// is if this node was the leader for this slot, as those banks
// are not replayed in replay_active_banks()
let stats = progress
.get_fork_stats(bank.slot())
.expect("All frozen banks must exist in the Progress map");
if let Some(last_voted_slot) = last_voted_slot {
if ancestors
.get(&bank.slot())
.expect("Entry in frozen banks must exist in ancestors")
.contains(&last_voted_slot)
{
// Descendant of last vote cannot be locked out
assert!(!stats.is_locked_out);
// ancestors(slot) should not contain the slot itself,
// so we should never get the same bank as the last vote
assert_ne!(bank.slot(), last_voted_slot);
// highest weight, lowest slot first. frozen_banks is sorted
// from least slot to greatest slot, so if two banks have
// the same fork weight, the lower slot will be picked
if stats.fork_weight > heaviest_same_fork_weight {
heaviest_bank_on_same_fork = Some(bank.clone());
heaviest_same_fork_weight = stats.fork_weight;
}
}
}
stats
})
.collect();
let num_not_recent = stats.iter().filter(|s| !s.is_recent).count();
let num_has_voted = stats.iter().filter(|s| s.has_voted).count();
let num_empty = stats.iter().filter(|s| s.is_empty).count();
let num_threshold_failure = stats.iter().filter(|s| !s.vote_threshold).count();
let num_votable_threshold_failure = stats
.iter()
.filter(|s| s.is_recent && !s.has_voted && !s.vote_threshold)
.count();
let mut candidates: Vec<_> = frozen_banks.iter().zip(stats.iter()).collect();
// highest weight, lowest slot first
candidates.sort_by_key(|b| (b.1.fork_weight, 0i64 - b.0.slot() as i64));
let rv = candidates
.last()
.expect("frozen banks was nonempty so candidates must also be nonempty");
let ms = timing::duration_as_ms(&tower_start.elapsed());
let weights: Vec<(u128, u64, u64)> = candidates
.iter()
.map(|x| (x.1.weight, x.0.slot(), x.1.block_height))
.collect();
debug!(
"@{:?} tower duration: {:?} len: {}/{} weights: {:?}",
timing::timestamp(),
ms,
candidates.len(),
stats.iter().filter(|s| !s.has_voted).count(),
weights,
);
datapoint_debug!(
"replay_stage-select_forks",
("frozen_banks", num_frozen_banks as i64, i64),
("not_recent", num_not_recent as i64, i64),
("has_voted", num_has_voted as i64, i64),
("old_banks", num_old_banks as i64, i64),
("empty_banks", num_empty as i64, i64),
("threshold_failure", num_threshold_failure as i64, i64),
(
"votable_threshold_failure",
num_votable_threshold_failure as i64,
i64
),
("tower_duration", ms as i64, i64),
);
(rv.0.clone(), heaviest_bank_on_same_fork)
}
fn mark_fork_invalid_candidate(&mut self, _invalid_slot: Slot) {}
fn mark_fork_valid_candidate(&mut self, _valid_slots: &[Slot]) {}
}
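The sort in select_forks keys on (fork_weight, negated slot) and then takes the last element; a standalone sketch of just that tie-breaking arithmetic, not part of the diff:

fn main() {
    // (fork_weight, slot) pairs: highest weight wins, and among equal weights
    // the lower slot wins because its negated value sorts larger.
    let mut candidates = vec![(10u128, 7u64), (12, 9), (12, 5)];
    candidates.sort_by_key(|&(weight, slot)| (weight, 0i64 - slot as i64));
    assert_eq!(*candidates.last().unwrap(), (12, 5));
}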

View File

@@ -4,7 +4,7 @@
use crate::{
cluster_info::ClusterInfo,
packet_hasher::PacketHasher,
poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
poh_recorder::{PohRecorder, PohRecorderError, TransactionRecorder, WorkingBankEntry},
poh_service::{self, PohService},
};
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
@@ -12,10 +12,8 @@ use itertools::Itertools;
use lru::LruCache;
use retain_mut::RetainMut;
use solana_ledger::{
blockstore::Blockstore,
blockstore_processor::{send_transaction_status_batch, TransactionStatusSender},
entry::hash_transactions,
leader_schedule_cache::LeaderScheduleCache,
blockstore::Blockstore, blockstore_processor::TransactionStatusSender,
entry::hash_transactions, leader_schedule_cache::LeaderScheduleCache,
};
use solana_measure::{measure::Measure, thread_mem_usage};
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
@@ -159,9 +157,9 @@ pub struct BankingStage {
bank_thread_hdls: Vec<JoinHandle<()>>,
}
#[derive(Debug, PartialEq, Eq, Clone)]
#[derive(Debug, Clone)]
pub enum BufferedPacketsDecision {
Consume,
Consume(u128),
Forward,
ForwardAndHold,
Hold,
@@ -288,12 +286,14 @@ impl BankingStage {
pub fn consume_buffered_packets(
my_pubkey: &Pubkey,
max_tx_ingestion_ns: u128,
poh_recorder: &Arc<Mutex<PohRecorder>>,
buffered_packets: &mut UnprocessedPackets,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
test_fn: Option<impl Fn()>,
banking_stage_stats: Option<&BankingStageStats>,
recorder: &TransactionRecorder,
) {
let mut rebuffered_packets_len = 0;
let mut new_tx_count = 0;
@@ -316,18 +316,24 @@ impl BankingStage {
new_unprocessed_indexes,
)
} else {
let bank = poh_recorder.lock().unwrap().bank();
if let Some(bank) = bank {
let bank_start = poh_recorder.lock().unwrap().bank_start();
if let Some((bank, bank_creation_time)) = bank_start {
let (processed, verified_txs_len, new_unprocessed_indexes) =
Self::process_received_packets(
Self::process_packets_transactions(
&bank,
&poh_recorder,
&bank_creation_time,
&recorder,
&msgs,
original_unprocessed_indexes.to_owned(),
transaction_status_sender.clone(),
gossip_vote_sender,
);
if processed < verified_txs_len {
if processed < verified_txs_len
|| !Bank::should_bank_still_be_processing_txs(
&bank_creation_time,
max_tx_ingestion_ns,
)
{
reached_end_of_slot =
Some((poh_recorder.lock().unwrap().next_slot_leader(), bank));
}
@@ -380,7 +386,7 @@ impl BankingStage {
fn consume_or_forward_packets(
my_pubkey: &Pubkey,
leader_pubkey: Option<Pubkey>,
bank_is_available: bool,
bank_still_processing_txs: Option<&Arc<Bank>>,
would_be_leader: bool,
would_be_leader_shortly: bool,
) -> BufferedPacketsDecision {
@@ -389,9 +395,9 @@ impl BankingStage {
BufferedPacketsDecision::Hold,
// else process the packets
|x| {
if bank_is_available {
if let Some(bank) = bank_still_processing_txs {
// If the bank is available, this node is the leader
BufferedPacketsDecision::Consume
BufferedPacketsDecision::Consume(bank.ns_per_slot)
} else if would_be_leader_shortly {
// If the node will be the leader soon, hold the packets for now
BufferedPacketsDecision::Hold
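The Consume variant now carries bank.ns_per_slot so the packet-consuming path can stop once the working bank has been open too long. A self-contained sketch of that flow, not part of the diff; still_processing is a simplified stand-in, not the actual Bank::should_bank_still_be_processing_txs implementation:

use std::time::Instant;

// Local stand-in for the decision enum, kept here only so the sketch compiles.
#[allow(dead_code)]
enum BufferedPacketsDecision {
    Consume(u128), // carries bank.ns_per_slot, the leader's per-slot processing budget
    Forward,
    ForwardAndHold,
    Hold,
}

// Simplified check: keep consuming only while the bank's wall-clock age fits the budget.
fn still_processing(bank_creation_time: &Instant, max_tx_ingestion_ns: u128) -> bool {
    bank_creation_time.elapsed().as_nanos() < max_tx_ingestion_ns
}

fn main() {
    let bank_creation_time = Instant::now();
    // Tests and benches pass u128::MAX to disable the throttle entirely.
    let decision = BufferedPacketsDecision::Consume(u128::MAX);
    if let BufferedPacketsDecision::Consume(budget_ns) = decision {
        assert!(still_processing(&bank_creation_time, budget_ns));
    }
}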
@@ -421,12 +427,20 @@ impl BankingStage {
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
banking_stage_stats: &BankingStageStats,
recorder: &TransactionRecorder,
) -> BufferedPacketsDecision {
let (leader_at_slot_offset, poh_has_bank, would_be_leader, would_be_leader_shortly) = {
let bank_start;
let (
leader_at_slot_offset,
bank_still_processing_txs,
would_be_leader,
would_be_leader_shortly,
) = {
let poh = poh_recorder.lock().unwrap();
bank_start = poh.bank_start();
(
poh.leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET),
poh.has_bank(),
PohRecorder::get_bank_still_processing_txs(&bank_start),
poh.would_be_leader(HOLD_TRANSACTIONS_SLOT_OFFSET * DEFAULT_TICKS_PER_SLOT),
poh.would_be_leader(
(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET - 1) * DEFAULT_TICKS_PER_SLOT,
@@ -437,21 +451,23 @@ impl BankingStage {
let decision = Self::consume_or_forward_packets(
my_pubkey,
leader_at_slot_offset,
poh_has_bank,
bank_still_processing_txs,
would_be_leader,
would_be_leader_shortly,
);
match decision {
BufferedPacketsDecision::Consume => {
BufferedPacketsDecision::Consume(max_tx_ingestion_ns) => {
Self::consume_buffered_packets(
my_pubkey,
max_tx_ingestion_ns,
poh_recorder,
buffered_packets,
transaction_status_sender,
gossip_vote_sender,
None::<Box<dyn Fn()>>,
Some(banking_stage_stats),
recorder,
);
}
BufferedPacketsDecision::Forward => {
@@ -529,6 +545,7 @@ impl BankingStage {
gossip_vote_sender: ReplayVoteSender,
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
) {
let recorder = poh_recorder.lock().unwrap().recorder();
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut buffered_packets = VecDeque::with_capacity(batch_limit);
let banking_stage_stats = BankingStageStats::new(id);
@@ -537,16 +554,17 @@ impl BankingStage {
let decision = Self::process_buffered_packets(
&my_pubkey,
&socket,
poh_recorder,
&poh_recorder,
cluster_info,
&mut buffered_packets,
enable_forwarding,
transaction_status_sender.clone(),
&gossip_vote_sender,
&banking_stage_stats,
&recorder,
);
if decision == BufferedPacketsDecision::Hold
|| decision == BufferedPacketsDecision::ForwardAndHold
if matches!(decision, BufferedPacketsDecision::Hold)
|| matches!(decision, BufferedPacketsDecision::ForwardAndHold)
{
// If we are waiting on a new bank,
// check the receiver for more transactions/for exiting
@@ -577,6 +595,7 @@ impl BankingStage {
&mut buffered_packets,
&banking_stage_stats,
duplicates,
&recorder,
) {
Ok(()) | Err(RecvTimeoutError::Timeout) => (),
Err(RecvTimeoutError::Disconnected) => break,
@@ -610,7 +629,7 @@ impl BankingStage {
bank_slot: Slot,
txs: &[Transaction],
results: &[TransactionExecutionResult],
poh: &Arc<Mutex<PohRecorder>>,
recorder: &TransactionRecorder,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut processed_generation = Measure::start("record::process_generation");
let (processed_transactions, processed_transactions_indexes): (Vec<_>, Vec<_>) = results
@@ -640,10 +659,7 @@ impl BankingStage {
let mut poh_record = Measure::start("record::poh_record");
// record and unlock will unlock all the successful transactions
let res = poh
.lock()
.unwrap()
.record(bank_slot, hash, processed_transactions);
let res = recorder.record(bank_slot, hash, processed_transactions);
match res {
Ok(()) => (),
Err(PohRecorderError::MaxHeightReached) => {
@@ -668,7 +684,7 @@ impl BankingStage {
fn process_and_record_transactions_locked(
bank: &Arc<Bank>,
poh: &Arc<Mutex<PohRecorder>>,
poh: &TransactionRecorder,
batch: &TransactionBatch,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
@@ -750,7 +766,7 @@ impl BankingStage {
if let Some(transaction_status_sender) = transaction_status_sender {
let post_balances = bank.collect_balances(batch);
let post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals);
send_transaction_status_batch(
transaction_status_sender.send_transaction_status_batch(
bank.clone(),
batch.transactions(),
batch.iteration_order_vec(),
@@ -759,7 +775,6 @@ impl BankingStage {
TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances),
inner_instructions,
transaction_logs,
transaction_status_sender,
);
}
}
@@ -787,7 +802,7 @@ impl BankingStage {
pub fn process_and_record_transactions(
bank: &Arc<Bank>,
txs: &[Transaction],
poh: &Arc<Mutex<PohRecorder>>,
poh: &TransactionRecorder,
chunk_offset: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
@@ -829,8 +844,9 @@ impl BankingStage {
/// than the total number if max PoH height was reached and the bank halted
fn process_transactions(
bank: &Arc<Bank>,
bank_creation_time: &Instant,
transactions: &[Transaction],
poh: &Arc<Mutex<PohRecorder>>,
poh: &TransactionRecorder,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (usize, Vec<usize>) {
@@ -855,17 +871,25 @@ impl BankingStage {
// Add the retryable txs (transactions that errored in a way that warrants a retry)
// to the list of unprocessed txs.
unprocessed_txs.extend_from_slice(&retryable_txs_in_chunk);
if let Err(PohRecorderError::MaxHeightReached) = result {
info!(
"process transactions: max height reached slot: {} height: {}",
bank.slot(),
bank.tick_height()
);
// process_and_record_transactions has returned all retryable errors in
// transactions[chunk_start..chunk_end], so we just need to push the remaining
// transactions into the unprocessed queue.
unprocessed_txs.extend(chunk_end..transactions.len());
break;
// If `bank_creation_time` is None, it's a test, so ignore the option and
// allow processing
let should_bank_still_be_processing_txs =
Bank::should_bank_still_be_processing_txs(bank_creation_time, bank.ns_per_slot);
match (result, should_bank_still_be_processing_txs) {
(Err(PohRecorderError::MaxHeightReached), _) | (_, false) => {
info!(
"process transactions: max height reached slot: {} height: {}",
bank.slot(),
bank.tick_height()
);
// process_and_record_transactions has returned all retryable errors in
// transactions[chunk_start..chunk_end], so we just need to push the remaining
// transactions into the unprocessed queue.
unprocessed_txs.extend(chunk_end..transactions.len());
break;
}
_ => (),
}
// Don't exit early on any other type of error, continue processing...
chunk_start = chunk_end;
@@ -990,9 +1014,10 @@ impl BankingStage {
Self::filter_valid_transaction_indexes(&result, transaction_to_packet_indexes)
}
fn process_received_packets(
fn process_packets_transactions(
bank: &Arc<Bank>,
poh: &Arc<Mutex<PohRecorder>>,
bank_creation_time: &Instant,
poh: &TransactionRecorder,
msgs: &Packets,
packet_indexes: Vec<usize>,
transaction_status_sender: Option<TransactionStatusSender>,
@@ -1013,6 +1038,7 @@ impl BankingStage {
let (processed, unprocessed_tx_indexes) = Self::process_transactions(
bank,
bank_creation_time,
&transactions,
poh,
transaction_status_sender,
@@ -1105,6 +1131,7 @@ impl BankingStage {
buffered_packets: &mut UnprocessedPackets,
banking_stage_stats: &BankingStageStats,
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
recorder: &TransactionRecorder,
) -> Result<(), RecvTimeoutError> {
let mut recv_time = Measure::start("process_packets_recv");
let mms = verified_receiver.recv_timeout(recv_timeout)?;
@@ -1120,7 +1147,7 @@ impl BankingStage {
id,
);
inc_new_counter_debug!("banking_stage-transactions_received", count);
let mut proc_start = Measure::start("process_received_packets_process");
let mut proc_start = Measure::start("process_packets_transactions_process");
let mut new_tx_count = 0;
let mut mms_iter = mms.into_iter();
@@ -1128,8 +1155,8 @@ impl BankingStage {
let mut newly_buffered_packets_count = 0;
while let Some(msgs) = mms_iter.next() {
let packet_indexes = Self::generate_packet_indexes(&msgs.packets);
let bank = poh.lock().unwrap().bank();
if bank.is_none() {
let bank_start = poh.lock().unwrap().bank_start();
if PohRecorder::get_bank_still_processing_txs(&bank_start).is_none() {
Self::push_unprocessed(
buffered_packets,
msgs,
@@ -1141,16 +1168,18 @@ impl BankingStage {
);
continue;
}
let bank = bank.unwrap();
let (bank, bank_creation_time) = bank_start.unwrap();
let (processed, verified_txs_len, unprocessed_indexes) = Self::process_received_packets(
&bank,
&poh,
&msgs,
packet_indexes,
transaction_status_sender.clone(),
gossip_vote_sender,
);
let (processed, verified_txs_len, unprocessed_indexes) =
Self::process_packets_transactions(
&bank,
&bank_creation_time,
recorder,
&msgs,
packet_indexes,
transaction_status_sender.clone(),
gossip_vote_sender,
);
new_tx_count += processed;
@@ -1165,6 +1194,7 @@ impl BankingStage {
duplicates,
);
// If there were retryable transactions, add the unexpired ones to the buffered queue
if processed < verified_txs_len {
let next_leader = poh.lock().unwrap().next_slot_leader();
// Walk thru rest of the transactions and filter out the invalid (e.g. too old) ones
@@ -1280,7 +1310,7 @@ pub fn create_test_recorder(
) {
let exit = Arc::new(AtomicBool::new(false));
let poh_config = Arc::new(poh_config.unwrap_or_default());
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
let (mut poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
@@ -1301,6 +1331,7 @@ pub fn create_test_recorder(
bank.ticks_per_slot(),
poh_service::DEFAULT_PINNED_CPU_CORE,
poh_service::DEFAULT_HASHES_PER_BATCH,
record_receiver,
);
(exit, poh_recorder, poh_service, entry_receiver)
@@ -1310,7 +1341,7 @@ pub fn create_test_recorder(
mod tests {
use super::*;
use crate::{
cluster_info::Node, poh_recorder::WorkingBank,
cluster_info::Node, poh_recorder::Record, poh_recorder::WorkingBank,
transaction_status_service::TransactionStatusService,
};
use crossbeam_channel::unbounded;
@@ -1330,7 +1361,15 @@ mod tests {
transaction::TransactionError,
};
use solana_transaction_status::TransactionWithStatusMeta;
use std::{net::SocketAddr, path::Path, sync::atomic::Ordering, thread::sleep};
use std::{
net::SocketAddr,
path::Path,
sync::{
atomic::{AtomicBool, Ordering},
mpsc::Receiver,
},
thread::sleep,
};
#[test]
fn test_banking_stage_shutdown1() {
@@ -1441,7 +1480,7 @@ mod tests {
mint_keypair,
..
} = create_genesis_config(10);
let bank = Arc::new(Bank::new(&genesis_config));
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let start_hash = bank.last_blockhash();
let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
@@ -1516,7 +1555,7 @@ mod tests {
drop(poh_recorder);
let mut blockhash = start_hash;
let bank = Bank::new(&genesis_config);
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
bank.process_transaction(&fund_tx).unwrap();
//receive entries + ticks
loop {
@@ -1594,7 +1633,7 @@ mod tests {
let entry_receiver = {
// start a banking_stage to eat verified receiver
let bank = Arc::new(Bank::new(&genesis_config));
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
@@ -1638,7 +1677,7 @@ mod tests {
.map(|(_bank, (entry, _tick_height))| entry)
.collect();
let bank = Bank::new(&genesis_config);
let bank = Bank::new_no_wallclock_throttle(&genesis_config);
for entry in &entries {
bank.process_transactions(&entry.transactions)
.iter()
@@ -1655,14 +1694,18 @@ mod tests {
#[test]
fn test_bank_record_transactions() {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: bank.tick_height(),
max_tick_height: std::u64::MAX,
};
@@ -1670,7 +1713,8 @@ mod tests {
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
// TODO use record_receiver
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
@@ -1681,8 +1725,11 @@ mod tests {
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let (poh_simulator, exit) = simulate_poh(record_receiver, &poh_recorder);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let pubkey = solana_sdk::pubkey::new_rand();
let keypair2 = Keypair::new();
@@ -1694,12 +1741,8 @@ mod tests {
];
let mut results = vec![(Ok(()), None), (Ok(()), None)];
let _ = BankingStage::record_transactions(
bank.slot(),
&transactions,
&results,
&poh_recorder,
);
let _ =
BankingStage::record_transactions(bank.slot(), &transactions, &results, &recorder);
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
assert_eq!(entry.transactions.len(), transactions.len());
@@ -1711,12 +1754,8 @@ mod tests {
)),
None,
);
let (res, retryable) = BankingStage::record_transactions(
bank.slot(),
&transactions,
&results,
&poh_recorder,
);
let (res, retryable) =
BankingStage::record_transactions(bank.slot(), &transactions, &results, &recorder);
res.unwrap();
assert!(retryable.is_empty());
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
@@ -1724,12 +1763,8 @@ mod tests {
// Other TransactionErrors should not be recorded
results[0] = (Err(TransactionError::AccountNotFound), None);
let (res, retryable) = BankingStage::record_transactions(
bank.slot(),
&transactions,
&results,
&poh_recorder,
);
let (res, retryable) =
BankingStage::record_transactions(bank.slot(), &transactions, &results, &recorder);
res.unwrap();
assert!(retryable.is_empty());
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
@@ -1742,7 +1777,7 @@ mod tests {
bank.slot() + 1,
&transactions,
&results,
&poh_recorder,
&recorder,
);
assert_matches!(res, Err(PohRecorderError::MaxHeightReached));
// The first result was an error so it's filtered out. The second result was Ok(),
@@ -1750,6 +1785,9 @@ mod tests {
assert_eq!(retryable, vec![1]);
// Should receive nothing from PohRecorder b/c record failed
assert!(entry_receiver.try_recv().is_err());
exit.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
@@ -1915,80 +1953,80 @@ mod tests {
fn test_should_process_or_forward_packets() {
let my_pubkey = solana_sdk::pubkey::new_rand();
let my_pubkey1 = solana_sdk::pubkey::new_rand();
assert_eq!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, true, false, false),
let bank = Arc::new(Bank::default());
assert_matches!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, Some(&bank), false, false),
BufferedPacketsDecision::Hold
);
assert_eq!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, false, false, false),
assert_matches!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, None, false, false),
BufferedPacketsDecision::Hold
);
assert_eq!(
BankingStage::consume_or_forward_packets(&my_pubkey1, None, false, false, false),
assert_matches!(
BankingStage::consume_or_forward_packets(&my_pubkey1, None, None, false, false),
BufferedPacketsDecision::Hold
);
assert_eq!(
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
false,
None,
false,
false
),
BufferedPacketsDecision::Forward
);
assert_eq!(
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
false,
None,
true,
true
),
BufferedPacketsDecision::Hold
);
assert_eq!(
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
false,
None,
true,
false
),
BufferedPacketsDecision::ForwardAndHold
);
assert_eq!(
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
true,
Some(&bank),
false,
false
),
BufferedPacketsDecision::Consume
BufferedPacketsDecision::Consume(_)
);
assert_eq!(
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey1,
Some(my_pubkey1),
false,
None,
false,
false
),
BufferedPacketsDecision::Hold
);
assert_eq!(
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey1,
Some(my_pubkey1),
true,
Some(&bank),
false,
false
),
BufferedPacketsDecision::Consume
BufferedPacketsDecision::Consume(_)
);
}
@@ -2010,8 +2048,10 @@ mod tests {
genesis_config.hash(),
)];
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: bank.tick_height(),
max_tick_height: bank.tick_height() + 1,
};
@@ -2019,7 +2059,7 @@ mod tests {
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
@@ -2030,15 +2070,18 @@ mod tests {
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let (poh_simulator, exit) = simulate_poh(record_receiver, &poh_recorder);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
BankingStage::process_and_record_transactions(
&bank,
&transactions,
&poh_recorder,
&recorder,
0,
None,
&gossip_vote_sender,
@@ -2075,7 +2118,7 @@ mod tests {
BankingStage::process_and_record_transactions(
&bank,
&transactions,
&poh_recorder,
&recorder,
0,
None,
&gossip_vote_sender,
@@ -2084,11 +2127,36 @@ mod tests {
Err(PohRecorderError::MaxHeightReached)
);
exit.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
assert_eq!(bank.get_balance(&pubkey), 1);
}
Blockstore::destroy(&ledger_path).unwrap();
}
fn simulate_poh(
record_receiver: Receiver<Record>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
) -> (JoinHandle<()>, Arc<AtomicBool>) {
let exit = Arc::new(AtomicBool::new(false));
let exit_ = exit.clone();
let poh_recorder = poh_recorder.clone();
let tick_producer = Builder::new()
.name("solana-simulate_poh".to_string())
.spawn(move || loop {
PohService::read_record_receiver_and_process(
&poh_recorder,
&record_receiver,
Duration::from_millis(10),
);
if exit_.load(Ordering::Relaxed) {
break;
}
});
(tick_producer.unwrap(), exit)
}
#[test]
fn test_bank_process_and_record_transactions_account_in_use() {
solana_logger::setup();
@@ -2106,8 +2174,10 @@ mod tests {
system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()),
];
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: bank.tick_height(),
max_tick_height: bank.tick_height() + 1,
};
@@ -2115,7 +2185,7 @@ mod tests {
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver) = PohRecorder::new(
let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
@@ -2126,21 +2196,27 @@ mod tests {
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let (poh_simulator, exit) = simulate_poh(record_receiver, &poh_recorder);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let (result, unprocessed) = BankingStage::process_and_record_transactions(
&bank,
&transactions,
&poh_recorder,
&recorder,
0,
None,
&gossip_vote_sender,
);
exit.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
assert!(result.is_ok());
assert_eq!(unprocessed.len(), 1);
}
@@ -2196,7 +2272,7 @@ mod tests {
mint_keypair,
..
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
let pubkey = solana_sdk::pubkey::new_rand();
@@ -2210,7 +2286,7 @@ mod tests {
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver) = PohRecorder::new(
let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
@@ -2222,17 +2298,21 @@ mod tests {
&Arc::new(PohConfig::default()),
);
// Poh Recorder has not working bank, so should throw MaxHeightReached error on
// Poh Recorder has no working bank, so should throw MaxHeightReached error on
// record
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let recorder = poh_recorder.recorder();
let (poh_simulator, exit) =
simulate_poh(record_receiver, &Arc::new(Mutex::new(poh_recorder)));
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let (processed_transactions_count, mut retryable_txs) =
BankingStage::process_transactions(
&bank,
&Instant::now(),
&transactions,
&poh_recorder,
&recorder,
None,
&gossip_vote_sender,
);
@@ -2242,6 +2322,9 @@ mod tests {
retryable_txs.sort_unstable();
let expected: Vec<usize> = (0..transactions.len()).collect();
assert_eq!(retryable_txs, expected);
exit.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
@@ -2276,8 +2359,10 @@ mod tests {
let transactions = vec![success_tx, ix_error_tx, fail_tx];
bank.transfer(4, &mint_keypair, &keypair1.pubkey()).unwrap();
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: bank.tick_height(),
max_tick_height: bank.tick_height() + 1,
};
@@ -2286,7 +2371,7 @@ mod tests {
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let blockstore = Arc::new(blockstore);
let (poh_recorder, _entry_receiver) = PohRecorder::new(
let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
@@ -2297,8 +2382,11 @@ mod tests {
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let (poh_simulator, exit) = simulate_poh(record_receiver, &poh_recorder);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let shreds = entries_to_test_shreds(entries, bank.slot(), 0, true, 0);
@@ -2308,6 +2396,7 @@ mod tests {
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver,
Arc::new(AtomicU64::default()),
blockstore.clone(),
&Arc::new(AtomicBool::new(false)),
);
@@ -2317,7 +2406,7 @@ mod tests {
let _ = BankingStage::process_and_record_transactions(
&bank,
&transactions,
&poh_recorder,
&recorder,
0,
Some(TransactionStatusSender {
sender: transaction_status_sender,
@@ -2328,7 +2417,7 @@ mod tests {
transaction_status_service.join().unwrap();
let confirmed_block = blockstore.get_confirmed_block(bank.slot(), false).unwrap();
let confirmed_block = blockstore.get_rooted_block(bank.slot(), false).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for TransactionWithStatusMeta { transaction, meta } in
@@ -2350,10 +2439,14 @@ mod tests {
assert_eq!(meta, None);
}
}
exit.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[allow(clippy::type_complexity)]
fn setup_conflicting_transactions(
ledger_path: &Path,
) -> (
@@ -2361,6 +2454,8 @@ mod tests {
Arc<Bank>,
Arc<Mutex<PohRecorder>>,
Receiver<WorkingBankEntry>,
JoinHandle<()>,
Arc<AtomicBool>,
) {
Blockstore::destroy(&ledger_path).unwrap();
let genesis_config_info = create_genesis_config(10_000);
@@ -2372,7 +2467,7 @@ mod tests {
let blockstore =
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
let bank = Arc::new(Bank::new(&genesis_config));
let (poh_recorder, entry_receiver) = PohRecorder::new(
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
@@ -2394,15 +2489,25 @@ mod tests {
system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()),
system_transaction::transfer(&mint_keypair, &pubkey2, 1, genesis_config.hash()),
];
(transactions, bank, poh_recorder, entry_receiver)
let (poh_simulator, exit) = simulate_poh(record_receiver, &poh_recorder);
(
transactions,
bank,
poh_recorder,
entry_receiver,
poh_simulator,
exit,
)
}
#[test]
fn test_consume_buffered_packets() {
let ledger_path = get_tmp_ledger_path!();
{
let (transactions, bank, poh_recorder, _entry_receiver) =
let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator, exit) =
setup_conflicting_transactions(&ledger_path);
let recorder = poh_recorder.lock().unwrap().recorder();
let num_conflicting_transactions = transactions.len();
let mut packets_vec = to_packets_chunked(&transactions, num_conflicting_transactions);
assert_eq!(packets_vec.len(), 1);
@@ -2420,14 +2525,17 @@ mod tests {
// When the working bank in poh_recorder is None, no packets should be processed
assert!(!poh_recorder.lock().unwrap().has_bank());
let max_tx_processing_ns = std::u128::MAX;
BankingStage::consume_buffered_packets(
&Pubkey::default(),
max_tx_processing_ns,
&poh_recorder,
&mut buffered_packets,
None,
&gossip_vote_sender,
None::<Box<dyn Fn()>>,
None,
&recorder,
);
assert_eq!(buffered_packets[0].1.len(), num_conflicting_transactions);
// When the poh recorder has a bank, should process all non conflicting buffered packets.
@@ -2436,12 +2544,14 @@ mod tests {
poh_recorder.lock().unwrap().set_bank(&bank);
BankingStage::consume_buffered_packets(
&Pubkey::default(),
max_tx_processing_ns,
&poh_recorder,
&mut buffered_packets,
None,
&gossip_vote_sender,
None::<Box<dyn Fn()>>,
None,
&recorder,
);
if num_expected_unprocessed == 0 {
assert!(buffered_packets.is_empty())
@@ -2449,6 +2559,8 @@ mod tests {
assert_eq!(buffered_packets[0].1.len(), num_expected_unprocessed);
}
}
exit.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
@@ -2457,7 +2569,7 @@ mod tests {
fn test_consume_buffered_packets_interrupted() {
let ledger_path = get_tmp_ledger_path!();
{
let (transactions, bank, poh_recorder, _entry_receiver) =
let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator, exit) =
setup_conflicting_transactions(&ledger_path);
let num_conflicting_transactions = transactions.len();
let packets_vec = to_packets_chunked(&transactions, 1);
@@ -2485,6 +2597,7 @@ mod tests {
let interrupted_iteration = 1;
poh_recorder.lock().unwrap().set_bank(&bank);
let poh_recorder_ = poh_recorder.clone();
let recorder = poh_recorder_.lock().unwrap().recorder();
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
// Start up thread to process the banks
let t_consume = Builder::new()
@@ -2492,12 +2605,14 @@ mod tests {
.spawn(move || {
BankingStage::consume_buffered_packets(
&Pubkey::default(),
std::u128::MAX,
&poh_recorder_,
&mut buffered_packets,
None,
&gossip_vote_sender,
test_fn,
None,
&recorder,
);
// Check everything is correct. All indexes after `interrupted_iteration`
@@ -2531,6 +2646,8 @@ mod tests {
}
t_consume.join().unwrap();
exit.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}

View File

@@ -373,7 +373,7 @@ pub fn get_broadcast_peers(
/// # Remarks
pub fn broadcast_shreds(
s: &UdpSocket,
shreds: &Arc<Vec<Shred>>,
shreds: &[Shred],
peers_and_stakes: &[(u64, usize)],
peers: &[ContactInfo],
last_datapoint_submit: &Arc<AtomicU64>,
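broadcast_shreds now borrows a plain slice instead of an Arc<Vec<Shred>>; a tiny sketch, not part of the diff and using a placeholder Shred type, of why Arc-holding callers still compile:

use std::sync::Arc;

struct Shred; // placeholder for solana_ledger::shred::Shred

fn broadcast_shreds(shreds: &[Shred]) -> usize {
    shreds.len()
}

fn main() {
    let owned: Vec<Shred> = vec![Shred, Shred];
    let shared: Arc<Vec<Shred>> = Arc::new(vec![Shred]);
    assert_eq!(broadcast_shreds(&owned), 2);
    // Arc<Vec<T>> derefs to Vec<T> and then to [T], so &shared coerces to &[Shred].
    assert_eq!(broadcast_shreds(&shared), 1);
}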
@@ -472,12 +472,14 @@ pub mod test {
) {
let num_entries = max_ticks_per_n_shreds(num, None);
let (data_shreds, _) = make_slot_entries(slot, 0, num_entries);
let keypair = Arc::new(Keypair::new());
let shredder = Shredder::new(slot, 0, RECOMMENDED_FEC_RATE, keypair, 0, 0)
.expect("Expected to create a new shredder");
let coding_shreds = shredder
.data_shreds_to_coding_shreds(&data_shreds[0..], &mut ProcessShredsStats::default());
let keypair = Keypair::new();
let coding_shreds = Shredder::data_shreds_to_coding_shreds(
&keypair,
&data_shreds[0..],
RECOMMENDED_FEC_RATE,
&mut ProcessShredsStats::default(),
)
.unwrap();
(
data_shreds.clone(),
coding_shreds.clone(),

View File

@@ -1,6 +1,6 @@
use crate::poh_recorder::WorkingBankEntry;
use crate::result::Result;
use solana_ledger::entry::Entry;
use solana_ledger::{entry::Entry, shred::Shred};
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use std::{
@@ -16,11 +16,15 @@ pub(super) struct ReceiveResults {
pub last_tick_height: u64,
}
#[derive(Copy, Clone)]
#[derive(Clone)]
pub struct UnfinishedSlotInfo {
pub next_shred_index: u32,
pub slot: Slot,
pub parent: Slot,
// Data shreds buffered to make a batch of size
// MAX_DATA_SHREDS_PER_FEC_BLOCK.
pub(crate) data_shreds_buffer: Vec<Shred>,
pub(crate) fec_set_offset: u32, // See Shredder::fec_set_index.
}
/// This parameter tunes how many entries are received in one iteration of recv loop

View File

@@ -7,12 +7,13 @@ use super::{
use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
use solana_ledger::{
entry::Entry,
shred::{ProcessShredsStats, Shred, Shredder, RECOMMENDED_FEC_RATE, SHRED_TICK_REFERENCE_MASK},
shred::{
ProcessShredsStats, Shred, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK, RECOMMENDED_FEC_RATE,
SHRED_TICK_REFERENCE_MASK,
},
};
use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
use std::collections::HashMap;
use std::sync::RwLock;
use std::time::Duration;
use std::{collections::HashMap, ops::Deref, sync::RwLock, time::Duration};
#[derive(Clone)]
pub struct StandardBroadcastRun {
@@ -40,94 +41,114 @@ impl StandardBroadcastRun {
pub(super) fn new(keypair: Arc<Keypair>, shred_version: u16) -> Self {
Self {
process_shreds_stats: ProcessShredsStats::default(),
transmit_shreds_stats: Arc::new(Mutex::new(SlotBroadcastStats::default())),
insert_shreds_stats: Arc::new(Mutex::new(SlotBroadcastStats::default())),
transmit_shreds_stats: Arc::default(),
insert_shreds_stats: Arc::default(),
unfinished_slot: None,
current_slot_and_parent: None,
slot_broadcast_start: None,
keypair,
shred_version,
last_datapoint_submit: Arc::new(AtomicU64::new(0)),
last_datapoint_submit: Arc::default(),
num_batches: 0,
broadcast_peer_cache: Arc::new(RwLock::new(BroadcastPeerCache::default())),
last_peer_update: Arc::new(AtomicU64::new(0)),
broadcast_peer_cache: Arc::default(),
last_peer_update: Arc::default(),
}
}
fn check_for_interrupted_slot(&mut self, max_ticks_in_slot: u8) -> Option<Shred> {
let (slot, _) = self.current_slot_and_parent.unwrap();
let mut last_unfinished_slot_shred = self
.unfinished_slot
.map(|last_unfinished_slot| {
if last_unfinished_slot.slot != slot {
self.report_and_reset_stats();
Some(Shred::new_from_data(
last_unfinished_slot.slot,
last_unfinished_slot.next_shred_index,
(last_unfinished_slot.slot - last_unfinished_slot.parent) as u16,
None,
true,
true,
max_ticks_in_slot & SHRED_TICK_REFERENCE_MASK,
self.shred_version,
last_unfinished_slot.next_shred_index,
))
} else {
None
}
})
.unwrap_or(None);
// This shred should only be Some if the previous slot was interrupted
if let Some(ref mut shred) = last_unfinished_slot_shred {
Shredder::sign_shred(&self.keypair, shred);
self.unfinished_slot = None;
// If the current slot has changed, generates an empty shred indicating
// last shred in the previous slot, along with coding shreds for the data
// shreds buffered.
fn finish_prev_slot(
&mut self,
max_ticks_in_slot: u8,
stats: &mut ProcessShredsStats,
) -> Vec<Shred> {
let (current_slot, _) = self.current_slot_and_parent.unwrap();
match self.unfinished_slot {
None => Vec::default(),
Some(ref state) if state.slot == current_slot => Vec::default(),
Some(ref mut state) => {
let parent_offset = state.slot - state.parent;
let reference_tick = max_ticks_in_slot & SHRED_TICK_REFERENCE_MASK;
let fec_set_index =
Shredder::fec_set_index(state.next_shred_index, state.fec_set_offset);
let mut shred = Shred::new_from_data(
state.slot,
state.next_shred_index,
parent_offset as u16,
None, // data
true, // is_last_in_fec_set
true, // is_last_in_slot
reference_tick,
self.shred_version,
fec_set_index.unwrap(),
);
Shredder::sign_shred(self.keypair.deref(), &mut shred);
state.data_shreds_buffer.push(shred.clone());
let mut shreds = make_coding_shreds(
self.keypair.deref(),
&mut self.unfinished_slot,
true, // is_last_in_slot
stats,
);
shreds.insert(0, shred);
self.report_and_reset_stats();
self.unfinished_slot = None;
shreds
}
}
}
last_unfinished_slot_shred
}
fn init_shredder(&self, blockstore: &Blockstore, reference_tick: u8) -> (Shredder, u32) {
let (slot, parent_slot) = self.current_slot_and_parent.unwrap();
let next_shred_index = self
.unfinished_slot
.map(|s| s.next_shred_index)
.unwrap_or_else(|| {
blockstore
.meta(slot)
.expect("Database error")
.map(|meta| meta.consumed)
.unwrap_or(0) as u32
});
(
Shredder::new(
slot,
parent_slot,
RECOMMENDED_FEC_RATE,
self.keypair.clone(),
reference_tick,
self.shred_version,
)
.expect("Expected to create a new shredder"),
next_shred_index,
)
}
fn entries_to_data_shreds(
&mut self,
shredder: &Shredder,
next_shred_index: u32,
entries: &[Entry],
blockstore: &Blockstore,
reference_tick: u8,
is_slot_end: bool,
process_stats: &mut ProcessShredsStats,
) -> Vec<Shred> {
let (data_shreds, new_next_shred_index) =
shredder.entries_to_data_shreds(entries, is_slot_end, next_shred_index, process_stats);
let (slot, parent_slot) = self.current_slot_and_parent.unwrap();
let (next_shred_index, fec_set_offset) = match &self.unfinished_slot {
Some(state) => (state.next_shred_index, state.fec_set_offset),
None => match blockstore.meta(slot).unwrap() {
Some(slot_meta) => {
let shreds_consumed = slot_meta.consumed as u32;
(shreds_consumed, shreds_consumed)
}
None => (0, 0),
},
};
let (data_shreds, next_shred_index) = Shredder::new(
slot,
parent_slot,
RECOMMENDED_FEC_RATE,
self.keypair.clone(),
reference_tick,
self.shred_version,
)
.unwrap()
.entries_to_data_shreds(
entries,
is_slot_end,
next_shred_index,
fec_set_offset,
process_stats,
);
let mut data_shreds_buffer = match &mut self.unfinished_slot {
Some(state) => {
assert_eq!(state.slot, slot);
std::mem::take(&mut state.data_shreds_buffer)
}
None => Vec::default(),
};
data_shreds_buffer.extend(data_shreds.clone());
self.unfinished_slot = Some(UnfinishedSlotInfo {
next_shred_index: new_next_shred_index,
slot: shredder.slot,
parent: shredder.parent_slot,
next_shred_index,
slot,
parent: parent_slot,
data_shreds_buffer,
fec_set_offset,
});
data_shreds
}
@@ -184,19 +205,16 @@ impl StandardBroadcastRun {
let mut to_shreds_time = Measure::start("broadcast_to_shreds");
// 1) Check if slot was interrupted
let last_unfinished_slot_shred =
self.check_for_interrupted_slot(bank.ticks_per_slot() as u8);
let prev_slot_shreds =
self.finish_prev_slot(bank.ticks_per_slot() as u8, &mut process_stats);
// 2) Convert entries to shreds and coding shreds
let (shredder, next_shred_index) = self.init_shredder(
blockstore,
(bank.tick_height() % bank.ticks_per_slot()) as u8,
);
let is_last_in_slot = last_tick_height == bank.max_tick_height();
let reference_tick = bank.tick_height() % bank.ticks_per_slot();
let data_shreds = self.entries_to_data_shreds(
&shredder,
next_shred_index,
&receive_results.entries,
blockstore,
reference_tick as u8,
is_last_in_slot,
&mut process_stats,
);
@@ -208,27 +226,25 @@ impl StandardBroadcastRun {
.insert_shreds(first, None, true)
.expect("Failed to insert shreds in blockstore");
}
let last_data_shred = data_shreds.len();
to_shreds_time.stop();
let mut get_leader_schedule_time = Measure::start("broadcast_get_leader_schedule");
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let stakes = bank.epoch_staked_nodes(bank_epoch);
let stakes = stakes.map(Arc::new);
let stakes = bank.epoch_staked_nodes(bank_epoch).map(Arc::new);
// Broadcast the last shred of the interrupted slot if necessary
if let Some(last_shred) = last_unfinished_slot_shred {
if !prev_slot_shreds.is_empty() {
let batch_info = Some(BroadcastShredBatchInfo {
slot: last_shred.slot(),
slot: prev_slot_shreds[0].slot(),
num_expected_batches: Some(old_num_batches + 1),
slot_start_ts: old_broadcast_start.expect(
"Old broadcast start time for previous slot must exist if the previous slot
was interrupted",
),
});
let last_shred = Arc::new(vec![last_shred]);
socket_sender.send(((stakes.clone(), last_shred.clone()), batch_info.clone()))?;
blockstore_sender.send((last_shred, batch_info))?;
let shreds = Arc::new(prev_slot_shreds);
socket_sender.send(((stakes.clone(), shreds.clone()), batch_info.clone()))?;
blockstore_sender.send((shreds, batch_info))?;
}
// Increment by two batches, one for the data batch, one for the coding batch.
@@ -255,11 +271,15 @@ impl StandardBroadcastRun {
// Send data shreds
let data_shreds = Arc::new(data_shreds);
socket_sender.send(((stakes.clone(), data_shreds.clone()), batch_info.clone()))?;
blockstore_sender.send((data_shreds.clone(), batch_info.clone()))?;
blockstore_sender.send((data_shreds, batch_info.clone()))?;
// Create and send coding shreds
let coding_shreds = shredder
.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred], &mut process_stats);
let coding_shreds = make_coding_shreds(
self.keypair.deref(),
&mut self.unfinished_slot,
is_last_in_slot,
&mut process_stats,
);
let coding_shreds = Arc::new(coding_shreds);
socket_sender.send(((stakes, coding_shreds.clone()), batch_info.clone()))?;
blockstore_sender.send((coding_shreds, batch_info))?;
@@ -378,15 +398,15 @@ impl StandardBroadcastRun {
fn report_and_reset_stats(&mut self) {
let stats = &self.process_shreds_stats;
assert!(self.unfinished_slot.is_some());
let unfinished_slot = self.unfinished_slot.as_ref().unwrap();
datapoint_info!(
"broadcast-process-shreds-stats",
("slot", self.unfinished_slot.unwrap().slot as i64, i64),
("slot", unfinished_slot.slot as i64, i64),
("shredding_time", stats.shredding_elapsed, i64),
("receive_time", stats.receive_elapsed, i64),
(
"num_data_shreds",
i64::from(self.unfinished_slot.unwrap().next_shred_index),
unfinished_slot.next_shred_index as i64,
i64
),
(
@@ -409,6 +429,33 @@ impl StandardBroadcastRun {
}
}
// Consumes data_shreds_buffer, returning the corresponding coding shreds.
fn make_coding_shreds(
keypair: &Keypair,
unfinished_slot: &mut Option<UnfinishedSlotInfo>,
is_slot_end: bool,
stats: &mut ProcessShredsStats,
) -> Vec<Shred> {
let data_shreds = match unfinished_slot {
None => Vec::default(),
Some(unfinished_slot) => {
let size = unfinished_slot.data_shreds_buffer.len();
// Consume a multiple of MAX_DATA_SHREDS_PER_FEC_BLOCK (32) data shreds, unless this is the slot end.
let offset = if is_slot_end {
0
} else {
size % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize
};
unfinished_slot
.data_shreds_buffer
.drain(0..size - offset)
.collect()
}
};
Shredder::data_shreds_to_coding_shreds(keypair, &data_shreds, RECOMMENDED_FEC_RATE, stats)
.unwrap()
}
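For intuition, a minimal standalone sketch of the batching rule above (hypothetical helper; the 32 stands in for MAX_DATA_SHREDS_PER_FEC_BLOCK): coding shreds are generated from whole FEC sets, and any partial tail is held back until the slot ends.
// Standalone sketch of the drain rule in make_coding_shreds: keep back the
// partial tail of a FEC set until the slot ends.
const FEC_SET_SIZE: usize = 32; // stands in for MAX_DATA_SHREDS_PER_FEC_BLOCK

fn num_buffered_shreds_to_drain(buffered: usize, is_slot_end: bool) -> usize {
    let offset = if is_slot_end {
        0
    } else {
        buffered % FEC_SET_SIZE
    };
    buffered - offset
}

fn main() {
    assert_eq!(num_buffered_shreds_to_drain(70, false), 64); // two full FEC sets
    assert_eq!(num_buffered_shreds_to_drain(70, true), 70);  // slot end flushes the tail
    assert_eq!(num_buffered_shreds_to_drain(31, false), 0);  // wait for a full set
}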
impl BroadcastRun for StandardBroadcastRun {
fn run(
&mut self,
@@ -418,6 +465,8 @@ impl BroadcastRun for StandardBroadcastRun {
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
) -> Result<()> {
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
// TODO: Confirm that last chunk of coding shreds
// will not be lost or delayed for too long.
self.process_receive_results(
blockstore,
socket_sender,
@@ -508,6 +557,8 @@ mod test {
next_shred_index,
slot,
parent,
data_shreds_buffer: Vec::default(),
fec_set_offset: next_shred_index,
});
run.slot_broadcast_start = Some(Instant::now());
@@ -515,8 +566,9 @@ mod test {
run.current_slot_and_parent = Some((4, 2));
// Slot 2 interrupted slot 1
let shred = run
.check_for_interrupted_slot(0)
let shreds = run.finish_prev_slot(0, &mut ProcessShredsStats::default());
let shred = shreds
.get(0)
.expect("Expected a shred that signals an interrupt");
// Validate the shred
@@ -642,6 +694,50 @@ mod test {
);
}
#[test]
fn test_buffer_data_shreds() {
let num_shreds_per_slot = 2;
let (blockstore, genesis_config, _cluster_info, bank, leader_keypair, _socket) =
setup(num_shreds_per_slot);
let (bsend, brecv) = channel();
let (ssend, _srecv) = channel();
let mut last_tick_height = 0;
let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair, 0);
let mut process_ticks = |num_ticks| {
let ticks = create_ticks(num_ticks, 0, genesis_config.hash());
last_tick_height += (ticks.len() - 1) as u64;
let receive_results = ReceiveResults {
entries: ticks,
time_elapsed: Duration::new(1, 0),
bank: bank.clone(),
last_tick_height,
};
standard_broadcast_run
.process_receive_results(&blockstore, &ssend, &bsend, receive_results)
.unwrap();
};
for i in 0..3 {
process_ticks((i + 1) * 100);
}
let mut shreds = Vec::<Shred>::new();
while let Ok((recv_shreds, _)) = brecv.recv_timeout(Duration::from_secs(1)) {
shreds.extend(recv_shreds.deref().clone());
}
assert!(shreds.len() < 32, "shreds.len(): {}", shreds.len());
assert!(shreds.iter().all(|shred| shred.is_data()));
process_ticks(75);
while let Ok((recv_shreds, _)) = brecv.recv_timeout(Duration::from_secs(1)) {
shreds.extend(recv_shreds.deref().clone());
}
assert!(shreds.len() > 64, "shreds.len(): {}", shreds.len());
let num_coding_shreds = shreds.iter().filter(|shred| shred.is_code()).count();
assert_eq!(
num_coding_shreds, 32,
"num coding shreds: {}",
num_coding_shreds
);
}
#[test]
fn test_slot_finish() {
// Setup

View File

@@ -115,7 +115,7 @@ pub const DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS: u64 = 60_000;
/// Minimum serialized size of a Protocol::PullResponse packet.
const PULL_RESPONSE_MIN_SERIALIZED_SIZE: usize = 161;
// Limit number of unique pubkeys in the crds table.
const CRDS_UNIQUE_PUBKEY_CAPACITY: usize = 4096;
pub(crate) const CRDS_UNIQUE_PUBKEY_CAPACITY: usize = 4096;
/// Minimum stake that a node should have so that its CRDS values are
/// propagated through gossip (a few types are exempted).
const MIN_STAKE_FOR_GOSSIP: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;
@@ -252,7 +252,6 @@ struct GossipStats {
get_accounts_hash: Counter,
all_tvu_peers: Counter,
tvu_peers: Counter,
retransmit_peers: Counter,
repair_peers: Counter,
new_push_requests: Counter,
new_push_requests2: Counter,
@@ -1383,21 +1382,6 @@ impl ClusterInfo {
.collect()
}
/// all peers that have a valid tvu
pub fn retransmit_peers(&self) -> Vec<ContactInfo> {
self.time_gossip_read_lock("retransmit_peers", &self.stats.retransmit_peers)
.crds
.get_nodes_contact_info()
.filter(|x| {
x.id != self.id()
&& x.shred_version == self.my_shred_version()
&& ContactInfo::is_valid_address(&x.tvu)
&& ContactInfo::is_valid_address(&x.tvu_forwards)
})
.cloned()
.collect()
}
/// all tvu peers with valid gossip addrs that likely have the slot being requested
pub fn repair_peers(&self, slot: Slot) -> Vec<ContactInfo> {
let mut time = Measure::start("repair_peers");
@@ -1461,9 +1445,9 @@ impl ClusterInfo {
stakes_and_index: &[(u64, usize)],
seed: [u8; 32],
) -> Vec<(u64, usize)> {
let stake_weights = stakes_and_index.iter().map(|(w, _)| *w).collect();
let stake_weights: Vec<_> = stakes_and_index.iter().map(|(w, _)| *w).collect();
let shuffle = weighted_shuffle(stake_weights, seed);
let shuffle = weighted_shuffle(&stake_weights, seed);
shuffle.iter().map(|x| stakes_and_index[*x]).collect()
}
@@ -1473,7 +1457,7 @@ impl ClusterInfo {
&self,
stakes: Option<&HashMap<Pubkey, u64>>,
) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
let mut peers = self.retransmit_peers();
let mut peers = self.tvu_peers();
// insert "self" into this list for the layer and neighborhood computation
peers.push(self.my_contact_info());
let stakes_and_index = ClusterInfo::sorted_stakes_with_index(&peers, stakes);
@@ -1520,20 +1504,22 @@ impl ClusterInfo {
pub fn retransmit_to(
peers: &[&ContactInfo],
packet: &mut Packet,
slot_leader_pubkey: Option<Pubkey>,
s: &UdpSocket,
forwarded: bool,
) -> Result<()> {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = peers
.iter()
.filter(|v| v.id != slot_leader_pubkey.unwrap_or_default())
.map(|v| if forwarded { &v.tvu_forwards } else { &v.tvu })
.collect();
let dests: Vec<_> = if forwarded {
peers
.iter()
.map(|peer| &peer.tvu_forwards)
.filter(|addr| ContactInfo::is_valid_address(addr))
.collect()
} else {
peers.iter().map(|peer| &peer.tvu).collect()
};
let mut sent = 0;
while sent < dests.len() {
match multicast(s, &mut packet.data[..packet.meta.size], &dests[sent..]) {
match multicast(s, &packet.data[..packet.meta.size], &dests[sent..]) {
Ok(n) => sent += n,
Err(e) => {
inc_new_counter_error!(
@@ -2902,7 +2888,6 @@ impl ClusterInfo {
self.stats.gossip_packets_dropped_count.clear(),
i64
),
("retransmit_peers", self.stats.retransmit_peers.clear(), i64),
("repair_peers", self.stats.repair_peers.clear(), i64),
(
"new_push_requests",
@@ -3481,6 +3466,7 @@ mod tests {
};
use itertools::izip;
use rand::seq::SliceRandom;
use serial_test::serial;
use solana_ledger::shred::Shredder;
use solana_sdk::signature::{Keypair, Signer};
use solana_vote_program::{vote_instruction, vote_state::Vote};
@@ -4757,4 +4743,54 @@ mod tests {
}
}
}
#[test]
#[serial]
fn test_pull_request_time_pruning() {
let node = Node::new_localhost();
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));
let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
cluster_info.set_entrypoint(entrypoint);
let mut rng = rand::thread_rng();
let shred_version = cluster_info.my_shred_version();
let mut peers: Vec<Pubkey> = vec![];
const NO_ENTRIES: usize = CRDS_UNIQUE_PUBKEY_CAPACITY + 128;
let data: Vec<_> = repeat_with(|| {
let keypair = Keypair::new();
peers.push(keypair.pubkey());
let mut rand_ci = ContactInfo::new_rand(&mut rng, Some(keypair.pubkey()));
rand_ci.shred_version = shred_version;
rand_ci.wallclock = timestamp();
CrdsValue::new_signed(CrdsData::ContactInfo(rand_ci), &keypair)
})
.take(NO_ENTRIES)
.collect();
let timeouts = cluster_info.gossip.read().unwrap().make_timeouts_test();
assert_eq!(
(0, 0, NO_ENTRIES),
cluster_info.handle_pull_response(&entrypoint_pubkey, data, &timeouts)
);
let now = timestamp();
for peer in peers {
cluster_info
.gossip
.write()
.unwrap()
.mark_pull_request_creation_time(&peer, now);
}
assert_eq!(
cluster_info
.gossip
.read()
.unwrap()
.pull
.pull_request_time
.len(),
CRDS_UNIQUE_PUBKEY_CAPACITY
);
}
}

View File

@@ -4,6 +4,7 @@ use crate::{
optimistic_confirmation_verifier::OptimisticConfirmationVerifier,
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
poh_recorder::PohRecorder,
replay_stage::DUPLICATE_THRESHOLD,
result::{Error, Result},
rpc_subscriptions::RpcSubscriptions,
sigverify,
@@ -21,6 +22,7 @@ use solana_perf::packet::{self, Packets};
use solana_runtime::{
bank::Bank,
bank_forks::BankForks,
commitment::VOTE_THRESHOLD_SIZE,
epoch_stakes::{EpochAuthorizedVoters, EpochStakes},
stakes::Stakes,
vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
@@ -44,12 +46,17 @@ use std::{
};
// Map from a vote account to the authorized voter for an epoch
pub type ThresholdConfirmedSlots = Vec<(Slot, Hash)>;
pub type VerifiedLabelVotePacketsSender = CrossbeamSender<Vec<(CrdsValueLabel, Slot, Packets)>>;
pub type VerifiedLabelVotePacketsReceiver = CrossbeamReceiver<Vec<(CrdsValueLabel, Slot, Packets)>>;
pub type VerifiedVoteTransactionsSender = CrossbeamSender<Vec<Transaction>>;
pub type VerifiedVoteTransactionsReceiver = CrossbeamReceiver<Vec<Transaction>>;
pub type VerifiedVoteSender = CrossbeamSender<(Pubkey, Vec<Slot>)>;
pub type VerifiedVoteReceiver = CrossbeamReceiver<(Pubkey, Vec<Slot>)>;
pub type GossipDuplicateConfirmedSlotsSender = CrossbeamSender<ThresholdConfirmedSlots>;
pub type GossipDuplicateConfirmedSlotsReceiver = CrossbeamReceiver<ThresholdConfirmedSlots>;
const THRESHOLDS_TO_CHECK: [f64; 2] = [DUPLICATE_THRESHOLD, VOTE_THRESHOLD_SIZE];
#[derive(Default)]
pub struct SlotVoteTracker {
@@ -245,6 +252,7 @@ impl ClusterInfoVoteListener {
replay_votes_receiver: ReplayVoteReceiver,
blockstore: Arc<Blockstore>,
bank_notification_sender: Option<BankNotificationSender>,
cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender,
) -> Self {
let exit_ = exit.clone();
@@ -291,6 +299,7 @@ impl ClusterInfoVoteListener {
replay_votes_receiver,
blockstore,
bank_notification_sender,
cluster_confirmed_slot_sender,
);
})
.unwrap();
@@ -406,6 +415,7 @@ impl ClusterInfoVoteListener {
}
}
#[allow(clippy::too_many_arguments)]
fn process_votes_loop(
exit: Arc<AtomicBool>,
gossip_vote_txs_receiver: VerifiedVoteTransactionsReceiver,
@@ -416,10 +426,12 @@ impl ClusterInfoVoteListener {
replay_votes_receiver: ReplayVoteReceiver,
blockstore: Arc<Blockstore>,
bank_notification_sender: Option<BankNotificationSender>,
cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender,
) -> Result<()> {
let mut confirmation_verifier =
OptimisticConfirmationVerifier::new(bank_forks.read().unwrap().root());
let mut last_process_root = Instant::now();
let cluster_confirmed_slot_sender = Some(cluster_confirmed_slot_sender);
loop {
if exit.load(Ordering::Relaxed) {
return Ok(());
@@ -448,10 +460,12 @@ impl ClusterInfoVoteListener {
&verified_vote_sender,
&replay_votes_receiver,
&bank_notification_sender,
&cluster_confirmed_slot_sender,
);
match confirmed_slots {
Ok(confirmed_slots) => {
confirmation_verifier.add_new_optimistic_confirmed_slots(confirmed_slots);
confirmation_verifier
.add_new_optimistic_confirmed_slots(confirmed_slots.clone());
}
Err(e) => match e {
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)
@@ -472,7 +486,7 @@ impl ClusterInfoVoteListener {
subscriptions: &RpcSubscriptions,
verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVoteReceiver,
) -> Result<Vec<(Slot, Hash)>> {
) -> Result<ThresholdConfirmedSlots> {
Self::listen_and_confirm_votes(
gossip_vote_txs_receiver,
vote_tracker,
@@ -481,6 +495,7 @@ impl ClusterInfoVoteListener {
verified_vote_sender,
replay_votes_receiver,
&None,
&None,
)
}
@@ -492,7 +507,8 @@ impl ClusterInfoVoteListener {
verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVoteReceiver,
bank_notification_sender: &Option<BankNotificationSender>,
) -> Result<Vec<(Slot, Hash)>> {
cluster_confirmed_slot_sender: &Option<GossipDuplicateConfirmedSlotsSender>,
) -> Result<ThresholdConfirmedSlots> {
let mut sel = Select::new();
sel.recv(gossip_vote_txs_receiver);
sel.recv(replay_votes_receiver);
@@ -521,6 +537,7 @@ impl ClusterInfoVoteListener {
subscriptions,
verified_vote_sender,
bank_notification_sender,
cluster_confirmed_slot_sender,
));
} else {
remaining_wait_time = remaining_wait_time
@@ -539,9 +556,10 @@ impl ClusterInfoVoteListener {
subscriptions: &RpcSubscriptions,
verified_vote_sender: &VerifiedVoteSender,
diff: &mut HashMap<Slot, HashMap<Pubkey, bool>>,
new_optimistic_confirmed_slots: &mut Vec<(Slot, Hash)>,
new_optimistic_confirmed_slots: &mut ThresholdConfirmedSlots,
is_gossip_vote: bool,
bank_notification_sender: &Option<BankNotificationSender>,
cluster_confirmed_slot_sender: &Option<GossipDuplicateConfirmedSlotsSender>,
) {
if vote.slots.is_empty() {
return;
@@ -577,7 +595,7 @@ impl ClusterInfoVoteListener {
// Fast track processing of the last slot in a vote transaction
// so that notifications for optimistic confirmation can be sent
// as soon as possible.
let (is_confirmed, is_new) = Self::track_optimistic_confirmation_vote(
let (reached_threshold_results, is_new) = Self::track_optimistic_confirmation_vote(
vote_tracker,
last_vote_slot,
last_vote_hash,
@@ -586,7 +604,12 @@ impl ClusterInfoVoteListener {
total_stake,
);
if is_confirmed {
if reached_threshold_results[0] {
if let Some(sender) = cluster_confirmed_slot_sender {
let _ = sender.send(vec![(last_vote_slot, last_vote_hash)]);
}
}
if reached_threshold_results[1] {
new_optimistic_confirmed_slots.push((last_vote_slot, last_vote_hash));
// Notify subscribers about new optimistic confirmation
if let Some(sender) = bank_notification_sender {
@@ -670,7 +693,8 @@ impl ClusterInfoVoteListener {
subscriptions: &RpcSubscriptions,
verified_vote_sender: &VerifiedVoteSender,
bank_notification_sender: &Option<BankNotificationSender>,
) -> Vec<(Slot, Hash)> {
cluster_confirmed_slot_sender: &Option<GossipDuplicateConfirmedSlotsSender>,
) -> ThresholdConfirmedSlots {
let mut diff: HashMap<Slot, HashMap<Pubkey, bool>> = HashMap::new();
let mut new_optimistic_confirmed_slots = vec![];
@@ -697,6 +721,7 @@ impl ClusterInfoVoteListener {
&mut new_optimistic_confirmed_slots,
is_gossip,
bank_notification_sender,
cluster_confirmed_slot_sender,
);
}
@@ -756,14 +781,14 @@ impl ClusterInfoVoteListener {
pubkey: Pubkey,
stake: u64,
total_epoch_stake: u64,
) -> (bool, bool) {
) -> (Vec<bool>, bool) {
let slot_tracker = vote_tracker.get_or_insert_slot_tracker(slot);
// Insert vote and check for optimistic confirmation
let mut w_slot_tracker = slot_tracker.write().unwrap();
w_slot_tracker
.get_or_insert_optimistic_votes_tracker(hash)
.add_vote_pubkey(pubkey, stake, total_epoch_stake)
.add_vote_pubkey(pubkey, stake, total_epoch_stake, &THRESHOLDS_TO_CHECK)
}
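A rough sketch of how the threshold vector maps to the Vec<bool> returned above (hypothetical helper and illustrative numbers only; the real stake accounting lives in the vote stake tracker): index 0 corresponds to DUPLICATE_THRESHOLD and index 1 to VOTE_THRESHOLD_SIZE, matching THRESHOLDS_TO_CHECK above.
// Hypothetical sketch: evaluate each threshold in the order given, so callers can
// index the result the same way THRESHOLDS_TO_CHECK is indexed above.
fn reached_thresholds(voted_stake: u64, total_stake: u64, thresholds: &[f64]) -> Vec<bool> {
    thresholds
        .iter()
        .map(|threshold| voted_stake as f64 > threshold * total_stake as f64)
        .collect()
}

fn main() {
    // Illustrative values only (e.g. a 52% duplicate threshold and a 2/3 vote threshold).
    let results = reached_thresholds(60, 100, &[0.52, 2.0 / 3.0]);
    assert_eq!(results, vec![true, false]);
}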
fn sum_stake(sum: &mut u64, epoch_stakes: Option<&EpochStakes>, pubkey: &Pubkey) {
@@ -1005,6 +1030,7 @@ mod tests {
&verified_vote_sender,
&replay_votes_receiver,
&None,
&None,
)
.unwrap();
@@ -1034,6 +1060,7 @@ mod tests {
&verified_vote_sender,
&replay_votes_receiver,
&None,
&None,
)
.unwrap();
@@ -1112,6 +1139,7 @@ mod tests {
&verified_vote_sender,
&replay_votes_receiver,
&None,
&None,
)
.unwrap();
@@ -1231,6 +1259,7 @@ mod tests {
&verified_vote_sender,
&replay_votes_receiver,
&None,
&None,
)
.unwrap();
@@ -1326,6 +1355,7 @@ mod tests {
&verified_vote_sender,
&replay_votes_receiver,
&None,
&None,
);
}
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
@@ -1470,6 +1500,7 @@ mod tests {
&subscriptions,
&verified_vote_sender,
&None,
&None,
);
// Setup next epoch
@@ -1524,6 +1555,7 @@ mod tests {
&subscriptions,
&verified_vote_sender,
&None,
&None,
);
}

View File

@@ -0,0 +1,751 @@
use crate::{
fork_choice::ForkChoice, heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
progress_map::ProgressMap,
};
use solana_sdk::{clock::Slot, hash::Hash};
use std::collections::{BTreeMap, HashMap, HashSet};
pub type GossipDuplicateConfirmedSlots = BTreeMap<Slot, Hash>;
type SlotStateHandler = fn(Slot, &Hash, Option<&Hash>, bool, bool) -> Vec<ResultingStateChange>;
#[derive(PartialEq, Debug)]
pub enum SlotStateUpdate {
Frozen,
DuplicateConfirmed,
Dead,
Duplicate,
}
#[derive(PartialEq, Debug)]
pub enum ResultingStateChange {
MarkSlotDuplicate,
RepairDuplicateConfirmedVersion(Hash),
DuplicateConfirmedSlotMatchesCluster,
}
impl SlotStateUpdate {
fn to_handler(&self) -> SlotStateHandler {
match self {
SlotStateUpdate::Dead => on_dead_slot,
SlotStateUpdate::Frozen => on_frozen_slot,
SlotStateUpdate::DuplicateConfirmed => on_cluster_update,
SlotStateUpdate::Duplicate => on_cluster_update,
}
}
}
fn repair_correct_version(_slot: Slot, _hash: &Hash) {}
fn on_dead_slot(
slot: Slot,
bank_frozen_hash: &Hash,
cluster_duplicate_confirmed_hash: Option<&Hash>,
_is_slot_duplicate: bool,
is_dead: bool,
) -> Vec<ResultingStateChange> {
assert!(is_dead);
// Bank should not have been frozen if the slot was marked dead
assert_eq!(*bank_frozen_hash, Hash::default());
if let Some(cluster_duplicate_confirmed_hash) = cluster_duplicate_confirmed_hash {
// If the cluster duplicate_confirmed some version of this slot, then
// there's another version
warn!(
"Cluster duplicate_confirmed slot {} with hash {}, but we marked slot dead",
slot, cluster_duplicate_confirmed_hash
);
// No need to check `is_slot_duplicate` and modify fork choice as dead slots
// are never frozen, and thus never added to fork choice. The state change for
// `MarkSlotDuplicate` will try to modify fork choice, but won't find the slot
// in the fork choice tree, so is equivalent to a no-op.
return vec![
ResultingStateChange::MarkSlotDuplicate,
ResultingStateChange::RepairDuplicateConfirmedVersion(
*cluster_duplicate_confirmed_hash,
),
];
}
vec![]
}
fn on_frozen_slot(
slot: Slot,
bank_frozen_hash: &Hash,
cluster_duplicate_confirmed_hash: Option<&Hash>,
is_slot_duplicate: bool,
is_dead: bool,
) -> Vec<ResultingStateChange> {
// If a slot is marked frozen, the bank hash should not be default,
// and the slot should not be dead
assert!(*bank_frozen_hash != Hash::default());
assert!(!is_dead);
if let Some(cluster_duplicate_confirmed_hash) = cluster_duplicate_confirmed_hash {
// If the cluster duplicate_confirmed some version of this slot, then
// confirm our version agrees with the cluster,
if cluster_duplicate_confirmed_hash != bank_frozen_hash {
// If the versions do not match, modify fork choice rule
// to exclude our version from being voted on and also
// repair correct version
warn!(
"Cluster duplicate_confirmed slot {} with hash {}, but we froze slot with hash {}",
slot, cluster_duplicate_confirmed_hash, bank_frozen_hash
);
return vec![
ResultingStateChange::MarkSlotDuplicate,
ResultingStateChange::RepairDuplicateConfirmedVersion(
*cluster_duplicate_confirmed_hash,
),
];
} else {
// If the versions match, then add the slot to the candidate
// set to account for the case where it was removed earlier
// by the `on_duplicate_slot()` handler
return vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster];
}
}
if is_slot_duplicate {
// If we detected a duplicate, but have not yet seen any version
// of the slot duplicate_confirmed (i.e. block above did not execute), then
// remove the slot from fork choice until we get confirmation.
// If we get here, we either detected duplicate from
// 1) WindowService
// 2) A gossip duplicate_confirmed version that didn't match our frozen
// version.
// In both cases, mark the progress map for this slot as duplicate
return vec![ResultingStateChange::MarkSlotDuplicate];
}
vec![]
}
// Called when we receive either:
// 1) A duplicate slot signal from WindowService,
// 2) Confirmation of a slot by observing votes from replay or gossip.
//
// This signals external information about this slot, which affects
// this validator's understanding of the validity of this slot
fn on_cluster_update(
slot: Slot,
bank_frozen_hash: &Hash,
cluster_duplicate_confirmed_hash: Option<&Hash>,
is_slot_duplicate: bool,
is_dead: bool,
) -> Vec<ResultingStateChange> {
if is_dead {
on_dead_slot(
slot,
bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead,
)
} else if *bank_frozen_hash != Hash::default() {
// This case is mutually exclusive with the is_dead case above because if a slot is dead,
// it cannot have been frozen, and thus cannot have a non-default bank hash.
on_frozen_slot(
slot,
bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead,
)
} else {
vec![]
}
}
fn get_cluster_duplicate_confirmed_hash<'a>(
slot: Slot,
gossip_duplicate_confirmed_hash: Option<&'a Hash>,
local_frozen_hash: &'a Hash,
is_local_replay_duplicate_confirmed: bool,
) -> Option<&'a Hash> {
let local_duplicate_confirmed_hash = if is_local_replay_duplicate_confirmed {
// If local replay has duplicate_confirmed this slot, this slot must have
// descendants with votes for this slot, hence this slot must be
// frozen.
assert!(*local_frozen_hash != Hash::default());
Some(local_frozen_hash)
} else {
None
};
match (
local_duplicate_confirmed_hash,
gossip_duplicate_confirmed_hash,
) {
(Some(local_duplicate_confirmed_hash), Some(gossip_duplicate_confirmed_hash)) => {
if local_duplicate_confirmed_hash != gossip_duplicate_confirmed_hash {
error!(
"For slot {}, the gossip duplicate confirmed hash {}, is not equal
to the confirmed hash we replayed: {}",
slot, gossip_duplicate_confirmed_hash, local_duplicate_confirmed_hash
);
}
Some(&local_frozen_hash)
}
(Some(local_frozen_hash), None) => Some(local_frozen_hash),
_ => gossip_duplicate_confirmed_hash,
}
}
fn apply_state_changes(
slot: Slot,
progress: &mut ProgressMap,
fork_choice: &mut HeaviestSubtreeForkChoice,
ancestors: &HashMap<Slot, HashSet<Slot>>,
descendants: &HashMap<Slot, HashSet<Slot>>,
state_changes: Vec<ResultingStateChange>,
) {
for state_change in state_changes {
match state_change {
ResultingStateChange::MarkSlotDuplicate => {
progress.set_unconfirmed_duplicate_slot(
slot,
descendants.get(&slot).unwrap_or(&HashSet::default()),
);
fork_choice.mark_fork_invalid_candidate(slot);
}
ResultingStateChange::RepairDuplicateConfirmedVersion(
cluster_duplicate_confirmed_hash,
) => {
// TODO: Should consider moving the updating of the duplicate slots in the
// progress map from ReplayStage::confirm_forks to here.
repair_correct_version(slot, &cluster_duplicate_confirmed_hash);
}
ResultingStateChange::DuplicateConfirmedSlotMatchesCluster => {
progress.set_confirmed_duplicate_slot(
slot,
ancestors.get(&slot).unwrap_or(&HashSet::default()),
descendants.get(&slot).unwrap_or(&HashSet::default()),
);
fork_choice.mark_fork_valid_candidate(slot);
}
}
}
}
pub(crate) fn check_slot_agrees_with_cluster(
slot: Slot,
root: Slot,
frozen_hash: Option<Hash>,
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
ancestors: &HashMap<Slot, HashSet<Slot>>,
descendants: &HashMap<Slot, HashSet<Slot>>,
progress: &mut ProgressMap,
fork_choice: &mut HeaviestSubtreeForkChoice,
slot_state_update: SlotStateUpdate,
) {
if slot <= root {
return;
}
if frozen_hash.is_none() {
// If the bank doesn't even exist in BankForks yet,
// then there's nothing to do as replay of the slot
// hasn't even started
return;
}
let frozen_hash = frozen_hash.unwrap();
let gossip_duplicate_confirmed_hash = gossip_duplicate_confirmed_slots.get(&slot);
let is_local_replay_duplicate_confirmed = progress.is_duplicate_confirmed(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map");
let cluster_duplicate_confirmed_hash = get_cluster_duplicate_confirmed_hash(
slot,
gossip_duplicate_confirmed_hash,
&frozen_hash,
is_local_replay_duplicate_confirmed,
);
let mut is_slot_duplicate =
progress.is_unconfirmed_duplicate(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map");
if matches!(slot_state_update, SlotStateUpdate::Duplicate) {
if is_slot_duplicate {
// Already processed duplicate signal for this slot, no need to continue
return;
} else {
// Otherwise, mark the slot as duplicate so the appropriate state changes
// will trigger
is_slot_duplicate = true;
}
}
let is_dead = progress.is_dead(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map");
let state_handler = slot_state_update.to_handler();
let state_changes = state_handler(
slot,
&frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead,
);
apply_state_changes(
slot,
progress,
fork_choice,
ancestors,
descendants,
state_changes,
);
}
#[cfg(test)]
mod test {
use super::*;
use crate::consensus::test::VoteSimulator;
use solana_runtime::bank_forks::BankForks;
use std::sync::RwLock;
use trees::tr;
struct InitialState {
heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice,
progress: ProgressMap,
ancestors: HashMap<Slot, HashSet<Slot>>,
descendants: HashMap<Slot, HashSet<Slot>>,
slot: Slot,
bank_forks: RwLock<BankForks>,
}
fn setup() -> InitialState {
// Create simple fork 0 -> 1 -> 2 -> 3
let forks = tr(0) / (tr(1) / (tr(2) / tr(3)));
let mut vote_simulator = VoteSimulator::new(1);
vote_simulator.fill_bank_forks(forks, &HashMap::new());
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
InitialState {
heaviest_subtree_fork_choice: vote_simulator.heaviest_subtree_fork_choice,
progress: vote_simulator.progress,
ancestors,
descendants,
slot: 0,
bank_forks: vote_simulator.bank_forks,
}
}
#[test]
fn test_frozen_duplicate() {
// Common state
let slot = 0;
let cluster_duplicate_confirmed_hash = None;
let is_dead = false;
// Slot is not detected as duplicate yet
let mut is_slot_duplicate = false;
// Simulate freezing the bank, add a
// new non-default hash, should return
// no actionable state changes yet
let bank_frozen_hash = Hash::new_unique();
assert!(on_frozen_slot(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
)
.is_empty());
// Now mark the slot as duplicate, should
// trigger marking the slot as a duplicate
is_slot_duplicate = true;
assert_eq!(
on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
),
vec![ResultingStateChange::MarkSlotDuplicate]
);
}
#[test]
fn test_frozen_duplicate_confirmed() {
// Common state
let slot = 0;
let is_slot_duplicate = false;
let is_dead = false;
// No cluster duplicate_confirmed hash yet
let mut cluster_duplicate_confirmed_hash = None;
// Simulate freezing the bank, add a
// new non-default hash, should return
// no actionable state changes
let bank_frozen_hash = Hash::new_unique();
assert!(on_frozen_slot(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
)
.is_empty());
// Now mark the same frozen slot hash as duplicate_confirmed by the cluster,
// should just confirm the slot
cluster_duplicate_confirmed_hash = Some(&bank_frozen_hash);
assert_eq!(
on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
),
vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster,]
);
// If the cluster_duplicate_confirmed_hash does not match, then we
// should trigger marking the slot as a duplicate, and also
// try to repair correct version
let mismatched_hash = Hash::new_unique();
cluster_duplicate_confirmed_hash = Some(&mismatched_hash);
assert_eq!(
on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
),
vec![
ResultingStateChange::MarkSlotDuplicate,
ResultingStateChange::RepairDuplicateConfirmedVersion(mismatched_hash),
]
);
}
#[test]
fn test_duplicate_frozen_duplicate_confirmed() {
// Common state
let slot = 0;
let is_dead = false;
let is_slot_duplicate = true;
// Bank is not frozen yet
let mut cluster_duplicate_confirmed_hash = None;
let mut bank_frozen_hash = Hash::default();
// Mark the slot as duplicate. Because our version of the slot is not
// frozen yet, we don't know which version we have, so no action is
// taken.
assert!(on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
)
.is_empty());
// Freeze the bank, should now mark the slot as duplicate since we have
// not seen confirmation yet.
bank_frozen_hash = Hash::new_unique();
assert_eq!(
on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
),
vec![ResultingStateChange::MarkSlotDuplicate,]
);
// If the cluster_duplicate_confirmed_hash matches, we just confirm
// the slot
cluster_duplicate_confirmed_hash = Some(&bank_frozen_hash);
assert_eq!(
on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
),
vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster,]
);
// If the cluster_duplicate_confirmed_hash does not match, then we
// should trigger marking the slot as a duplicate, and also
// try to repair correct version
let mismatched_hash = Hash::new_unique();
cluster_duplicate_confirmed_hash = Some(&mismatched_hash);
assert_eq!(
on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
),
vec![
ResultingStateChange::MarkSlotDuplicate,
ResultingStateChange::RepairDuplicateConfirmedVersion(mismatched_hash),
]
);
}
#[test]
fn test_duplicate_duplicate_confirmed() {
let slot = 0;
let correct_hash = Hash::new_unique();
let cluster_duplicate_confirmed_hash = Some(&correct_hash);
let is_dead = false;
// Bank is not frozen yet
let bank_frozen_hash = Hash::default();
// Because our version of the slot is not frozen yet, even though
// the cluster has duplicate_confirmed a hash, we don't know which version we
// have, so no action is taken.
let is_slot_duplicate = true;
assert!(on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
)
.is_empty());
}
#[test]
fn test_duplicate_dead() {
let slot = 0;
let cluster_duplicate_confirmed_hash = None;
let is_dead = true;
// Bank is not frozen yet
let bank_frozen_hash = Hash::default();
// Even though our version of the slot is dead, the cluster has not
// duplicate_confirmed a hash, so we don't know which version we have and no
// action is taken.
let is_slot_duplicate = true;
assert!(on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
)
.is_empty());
}
#[test]
fn test_duplicate_confirmed_dead_duplicate() {
let slot = 0;
let correct_hash = Hash::new_unique();
// Cluster has duplicate_confirmed some version of the slot
let cluster_duplicate_confirmed_hash = Some(&correct_hash);
// Our version of the slot is dead
let is_dead = true;
let bank_frozen_hash = Hash::default();
// Even if the duplicate signal hasn't come in yet,
// we can deduce the slot is duplicate AND we have
// the wrong version, so we should mark the slot as duplicate
// and repair the correct version
let mut is_slot_duplicate = false;
assert_eq!(
on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
),
vec![
ResultingStateChange::MarkSlotDuplicate,
ResultingStateChange::RepairDuplicateConfirmedVersion(correct_hash),
]
);
// If the duplicate signal comes in, nothing should change
is_slot_duplicate = true;
assert_eq!(
on_cluster_update(
slot,
&bank_frozen_hash,
cluster_duplicate_confirmed_hash,
is_slot_duplicate,
is_dead
),
vec![
ResultingStateChange::MarkSlotDuplicate,
ResultingStateChange::RepairDuplicateConfirmedVersion(correct_hash),
]
);
}
#[test]
fn test_apply_state_changes() {
// Common state
let InitialState {
mut heaviest_subtree_fork_choice,
mut progress,
ancestors,
descendants,
slot,
..
} = setup();
// MarkSlotDuplicate should mark progress map and remove
// the slot from fork choice
apply_state_changes(
slot,
&mut progress,
&mut heaviest_subtree_fork_choice,
&ancestors,
&descendants,
vec![ResultingStateChange::MarkSlotDuplicate],
);
assert!(!heaviest_subtree_fork_choice
.is_candidate_slot(slot)
.unwrap());
for child_slot in descendants
.get(&slot)
.unwrap()
.iter()
.chain(std::iter::once(&slot))
{
assert_eq!(
progress
.latest_unconfirmed_duplicate_ancestor(*child_slot)
.unwrap(),
slot
);
}
// DuplicateConfirmedSlotMatchesCluster should re-enable fork choice
apply_state_changes(
slot,
&mut progress,
&mut heaviest_subtree_fork_choice,
&ancestors,
&descendants,
vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster],
);
for child_slot in descendants
.get(&slot)
.unwrap()
.iter()
.chain(std::iter::once(&slot))
{
assert!(progress
.latest_unconfirmed_duplicate_ancestor(*child_slot)
.is_none());
}
assert!(heaviest_subtree_fork_choice
.is_candidate_slot(slot)
.unwrap());
}
#[test]
fn test_state_ancestor_confirmed_descendant_duplicate() {
// Common state
let InitialState {
mut heaviest_subtree_fork_choice,
mut progress,
ancestors,
descendants,
bank_forks,
..
} = setup();
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 3);
let root = 0;
let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();
// Mark slot 2 as duplicate confirmed
let slot2_hash = bank_forks.read().unwrap().get(2).unwrap().hash();
gossip_duplicate_confirmed_slots.insert(2, slot2_hash);
check_slot_agrees_with_cluster(
2,
root,
Some(slot2_hash),
&gossip_duplicate_confirmed_slots,
&ancestors,
&descendants,
&mut progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::DuplicateConfirmed,
);
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 3);
// Mark 3 as duplicate, should not remove slot 2 from fork choice
check_slot_agrees_with_cluster(
3,
root,
Some(bank_forks.read().unwrap().get(3).unwrap().hash()),
&gossip_duplicate_confirmed_slots,
&ancestors,
&descendants,
&mut progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::Duplicate,
);
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 2);
}
#[test]
fn test_state_ancestor_duplicate_descendant_confirmed() {
// Common state
let InitialState {
mut heaviest_subtree_fork_choice,
mut progress,
ancestors,
descendants,
bank_forks,
..
} = setup();
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 3);
let root = 0;
let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();
// Mark 2 as duplicate confirmed
check_slot_agrees_with_cluster(
2,
root,
Some(bank_forks.read().unwrap().get(2).unwrap().hash()),
&gossip_duplicate_confirmed_slots,
&ancestors,
&descendants,
&mut progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::Duplicate,
);
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 1);
// Mark slot 3 as duplicate confirmed, should mark slot 2 as duplicate confirmed as well
let slot3_hash = bank_forks.read().unwrap().get(3).unwrap().hash();
gossip_duplicate_confirmed_slots.insert(3, slot3_hash);
check_slot_agrees_with_cluster(
3,
root,
Some(slot3_hash),
&gossip_duplicate_confirmed_slots,
&ancestors,
&descendants,
&mut progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::DuplicateConfirmed,
);
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 3);
}
}

View File

@@ -37,6 +37,7 @@ pub enum SwitchForkDecision {
SwitchProof(Hash),
SameFork,
FailedSwitchThreshold(u64, u64),
FailedSwitchDuplicateRollback(Slot),
}
impl SwitchForkDecision {
@@ -51,6 +52,7 @@ impl SwitchForkDecision {
assert_ne!(*total_stake, 0);
None
}
SwitchForkDecision::FailedSwitchDuplicateRollback(_) => None,
SwitchForkDecision::SameFork => Some(vote_instruction::vote(
vote_account_pubkey,
authorized_voter_pubkey,
@@ -68,7 +70,12 @@ impl SwitchForkDecision {
}
pub fn can_vote(&self) -> bool {
!matches!(self, SwitchForkDecision::FailedSwitchThreshold(_, _))
match self {
SwitchForkDecision::FailedSwitchThreshold(_, _) => false,
SwitchForkDecision::FailedSwitchDuplicateRollback(_) => false,
SwitchForkDecision::SameFork => true,
SwitchForkDecision::SwitchProof(_) => true,
}
}
}
@@ -383,6 +390,17 @@ impl Tower {
slot
}
pub fn record_bank_vote(
&mut self,
bank: &Bank,
vote_account_pubkey: &Pubkey,
) -> (Option<Slot>, Vec<Slot> /*VoteState.tower*/) {
let (vote, tower_slots) = self.new_vote_from_bank(bank, vote_account_pubkey);
let new_root = self.record_bank_vote_update_lockouts(vote);
(new_root, tower_slots)
}
pub fn new_vote_from_bank(
&self,
bank: &Bank,
@@ -392,7 +410,7 @@ impl Tower {
Self::new_vote(&self.lockouts, bank.slot(), bank.hash(), voted_slot)
}
pub fn record_bank_vote(&mut self, vote: Vote) -> Option<Slot> {
pub fn record_bank_vote_update_lockouts(&mut self, vote: Vote) -> Option<Slot> {
let slot = vote.last_voted_slot().unwrap_or(0);
trace!("{} record_vote for {}", self.node_pubkey, slot);
let old_root = self.root();
@@ -411,7 +429,7 @@ impl Tower {
#[cfg(test)]
pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option<Slot> {
let vote = Vote::new(vec![slot], hash);
self.record_bank_vote(vote)
self.record_bank_vote_update_lockouts(vote)
}
pub fn last_voted_slot(&self) -> Option<Slot> {
@@ -575,9 +593,13 @@ impl Tower {
SwitchForkDecision::FailedSwitchThreshold(0, total_stake)
};
let rollback_due_to_duplicate_ancestor = |latest_duplicate_ancestor| {
SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor)
};
let last_vote_ancestors =
ancestors.get(&last_voted_slot).unwrap_or_else(|| {
if !self.is_stray_last_vote() {
if self.is_stray_last_vote() {
// Unless the last vote is stray and stale, ancestors.get(last_voted_slot) must
// return Some(_), justifying the panic! here.
// Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None,
@@ -586,9 +608,9 @@ impl Tower {
// In other words, except in the stray case, all other slots have been voted on while
// this validator has been running, so we must be able to fetch ancestors for
// all of them.
panic!("no ancestors found with slot: {}", last_voted_slot);
} else {
empty_ancestors_due_to_minor_unsynced_ledger()
} else {
panic!("no ancestors found with slot: {}", last_voted_slot);
}
});
@@ -601,15 +623,23 @@ impl Tower {
}
if last_vote_ancestors.contains(&switch_slot) {
if !self.is_stray_last_vote() {
panic!(
"Should never consider switching to slot ({}), which is ancestors({:?}) of last vote: {}",
switch_slot,
last_vote_ancestors,
last_voted_slot
);
} else {
if self.is_stray_last_vote() {
return suspended_decision_due_to_major_unsynced_ledger();
} else if let Some(latest_duplicate_ancestor) = progress.latest_unconfirmed_duplicate_ancestor(last_voted_slot) {
// We're rolling back because one of the ancestors of the last vote was a duplicate. In this
// case, it's acceptable if the switch candidate is one of the ancestors of the previous vote;
// just fail the switch check because there's no point in voting on an ancestor. ReplayStage
// should then have a special case to continue building an alternate fork from this ancestor, NOT
// from the `last_voted_slot`. This is in contrast to the usual SwitchFailure, where ReplayStage
// continues to build blocks on the latest vote. See `select_vote_and_reset_forks()` for more details.
return rollback_due_to_duplicate_ancestor(latest_duplicate_ancestor);
} else {
panic!(
"Should never consider switching to ancestor ({}) of last vote: {}, ancestors({:?})",
switch_slot,
last_voted_slot,
last_vote_ancestors,
);
}
}
@@ -1240,7 +1270,7 @@ pub mod test {
cluster_slots::ClusterSlots,
fork_choice::SelectVoteAndResetForkResult,
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
progress_map::ForkProgress,
progress_map::{DuplicateStats, ForkProgress},
replay_stage::{HeaviestForkFailures, ReplayStage},
};
use solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path};
@@ -1265,7 +1295,7 @@ pub mod test {
vote_transaction,
};
use std::{
collections::HashMap,
collections::{BTreeMap, HashMap},
fs::{remove_file, OpenOptions},
io::{Read, Seek, SeekFrom, Write},
sync::RwLock,
@@ -1313,9 +1343,9 @@ pub mod test {
while let Some(visit) = walk.get() {
let slot = visit.node().data;
self.progress
.entry(slot)
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0));
self.progress.entry(slot).or_insert_with(|| {
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0)
});
if self.bank_forks.read().unwrap().get(slot).is_some() {
walk.forward();
continue;
@@ -1395,7 +1425,7 @@ pub mod test {
..
} = ReplayStage::select_vote_and_reset_forks(
&vote_bank,
&None,
None,
&ancestors,
&descendants,
&self.progress,
@@ -1407,8 +1437,9 @@ pub mod test {
if !heaviest_fork_failures.is_empty() {
return heaviest_fork_failures;
}
let vote = tower.new_vote_from_bank(&vote_bank, &my_vote_pubkey).0;
if let Some(new_root) = tower.record_bank_vote(vote) {
let (new_root, _) = tower.record_bank_vote(&vote_bank, &my_vote_pubkey);
if let Some(new_root) = new_root {
self.set_root(new_root);
}
@@ -1423,6 +1454,9 @@ pub mod test {
&AbsRequestSender::default(),
None,
&mut self.heaviest_subtree_fork_choice,
&mut true,
&mut Vec::new(),
&mut BTreeMap::new(),
)
}
@@ -1457,7 +1491,9 @@ pub mod test {
) {
self.progress
.entry(slot)
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0))
.or_insert_with(|| {
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0)
})
.fork_stats
.lockout_intervals
.entry(lockout_interval.1)
@@ -1564,7 +1600,14 @@ pub mod test {
let mut progress = ProgressMap::default();
progress.insert(
0,
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
ForkProgress::new(
bank0.last_blockhash(),
None,
DuplicateStats::default(),
None,
0,
0,
),
);
let bank_forks = BankForks::new(bank0);
let heaviest_subtree_fork_choice =
@@ -1604,6 +1647,12 @@ pub mod test {
assert!(decision
.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default())
.is_none());
decision = SwitchForkDecision::FailedSwitchDuplicateRollback(0);
assert!(decision
.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default())
.is_none());
decision = SwitchForkDecision::SameFork;
assert_eq!(
decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
@@ -1613,6 +1662,7 @@ pub mod test {
vote.clone(),
))
);
decision = SwitchForkDecision::SwitchProof(Hash::default());
assert_eq!(
decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
@@ -1655,11 +1705,20 @@ pub mod test {
}
#[test]
fn test_switch_threshold() {
fn test_switch_threshold_duplicate_rollback() {
run_test_switch_threshold_duplicate_rollback(false);
}
#[test]
#[should_panic]
fn test_switch_threshold_duplicate_rollback_panic() {
run_test_switch_threshold_duplicate_rollback(true);
}
fn setup_switch_test(num_accounts: usize) -> (Arc<Bank>, VoteSimulator, u64) {
// Init state
let mut vote_simulator = VoteSimulator::new(2);
let my_pubkey = vote_simulator.node_pubkeys[0];
let other_vote_account = vote_simulator.vote_pubkeys[1];
assert!(num_accounts > 1);
let mut vote_simulator = VoteSimulator::new(num_accounts);
let bank0 = vote_simulator
.bank_forks
.read()
@@ -1690,6 +1749,82 @@ pub mod test {
for (_, fork_progress) in vote_simulator.progress.iter_mut() {
fork_progress.fork_stats.computed = true;
}
(bank0, vote_simulator, total_stake)
}
fn run_test_switch_threshold_duplicate_rollback(should_panic: bool) {
let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
let mut tower = Tower::new_with_key(&vote_simulator.node_pubkeys[0]);
// Last vote is 47
tower.record_vote(47, Hash::default());
// Trying to switch to an ancestor of the last vote should panic
// unless the current vote has a duplicate ancestor
let ancestor_of_voted_slot = 43;
let duplicate_ancestor1 = 44;
let duplicate_ancestor2 = 45;
vote_simulator.progress.set_unconfirmed_duplicate_slot(
duplicate_ancestor1,
&descendants.get(&duplicate_ancestor1).unwrap(),
);
vote_simulator.progress.set_unconfirmed_duplicate_slot(
duplicate_ancestor2,
&descendants.get(&duplicate_ancestor2).unwrap(),
);
assert_eq!(
tower.check_switch_threshold(
ancestor_of_voted_slot,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
);
let mut confirm_ancestors = vec![duplicate_ancestor1];
if should_panic {
// Adding the last duplicate ancestor will
// 1) Cause loop below to confirm last ancestor
// 2) Check switch threshold on a vote ancestor when there
// are no duplicates on that fork, which will cause a panic
confirm_ancestors.push(duplicate_ancestor2);
}
for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() {
vote_simulator.progress.set_confirmed_duplicate_slot(
duplicate_ancestor,
ancestors.get(&duplicate_ancestor).unwrap(),
&descendants.get(&duplicate_ancestor).unwrap(),
);
let res = tower.check_switch_threshold(
ancestor_of_voted_slot,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
);
if i == 0 {
assert_eq!(
res,
SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
);
}
}
}
#[test]
fn test_switch_threshold() {
let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let mut descendants = vote_simulator
.bank_forks
@@ -1697,7 +1832,8 @@ pub mod test {
.unwrap()
.descendants()
.clone();
let mut tower = Tower::new_with_key(&my_pubkey);
let mut tower = Tower::new_with_key(&vote_simulator.node_pubkeys[0]);
let other_vote_account = vote_simulator.vote_pubkeys[1];
// Last vote is 47
tower.record_vote(47, Hash::default());

View File

@@ -264,6 +264,11 @@ impl Crds {
.map(move |i| self.table.index(*i))
}
/// Returns number of known pubkeys (network size).
pub(crate) fn num_nodes(&self) -> usize {
self.records.len()
}
pub fn len(&self) -> usize {
self.table.len()
}
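A rough illustration of why num_nodes differs from len (hypothetical mini-types, not the real Crds internals): many CRDS values can share one origin pubkey, so len over-counts network size. The next diff relies on this when it swaps pull_request_time.len() for crds.num_nodes() as the network-size estimate.
use std::collections::{HashMap, HashSet};

// Hypothetical mini-model: `table` holds every CRDS value, `records` groups
// value indices by origin pubkey.
struct MiniCrds {
    table: Vec<(&'static str, &'static str)>,       // (origin pubkey, value kind)
    records: HashMap<&'static str, HashSet<usize>>, // origin pubkey -> value indices
}

impl MiniCrds {
    fn len(&self) -> usize {
        self.table.len() // all values
    }
    fn num_nodes(&self) -> usize {
        self.records.len() // unique origin pubkeys
    }
}

fn main() {
    let table = vec![
        ("node_a", "ContactInfo"),
        ("node_a", "Vote"),
        ("node_a", "LowestSlot"),
        ("node_b", "ContactInfo"),
    ];
    let mut records: HashMap<&'static str, HashSet<usize>> = HashMap::new();
    for (i, (origin, _)) in table.iter().enumerate() {
        records.entry(*origin).or_default().insert(i);
    }
    let crds = MiniCrds { table, records };
    assert_eq!(crds.len(), 4);       // four values...
    assert_eq!(crds.num_nodes(), 2); // ...from two nodes
}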

View File

@@ -207,7 +207,7 @@ impl CrdsGossip {
gossip_validators,
&self.id,
self.shred_version,
self.pull.pull_request_time.len(),
self.crds.num_nodes(),
CRDS_GOSSIP_NUM_ACTIVE,
)
}
@@ -341,7 +341,7 @@ impl CrdsGossip {
Self {
crds: self.crds.clone(),
push: self.push.mock_clone(),
pull: self.pull.clone(),
pull: self.pull.mock_clone(),
..*self
}
}

View File

@@ -9,12 +9,16 @@
//! with random hash functions. So each subsequent request will have a different distribution
//! of false positives.
use crate::contact_info::ContactInfo;
use crate::crds::{Crds, VersionedCrdsValue};
use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
use crate::{
cluster_info::CRDS_UNIQUE_PUBKEY_CAPACITY,
contact_info::ContactInfo,
crds::{Crds, VersionedCrdsValue},
crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS},
crds_gossip_error::CrdsGossipError,
crds_value::{CrdsValue, CrdsValueLabel},
};
use itertools::Itertools;
use lru::LruCache;
use rand::distributions::{Distribution, WeightedIndex};
use rand::Rng;
use rayon::{prelude::*, ThreadPool};
@@ -168,10 +172,9 @@ pub struct ProcessPullStats {
pub timeout_count: usize,
}
#[derive(Clone)]
pub struct CrdsGossipPull {
/// timestamp of last request
pub pull_request_time: HashMap<Pubkey, u64>,
pub(crate) pull_request_time: LruCache<Pubkey, u64>,
/// hash and insert time
pub purged_values: VecDeque<(Hash, u64)>,
// Hash value and record time (ms) of the pull responses which failed to be
@@ -188,7 +191,7 @@ impl Default for CrdsGossipPull {
fn default() -> Self {
Self {
purged_values: VecDeque::new(),
pull_request_time: HashMap::new(),
pull_request_time: LruCache::new(CRDS_UNIQUE_PUBKEY_CAPACITY),
failed_inserts: VecDeque::new(),
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
@@ -263,8 +266,12 @@ impl CrdsGossipPull {
})
.map(|item| {
let max_weight = f32::from(u16::max_value()) - 1.0;
let req_time: u64 = *self.pull_request_time.get(&item.id).unwrap_or(&0);
let since = ((now - req_time) / 1024) as u32;
let req_time: u64 = self
.pull_request_time
.peek(&item.id)
.copied()
.unwrap_or_default();
let since = (now.saturating_sub(req_time).min(3600 * 1000) / 1024) as u32;
let stake = get_stake(&item.id, stakes);
let weight = get_weight(max_weight, since, stake);
(weight, item)
@@ -277,7 +284,7 @@ impl CrdsGossipPull {
/// It's important to use the local node's request creation time as the weight
/// instead of the response received time; otherwise failed nodes will increase their weight.
pub fn mark_pull_request_creation_time(&mut self, from: &Pubkey, now: u64) {
self.pull_request_time.insert(*from, now);
self.pull_request_time.put(*from, now);
}
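A worked example of the `since` expression in the pull-request weighting above (hypothetical helper; get_weight, imported from crds_gossip above, is not reproduced here): the age of the last pull request is capped at one hour and bucketed into roughly one-second units before it feeds the weight.
// Hypothetical helper mirroring the `since` expression above.
fn age_bucket(now_ms: u64, last_request_ms: u64) -> u32 {
    (now_ms.saturating_sub(last_request_ms).min(3600 * 1000) / 1024) as u32
}

fn main() {
    let now = 10_000_000;
    assert_eq!(age_bucket(now, now), 0);             // just requested
    assert_eq!(age_bucket(now, now - 600_000), 585); // requested ~10 minutes ago
    assert_eq!(age_bucket(now, 0), 3515);            // never requested; capped at 1 hour
}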
/// Store an old hash in the purged values set
@@ -606,6 +613,20 @@ impl CrdsGossipPull {
stats.success,
)
}
// Only for tests and simulations.
pub(crate) fn mock_clone(&self) -> Self {
let mut pull_request_time = LruCache::new(self.pull_request_time.cap());
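// lru::LruCache::iter() yields entries from most- to least-recently used, so
// re-inserting in reverse keeps the clone's recency order identical to the original.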
for (k, v) in self.pull_request_time.iter().rev() {
pull_request_time.put(*k, *v);
}
Self {
pull_request_time,
purged_values: self.purged_values.clone(),
failed_inserts: self.failed_inserts.clone(),
..*self
}
}
}
#[cfg(test)]
mod test {
@@ -617,8 +638,12 @@ mod test {
use rand::thread_rng;
use rayon::ThreadPoolBuilder;
use solana_perf::test_tx::test_tx;
use solana_sdk::hash::{hash, HASH_BYTES};
use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::{
hash::{hash, HASH_BYTES},
packet::PACKET_DATA_SIZE,
timing::timestamp,
};
use std::iter::repeat_with;
#[test]
fn test_hash_as_u64() {
@@ -1009,6 +1034,41 @@ mod test {
}
}
#[test]
fn test_pull_request_time() {
const NUM_REPS: usize = 2 * CRDS_UNIQUE_PUBKEY_CAPACITY;
let mut rng = rand::thread_rng();
let pubkeys: Vec<_> = repeat_with(Pubkey::new_unique).take(NUM_REPS).collect();
let mut node = CrdsGossipPull::default();
let mut requests = HashMap::new();
let now = timestamp();
for k in 0..NUM_REPS {
let pubkey = pubkeys[rng.gen_range(0, pubkeys.len())];
let now = now + k as u64;
node.mark_pull_request_creation_time(&pubkey, now);
*requests.entry(pubkey).or_default() = now;
}
assert!(node.pull_request_time.len() <= CRDS_UNIQUE_PUBKEY_CAPACITY);
// Assert that timestamps match most recent request.
for (pk, ts) in &node.pull_request_time {
assert_eq!(*ts, requests[pk]);
}
// Assert that most recent pull timestamps are maintained.
let max_ts = requests
.iter()
.filter(|(pk, _)| !node.pull_request_time.contains(*pk))
.map(|(_, ts)| *ts)
.max()
.unwrap();
let min_ts = requests
.iter()
.filter(|(pk, _)| node.pull_request_time.contains(*pk))
.map(|(_, ts)| *ts)
.min()
.unwrap();
assert!(max_ts <= min_ts);
}
#[test]
fn test_generate_pull_responses() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();

View File

@@ -9,6 +9,7 @@
//! 2. The prune set is stored in a Bloom filter.
use crate::{
cluster_info::CRDS_UNIQUE_PUBKEY_CAPACITY,
contact_info::ContactInfo,
crds::{Crds, VersionedCrdsValue},
crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS},
@@ -19,6 +20,7 @@ use crate::{
use bincode::serialized_size;
use indexmap::map::IndexMap;
use itertools::Itertools;
use lru::LruCache;
use rand::{seq::SliceRandom, Rng};
use solana_runtime::bloom::{AtomicBloom, Bloom};
use solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::timestamp};
@@ -39,9 +41,6 @@ pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 3;
// Do not push to peers which have not been updated for this long.
const PUSH_ACTIVE_TIMEOUT_MS: u64 = 60_000;
// 10 minutes
const MAX_PUSHED_TO_TIMEOUT_MS: u64 = 10 * 60 * 1000;
pub struct CrdsGossipPush {
/// max bytes per message
pub max_bytes: usize,
@@ -54,8 +53,7 @@ pub struct CrdsGossipPush {
/// This cache represents a lagging view of which validators
/// currently have this node in their `active_set`
received_cache: HashMap<Pubkey, HashMap<Pubkey, (bool, u64)>>,
last_pushed_to: HashMap<Pubkey, u64>,
last_pushed_to_cleanup_ts: u64,
last_pushed_to: LruCache<Pubkey, u64>,
pub num_active: usize,
pub push_fanout: usize,
pub msg_timeout: u64,
@@ -73,8 +71,7 @@ impl Default for CrdsGossipPush {
active_set: IndexMap::new(),
push_messages: HashMap::new(),
received_cache: HashMap::new(),
last_pushed_to: HashMap::new(),
last_pushed_to_cleanup_ts: 0,
last_pushed_to: LruCache::new(CRDS_UNIQUE_PUBKEY_CAPACITY),
num_active: CRDS_GOSSIP_NUM_ACTIVE,
push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
msg_timeout: CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
@@ -129,7 +126,7 @@ impl CrdsGossipPush {
let mut seed = [0; 32];
rand::thread_rng().fill(&mut seed[..]);
let shuffle = weighted_shuffle(
staked_peers.iter().map(|(_, stake)| *stake).collect_vec(),
&staked_peers.iter().map(|(_, stake)| *stake).collect_vec(),
seed,
);
@@ -269,13 +266,8 @@ impl CrdsGossipPush {
for label in labels {
self.push_messages.remove(&label);
}
for target_pubkey in push_messages.keys() {
*self.last_pushed_to.entry(*target_pubkey).or_insert(0) = now;
}
if now - self.last_pushed_to_cleanup_ts > MAX_PUSHED_TO_TIMEOUT_MS {
self.last_pushed_to
.retain(|_id, timestamp| now - *timestamp > MAX_PUSHED_TO_TIMEOUT_MS);
self.last_pushed_to_cleanup_ts = now;
for target_pubkey in push_messages.keys().copied() {
self.last_pushed_to.put(target_pubkey, now);
}
push_messages
}
@@ -326,7 +318,7 @@ impl CrdsGossipPush {
let mut seed = [0; 32];
rng.fill(&mut seed[..]);
let mut shuffle = weighted_shuffle(
options.iter().map(|weighted| weighted.0).collect_vec(),
&options.iter().map(|weighted| weighted.0).collect_vec(),
seed,
)
.into_iter();
@@ -395,8 +387,12 @@ impl CrdsGossipPush {
})
})
.map(|info| {
let last_pushed_to: u64 = *self.last_pushed_to.get(&info.id).unwrap_or(&0);
let since = (now.saturating_sub(last_pushed_to) / 1024) as u32;
let last_pushed_to = self
.last_pushed_to
.peek(&info.id)
.copied()
.unwrap_or_default();
let since = (now.saturating_sub(last_pushed_to).min(3600 * 1000) / 1024) as u32;
let stake = get_stake(&info.id, stakes);
let weight = get_weight(max_weight, since, stake);
(weight, info)
@@ -423,15 +419,20 @@ impl CrdsGossipPush {
// Only for tests and simulations.
pub(crate) fn mock_clone(&self) -> Self {
let mut active_set = IndexMap::<Pubkey, AtomicBloom<Pubkey>>::new();
for (k, v) in &self.active_set {
active_set.insert(*k, v.mock_clone());
let active_set = self
.active_set
.iter()
.map(|(k, v)| (*k, v.mock_clone()))
.collect();
let mut last_pushed_to = LruCache::new(self.last_pushed_to.cap());
for (k, v) in self.last_pushed_to.iter().rev() {
last_pushed_to.put(*k, *v);
}
Self {
active_set,
push_messages: self.push_messages.clone(),
received_cache: self.received_cache.clone(),
last_pushed_to: self.last_pushed_to.clone(),
last_pushed_to,
..*self
}
}
@@ -641,7 +642,7 @@ mod test {
let id = peer.label().pubkey();
crds.insert(peer.clone(), time).unwrap();
stakes.insert(id, i * 100);
push.last_pushed_to.insert(id, time);
push.last_pushed_to.put(id, time);
}
let mut options = push.push_options(&crds, &Pubkey::default(), 0, &stakes, None);
assert!(!options.is_empty());

View File

@@ -4,6 +4,7 @@ use crate::{
replay_stage::HeaviestForkFailures,
};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::clock::Slot;
use std::{
collections::{HashMap, HashSet},
sync::{Arc, RwLock},
@@ -36,4 +37,8 @@ pub(crate) trait ForkChoice {
ancestors: &HashMap<u64, HashSet<u64>>,
bank_forks: &RwLock<BankForks>,
) -> (Arc<Bank>, Option<Arc<Bank>>);
fn mark_fork_invalid_candidate(&mut self, invalid_slot: Slot);
fn mark_fork_valid_candidate(&mut self, valid_slot: Slot);
}

View File

@@ -25,6 +25,7 @@ const MAX_ROOT_PRINT_SECONDS: u64 = 30;
enum UpdateLabel {
Aggregate,
Add,
MarkValid,
Subtract,
}
@@ -32,6 +33,7 @@ enum UpdateLabel {
enum UpdateOperation {
Aggregate,
Add(u64),
MarkValid,
Subtract(u64),
}
@@ -40,6 +42,7 @@ impl UpdateOperation {
match self {
Self::Aggregate => panic!("Should not get here"),
Self::Add(stake) => *stake += new_stake,
Self::MarkValid => panic!("Should not get here"),
Self::Subtract(stake) => *stake += new_stake,
}
}
@@ -56,6 +59,9 @@ struct ForkInfo {
best_slot: Slot,
parent: Option<Slot>,
children: Vec<Slot>,
// Whether the fork rooted at this slot is a valid contender
// for the best fork
is_candidate: bool,
}
pub struct HeaviestSubtreeForkChoice {
@@ -142,6 +148,12 @@ impl HeaviestSubtreeForkChoice {
.map(|fork_info| fork_info.stake_voted_subtree)
}
pub fn is_candidate_slot(&self, slot: Slot) -> Option<bool> {
self.fork_infos
.get(&slot)
.map(|fork_info| fork_info.is_candidate)
}
pub fn root(&self) -> Slot {
self.root
}
@@ -205,6 +217,7 @@ impl HeaviestSubtreeForkChoice {
best_slot: root_info.best_slot,
children: vec![self.root],
parent: None,
is_candidate: true,
};
self.fork_infos.insert(root_parent, root_parent_info);
self.root = root_parent;
@@ -226,6 +239,7 @@ impl HeaviestSubtreeForkChoice {
best_slot: slot,
children: vec![],
parent,
is_candidate: true,
});
if parent.is_none() {
@@ -259,6 +273,15 @@ impl HeaviestSubtreeForkChoice {
let child_weight = self
.stake_voted_subtree(*child)
.expect("child must exist in `self.fork_infos`");
// Don't count children currently marked as invalid
if !self
.is_candidate_slot(*child)
.expect("child must exist in tree")
{
continue;
}
if child_weight > maybe_best_child_weight
|| (maybe_best_child_weight == child_weight && *child < maybe_best_child)
{
@@ -268,6 +291,7 @@ impl HeaviestSubtreeForkChoice {
true
}
pub fn all_slots_stake_voted_subtree(&self) -> Vec<(Slot, u64)> {
self.fork_infos
.iter()
@@ -346,18 +370,39 @@ impl HeaviestSubtreeForkChoice {
}
}
#[allow(clippy::map_entry)]
fn insert_mark_valid_aggregate_operations(
&self,
update_operations: &mut BTreeMap<(Slot, UpdateLabel), UpdateOperation>,
slot: Slot,
) {
self.do_insert_aggregate_operations(update_operations, true, slot);
}
fn insert_aggregate_operations(
&self,
update_operations: &mut BTreeMap<(Slot, UpdateLabel), UpdateOperation>,
slot: Slot,
) {
self.do_insert_aggregate_operations(update_operations, false, slot);
}
#[allow(clippy::map_entry)]
fn do_insert_aggregate_operations(
&self,
update_operations: &mut BTreeMap<(Slot, UpdateLabel), UpdateOperation>,
should_mark_valid: bool,
slot: Slot,
) {
for parent in self.ancestor_iterator(slot) {
let label = (parent, UpdateLabel::Aggregate);
if update_operations.contains_key(&label) {
let aggregate_label = (parent, UpdateLabel::Aggregate);
if update_operations.contains_key(&aggregate_label) {
break;
} else {
update_operations.insert(label, UpdateOperation::Aggregate);
if should_mark_valid {
update_operations
.insert((parent, UpdateLabel::MarkValid), UpdateOperation::MarkValid);
}
update_operations.insert(aggregate_label, UpdateOperation::Aggregate);
}
}
}
@@ -375,17 +420,44 @@ impl HeaviestSubtreeForkChoice {
let mut best_child_slot = slot;
for &child in &fork_info.children {
let child_stake_voted_subtree = self.stake_voted_subtree(child).unwrap();
// Child forks that are not candidates still contribute to the weight
// of the subtree rooted at `slot`. For instance:
/*
Build fork structure:
slot 0
|
slot 1
/ \
slot 2 |
| slot 3 (34%)
slot 4 (66%)
If slot 4 is a duplicate slot and no longer qualifies as a candidate until
it is confirmed, the weight of votes on slot 4 should still count towards
slot 2; otherwise we might pick slot 3 as the heaviest fork to build blocks on
instead of slot 2.
*/
// See comment above for why this check is outside of the `is_candidate` check.
stake_voted_subtree += child_stake_voted_subtree;
if best_child_slot == slot ||
child_stake_voted_subtree > best_child_stake_voted_subtree ||
// tiebreaker by slot height, prioritize earlier slot
(child_stake_voted_subtree == best_child_stake_voted_subtree && child < best_child_slot)
// Note: If there are no valid children, then the best slot should default to the
// input `slot` itself.
if self
.is_candidate_slot(child)
.expect("Child must exist in fork_info map")
&& (best_child_slot == slot ||
child_stake_voted_subtree > best_child_stake_voted_subtree ||
// tiebreaker by slot height, prioritize earlier slot
(child_stake_voted_subtree == best_child_stake_voted_subtree && child < best_child_slot))
{
best_child_stake_voted_subtree = child_stake_voted_subtree;
best_child_slot = child;
best_slot = self
.best_slot(child)
.expect("`child` must exist in `self.fork_infos`");
{
best_child_stake_voted_subtree = child_stake_voted_subtree;
best_child_slot = child;
best_slot = self
.best_slot(child)
.expect("`child` must exist in `self.fork_infos`");
}
}
}
} else {
@@ -397,6 +469,12 @@ impl HeaviestSubtreeForkChoice {
fork_info.best_slot = best_slot;
}
fn mark_slot_valid(&mut self, valid_slot: Slot) {
if let Some(fork_info) = self.fork_infos.get_mut(&valid_slot) {
fork_info.is_candidate = true;
}
}
fn generate_update_operations(
&mut self,
pubkey_votes: &[(Pubkey, Slot)],
@@ -453,6 +531,7 @@ impl HeaviestSubtreeForkChoice {
// Iterate through the update operations from greatest to smallest slot
for ((slot, _), operation) in update_operations.into_iter().rev() {
match operation {
UpdateOperation::MarkValid => self.mark_slot_valid(slot),
UpdateOperation::Aggregate => self.aggregate_slot(slot),
UpdateOperation::Add(stake) => self.add_slot_stake(slot, stake),
UpdateOperation::Subtract(stake) => self.subtract_slot_stake(slot, stake),
@@ -602,6 +681,33 @@ impl ForkChoice for HeaviestSubtreeForkChoice {
}),
)
}
fn mark_fork_invalid_candidate(&mut self, invalid_slot: Slot) {
let fork_info = self.fork_infos.get_mut(&invalid_slot);
if let Some(fork_info) = fork_info {
if fork_info.is_candidate {
fork_info.is_candidate = false;
// Aggregate to find the new best slots excluding this fork
let mut aggregate_operations = BTreeMap::new();
self.insert_aggregate_operations(&mut aggregate_operations, invalid_slot);
self.process_update_operations(aggregate_operations);
}
}
}
fn mark_fork_valid_candidate(&mut self, valid_slot: Slot) {
let mut aggregate_operations = BTreeMap::new();
let fork_info = self.fork_infos.get_mut(&valid_slot);
if let Some(fork_info) = fork_info {
// If a bunch of slots on the same fork are confirmed at once, then only the latest
// slot will incur this aggregation operation
fork_info.is_candidate = true;
self.insert_mark_valid_aggregate_operations(&mut aggregate_operations, valid_slot);
}
// Aggregate to find the new best slots including this fork
self.process_update_operations(aggregate_operations);
}
}
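
A toy model of the rule described in the comment above (assumed `Child` struct and `aggregate` helper, not the crate's `HeaviestSubtreeForkChoice`): a non-candidate child's stake still counts toward its parent's subtree weight, but the child is skipped when picking the best child, so the best slot falls back to the parent.

struct Child {
    slot: u64,
    stake_voted_subtree: u64,
    is_candidate: bool,
}

fn aggregate(parent_slot: u64, children: &[Child]) -> (u64, u64) {
    let mut subtree_stake = 0;
    let mut best_slot = parent_slot; // default to the parent if no valid child
    let mut best_stake = 0;
    for child in children {
        subtree_stake += child.stake_voted_subtree; // counted even if invalid
        let better = child.stake_voted_subtree > best_stake
            || (child.stake_voted_subtree == best_stake && child.slot < best_slot);
        if child.is_candidate && (best_slot == parent_slot || better) {
            best_stake = child.stake_voted_subtree;
            best_slot = child.slot;
        }
    }
    (subtree_stake, best_slot)
}

fn main() {
    // Mirrors the fork diagram above: slot 4 (66% of stake) is slot 2's only
    // child but has been marked a non-candidate duplicate.
    let children = vec![Child { slot: 4, stake_voted_subtree: 66, is_candidate: false }];
    let (subtree_stake, best_slot) = aggregate(2, &children);
    // Slot 4's stake still counts toward slot 2's subtree weight, so slot 2's
    // fork (66%) can still out-weigh the fork at slot 3 (34%)...
    assert_eq!(subtree_stake, 66);
    // ...but slot 4 itself cannot be selected; the best slot falls back to 2.
    assert_eq!(best_slot, 2);
}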
struct AncestorIterator<'a> {
@@ -1563,6 +1669,78 @@ mod test {
);
}
#[test]
fn test_mark_valid_invalid_forks() {
let mut heaviest_subtree_fork_choice = setup_forks();
let stake = 100;
let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, stake);
let pubkey_votes: Vec<(Pubkey, Slot)> = vec![
(vote_pubkeys[0], 6),
(vote_pubkeys[1], 6),
(vote_pubkeys[2], 2),
];
let expected_best_slot = 6;
assert_eq!(
heaviest_subtree_fork_choice.add_votes(
&pubkey_votes,
bank.epoch_stakes_map(),
bank.epoch_schedule()
),
expected_best_slot,
);
// Mark slot 5 as invalid; the best fork should be its ancestor 3,
// not the other fork at 4.
let invalid_candidate = 5;
heaviest_subtree_fork_choice.mark_fork_invalid_candidate(invalid_candidate);
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 3);
assert!(!heaviest_subtree_fork_choice
.is_candidate_slot(invalid_candidate)
.unwrap());
// The ancestor is still a candidate
assert!(heaviest_subtree_fork_choice.is_candidate_slot(3).unwrap());
// Adding another descendant to the invalid candidate won't
// update the best slot, even if it contains votes
let new_leaf_slot7 = 7;
heaviest_subtree_fork_choice.add_new_leaf_slot(new_leaf_slot7, Some(6));
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 3);
let pubkey_votes: Vec<(Pubkey, Slot)> = vec![(vote_pubkeys[0], new_leaf_slot7)];
let invalid_slot_ancestor = 3;
assert_eq!(
heaviest_subtree_fork_choice.add_votes(
&pubkey_votes,
bank.epoch_stakes_map(),
bank.epoch_schedule()
),
invalid_slot_ancestor,
);
// Adding a descendant to the ancestor of the invalid candidate *should* update
// the best slot though, since the ancestor is on the heaviest fork
let new_leaf_slot8 = 8;
heaviest_subtree_fork_choice.add_new_leaf_slot(new_leaf_slot8, Some(invalid_slot_ancestor));
assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot(),
new_leaf_slot8
);
// If we mark a descendant of `invalid_candidate` as valid, then that
// should also mark `invalid_candidate` as valid, and the best slot should
// be the leaf of the heaviest fork, `new_leaf_slot7`.
heaviest_subtree_fork_choice.mark_fork_valid_candidate(invalid_candidate);
assert!(heaviest_subtree_fork_choice
.is_candidate_slot(invalid_candidate)
.unwrap());
assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot(),
// Should pick the smaller slot of the two new equally weighted leaves
new_leaf_slot7
);
}
fn setup_forks() -> HeaviestSubtreeForkChoice {
/*
Build fork structure:

View File

@@ -22,6 +22,7 @@ pub mod shred_fetch_stage;
#[macro_use]
pub mod contact_info;
pub mod cluster_info;
pub mod cluster_slot_state_verifier;
pub mod cluster_slots;
pub mod cluster_slots_service;
pub mod consensus;

View File

@@ -76,7 +76,7 @@ impl OptimisticConfirmationVerifier {
self.last_optimistic_slot_ts = Instant::now();
}
pub fn format_optimistic_confirmd_slot_violation_log(slot: Slot) -> String {
pub fn format_optimistic_confirmed_slot_violation_log(slot: Slot) -> String {
format!("Optimistically confirmed slot {} was not rooted", slot)
}
@@ -109,7 +109,7 @@ impl OptimisticConfirmationVerifier {
voted stake: {},
total epoch stake: {},
pct: {}",
Self::format_optimistic_confirmd_slot_violation_log(*optimistic_slot),
Self::format_optimistic_confirmed_slot_violation_log(*optimistic_slot),
hash,
epoch,
r_slot_tracker

View File

@@ -49,10 +49,86 @@ pub enum PohRecorderError {
type Result<T> = std::result::Result<T, PohRecorderError>;
pub type WorkingBankEntry = (Arc<Bank>, (Entry, u64));
pub type BankStart = (Arc<Bank>, Arc<Instant>);
pub struct Record {
pub mixin: Hash,
pub transactions: Vec<Transaction>,
pub slot: Slot,
pub sender: Sender<Result<()>>,
}
impl Record {
pub fn new(
mixin: Hash,
transactions: Vec<Transaction>,
slot: Slot,
sender: Sender<Result<()>>,
) -> Self {
Self {
mixin,
transactions,
slot,
sender,
}
}
}
pub struct TransactionRecorder {
// shared by all users of PohRecorder
pub record_sender: Sender<Record>,
// unique to this caller
pub result_sender: Sender<Result<()>>,
pub result_receiver: Receiver<Result<()>>,
}
impl Clone for TransactionRecorder {
fn clone(&self) -> Self {
TransactionRecorder::new(self.record_sender.clone())
}
}
impl TransactionRecorder {
pub fn new(record_sender: Sender<Record>) -> Self {
let (result_sender, result_receiver) = channel();
Self {
// shared
record_sender,
// unique to this caller
result_sender,
result_receiver,
}
}
pub fn record(
&self,
bank_slot: Slot,
mixin: Hash,
transactions: Vec<Transaction>,
) -> Result<()> {
let res = self.record_sender.send(Record::new(
mixin,
transactions,
bank_slot,
self.result_sender.clone(),
));
if res.is_err() {
// If the channel is dropped, then the validator is shutting down so return that we are hitting
// the max tick height to stop transaction processing and flush any transactions in the pipeline.
return Err(PohRecorderError::MaxHeightReached);
}
let res = self
.result_receiver
.recv_timeout(std::time::Duration::from_millis(2000));
match res {
Err(_err) => Err(PohRecorderError::MaxHeightReached),
Ok(result) => result,
}
}
}
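
A minimal sketch of the channel round trip used by `TransactionRecorder` above, with plain `std::sync::mpsc` stand-ins (the `Request` type and service loop are hypothetical, not the crate's `PohService` wiring): each caller sends a request carrying its own result sender and waits for the reply on that per-caller channel.

use std::sync::mpsc::{channel, Sender};
use std::thread;

struct Request {
    payload: u64,
    sender: Sender<Result<(), ()>>,
}

fn main() {
    let (record_sender, record_receiver) = channel::<Request>();

    // Service side: drain requests and answer each on its embedded sender.
    let service = thread::spawn(move || {
        while let Ok(req) = record_receiver.recv() {
            let result = if req.payload > 0 { Ok(()) } else { Err(()) };
            let _ = req.sender.send(result);
        }
    });

    // Caller side: a per-caller result channel plus the shared request channel.
    let (result_sender, result_receiver) = channel();
    record_sender
        .send(Request { payload: 42, sender: result_sender })
        .unwrap();
    assert_eq!(result_receiver.recv().unwrap(), Ok(()));

    drop(record_sender); // closing the request channel lets the service exit
    service.join().unwrap();
}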
#[derive(Clone)]
pub struct WorkingBank {
pub bank: Arc<Bank>,
pub start: Arc<Instant>,
pub min_tick_height: u64,
pub max_tick_height: u64,
}
@@ -78,6 +154,8 @@ pub struct PohRecorder {
tick_lock_contention_us: u64,
tick_overhead_us: u64,
record_us: u64,
last_metric: Instant,
record_sender: Sender<Record>,
}
impl PohRecorder {
@@ -97,7 +175,14 @@ impl PohRecorder {
self.grace_ticks = grace_ticks;
self.leader_first_tick_height = leader_first_tick_height;
self.leader_last_tick_height = leader_last_tick_height;
datapoint_info!(
"leader-slot-start-to-cleared-elapsed-ms",
("slot", bank.slot(), i64),
("elapsed", working_bank.start.elapsed().as_millis(), i64),
);
}
if let Some(ref signal) = self.clear_bank_signal {
let _ = signal.try_send(true);
}
@@ -126,7 +211,13 @@ impl PohRecorder {
}
pub fn bank(&self) -> Option<Arc<Bank>> {
self.working_bank.clone().map(|w| w.bank)
self.working_bank.as_ref().map(|w| w.bank.clone())
}
pub fn bank_start(&self) -> Option<BankStart> {
self.working_bank
.as_ref()
.map(|w| (w.bank.clone(), w.start.clone()))
}
pub fn has_bank(&self) -> bool {
@@ -141,6 +232,10 @@ impl PohRecorder {
self.ticks_per_slot
}
pub fn recorder(&self) -> TransactionRecorder {
TransactionRecorder::new(self.record_sender.clone())
}
fn is_same_fork_as_previous_leader(&self, slot: Slot) -> bool {
(slot.saturating_sub(NUM_CONSECUTIVE_LEADER_SLOTS)..slot).any(|slot| {
// Check if the last slot Poh reset to was any of the
@@ -175,6 +270,10 @@ impl PohRecorder {
|| !self.is_same_fork_as_previous_leader(current_slot)))
}
pub fn last_reset_slot(&self) -> Slot {
self.start_slot
}
/// returns if leader slot has been reached, how many grace ticks were afforded,
/// imputed leader_slot and self.start_slot
/// reached_leader_slot() == true means "ready for a bank"
@@ -247,14 +346,15 @@ impl PohRecorder {
) {
self.clear_bank();
let mut cache = vec![];
{
let poh_hash = {
let mut poh = self.poh.lock().unwrap();
info!(
"reset poh from: {},{},{} to: {},{}",
poh.hash, self.tick_height, self.start_slot, blockhash, start_slot
);
poh.reset(blockhash, self.poh_config.hashes_per_tick);
}
poh.hash
};
info!(
"reset poh from: {},{},{} to: {},{}",
poh_hash, self.tick_height, self.start_slot, blockhash, start_slot
);
std::mem::swap(&mut cache, &mut self.tick_cache);
@@ -273,11 +373,15 @@ impl PohRecorder {
trace!("new working bank");
assert_eq!(working_bank.bank.ticks_per_slot(), self.ticks_per_slot());
self.working_bank = Some(working_bank);
// TODO: adjust the working_bank.start time based on number of ticks
// that have already elapsed based on current tick height.
let _ = self.flush_cache(false);
}
pub fn set_bank(&mut self, bank: &Arc<Bank>) {
let working_bank = WorkingBank {
bank: bank.clone(),
start: Arc::new(Instant::now()),
min_tick_height: bank.tick_height(),
max_tick_height: bank.max_tick_height(),
};
@@ -377,23 +481,26 @@ impl PohRecorder {
}
fn report_metrics(&mut self, bank_slot: Slot) {
datapoint_info!(
"poh_recorder",
("slot", bank_slot, i64),
("tick_lock_contention", self.tick_lock_contention_us, i64),
("record_us", self.record_us, i64),
("tick_overhead", self.tick_overhead_us, i64),
(
"record_lock_contention",
self.record_lock_contention_us,
i64
),
);
if self.last_metric.elapsed().as_millis() > 1000 {
datapoint_info!(
"poh_recorder",
("slot", bank_slot, i64),
("tick_lock_contention", self.tick_lock_contention_us, i64),
("record_us", self.record_us, i64),
("tick_overhead", self.tick_overhead_us, i64),
(
"record_lock_contention",
self.record_lock_contention_us,
i64
),
);
self.tick_lock_contention_us = 0;
self.record_us = 0;
self.tick_overhead_us = 0;
self.record_lock_contention_us = 0;
self.tick_lock_contention_us = 0;
self.record_us = 0;
self.tick_overhead_us = 0;
self.record_lock_contention_us = 0;
self.last_metric = Instant::now();
}
}
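
A standalone sketch of the once-per-second gating added to `report_metrics` above (hypothetical `Metrics` struct, `println!` standing in for `datapoint_info!`): counters accumulate across calls and are only flushed and reset when at least a second has elapsed since the last flush.

use std::time::Instant;

struct Metrics {
    record_us: u64,
    last_metric: Instant,
}

impl Metrics {
    fn report(&mut self) {
        if self.last_metric.elapsed().as_millis() > 1000 {
            println!("record_us={}", self.record_us); // datapoint_info! stand-in
            self.record_us = 0;
            self.last_metric = Instant::now();
        }
    }
}

fn main() {
    let mut m = Metrics { record_us: 7, last_metric: Instant::now() };
    m.report(); // too soon: nothing is flushed or reset
    assert_eq!(m.record_us, 7);
}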
pub fn record(
@@ -405,6 +512,7 @@ impl PohRecorder {
// Entries without transactions are used to track real-time passing in the ledger and
// cannot be generated by `record()`
assert!(!transactions.is_empty(), "No transactions provided");
self.report_metrics(bank_slot);
loop {
self.flush_cache(false)?;
@@ -413,7 +521,6 @@ impl PohRecorder {
.as_ref()
.ok_or(PohRecorderError::MaxHeightReached)?;
if bank_slot != working_bank.bank.slot() {
self.report_metrics(bank_slot);
return Err(PohRecorderError::MaxHeightReached);
}
@@ -424,6 +531,7 @@ impl PohRecorder {
self.record_lock_contention_us += timing::duration_as_us(&now.elapsed());
let now = Instant::now();
let res = poh_lock.record(mixin);
drop(poh_lock);
self.record_us += timing::duration_as_us(&now.elapsed());
if let Some(poh_entry) = res {
let entry = Entry {
@@ -454,12 +562,13 @@ impl PohRecorder {
clear_bank_signal: Option<SyncSender<bool>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
poh_config: &Arc<PohConfig>,
) -> (Self, Receiver<WorkingBankEntry>) {
) -> (Self, Receiver<WorkingBankEntry>, Receiver<Record>) {
let poh = Arc::new(Mutex::new(Poh::new(
last_entry_hash,
poh_config.hashes_per_tick,
)));
let (sender, receiver) = channel();
let (record_sender, record_receiver) = channel();
let (leader_first_tick_height, leader_last_tick_height, grace_ticks) =
Self::compute_leader_slot_tick_heights(next_leader_slot, ticks_per_slot);
(
@@ -484,8 +593,11 @@ impl PohRecorder {
tick_lock_contention_us: 0,
record_us: 0,
tick_overhead_us: 0,
last_metric: Instant::now(),
record_sender,
},
receiver,
record_receiver,
)
}
@@ -502,7 +614,7 @@ impl PohRecorder {
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
poh_config: &Arc<PohConfig>,
) -> (Self, Receiver<WorkingBankEntry>) {
) -> (Self, Receiver<WorkingBankEntry>, Receiver<Record>) {
Self::new_with_clear_signal(
tick_height,
last_entry_hash,
@@ -517,6 +629,18 @@ impl PohRecorder {
)
}
// Filters the return result of PohRecorder::bank_start(), returns the bank
// if it's still processing transactions
pub fn get_bank_still_processing_txs(bank_start: &Option<BankStart>) -> Option<&Arc<Bank>> {
bank_start.as_ref().and_then(|(bank, bank_creation_time)| {
if Bank::should_bank_still_be_processing_txs(bank_creation_time, bank.ns_per_slot) {
Some(bank)
} else {
None
}
})
}
#[cfg(test)]
pub fn schedule_dummy_max_height_reached_failure(&mut self) {
self.reset(Hash::default(), 1, None);
@@ -542,7 +666,7 @@ mod tests {
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -569,7 +693,7 @@ mod tests {
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -595,7 +719,7 @@ mod tests {
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
@@ -623,7 +747,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -635,8 +759,10 @@ mod tests {
&Arc::new(PohConfig::default()),
);
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank,
start,
min_tick_height: 2,
max_tick_height: 3,
};
@@ -657,7 +783,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -669,8 +795,10 @@ mod tests {
&Arc::new(PohConfig::default()),
);
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: 2,
max_tick_height: 3,
};
@@ -706,7 +834,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -725,8 +853,10 @@ mod tests {
assert_eq!(poh_recorder.tick_cache.last().unwrap().1, 4);
assert_eq!(poh_recorder.tick_height, 4);
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank,
start,
min_tick_height: 2,
max_tick_height: 3,
};
@@ -753,7 +883,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -765,8 +895,10 @@ mod tests {
&Arc::new(PohConfig::default()),
);
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: 2,
max_tick_height: 3,
};
@@ -789,7 +921,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -801,8 +933,10 @@ mod tests {
&Arc::new(PohConfig::default()),
);
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: 1,
max_tick_height: 2,
};
@@ -829,7 +963,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -841,8 +975,10 @@ mod tests {
&Arc::new(PohConfig::default()),
);
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: 1,
max_tick_height: 2,
};
@@ -873,7 +1009,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -885,8 +1021,10 @@ mod tests {
&Arc::new(PohConfig::default()),
);
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: 1,
max_tick_height: 2,
};
@@ -915,7 +1053,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, entry_receiver) = PohRecorder::new(
let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -927,8 +1065,10 @@ mod tests {
&Arc::new(PohConfig::default()),
);
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank,
start,
min_tick_height: 2,
max_tick_height: 3,
};
@@ -950,7 +1090,7 @@ mod tests {
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
@@ -977,7 +1117,7 @@ mod tests {
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
@@ -1005,7 +1145,7 @@ mod tests {
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
@@ -1038,7 +1178,7 @@ mod tests {
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
Hash::default(),
0,
@@ -1049,8 +1189,10 @@ mod tests {
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank,
start,
min_tick_height: 2,
max_tick_height: 3,
};
@@ -1070,18 +1212,19 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let (sender, receiver) = sync_channel(1);
let (mut poh_recorder, _entry_receiver) = PohRecorder::new_with_clear_signal(
0,
Hash::default(),
0,
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blockstore),
Some(sender),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
let (mut poh_recorder, _entry_receiver, _record_receiver) =
PohRecorder::new_with_clear_signal(
0,
Hash::default(),
0,
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blockstore),
Some(sender),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
poh_recorder.set_bank(&bank);
poh_recorder.clear_bank();
assert!(receiver.try_recv().is_ok());
@@ -1104,7 +1247,7 @@ mod tests {
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -1118,8 +1261,10 @@ mod tests {
let end_slot = 3;
let max_tick_height = (end_slot + 1) * ticks_per_slot;
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: 1,
max_tick_height,
};
@@ -1151,7 +1296,7 @@ mod tests {
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -1213,7 +1358,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -1342,7 +1487,7 @@ mod tests {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
prev_hash,
0,
@@ -1410,7 +1555,7 @@ mod tests {
let bank = Arc::new(Bank::new(&genesis_config));
let genesis_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
bank.last_blockhash(),
0,

View File

@@ -1,11 +1,13 @@
//! The `poh_service` module implements a service that records the passing of
//! "ticks", a measure of time in the PoH stream
use crate::poh_recorder::PohRecorder;
use crate::poh_recorder::{PohRecorder, Record};
use solana_ledger::poh::Poh;
use solana_measure::measure::Measure;
use solana_sdk::poh_config::PohConfig;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::sync::{mpsc::Receiver, Arc, Mutex};
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Instant;
use std::time::{Duration, Instant};
pub struct PohService {
tick_producer: JoinHandle<()>,
@@ -23,6 +25,54 @@ pub const DEFAULT_PINNED_CPU_CORE: usize = 0;
const TARGET_SLOT_ADJUSTMENT_NS: u64 = 50_000_000;
#[derive(Debug)]
struct PohTiming {
num_ticks: u64,
num_hashes: u64,
total_sleep_us: u64,
total_lock_time_ns: u64,
total_hash_time_ns: u64,
total_tick_time_ns: u64,
last_metric: Instant,
}
impl PohTiming {
fn new() -> Self {
Self {
num_ticks: 0,
num_hashes: 0,
total_sleep_us: 0,
total_lock_time_ns: 0,
total_hash_time_ns: 0,
total_tick_time_ns: 0,
last_metric: Instant::now(),
}
}
fn report(&mut self, ticks_per_slot: u64) {
if self.last_metric.elapsed().as_millis() > 1000 {
let elapsed_us = self.last_metric.elapsed().as_micros() as u64;
let us_per_slot = (elapsed_us * ticks_per_slot) / self.num_ticks;
datapoint_info!(
"poh-service",
("ticks", self.num_ticks as i64, i64),
("hashes", self.num_hashes as i64, i64),
("elapsed_us", us_per_slot, i64),
("total_sleep_us", self.total_sleep_us, i64),
("total_tick_time_us", self.total_tick_time_ns / 1000, i64),
("total_lock_time_us", self.total_lock_time_ns / 1000, i64),
("total_hash_time_us", self.total_hash_time_ns / 1000, i64),
);
self.total_sleep_us = 0;
self.num_ticks = 0;
self.num_hashes = 0;
self.total_tick_time_ns = 0;
self.total_lock_time_ns = 0;
self.total_hash_time_ns = 0;
self.last_metric = Instant::now();
}
}
}
impl PohService {
pub fn new(
poh_recorder: Arc<Mutex<PohRecorder>>,
@@ -31,6 +81,7 @@ impl PohService {
ticks_per_slot: u64,
pinned_cpu_core: usize,
hashes_per_batch: u64,
record_receiver: Receiver<Record>,
) -> Self {
let poh_exit_ = poh_exit.clone();
let poh_config = poh_config.clone();
@@ -40,12 +91,18 @@ impl PohService {
solana_sys_tuner::request_realtime_poh();
if poh_config.hashes_per_tick.is_none() {
if poh_config.target_tick_count.is_none() {
Self::sleepy_tick_producer(poh_recorder, &poh_config, &poh_exit_);
Self::sleepy_tick_producer(
poh_recorder,
&poh_config,
&poh_exit_,
record_receiver,
);
} else {
Self::short_lived_sleepy_tick_producer(
poh_recorder,
&poh_config,
&poh_exit_,
record_receiver,
);
}
} else {
@@ -68,6 +125,7 @@ impl PohService {
poh_config.target_tick_duration.as_nanos() as u64 - adjustment_per_tick,
ticks_per_slot,
hashes_per_batch,
record_receiver,
);
}
poh_exit_.store(true, Ordering::Relaxed);
@@ -81,20 +139,53 @@ impl PohService {
poh_recorder: Arc<Mutex<PohRecorder>>,
poh_config: &PohConfig,
poh_exit: &AtomicBool,
record_receiver: Receiver<Record>,
) {
while !poh_exit.load(Ordering::Relaxed) {
Self::read_record_receiver_and_process(
&poh_recorder,
&record_receiver,
Duration::from_millis(0),
);
sleep(poh_config.target_tick_duration);
poh_recorder.lock().unwrap().tick();
}
}
pub fn read_record_receiver_and_process(
poh_recorder: &Arc<Mutex<PohRecorder>>,
record_receiver: &Receiver<Record>,
timeout: Duration,
) {
let record = record_receiver.recv_timeout(timeout);
if let Ok(record) = record {
if record
.sender
.send(poh_recorder.lock().unwrap().record(
record.slot,
record.mixin,
record.transactions,
))
.is_err()
{
panic!("Error returning mixin hash");
}
}
}
fn short_lived_sleepy_tick_producer(
poh_recorder: Arc<Mutex<PohRecorder>>,
poh_config: &PohConfig,
poh_exit: &AtomicBool,
record_receiver: Receiver<Record>,
) {
let mut warned = false;
for _ in 0..poh_config.target_tick_count.unwrap() {
Self::read_record_receiver_and_process(
&poh_recorder,
&record_receiver,
Duration::from_millis(0),
);
sleep(poh_config.target_tick_duration);
poh_recorder.lock().unwrap().tick();
if poh_exit.load(Ordering::Relaxed) && !warned {
@@ -104,49 +195,121 @@ impl PohService {
}
}
fn record_or_hash(
next_record: &mut Option<Record>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
timing: &mut PohTiming,
record_receiver: &Receiver<Record>,
hashes_per_batch: u64,
poh: &Arc<Mutex<Poh>>,
) -> bool {
match next_record.take() {
Some(mut record) => {
// received message to record
// so, record for as long as we have queued up record requests
let mut lock_time = Measure::start("lock");
let mut poh_recorder_l = poh_recorder.lock().unwrap();
lock_time.stop();
timing.total_lock_time_ns += lock_time.as_ns();
loop {
let res = poh_recorder_l.record(
record.slot,
record.mixin,
std::mem::take(&mut record.transactions),
);
let _ = record.sender.send(res); // what do we do on failure here? Ignore for now.
timing.num_hashes += 1; // note: may have also ticked inside record
let new_record_result = record_receiver.try_recv();
match new_record_result {
Ok(new_record) => {
// we already have a second request to record, so record again while we still have the mutex
record = new_record;
}
Err(_) => {
break;
}
}
}
// PohRecorder.record would have ticked if it needed to, so should_tick will be false
}
None => {
// did not receive instructions to record, so hash until we notice we've been asked to record (or we need to tick) and then remember what to record
let mut lock_time = Measure::start("lock");
let mut poh_l = poh.lock().unwrap();
lock_time.stop();
timing.total_lock_time_ns += lock_time.as_ns();
loop {
timing.num_hashes += hashes_per_batch;
let mut hash_time = Measure::start("hash");
let should_tick = poh_l.hash(hashes_per_batch);
hash_time.stop();
timing.total_hash_time_ns += hash_time.as_ns();
if should_tick {
return true; // nothing else can be done. tick required.
}
// check to see if a record request has been sent
let get_again = record_receiver.try_recv();
match get_again {
Ok(record) => {
// remember the record we just received as the next record to occur
*next_record = Some(record);
break;
}
Err(_) => {
continue;
}
}
}
}
};
false // should_tick = false for all code that reaches here
}
fn tick_producer(
poh_recorder: Arc<Mutex<PohRecorder>>,
poh_exit: &AtomicBool,
target_tick_ns: u64,
ticks_per_slot: u64,
hashes_per_batch: u64,
record_receiver: Receiver<Record>,
) {
let poh = poh_recorder.lock().unwrap().poh.clone();
let mut now = Instant::now();
let mut last_metric = Instant::now();
let mut num_ticks = 0;
let mut num_hashes = 0;
let mut total_sleep_us = 0;
let mut timing = PohTiming::new();
let mut next_record = None;
loop {
num_hashes += hashes_per_batch;
if poh.lock().unwrap().hash(hashes_per_batch) {
// Lock PohRecorder only for the final hash...
poh_recorder.lock().unwrap().tick();
num_ticks += 1;
let should_tick = Self::record_or_hash(
&mut next_record,
&poh_recorder,
&mut timing,
&record_receiver,
hashes_per_batch,
&poh,
);
if should_tick {
// Lock PohRecorder only for the final hash. record_or_hash will lock PohRecorder for record calls but not for hashing.
{
let mut lock_time = Measure::start("lock");
let mut poh_recorder_l = poh_recorder.lock().unwrap();
lock_time.stop();
timing.total_lock_time_ns += lock_time.as_ns();
let mut tick_time = Measure::start("tick");
poh_recorder_l.tick();
tick_time.stop();
timing.total_tick_time_ns += tick_time.as_ns();
}
timing.num_ticks += 1;
let elapsed_ns = now.elapsed().as_nanos() as u64;
// sleep is not accurate enough to get a predictable time;
// the kernel may not schedule the thread for a while.
while (now.elapsed().as_nanos() as u64) < target_tick_ns {
std::hint::spin_loop();
}
total_sleep_us += (now.elapsed().as_nanos() as u64 - elapsed_ns) / 1000;
timing.total_sleep_us += (now.elapsed().as_nanos() as u64 - elapsed_ns) / 1000;
now = Instant::now();
if last_metric.elapsed().as_millis() > 1000 {
let elapsed_ms = last_metric.elapsed().as_millis() as u64;
let ms_per_slot = (elapsed_ms * ticks_per_slot) / num_ticks;
datapoint_info!(
"poh-service",
("ticks", num_ticks as i64, i64),
("hashes", num_hashes as i64, i64),
("elapsed_ms", ms_per_slot, i64),
("total_sleep_ms", total_sleep_us / 1000, i64),
);
total_sleep_us = 0;
num_ticks = 0;
num_hashes = 0;
last_metric = Instant::now();
}
timing.report(ticks_per_slot);
if poh_exit.load(Ordering::Relaxed) {
break;
}
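
A self-contained sketch of the spin-wait above (assumed target duration): `thread::sleep` can overshoot by scheduler latency, so the loop busy-waits with `spin_loop` hints until the tick deadline has passed.

use std::time::{Duration, Instant};

fn wait_until(start: Instant, target: Duration) {
    // Busy-wait rather than sleep so the deadline is not missed by scheduler delay.
    while start.elapsed() < target {
        std::hint::spin_loop();
    }
}

fn main() {
    let start = Instant::now();
    wait_until(start, Duration::from_micros(200));
    assert!(start.elapsed() >= Duration::from_micros(200));
}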
@@ -195,7 +358,7 @@ mod tests {
target_tick_duration,
target_tick_count: None,
});
let (poh_recorder, entry_receiver) = PohRecorder::new(
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
prev_hash,
bank.slot(),
@@ -208,8 +371,10 @@ mod tests {
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let exit = Arc::new(AtomicBool::new(false));
let start = Arc::new(Instant::now());
let working_bank = WorkingBank {
bank: bank.clone(),
start,
min_tick_height: bank.tick_height(),
max_tick_height: std::u64::MAX,
};
@@ -273,6 +438,7 @@ mod tests {
0,
DEFAULT_PINNED_CPU_CORE,
hashes_per_batch,
record_receiver,
);
poh_recorder.lock().unwrap().set_working_bank(working_bank);

View File

@@ -139,6 +139,7 @@ pub(crate) struct ForkProgress {
pub(crate) propagated_stats: PropagatedStats,
pub(crate) replay_stats: ReplaySlotStats,
pub(crate) replay_progress: ConfirmationProgress,
pub(crate) duplicate_stats: DuplicateStats,
// Note `num_blocks_on_fork` and `num_dropped_blocks_on_fork` only
// count new blocks replayed since last restart, which won't include
// blocks already existing in the ledger/before snapshot at start,
@@ -151,6 +152,7 @@ impl ForkProgress {
pub fn new(
last_entry: Hash,
prev_leader_slot: Option<Slot>,
duplicate_stats: DuplicateStats,
validator_stake_info: Option<ValidatorStakeInfo>,
num_blocks_on_fork: u64,
num_dropped_blocks_on_fork: u64,
@@ -184,6 +186,7 @@ impl ForkProgress {
fork_stats: ForkStats::default(),
replay_stats: ReplaySlotStats::default(),
replay_progress: ConfirmationProgress::new(last_entry),
duplicate_stats,
num_blocks_on_fork,
num_dropped_blocks_on_fork,
propagated_stats: PropagatedStats {
@@ -203,6 +206,7 @@ impl ForkProgress {
my_pubkey: &Pubkey,
voting_pubkey: &Pubkey,
prev_leader_slot: Option<Slot>,
duplicate_stats: DuplicateStats,
num_blocks_on_fork: u64,
num_dropped_blocks_on_fork: u64,
) -> Self {
@@ -222,11 +226,20 @@ impl ForkProgress {
Self::new(
bank.last_blockhash(),
prev_leader_slot,
duplicate_stats,
validator_fork_info,
num_blocks_on_fork,
num_dropped_blocks_on_fork,
)
}
pub fn is_duplicate_confirmed(&self) -> bool {
self.duplicate_stats.is_duplicate_confirmed
}
pub fn set_duplicate_confirmed(&mut self) {
self.duplicate_stats.set_duplicate_confirmed();
}
}
#[derive(Debug, Clone, Default)]
@@ -241,7 +254,7 @@ pub(crate) struct ForkStats {
pub(crate) vote_threshold: bool,
pub(crate) is_locked_out: bool,
pub(crate) voted_stakes: VotedStakes,
pub(crate) confirmation_reported: bool,
pub(crate) is_supermajority_confirmed: bool,
pub(crate) computed: bool,
pub(crate) lockout_intervals: LockoutIntervals,
}
@@ -259,6 +272,38 @@ pub(crate) struct PropagatedStats {
pub(crate) total_epoch_stake: u64,
}
#[derive(Clone, Default)]
pub(crate) struct DuplicateStats {
latest_unconfirmed_duplicate_ancestor: Option<Slot>,
is_duplicate_confirmed: bool,
}
impl DuplicateStats {
pub fn new_with_unconfirmed_duplicate_ancestor(
latest_unconfirmed_duplicate_ancestor: Option<Slot>,
) -> Self {
Self {
latest_unconfirmed_duplicate_ancestor,
is_duplicate_confirmed: false,
}
}
fn set_duplicate_confirmed(&mut self) {
self.is_duplicate_confirmed = true;
self.latest_unconfirmed_duplicate_ancestor = None;
}
fn update_with_newly_confirmed_duplicate_ancestor(&mut self, newly_confirmed_ancestor: Slot) {
if let Some(latest_unconfirmed_duplicate_ancestor) =
self.latest_unconfirmed_duplicate_ancestor
{
if latest_unconfirmed_duplicate_ancestor <= newly_confirmed_ancestor {
self.latest_unconfirmed_duplicate_ancestor = None;
}
}
}
}
impl PropagatedStats {
pub fn add_vote_pubkey(&mut self, vote_pubkey: Pubkey, stake: u64) {
if self.propagated_validators.insert(vote_pubkey) {
@@ -347,6 +392,12 @@ impl ProgressMap {
.map(|fork_progress| &mut fork_progress.fork_stats)
}
pub fn is_dead(&self, slot: Slot) -> Option<bool> {
self.progress_map
.get(&slot)
.map(|fork_progress| fork_progress.is_dead)
}
pub fn is_propagated(&self, slot: Slot) -> bool {
let leader_slot_to_check = self.get_latest_leader_slot(slot);
@@ -378,6 +429,118 @@ impl ProgressMap {
}
}
pub fn is_unconfirmed_duplicate(&self, slot: Slot) -> Option<bool> {
self.get(&slot).map(|p| {
p.duplicate_stats
.latest_unconfirmed_duplicate_ancestor
.map(|ancestor| ancestor == slot)
.unwrap_or(false)
})
}
pub fn latest_unconfirmed_duplicate_ancestor(&self, slot: Slot) -> Option<Slot> {
self.get(&slot)
.map(|p| p.duplicate_stats.latest_unconfirmed_duplicate_ancestor)
.unwrap_or(None)
}
pub fn set_unconfirmed_duplicate_slot(&mut self, slot: Slot, descendants: &HashSet<u64>) {
if let Some(fork_progress) = self.get_mut(&slot) {
if fork_progress.is_duplicate_confirmed() {
assert!(fork_progress
.duplicate_stats
.latest_unconfirmed_duplicate_ancestor
.is_none());
return;
}
if fork_progress
.duplicate_stats
.latest_unconfirmed_duplicate_ancestor
== Some(slot)
{
// Already been marked
return;
}
fork_progress
.duplicate_stats
.latest_unconfirmed_duplicate_ancestor = Some(slot);
for d in descendants {
if let Some(fork_progress) = self.get_mut(&d) {
fork_progress
.duplicate_stats
.latest_unconfirmed_duplicate_ancestor = Some(std::cmp::max(
fork_progress
.duplicate_stats
.latest_unconfirmed_duplicate_ancestor
.unwrap_or(0),
slot,
));
}
}
}
}
pub fn set_confirmed_duplicate_slot(
&mut self,
slot: Slot,
ancestors: &HashSet<u64>,
descendants: &HashSet<u64>,
) {
for a in ancestors {
if let Some(fork_progress) = self.get_mut(&a) {
fork_progress.set_duplicate_confirmed();
}
}
if let Some(slot_fork_progress) = self.get_mut(&slot) {
// Setting the fields here is only correct and necessary if the loop above didn't
// already do this, so check with an assert.
assert!(!ancestors.contains(&slot));
let slot_had_unconfirmed_duplicate_ancestor = slot_fork_progress
.duplicate_stats
.latest_unconfirmed_duplicate_ancestor
.is_some();
slot_fork_progress.set_duplicate_confirmed();
if slot_had_unconfirmed_duplicate_ancestor {
for d in descendants {
if let Some(descendant_fork_progress) = self.get_mut(&d) {
descendant_fork_progress
.duplicate_stats
.update_with_newly_confirmed_duplicate_ancestor(slot);
}
}
} else {
// Neither this slot `S`, nor earlier ancestors were marked as duplicate,
// so this means all descendants either:
// 1) Have no duplicate ancestors
// 2) Have a duplicate ancestor > `S`
// In both cases, there's no need to iterate through descendants because
// this confirmation on `S` is irrelevant to them.
}
}
}
pub fn set_supermajority_confirmed_slot(&mut self, slot: Slot) {
let slot_progress = self.get_mut(&slot).unwrap();
slot_progress.fork_stats.is_supermajority_confirmed = true;
}
pub fn is_supermajority_confirmed(&self, slot: Slot) -> Option<bool> {
self.progress_map
.get(&slot)
.map(|s| s.fork_stats.is_supermajority_confirmed)
}
pub fn is_duplicate_confirmed(&self, slot: Slot) -> Option<bool> {
self.progress_map
.get(&slot)
.map(|s| s.is_duplicate_confirmed())
}
pub fn get_bank_prev_leader_slot(&self, bank: &Bank) -> Option<Slot> {
let parent_slot = bank.parent_slot();
self.get_propagated_stats(parent_slot)
@@ -420,6 +583,8 @@ impl ProgressMap {
#[cfg(test)]
mod test {
use super::*;
use crate::consensus::test::VoteSimulator;
use trees::tr;
#[test]
fn test_add_vote_pubkey() {
@@ -510,13 +675,21 @@ mod test {
fn test_is_propagated_status_on_construction() {
// If the given ValidatorStakeInfo == None, then this is not
// a leader slot and is_propagated == false
let progress = ForkProgress::new(Hash::default(), Some(9), None, 0, 0);
let progress = ForkProgress::new(
Hash::default(),
Some(9),
DuplicateStats::default(),
None,
0,
0,
);
assert!(!progress.propagated_stats.is_propagated);
// If the stake is zero, then threshold is always achieved
let progress = ForkProgress::new(
Hash::default(),
Some(9),
DuplicateStats::default(),
Some(ValidatorStakeInfo {
total_epoch_stake: 0,
..ValidatorStakeInfo::default()
@@ -531,6 +704,7 @@ mod test {
let progress = ForkProgress::new(
Hash::default(),
Some(9),
DuplicateStats::default(),
Some(ValidatorStakeInfo {
total_epoch_stake: 2,
..ValidatorStakeInfo::default()
@@ -544,6 +718,7 @@ mod test {
let progress = ForkProgress::new(
Hash::default(),
Some(9),
DuplicateStats::default(),
Some(ValidatorStakeInfo {
stake: 1,
total_epoch_stake: 2,
@@ -560,6 +735,7 @@ mod test {
let progress = ForkProgress::new(
Hash::default(),
Some(9),
DuplicateStats::default(),
Some(ValidatorStakeInfo::default()),
0,
0,
@@ -573,12 +749,23 @@ mod test {
// Insert new ForkProgress for slot 10 (not a leader slot) and its
// previous leader slot 9 (leader slot)
progress_map.insert(10, ForkProgress::new(Hash::default(), Some(9), None, 0, 0));
progress_map.insert(
10,
ForkProgress::new(
Hash::default(),
Some(9),
DuplicateStats::default(),
None,
0,
0,
),
);
progress_map.insert(
9,
ForkProgress::new(
Hash::default(),
None,
DuplicateStats::default(),
Some(ValidatorStakeInfo::default()),
0,
0,
@@ -593,7 +780,17 @@ mod test {
// The previous leader before 8, slot 7, does not exist in
// progress map, so is_propagated(8) should return true as
// this implies the parent is rooted
progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None, 0, 0));
progress_map.insert(
8,
ForkProgress::new(
Hash::default(),
Some(7),
DuplicateStats::default(),
None,
0,
0,
),
);
assert!(progress_map.is_propagated(8));
// If we set the is_propagated = true, is_propagated should return true
@@ -616,4 +813,157 @@ mod test {
.is_leader_slot = true;
assert!(!progress_map.is_propagated(10));
}
fn setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
smaller_duplicate_slot: Slot,
larger_duplicate_slot: Slot,
) -> (ProgressMap, RwLock<BankForks>) {
// Create simple fork 0 -> 1 -> 2 -> 3 -> 4 -> 5
let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
let mut vote_simulator = VoteSimulator::new(1);
vote_simulator.fill_bank_forks(forks, &HashMap::new());
let VoteSimulator {
mut progress,
bank_forks,
..
} = vote_simulator;
let descendants = bank_forks.read().unwrap().descendants().clone();
// Mark the slots as unconfirmed duplicates
progress.set_unconfirmed_duplicate_slot(
smaller_duplicate_slot,
&descendants.get(&smaller_duplicate_slot).unwrap(),
);
progress.set_unconfirmed_duplicate_slot(
larger_duplicate_slot,
&descendants.get(&larger_duplicate_slot).unwrap(),
);
// Correctness checks
for slot in bank_forks.read().unwrap().banks().keys() {
if *slot < smaller_duplicate_slot {
assert!(progress
.latest_unconfirmed_duplicate_ancestor(*slot)
.is_none());
} else if *slot < larger_duplicate_slot {
assert_eq!(
progress
.latest_unconfirmed_duplicate_ancestor(*slot)
.unwrap(),
smaller_duplicate_slot
);
} else {
assert_eq!(
progress
.latest_unconfirmed_duplicate_ancestor(*slot)
.unwrap(),
larger_duplicate_slot
);
}
}
(progress, bank_forks)
}
#[test]
fn test_set_unconfirmed_duplicate_confirm_smaller_slot_first() {
let smaller_duplicate_slot = 1;
let larger_duplicate_slot = 4;
let (mut progress, bank_forks) = setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
smaller_duplicate_slot,
larger_duplicate_slot,
);
let descendants = bank_forks.read().unwrap().descendants().clone();
let ancestors = bank_forks.read().unwrap().ancestors();
// Mark the smaller duplicate slot as confirmed
progress.set_confirmed_duplicate_slot(
smaller_duplicate_slot,
&ancestors.get(&smaller_duplicate_slot).unwrap(),
&descendants.get(&smaller_duplicate_slot).unwrap(),
);
for slot in bank_forks.read().unwrap().banks().keys() {
if *slot < larger_duplicate_slot {
// Only slots <= smaller_duplicate_slot have been duplicate confirmed
if *slot <= smaller_duplicate_slot {
assert!(progress.is_duplicate_confirmed(*slot).unwrap());
} else {
assert!(!progress.is_duplicate_confirmed(*slot).unwrap());
}
// The unconfirmed duplicate flag has been cleared on the smaller
// descendants because their most recent duplicate ancestor has
// been confirmed
assert!(progress
.latest_unconfirmed_duplicate_ancestor(*slot)
.is_none());
} else {
assert!(!progress.is_duplicate_confirmed(*slot).unwrap());
// The unconfirmed duplicate flag has not been cleared on the larger
// slot's descendants because their most recent duplicate ancestor,
// `larger_duplicate_slot`, has not yet been confirmed
assert_eq!(
progress
.latest_unconfirmed_duplicate_ancestor(*slot)
.unwrap(),
larger_duplicate_slot
);
}
}
// Mark the larger duplicate slot as confirmed; all slots should no longer
// have any unconfirmed duplicate ancestors and should be marked as duplicate confirmed
progress.set_confirmed_duplicate_slot(
larger_duplicate_slot,
&ancestors.get(&larger_duplicate_slot).unwrap(),
&descendants.get(&larger_duplicate_slot).unwrap(),
);
for slot in bank_forks.read().unwrap().banks().keys() {
// All slots <= the latest duplicate confirmed slot are ancestors of
// that slot, so they should all be marked duplicate confirmed
assert_eq!(
progress.is_duplicate_confirmed(*slot).unwrap(),
*slot <= larger_duplicate_slot
);
assert!(progress
.latest_unconfirmed_duplicate_ancestor(*slot)
.is_none());
}
}
#[test]
fn test_set_unconfirmed_duplicate_confirm_larger_slot_first() {
let smaller_duplicate_slot = 1;
let larger_duplicate_slot = 4;
let (mut progress, bank_forks) = setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
smaller_duplicate_slot,
larger_duplicate_slot,
);
let descendants = bank_forks.read().unwrap().descendants().clone();
let ancestors = bank_forks.read().unwrap().ancestors();
// Mark the larger duplicate slot as confirmed
progress.set_confirmed_duplicate_slot(
larger_duplicate_slot,
&ancestors.get(&larger_duplicate_slot).unwrap(),
&descendants.get(&larger_duplicate_slot).unwrap(),
);
// All slots should no longer have any unconfirmed duplicate ancestors
progress.set_confirmed_duplicate_slot(
larger_duplicate_slot,
&ancestors.get(&larger_duplicate_slot).unwrap(),
&descendants.get(&larger_duplicate_slot).unwrap(),
);
for slot in bank_forks.read().unwrap().banks().keys() {
// All slots <= the latest duplicate confirmed slot are ancestors of
// that slot, so they should all be marked duplicate confirmed
assert_eq!(
progress.is_duplicate_confirmed(*slot).unwrap(),
*slot <= larger_duplicate_slot
);
assert!(progress
.latest_unconfirmed_duplicate_ancestor(*slot)
.is_none());
}
}
}

File diff suppressed because it is too large

View File

@@ -16,7 +16,7 @@ use crate::{
rpc_subscriptions::RpcSubscriptions,
window_service::{should_retransmit_and_persist, WindowService},
};
use crossbeam_channel::Receiver;
use crossbeam_channel::{Receiver, Sender};
use lru::LruCache;
use solana_client::rpc_response::SlotUpdate;
use solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats};
@@ -289,6 +289,33 @@ fn enable_turbine_retransmit_peers_patch(shred_slot: Slot, root_bank: &Bank) ->
}
}
// Drops shred slot leader from retransmit peers.
// TODO: decide which bank should be used here.
fn get_retransmit_peers(
self_pubkey: Pubkey,
shred_slot: Slot,
leader_schedule_cache: &LeaderScheduleCache,
bank: &Bank,
stakes_cache: &EpochStakesCache,
) -> Vec<(u64 /*stakes*/, usize /*index*/)> {
match leader_schedule_cache.slot_leader_at(shred_slot, Some(bank)) {
None => {
error!("unknown leader for shred slot");
stakes_cache.stakes_and_index.clone()
}
Some(pubkey) if pubkey == self_pubkey => {
error!("retransmit from slot leader: {}", pubkey);
stakes_cache.stakes_and_index.clone()
}
Some(pubkey) => stakes_cache
.stakes_and_index
.iter()
.filter(|(_, i)| stakes_cache.peers[*i].id != pubkey)
.copied()
.collect(),
}
}
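
A plain-data sketch of the filtering above (hypothetical `drop_leader` helper; `(stake, index)` pairs and a string peer list stand in for the epoch-stakes cache): the slot leader's entry is removed so its own shreds are not retransmitted back to it, while an unknown leader or a self leader falls back to the full peer set.

fn drop_leader(
    stakes_and_index: &[(u64, usize)],
    peer_ids: &[&str],
    leader: Option<&str>,
    self_id: &str,
) -> Vec<(u64, usize)> {
    match leader {
        None => stakes_and_index.to_vec(),
        Some(id) if id == self_id => stakes_and_index.to_vec(),
        Some(id) => stakes_and_index
            .iter()
            .filter(|(_, i)| peer_ids[*i] != id)
            .copied()
            .collect(),
    }
}

fn main() {
    let peers = ["alice", "bob", "carol"];
    let stakes = [(300, 0), (200, 1), (100, 2)];
    // The leader ("bob") is dropped from the retransmit set.
    assert_eq!(drop_leader(&stakes, &peers, Some("bob"), "me"), vec![(300, 0), (100, 2)]);
    // An unknown leader falls back to the full peer set.
    assert_eq!(drop_leader(&stakes, &peers, None, "me"), stakes.to_vec());
}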
#[allow(clippy::too_many_arguments)]
fn retransmit(
bank_forks: &RwLock<BankForks>,
@@ -390,10 +417,17 @@ fn retransmit(
}
let mut compute_turbine_peers = Measure::start("turbine_start");
let stakes_and_index = get_retransmit_peers(
my_id,
shred_slot,
leader_schedule_cache,
r_bank.deref(),
r_epoch_stakes_cache.deref(),
);
let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
&my_id,
&r_epoch_stakes_cache.peers,
&r_epoch_stakes_cache.stakes_and_index,
&stakes_and_index,
packet.meta.seed,
);
peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len());
@@ -432,15 +466,11 @@ fn retransmit(
.entry(packet.meta.addr().to_string())
.or_insert(0) += 1;
let leader =
leader_schedule_cache.slot_leader_at(packet.meta.slot, Some(r_bank.as_ref()));
let mut retransmit_time = Measure::start("retransmit_to");
if !packet.meta.forward {
ClusterInfo::retransmit_to(&neighbors, packet, leader, sock, true)?;
ClusterInfo::retransmit_to(&children, packet, leader, sock, false)?;
} else {
ClusterInfo::retransmit_to(&children, packet, leader, sock, true)?;
ClusterInfo::retransmit_to(&neighbors, packet, sock, true)?;
}
ClusterInfo::retransmit_to(&children, packet, sock, packet.meta.forward)?;
retransmit_time.stop();
retransmit_total += retransmit_time.as_us();
}
@@ -575,6 +605,7 @@ impl RetransmitStage {
completed_data_sets_sender: CompletedDataSetsSender,
max_slots: &Arc<MaxSlots>,
rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
duplicate_slots_sender: Sender<Slot>,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();
@@ -636,6 +667,7 @@ impl RetransmitStage {
cluster_slots,
verified_vote_receiver,
completed_data_sets_sender,
duplicate_slots_sender,
);
let mut thread_hdls = t_retransmit;

View File

@@ -30,13 +30,17 @@ use solana_client::{
TokenAccountsFilter, DELINQUENT_VALIDATOR_SLOT_DISTANCE, MAX_GET_CONFIRMED_BLOCKS_RANGE,
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT,
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE, MAX_GET_PROGRAM_ACCOUNT_FILTERS,
MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, MAX_MULTIPLE_ACCOUNTS, NUM_LARGEST_ACCOUNTS,
MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, MAX_GET_SLOT_LEADERS, MAX_MULTIPLE_ACCOUNTS,
NUM_LARGEST_ACCOUNTS,
},
rpc_response::Response as RpcResponse,
rpc_response::*,
};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_ledger::{blockstore::Blockstore, blockstore_db::BlockstoreError, get_tmp_ledger_path};
use solana_ledger::{
blockstore::Blockstore, blockstore_db::BlockstoreError, get_tmp_ledger_path,
leader_schedule_cache::LeaderScheduleCache,
};
use solana_metrics::inc_new_counter_info;
use solana_perf::packet::PACKET_DATA_SIZE;
use solana_runtime::{
@@ -66,8 +70,8 @@ use solana_sdk::{
};
use solana_stake_program::stake_state::StakeState;
use solana_transaction_status::{
EncodedConfirmedBlock, EncodedConfirmedTransaction, TransactionConfirmationStatus,
TransactionStatus, UiTransactionEncoding,
EncodedConfirmedTransaction, TransactionConfirmationStatus, TransactionStatus,
UiConfirmedBlock, UiTransactionEncoding,
};
use solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY};
use spl_token_v2_0::{
@@ -80,7 +84,7 @@ use std::{
net::SocketAddr,
str::FromStr,
sync::{
atomic::{AtomicBool, Ordering},
atomic::{AtomicBool, AtomicU64, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, Mutex, RwLock,
},
@@ -96,7 +100,7 @@ fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
Response { context, value }
}
pub fn is_confirmed_rooted(
fn is_finalized(
block_commitment_cache: &BlockCommitmentCache,
bank: &Bank,
blockstore: &Blockstore,
@@ -139,6 +143,8 @@ pub struct JsonRpcRequestProcessor {
optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>,
largest_accounts_cache: Arc<RwLock<LargestAccountsCache>>,
max_slots: Arc<MaxSlots>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
max_complete_transaction_status_slot: Arc<AtomicU64>,
}
impl Metadata for JsonRpcRequestProcessor {}
@@ -223,6 +229,8 @@ impl JsonRpcRequestProcessor {
optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>,
largest_accounts_cache: Arc<RwLock<LargestAccountsCache>>,
max_slots: Arc<MaxSlots>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
max_complete_transaction_status_slot: Arc<AtomicU64>,
) -> (Self, Receiver<TransactionInfo>) {
let (sender, receiver) = channel();
(
@@ -242,6 +250,8 @@ impl JsonRpcRequestProcessor {
optimistically_confirmed_bank,
largest_accounts_cache,
max_slots,
leader_schedule_cache,
max_complete_transaction_status_slot,
},
receiver,
)
@@ -283,6 +293,8 @@ impl JsonRpcRequestProcessor {
})),
largest_accounts_cache: Arc::new(RwLock::new(LargestAccountsCache::new(30))),
max_slots: Arc::new(MaxSlots::default()),
leader_schedule_cache: Arc::new(LeaderScheduleCache::new_from_bank(bank)),
max_complete_transaction_status_slot: Arc::new(AtomicU64::default()),
}
}
@@ -720,52 +732,83 @@ impl JsonRpcRequestProcessor {
&self,
slot: Slot,
config: Option<RpcEncodingConfigWrapper<RpcConfirmedBlockConfig>>,
) -> Result<Option<EncodedConfirmedBlock>> {
let config = config
.map(|config| config.convert_to_current())
.unwrap_or_default();
let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Json);
if self.config.enable_rpc_transaction_history
&& slot
) -> Result<Option<UiConfirmedBlock>> {
if self.config.enable_rpc_transaction_history {
let config = config
.map(|config| config.convert_to_current())
.unwrap_or_default();
let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Json);
let transaction_details = config.transaction_details.unwrap_or_default();
let show_rewards = config.rewards.unwrap_or(true);
let commitment = config.commitment.unwrap_or_default();
check_is_at_least_confirmed(commitment)?;
// Block is old enough to be finalized
if slot
<= self
.block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root()
{
let result = self.blockstore.get_confirmed_block(slot, true);
self.check_blockstore_root(&result, slot)?;
if result.is_err() {
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
let bigtable_result = self
.runtime
.block_on(bigtable_ledger_storage.get_confirmed_block(slot));
self.check_bigtable_result(&bigtable_result)?;
return Ok(bigtable_result
.ok()
.map(|confirmed_block| confirmed_block.encode(encoding)));
{
let result = self.blockstore.get_rooted_block(slot, true);
self.check_blockstore_root(&result, slot)?;
if result.is_err() {
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
let bigtable_result = self
.runtime
.block_on(bigtable_ledger_storage.get_confirmed_block(slot));
self.check_bigtable_result(&bigtable_result)?;
return Ok(bigtable_result.ok().map(|confirmed_block| {
confirmed_block.configure(encoding, transaction_details, show_rewards)
}));
}
}
self.check_slot_cleaned_up(&result, slot)?;
return Ok(result.ok().map(|confirmed_block| {
confirmed_block.configure(encoding, transaction_details, show_rewards)
}));
} else if commitment.is_confirmed() {
// Check if block is confirmed
let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed()));
if confirmed_bank.status_cache_ancestors().contains(&slot)
&& slot
<= self
.max_complete_transaction_status_slot
.load(Ordering::SeqCst)
{
let result = self.blockstore.get_complete_block(slot, true);
return Ok(result.ok().map(|confirmed_block| {
confirmed_block.configure(encoding, transaction_details, show_rewards)
}));
}
}
self.check_slot_cleaned_up(&result, slot)?;
Ok(result
.ok()
.map(|confirmed_block| confirmed_block.encode(encoding)))
} else {
Err(RpcCustomError::BlockNotAvailable { slot }.into())
}
Err(RpcCustomError::BlockNotAvailable { slot }.into())
}
pub fn get_confirmed_blocks(
&self,
start_slot: Slot,
end_slot: Option<Slot>,
commitment: Option<CommitmentConfig>,
) -> Result<Vec<Slot>> {
let commitment = commitment.unwrap_or_default();
check_is_at_least_confirmed(commitment)?;
let highest_confirmed_root = self
.block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root();
let end_slot = min(
end_slot.unwrap_or(std::u64::MAX),
self.block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root(),
end_slot.unwrap_or_else(|| start_slot.saturating_add(MAX_GET_CONFIRMED_BLOCKS_RANGE)),
if commitment.is_finalized() {
highest_confirmed_root
} else {
self.bank(Some(CommitmentConfig::confirmed())).slot()
},
);
if end_slot < start_slot {
return Ok(vec![]);
@@ -780,7 +823,8 @@ impl JsonRpcRequestProcessor {
let lowest_blockstore_slot = self.blockstore.lowest_slot();
if start_slot < lowest_blockstore_slot {
// If the starting slot is lower than what's available in blockstore assume the entire
// [start_slot..end_slot] can be fetched from BigTable.
// [start_slot..end_slot] can be fetched from BigTable. This range should not ever run
// into unfinalized confirmed blocks due to MAX_GET_CONFIRMED_BLOCKS_RANGE
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
return self
.runtime
@@ -801,19 +845,38 @@ impl JsonRpcRequestProcessor {
}
}
Ok(self
// Finalized blocks
let mut blocks: Vec<_> = self
.blockstore
.rooted_slot_iterator(max(start_slot, lowest_blockstore_slot))
.map_err(|_| Error::internal_error())?
.filter(|&slot| slot <= end_slot)
.collect())
.filter(|&slot| slot <= end_slot && slot <= highest_confirmed_root)
.collect();
let last_element = blocks.last().cloned().unwrap_or_default();
// Maybe add confirmed blocks
if commitment.is_confirmed() && last_element < end_slot {
let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed()));
let mut confirmed_blocks = confirmed_bank
.status_cache_ancestors()
.into_iter()
.filter(|&slot| slot <= end_slot && slot > last_element)
.collect();
blocks.append(&mut confirmed_blocks);
}
Ok(blocks)
}
pub fn get_confirmed_blocks_with_limit(
&self,
start_slot: Slot,
limit: usize,
commitment: Option<CommitmentConfig>,
) -> Result<Vec<Slot>> {
let commitment = commitment.unwrap_or_default();
check_is_at_least_confirmed(commitment)?;
if limit > MAX_GET_CONFIRMED_BLOCKS_RANGE as usize {
return Err(Error::invalid_params(format!(
"Limit too large; max {}",
@@ -825,7 +888,8 @@ impl JsonRpcRequestProcessor {
if start_slot < lowest_blockstore_slot {
// If the starting slot is lower than what's available in blockstore assume the entire
// range can be fetched from BigTable.
// range can be fetched from BigTable. This range should not ever run into unfinalized
// confirmed blocks due to MAX_GET_CONFIRMED_BLOCKS_RANGE
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
return Ok(self
.runtime
@@ -834,12 +898,35 @@ impl JsonRpcRequestProcessor {
}
}
Ok(self
let highest_confirmed_root = self
.block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root();
// Finalized blocks
let mut blocks: Vec<_> = self
.blockstore
.rooted_slot_iterator(max(start_slot, lowest_blockstore_slot))
.map_err(|_| Error::internal_error())?
.take(limit)
.collect())
.filter(|&slot| slot <= highest_confirmed_root)
.collect();
// Maybe add confirmed blocks
if commitment.is_confirmed() && blocks.len() < limit {
let last_element = blocks.last().cloned().unwrap_or_default();
let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed()));
let mut confirmed_blocks = confirmed_bank
.status_cache_ancestors()
.into_iter()
.filter(|&slot| slot > last_element)
.collect();
blocks.append(&mut confirmed_blocks);
blocks.truncate(limit);
}
Ok(blocks)
}
pub fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
@@ -866,7 +953,12 @@ impl JsonRpcRequestProcessor {
self.check_slot_cleaned_up(&result, slot)?;
Ok(result.ok().unwrap_or(None))
} else {
Err(RpcCustomError::BlockNotAvailable { slot }.into())
let r_bank_forks = self.bank_forks.read().unwrap();
if let Some(bank) = r_bank_forks.get(slot) {
Ok(Some(bank.clock().unix_timestamp))
} else {
Err(RpcCustomError::BlockNotAvailable { slot }.into())
}
}
}
@@ -913,7 +1005,7 @@ impl JsonRpcRequestProcessor {
Some(status)
} else if self.config.enable_rpc_transaction_history && search_transaction_history {
self.blockstore
.get_transaction_status(signature)
.get_transaction_status(signature, true)
.map_err(|_| Error::internal_error())?
.filter(|(slot, _status_meta)| {
slot <= &self
@@ -963,7 +1055,7 @@ impl JsonRpcRequestProcessor {
optimistically_confirmed_bank.get_signature_status_slot(&signature);
let confirmations = if r_block_commitment_cache.root() >= slot
&& is_confirmed_rooted(&r_block_commitment_cache, bank, &self.blockstore, slot)
&& is_finalized(&r_block_commitment_cache, bank, &self.blockstore, slot)
{
None
} else {
@@ -991,18 +1083,30 @@ impl JsonRpcRequestProcessor {
&self,
signature: Signature,
config: Option<RpcEncodingConfigWrapper<RpcConfirmedTransactionConfig>>,
) -> Option<EncodedConfirmedTransaction> {
) -> Result<Option<EncodedConfirmedTransaction>> {
let config = config
.map(|config| config.convert_to_current())
.unwrap_or_default();
let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Json);
let commitment = config.commitment.unwrap_or_default();
check_is_at_least_confirmed(commitment)?;
if self.config.enable_rpc_transaction_history {
match self
.blockstore
.get_confirmed_transaction(signature)
.get_complete_transaction(signature)
.unwrap_or(None)
{
Some(confirmed_transaction) => {
if commitment.is_confirmed() {
let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed()));
if confirmed_bank
.status_cache_ancestors()
.contains(&confirmed_transaction.slot)
{
return Ok(Some(confirmed_transaction.encode(encoding)));
}
}
if confirmed_transaction.slot
<= self
.block_commitment_cache
@@ -1010,21 +1114,21 @@ impl JsonRpcRequestProcessor {
.unwrap()
.highest_confirmed_root()
{
return Some(confirmed_transaction.encode(encoding));
return Ok(Some(confirmed_transaction.encode(encoding)));
}
}
None => {
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
return self
return Ok(self
.runtime
.block_on(bigtable_ledger_storage.get_confirmed_transaction(&signature))
.unwrap_or(None)
.map(|confirmed| confirmed.encode(encoding));
.map(|confirmed| confirmed.encode(encoding)));
}
}
}
}
None
Ok(None)
}
pub fn get_confirmed_signatures_for_address(
@@ -1155,9 +1259,25 @@ impl JsonRpcRequestProcessor {
let stake_state: StakeState = stake_account
.state()
.map_err(|_| Error::invalid_params("Invalid param: not a stake account".to_string()))?;
let delegation = stake_state.delegation().ok_or_else(|| {
Error::invalid_params("Invalid param: stake account has not been delegated".to_string())
})?;
let delegation = stake_state.delegation();
if delegation.is_none() {
match stake_state.meta() {
None => {
return Err(Error::invalid_params(
"Invalid param: stake account not initialized".to_string(),
));
}
Some(meta) => {
let rent_exempt_reserve = meta.rent_exempt_reserve;
return Ok(RpcStakeActivation {
state: StakeActivationState::Inactive,
active: 0,
inactive: stake_account.lamports().saturating_sub(rent_exempt_reserve),
});
}
}
}
let delegation = delegation.unwrap();
let stake_history_account = bank
.get_account(&stake_history::id())
@@ -1537,6 +1657,15 @@ fn verify_token_account_filter(
}
}
fn check_is_at_least_confirmed(commitment: CommitmentConfig) -> Result<()> {
if !commitment.is_at_least_confirmed() {
return Err(Error::invalid_params(
"Method does not support commitment below `confirmed`",
));
}
Ok(())
}
fn check_slice_and_encoding(encoding: &UiAccountEncoding, data_slice_is_some: bool) -> Result<()> {
match encoding {
UiAccountEncoding::JsonParsed => {
@@ -1953,29 +2082,14 @@ pub mod rpc_minimal {
debug!("get_leader_schedule rpc request received: {:?}", slot);
Ok(
solana_ledger::leader_schedule_utils::leader_schedule(epoch, &bank).map(
|leader_schedule| {
let mut leader_schedule_by_identity = HashMap::new();
for (slot_index, identity_pubkey) in
leader_schedule.get_slot_leaders().iter().enumerate()
{
leader_schedule_by_identity
.entry(identity_pubkey)
.or_insert_with(Vec::new)
.push(slot_index);
}
leader_schedule_by_identity
.into_iter()
.map(|(identity_pubkey, slot_indices)| {
(identity_pubkey.to_string(), slot_indices)
})
.collect()
},
),
)
Ok(meta
.leader_schedule_cache
.get_epoch_leader_schedule(epoch)
.map(|leader_schedule| {
solana_ledger::leader_schedule_utils::leader_schedule_by_identity(
leader_schedule.get_slot_leaders().iter().enumerate(),
)
}))
}
}
}
@@ -2176,6 +2290,14 @@ pub mod rpc_full {
commitment: Option<CommitmentConfig>,
) -> Result<String>;
#[rpc(meta, name = "getSlotLeaders")]
fn get_slot_leaders(
&self,
meta: Self::Metadata,
start_slot: Slot,
limit: u64,
) -> Result<Vec<String>>;
#[rpc(meta, name = "minimumLedgerSlot")]
fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot>;
@@ -2185,7 +2307,7 @@ pub mod rpc_full {
meta: Self::Metadata,
slot: Slot,
config: Option<RpcEncodingConfigWrapper<RpcConfirmedBlockConfig>>,
) -> Result<Option<EncodedConfirmedBlock>>;
) -> Result<Option<UiConfirmedBlock>>;
#[rpc(meta, name = "getBlockTime")]
fn get_block_time(&self, meta: Self::Metadata, slot: Slot)
@@ -2196,7 +2318,8 @@ pub mod rpc_full {
&self,
meta: Self::Metadata,
start_slot: Slot,
end_slot: Option<Slot>,
config: Option<RpcConfirmedBlocksConfigWrapper>,
commitment: Option<CommitmentConfig>,
) -> Result<Vec<Slot>>;
#[rpc(meta, name = "getConfirmedBlocksWithLimit")]
@@ -2205,6 +2328,7 @@ pub mod rpc_full {
meta: Self::Metadata,
start_slot: Slot,
limit: usize,
commitment: Option<CommitmentConfig>,
) -> Result<Vec<Slot>>;
#[rpc(meta, name = "getConfirmedTransaction")]
@@ -2783,6 +2907,56 @@ pub mod rpc_full {
Ok(meta.get_slot_leader(commitment))
}
fn get_slot_leaders(
&self,
meta: Self::Metadata,
start_slot: Slot,
limit: u64,
) -> Result<Vec<String>> {
debug!(
"get_slot_leaders rpc request received (start: {} limit: {})",
start_slot, limit
);
let limit = limit as usize;
if limit > MAX_GET_SLOT_LEADERS {
return Err(Error::invalid_params(format!(
"Invalid limit; max {}",
MAX_GET_SLOT_LEADERS
)));
}
let bank = meta.bank(None);
let (mut epoch, mut slot_index) =
bank.epoch_schedule().get_epoch_and_slot_index(start_slot);
let mut slot_leaders = Vec::with_capacity(limit);
while slot_leaders.len() < limit {
if let Some(leader_schedule) =
meta.leader_schedule_cache.get_epoch_leader_schedule(epoch)
{
slot_leaders.extend(
leader_schedule
.get_slot_leaders()
.iter()
.skip(slot_index as usize)
.take(limit.saturating_sub(slot_leaders.len()))
.map(|pubkey| pubkey.to_string()),
);
} else {
return Err(Error::invalid_params(format!(
"Invalid slot range: leader schedule for epoch {} is unavailable",
epoch
)));
}
epoch += 1;
slot_index = 0;
}
Ok(slot_leaders)
}
fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot> {
debug!("minimum_ledger_slot rpc request received");
meta.minimum_ledger_slot()
@@ -2793,7 +2967,7 @@ pub mod rpc_full {
meta: Self::Metadata,
slot: Slot,
config: Option<RpcEncodingConfigWrapper<RpcConfirmedBlockConfig>>,
) -> Result<Option<EncodedConfirmedBlock>> {
) -> Result<Option<UiConfirmedBlock>> {
debug!("get_confirmed_block rpc request received: {:?}", slot);
meta.get_confirmed_block(slot, config)
}
@@ -2802,13 +2976,16 @@ pub mod rpc_full {
&self,
meta: Self::Metadata,
start_slot: Slot,
end_slot: Option<Slot>,
config: Option<RpcConfirmedBlocksConfigWrapper>,
commitment: Option<CommitmentConfig>,
) -> Result<Vec<Slot>> {
let (end_slot, maybe_commitment) =
config.map(|config| config.unzip()).unwrap_or_default();
debug!(
"get_confirmed_blocks rpc request received: {}-{:?}",
start_slot, end_slot
);
meta.get_confirmed_blocks(start_slot, end_slot)
meta.get_confirmed_blocks(start_slot, end_slot, commitment.or(maybe_commitment))
}
fn get_confirmed_blocks_with_limit(
@@ -2816,12 +2993,13 @@ pub mod rpc_full {
meta: Self::Metadata,
start_slot: Slot,
limit: usize,
commitment: Option<CommitmentConfig>,
) -> Result<Vec<Slot>> {
debug!(
"get_confirmed_blocks_with_limit rpc request received: {}-{}",
start_slot, limit,
);
meta.get_confirmed_blocks_with_limit(start_slot, limit)
meta.get_confirmed_blocks_with_limit(start_slot, limit, commitment)
}
fn get_block_time(
@@ -2843,7 +3021,7 @@ pub mod rpc_full {
signature_str
);
let signature = verify_signature(&signature_str)?;
Ok(meta.get_confirmed_transaction(signature, config))
meta.get_confirmed_transaction(signature, config)
}
fn get_confirmed_signatures_for_address(
@@ -3119,7 +3297,8 @@ pub mod tests {
transaction::{self, TransactionError},
};
use solana_transaction_status::{
EncodedTransaction, EncodedTransactionWithStatusMeta, UiMessage,
EncodedConfirmedBlock, EncodedTransaction, EncodedTransactionWithStatusMeta,
TransactionDetails, UiMessage,
};
use solana_vote_program::{
vote_instruction,
@@ -3178,11 +3357,13 @@ pub mod tests {
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
bank.transfer(4, &alice, &keypair2.pubkey()).unwrap();
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
let confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
vec![&alice, &keypair1, &keypair2, &keypair3],
0,
bank.clone(),
blockstore.clone(),
max_complete_transaction_status_slot.clone(),
);
let mut commitment_slot0 = BlockCommitment::default();
@@ -3295,6 +3476,8 @@ pub mod tests {
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
Arc::new(RwLock::new(LargestAccountsCache::new(30))),
max_slots,
Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
max_complete_transaction_status_slot,
);
SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1);
@@ -3800,6 +3983,62 @@ pub mod tests {
assert_eq!(schedule, None);
}
#[test]
fn test_rpc_get_slot_leaders() {
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
// Test that slot leaders will be returned across epochs
let query_start = 0;
let query_limit = 2 * bank.epoch_schedule().slots_per_epoch;
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeaders", "params": [{}, {}]}}"#,
query_start, query_limit
);
let rep = io.handle_request_sync(&req, meta.clone());
let res: Response = serde_json::from_str(&rep.expect("actual response"))
.expect("actual response deserialization");
let slot_leaders: Vec<String> = if let Response::Single(res) = res {
if let Output::Success(res) = res {
serde_json::from_value(res.result).unwrap()
} else {
panic!("Expected success for {} but received: {:?}", req, res);
}
} else {
panic!("Expected single response");
};
assert_eq!(slot_leaders.len(), query_limit as usize);
// Test that invalid limit returns an error
let query_start = 0;
let query_limit = 5001;
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeaders", "params": [{}, {}]}}"#,
query_start, query_limit
);
let rep = io.handle_request_sync(&req, meta.clone());
let res: Value = serde_json::from_str(&rep.expect("actual response"))
.expect("actual response deserialization");
assert!(res.get("error").is_some());
// Test that invalid epoch returns an error
let query_start = 2 * bank.epoch_schedule().slots_per_epoch;
let query_limit = 10;
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeaders", "params": [{}, {}]}}"#,
query_start, query_limit
);
let rep = io.handle_request_sync(&req, meta);
let res: Value = serde_json::from_str(&rep.expect("actual response"))
.expect("actual response deserialization");
assert!(res.get("error").is_some());
}
#[test]
fn test_rpc_get_account_info() {
let bob_pubkey = solana_sdk::pubkey::new_rand();
@@ -4704,6 +4943,8 @@ pub mod tests {
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
Arc::new(RwLock::new(LargestAccountsCache::new(30))),
Arc::new(MaxSlots::default()),
Arc::new(LeaderScheduleCache::default()),
Arc::new(AtomicU64::default()),
);
SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1);
@@ -4979,6 +5220,8 @@ pub mod tests {
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
Arc::new(RwLock::new(LargestAccountsCache::new(30))),
Arc::new(MaxSlots::default()),
Arc::new(LeaderScheduleCache::default()),
Arc::new(AtomicU64::default()),
);
SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1);
assert_eq!(
@@ -5077,6 +5320,7 @@ pub mod tests {
serde_json::from_value(result["result"].clone()).unwrap();
let confirmed_block = confirmed_block.unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
assert_eq!(confirmed_block.rewards, vec![]);
for EncodedTransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
@@ -5121,6 +5365,7 @@ pub mod tests {
serde_json::from_value(result["result"].clone()).unwrap();
let confirmed_block = confirmed_block.unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
assert_eq!(confirmed_block.rewards, vec![]);
for EncodedTransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
@@ -5156,6 +5401,57 @@ pub mod tests {
}
}
#[test]
fn test_get_confirmed_block_config() {
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
confirmed_block_signatures,
..
} = start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlock","params":[0,{}]}}"#,
json!(RpcConfirmedBlockConfig {
encoding: None,
transaction_details: Some(TransactionDetails::Signatures),
rewards: Some(false),
commitment: None,
})
);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let confirmed_block: Option<UiConfirmedBlock> =
serde_json::from_value(result["result"].clone()).unwrap();
let confirmed_block = confirmed_block.unwrap();
assert!(confirmed_block.transactions.is_none());
assert!(confirmed_block.rewards.is_none());
for (i, signature) in confirmed_block.signatures.unwrap()[..2].iter().enumerate() {
assert_eq!(*signature, confirmed_block_signatures[i].to_string());
}
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlock","params":[0,{}]}}"#,
json!(RpcConfirmedBlockConfig {
encoding: None,
transaction_details: Some(TransactionDetails::None),
rewards: Some(true),
commitment: None,
})
);
let res = io.handle_request_sync(&req, meta);
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let confirmed_block: Option<UiConfirmedBlock> =
serde_json::from_value(result["result"].clone()).unwrap();
let confirmed_block = confirmed_block.unwrap();
assert!(confirmed_block.transactions.is_none());
assert!(confirmed_block.signatures.is_none());
assert_eq!(confirmed_block.rewards.unwrap(), vec![]);
}
#[test]
fn test_get_confirmed_blocks() {
let bob_pubkey = solana_sdk::pubkey::new_rand();
@@ -5547,7 +5843,7 @@ pub mod tests {
}
#[test]
fn test_is_confirmed_rooted() {
fn test_is_finalized() {
let bank = Arc::new(Bank::default());
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
@@ -5575,25 +5871,15 @@ pub mod tests {
},
);
assert!(is_confirmed_rooted(
&block_commitment_cache,
&bank,
&blockstore,
0
));
assert!(is_confirmed_rooted(
&block_commitment_cache,
&bank,
&blockstore,
1
));
assert!(!is_confirmed_rooted(
assert!(is_finalized(&block_commitment_cache, &bank, &blockstore, 0));
assert!(is_finalized(&block_commitment_cache, &bank, &blockstore, 1));
assert!(!is_finalized(
&block_commitment_cache,
&bank,
&blockstore,
2
));
assert!(!is_confirmed_rooted(
assert!(!is_finalized(
&block_commitment_cache,
&bank,
&blockstore,
@@ -6217,6 +6503,8 @@ pub mod tests {
optimistically_confirmed_bank.clone(),
Arc::new(RwLock::new(LargestAccountsCache::new(30))),
Arc::new(MaxSlots::default()),
Arc::new(LeaderScheduleCache::default()),
Arc::new(AtomicU64::default()),
);
let mut io = MetaIoHandler::default();

View File

@@ -18,7 +18,7 @@ use jsonrpc_http_server::{
};
use regex::Regex;
use solana_client::rpc_cache::LargestAccountsCache;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::{blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache};
use solana_metrics::inc_new_counter_info;
use solana_runtime::{
bank_forks::{BankForks, SnapshotConfig},
@@ -30,7 +30,7 @@ use std::{
collections::HashSet,
net::SocketAddr,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, Ordering},
sync::atomic::{AtomicBool, AtomicU64, Ordering},
sync::{mpsc::channel, Arc, Mutex, RwLock},
thread::{self, Builder, JoinHandle},
};
@@ -275,6 +275,8 @@ impl JsonRpcService {
send_transaction_retry_ms: u64,
send_transaction_leader_forward_count: u64,
max_slots: Arc<MaxSlots>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
current_transaction_status_slot: Arc<AtomicU64>,
) -> Self {
info!("rpc bound to {:?}", rpc_addr);
info!("rpc configuration: {:?}", config);
@@ -354,6 +356,8 @@ impl JsonRpcService {
optimistically_confirmed_bank,
largest_accounts_cache,
max_slots,
leader_schedule_cache,
current_transaction_status_slot,
);
let leader_info =
@@ -518,6 +522,8 @@ mod tests {
1000,
1,
Arc::new(MaxSlots::default()),
Arc::new(LeaderScheduleCache::default()),
Arc::new(AtomicU64::default()),
);
let thread = rpc_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");

View File

@@ -801,7 +801,7 @@ mod test {
);
let bank = Arc::new(Bank::new(&genesis_config));
let (poh_recorder, _entry_receiver) = PohRecorder::new(
let (poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
0,
bank.last_blockhash(),
0,

View File

@@ -236,7 +236,16 @@ mod tests {
let mut stats = ShredFetchStats::default();
let slot = 1;
let shred = Shred::new_from_data(slot, 3, 0, None, true, true, 0, 0, 0);
let shred = Shred::new_from_data(
slot, 3, // shred index
0, // parent offset
None, // data
true, // is_last_in_fec_set
true, // is_last_in_slot
0, // reference_tick
0, // version
3, // fec_set_index
);
shred.copy_to_packet(&mut packet);
let hasher = PacketHasher::default();
@@ -256,8 +265,7 @@ mod tests {
);
assert!(!packet.meta.discard);
let coding =
solana_ledger::shred::Shredder::generate_coding_shreds(slot, 1.0f32, &[shred], 10, 1);
let coding = solana_ledger::shred::Shredder::generate_coding_shreds(1.0f32, &[shred], 1);
coding[0].copy_to_packet(&mut packet);
ShredFetchStage::process_packet(
&mut packet,

View File

@@ -16,6 +16,7 @@ use {
account::{Account, AccountSharedData},
clock::{Slot, DEFAULT_MS_PER_SLOT},
commitment_config::CommitmentConfig,
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
hash::Hash,
native_token::sol_to_lamports,
@@ -27,7 +28,7 @@ use {
collections::HashMap,
fs::remove_dir_all,
net::{IpAddr, Ipv4Addr, SocketAddr},
path::PathBuf,
path::{Path, PathBuf},
sync::{Arc, RwLock},
thread::sleep,
time::Duration,
@@ -52,6 +53,7 @@ pub struct TestValidatorGenesis {
no_bpf_jit: bool,
accounts: HashMap<Pubkey, AccountSharedData>,
programs: Vec<ProgramInfo>,
epoch_schedule: Option<EpochSchedule>,
pub validator_exit: Arc<RwLock<ValidatorExit>>,
pub start_progress: Arc<RwLock<ValidatorStartProgress>>,
}
@@ -62,11 +64,21 @@ impl TestValidatorGenesis {
self
}
/// Check if a given TestValidator ledger has already been initialized
pub fn ledger_exists(ledger_path: &Path) -> bool {
ledger_path.join("vote-account-keypair.json").exists()
}
pub fn fee_rate_governor(&mut self, fee_rate_governor: FeeRateGovernor) -> &mut Self {
self.fee_rate_governor = fee_rate_governor;
self
}
pub fn epoch_schedule(&mut self, epoch_schedule: EpochSchedule) -> &mut Self {
self.epoch_schedule = Some(epoch_schedule);
self
}
pub fn rent(&mut self, rent: Rent) -> &mut Self {
self.rent = rent;
self
@@ -308,12 +320,14 @@ impl TestValidator {
solana_sdk::genesis_config::ClusterType::Development,
accounts.into_iter().collect(),
);
genesis_config.epoch_schedule = solana_sdk::epoch_schedule::EpochSchedule::without_warmup();
genesis_config.epoch_schedule = config
.epoch_schedule
.unwrap_or_else(EpochSchedule::without_warmup);
let ledger_path = match &config.ledger_path {
None => create_new_tmp_ledger!(&genesis_config).0,
Some(ledger_path) => {
if ledger_path.join("validator-keypair.json").exists() {
if TestValidatorGenesis::ledger_exists(ledger_path) {
return Ok(ledger_path.to_path_buf());
}
@@ -338,6 +352,10 @@ impl TestValidator {
&validator_identity,
ledger_path.join("validator-keypair.json").to_str().unwrap(),
)?;
// `ledger_exists` should fail until the vote account keypair is written
assert!(!TestValidatorGenesis::ledger_exists(&ledger_path));
write_keypair_file(
&validator_vote_account,
ledger_path
@@ -404,6 +422,7 @@ impl TestValidator {
warp_slot: config.warp_slot,
bpf_jit: !config.no_bpf_jit,
validator_exit: config.validator_exit.clone(),
no_wait_for_vote_to_start_leader: true,
..ValidatorConfig::default()
};

View File

@@ -5,7 +5,10 @@ use crate::{
banking_stage::BankingStage,
broadcast_stage::{BroadcastStage, BroadcastStageType, RetransmitSlotsReceiver},
cluster_info::ClusterInfo,
cluster_info_vote_listener::{ClusterInfoVoteListener, VerifiedVoteSender, VoteTracker},
cluster_info_vote_listener::{
ClusterInfoVoteListener, GossipDuplicateConfirmedSlotsSender, VerifiedVoteSender,
VoteTracker,
},
fetch_stage::FetchStage,
optimistically_confirmed_bank_tracker::BankNotificationSender,
poh_recorder::{PohRecorder, WorkingBankEntry},
@@ -62,6 +65,7 @@ impl Tpu {
replay_vote_sender: ReplayVoteSender,
bank_notification_sender: Option<BankNotificationSender>,
tpu_coalesce_ms: u64,
cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender,
) -> Self {
let (packet_sender, packet_receiver) = channel();
let fetch_stage = FetchStage::new_with_sender(
@@ -95,6 +99,7 @@ impl Tpu {
replay_vote_receiver,
blockstore.clone(),
bank_notification_sender,
cluster_confirmed_slot_sender,
);
let banking_stage = BankingStage::new(

View File

@@ -1,6 +1,9 @@
use crossbeam_channel::{Receiver, RecvTimeoutError};
use itertools::izip;
use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusBatch};
use solana_ledger::{
blockstore::Blockstore,
blockstore_processor::{TransactionStatusBatch, TransactionStatusMessage},
};
use solana_runtime::{
bank::{Bank, InnerInstructionsList, NonceRollbackInfo, TransactionLogMessages},
transaction_utils::OrderedIterator,
@@ -8,7 +11,7 @@ use solana_runtime::{
use solana_transaction_status::{InnerInstructions, TransactionStatusMeta};
use std::{
sync::{
atomic::{AtomicBool, Ordering},
atomic::{AtomicBool, AtomicU64, Ordering},
Arc,
},
thread::{self, Builder, JoinHandle},
@@ -22,7 +25,8 @@ pub struct TransactionStatusService {
impl TransactionStatusService {
#[allow(clippy::new_ret_no_self)]
pub fn new(
write_transaction_status_receiver: Receiver<TransactionStatusBatch>,
write_transaction_status_receiver: Receiver<TransactionStatusMessage>,
max_complete_transaction_status_slot: Arc<AtomicU64>,
blockstore: Arc<Blockstore>,
exit: &Arc<AtomicBool>,
) -> Self {
@@ -35,6 +39,7 @@ impl TransactionStatusService {
}
if let Err(RecvTimeoutError::Disconnected) = Self::write_transaction_status_batch(
&write_transaction_status_receiver,
&max_complete_transaction_status_slot,
&blockstore,
) {
break;
@@ -45,97 +50,104 @@ impl TransactionStatusService {
}
fn write_transaction_status_batch(
write_transaction_status_receiver: &Receiver<TransactionStatusBatch>,
write_transaction_status_receiver: &Receiver<TransactionStatusMessage>,
max_complete_transaction_status_slot: &Arc<AtomicU64>,
blockstore: &Arc<Blockstore>,
) -> Result<(), RecvTimeoutError> {
let TransactionStatusBatch {
bank,
transactions,
iteration_order,
statuses,
balances,
token_balances,
inner_instructions,
transaction_logs,
} = write_transaction_status_receiver.recv_timeout(Duration::from_secs(1))?;
match write_transaction_status_receiver.recv_timeout(Duration::from_secs(1))? {
TransactionStatusMessage::Batch(TransactionStatusBatch {
bank,
transactions,
iteration_order,
statuses,
balances,
token_balances,
inner_instructions,
transaction_logs,
}) => {
let slot = bank.slot();
let inner_instructions_iter: Box<
dyn Iterator<Item = Option<InnerInstructionsList>>,
> = if let Some(inner_instructions) = inner_instructions {
Box::new(inner_instructions.into_iter())
} else {
Box::new(std::iter::repeat_with(|| None))
};
let transaction_logs_iter: Box<dyn Iterator<Item = TransactionLogMessages>> =
if let Some(transaction_logs) = transaction_logs {
Box::new(transaction_logs.into_iter())
} else {
Box::new(std::iter::repeat_with(Vec::new))
};
for (
(_, transaction),
(status, nonce_rollback),
pre_balances,
post_balances,
pre_token_balances,
post_token_balances,
inner_instructions,
log_messages,
) in izip!(
OrderedIterator::new(&transactions, iteration_order.as_deref()),
statuses,
balances.pre_balances,
balances.post_balances,
token_balances.pre_token_balances,
token_balances.post_token_balances,
inner_instructions_iter,
transaction_logs_iter
) {
if Bank::can_commit(&status) && !transaction.signatures.is_empty() {
let fee_calculator = nonce_rollback
.map(|nonce_rollback| nonce_rollback.fee_calculator())
.unwrap_or_else(|| {
bank.get_fee_calculator(&transaction.message().recent_blockhash)
})
.expect("FeeCalculator must exist");
let fee = fee_calculator.calculate_fee(transaction.message());
let (writable_keys, readonly_keys) =
transaction.message.get_account_keys_by_lock_type();
let slot = bank.slot();
let inner_instructions_iter: Box<dyn Iterator<Item = Option<InnerInstructionsList>>> =
if let Some(inner_instructions) = inner_instructions {
Box::new(inner_instructions.into_iter())
} else {
Box::new(std::iter::repeat_with(|| None))
};
let transaction_logs_iter: Box<dyn Iterator<Item = TransactionLogMessages>> =
if let Some(transaction_logs) = transaction_logs {
Box::new(transaction_logs.into_iter())
} else {
Box::new(std::iter::repeat_with(Vec::new))
};
for (
(_, transaction),
(status, nonce_rollback),
pre_balances,
post_balances,
pre_token_balances,
post_token_balances,
inner_instructions,
log_messages,
) in izip!(
OrderedIterator::new(&transactions, iteration_order.as_deref()),
statuses,
balances.pre_balances,
balances.post_balances,
token_balances.pre_token_balances,
token_balances.post_token_balances,
inner_instructions_iter,
transaction_logs_iter
) {
if Bank::can_commit(&status) && !transaction.signatures.is_empty() {
let fee_calculator = nonce_rollback
.map(|nonce_rollback| nonce_rollback.fee_calculator())
.unwrap_or_else(|| {
bank.get_fee_calculator(&transaction.message().recent_blockhash)
})
.expect("FeeCalculator must exist");
let fee = fee_calculator.calculate_fee(transaction.message());
let (writable_keys, readonly_keys) =
transaction.message.get_account_keys_by_lock_type();
let inner_instructions = inner_instructions.map(|inner_instructions| {
inner_instructions
.into_iter()
.enumerate()
.map(|(index, instructions)| InnerInstructions {
index: index as u8,
instructions,
})
.filter(|i| !i.instructions.is_empty())
.collect()
});
let inner_instructions = inner_instructions.map(|inner_instructions| {
inner_instructions
.into_iter()
.enumerate()
.map(|(index, instructions)| InnerInstructions {
index: index as u8,
instructions,
})
.filter(|i| !i.instructions.is_empty())
.collect()
});
let log_messages = Some(log_messages);
let pre_token_balances = Some(pre_token_balances);
let post_token_balances = Some(post_token_balances);
let log_messages = Some(log_messages);
let pre_token_balances = Some(pre_token_balances);
let post_token_balances = Some(post_token_balances);
blockstore
.write_transaction_status(
slot,
transaction.signatures[0],
writable_keys,
readonly_keys,
TransactionStatusMeta {
status,
fee,
pre_balances,
post_balances,
inner_instructions,
log_messages,
pre_token_balances,
post_token_balances,
},
)
.expect("Expect database write to succeed");
blockstore
.write_transaction_status(
slot,
transaction.signatures[0],
writable_keys,
readonly_keys,
TransactionStatusMeta {
status,
fee,
pre_balances,
post_balances,
inner_instructions,
log_messages,
pre_token_balances,
post_token_balances,
},
)
.expect("Expect database write to succeed");
}
}
}
TransactionStatusMessage::Freeze(slot) => {
max_complete_transaction_status_slot.fetch_max(slot, Ordering::SeqCst);
}
}
Ok(())

View File

@@ -6,7 +6,9 @@ use crate::{
broadcast_stage::RetransmitSlotsSender,
cache_block_time_service::CacheBlockTimeSender,
cluster_info::ClusterInfo,
cluster_info_vote_listener::{VerifiedVoteReceiver, VoteTracker},
cluster_info_vote_listener::{
GossipDuplicateConfirmedSlotsReceiver, VerifiedVoteReceiver, VoteTracker,
},
cluster_slots::ClusterSlots,
completed_data_sets_service::CompletedDataSetsSender,
consensus::Tower,
@@ -84,6 +86,7 @@ pub struct TvuConfig {
pub use_index_hash_calculation: bool,
pub rocksdb_compaction_interval: Option<u64>,
pub rocksdb_max_compaction_jitter: Option<u64>,
pub wait_for_vote_to_start_leader: bool,
}
impl Tvu {
@@ -120,6 +123,7 @@ impl Tvu {
replay_vote_sender: ReplayVoteSender,
completed_data_sets_sender: CompletedDataSetsSender,
bank_notification_sender: Option<BankNotificationSender>,
gossip_confirmed_slots_receiver: GossipDuplicateConfirmedSlotsReceiver,
tvu_config: TvuConfig,
max_slots: &Arc<MaxSlots>,
) -> Self {
@@ -159,6 +163,7 @@ impl Tvu {
let (duplicate_slots_reset_sender, duplicate_slots_reset_receiver) = unbounded();
let compaction_interval = tvu_config.rocksdb_compaction_interval;
let max_compaction_jitter = tvu_config.rocksdb_max_compaction_jitter;
let (duplicate_slots_sender, duplicate_slots_receiver) = unbounded();
let retransmit_stage = RetransmitStage::new(
bank_forks.clone(),
leader_schedule_cache,
@@ -179,6 +184,7 @@ impl Tvu {
completed_data_sets_sender,
max_slots,
Some(subscriptions.clone()),
duplicate_slots_sender,
);
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();
@@ -254,6 +260,7 @@ impl Tvu {
rewards_recorder_sender,
cache_block_time_sender,
bank_notification_sender,
wait_for_vote_to_start_leader: tvu_config.wait_for_vote_to_start_leader,
};
let replay_stage = ReplayStage::new(
@@ -262,6 +269,7 @@ impl Tvu {
bank_forks.clone(),
cluster_info.clone(),
ledger_signal_receiver,
duplicate_slots_receiver,
poh_recorder.clone(),
tower,
vote_tracker,
@@ -269,6 +277,7 @@ impl Tvu {
retransmit_slots_sender,
duplicate_slots_reset_receiver,
replay_vote_sender,
gossip_confirmed_slots_receiver,
);
let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| {
@@ -371,6 +380,7 @@ pub mod tests {
let (_verified_vote_sender, verified_vote_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let (completed_data_sets_sender, _completed_data_sets_receiver) = unbounded();
let (_, gossip_confirmed_slots_receiver) = unbounded();
let bank_forks = Arc::new(RwLock::new(bank_forks));
let tower = Tower::new_with_key(&target1_keypair.pubkey());
let tvu = Tvu::new(
@@ -411,6 +421,7 @@ pub mod tests {
replay_vote_sender,
completed_data_sets_sender,
None,
gossip_confirmed_slots_receiver,
TvuConfig::default(),
&Arc::new(MaxSlots::default()),
);

View File

@@ -70,7 +70,7 @@ use std::{
net::SocketAddr,
ops::Deref,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, Ordering},
sync::atomic::{AtomicBool, AtomicU64, Ordering},
sync::mpsc::Receiver,
sync::{Arc, Mutex, RwLock},
thread::sleep,
@@ -130,6 +130,7 @@ pub struct ValidatorConfig {
pub accounts_db_use_index_hash_calculation: bool,
pub tpu_coalesce_ms: u64,
pub validator_exit: Arc<RwLock<ValidatorExit>>,
pub no_wait_for_vote_to_start_leader: bool,
}
impl Default for ValidatorConfig {
@@ -184,6 +185,7 @@ impl Default for ValidatorConfig {
accounts_db_use_index_hash_calculation: true,
tpu_coalesce_ms: DEFAULT_TPU_COALESCE_MS,
validator_exit: Arc::new(RwLock::new(ValidatorExit::default())),
no_wait_for_vote_to_start_leader: true,
}
}
}
@@ -247,6 +249,7 @@ impl fmt::Debug for ValidatorExit {
struct TransactionHistoryServices {
transaction_status_sender: Option<TransactionStatusSender>,
transaction_status_service: Option<TransactionStatusService>,
max_complete_transaction_status_slot: Arc<AtomicU64>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
rewards_recorder_service: Option<RewardsRecorderService>,
cache_block_time_sender: Option<CacheBlockTimeSender>,
@@ -382,6 +385,7 @@ impl Validator {
TransactionHistoryServices {
transaction_status_sender,
transaction_status_service,
max_complete_transaction_status_slot,
rewards_recorder_sender,
rewards_recorder_service,
cache_block_time_sender,
@@ -486,24 +490,25 @@ impl Validator {
);
let poh_config = Arc::new(genesis_config.poh_config.clone());
let (mut poh_recorder, entry_receiver) = PohRecorder::new_with_clear_signal(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
leader_schedule_cache.next_leader_slot(
&id,
let (mut poh_recorder, entry_receiver, record_receiver) =
PohRecorder::new_with_clear_signal(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
&bank,
Some(&blockstore),
GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
),
bank.ticks_per_slot(),
&id,
&blockstore,
blockstore.new_shreds_signals.first().cloned(),
&leader_schedule_cache,
&poh_config,
);
leader_schedule_cache.next_leader_slot(
&id,
bank.slot(),
&bank,
Some(&blockstore),
GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
),
bank.ticks_per_slot(),
&id,
&blockstore,
blockstore.new_shreds_signals.first().cloned(),
&leader_schedule_cache,
&poh_config,
);
if config.snapshot_config.is_some() {
poh_recorder.set_bank(&bank);
}
@@ -541,6 +546,8 @@ impl Validator {
config.send_transaction_retry_ms,
config.send_transaction_leader_forward_count,
max_slots.clone(),
leader_schedule_cache.clone(),
max_complete_transaction_status_slot,
)),
if config.rpc_config.minimal_api {
None
@@ -627,15 +634,20 @@ impl Validator {
check_poh_speed(&genesis_config, None);
}
if wait_for_supermajority(
let waited_for_supermajority = if let Ok(waited) = wait_for_supermajority(
config,
&bank,
&cluster_info,
rpc_override_health_check,
&start_progress,
) {
waited
} else {
abort();
}
};
let wait_for_vote_to_start_leader =
!waited_for_supermajority && !config.no_wait_for_vote_to_start_leader;
let poh_service = PohService::new(
poh_recorder.clone(),
@@ -644,6 +656,7 @@ impl Validator {
bank.ticks_per_slot(),
config.poh_pinned_cpu_core,
config.poh_hashes_per_batch,
record_receiver,
);
assert_eq!(
blockstore.new_shreds_signals.len(),
@@ -657,6 +670,7 @@ impl Validator {
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
let (verified_vote_sender, verified_vote_receiver) = unbounded();
let (cluster_confirmed_slot_sender, cluster_confirmed_slot_receiver) = unbounded();
let tvu = Tvu::new(
vote_account,
authorized_voter_keypairs,
@@ -707,6 +721,7 @@ impl Validator {
replay_vote_sender.clone(),
completed_data_sets_sender,
bank_notification_sender.clone(),
cluster_confirmed_slot_receiver,
TvuConfig {
max_ledger_shreds: config.max_ledger_shreds,
halt_on_trusted_validators_accounts_hash_mismatch: config
@@ -720,6 +735,7 @@ impl Validator {
use_index_hash_calculation: config.accounts_db_use_index_hash_calculation,
rocksdb_compaction_interval: config.rocksdb_compaction_interval,
rocksdb_max_compaction_jitter: config.rocksdb_compaction_interval,
wait_for_vote_to_start_leader,
},
&max_slots,
);
@@ -745,6 +761,7 @@ impl Validator {
replay_vote_sender,
bank_notification_sender,
config.tpu_coalesce_ms,
cluster_confirmed_slot_sender,
);
datapoint_info!("validator-new", ("id", id.to_string(), String));
@@ -1250,6 +1267,7 @@ fn initialize_rpc_transaction_history_services(
exit: &Arc<AtomicBool>,
enable_cpi_and_log_storage: bool,
) -> TransactionHistoryServices {
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_sender = Some(TransactionStatusSender {
sender: transaction_status_sender,
@@ -1257,6 +1275,7 @@ fn initialize_rpc_transaction_history_services(
});
let transaction_status_service = Some(TransactionStatusService::new(
transaction_status_receiver,
max_complete_transaction_status_slot.clone(),
blockstore.clone(),
exit,
));
@@ -1279,6 +1298,7 @@ fn initialize_rpc_transaction_history_services(
TransactionHistoryServices {
transaction_status_sender,
transaction_status_service,
max_complete_transaction_status_slot,
rewards_recorder_sender,
rewards_recorder_service,
cache_block_time_sender,
@@ -1286,17 +1306,28 @@ fn initialize_rpc_transaction_history_services(
}
}
// Return true on error, indicating the validator should exit.
#[derive(Debug, PartialEq)]
enum ValidatorError {
BadExpectedBankHash,
NotEnoughLedgerData,
}
// Returns whether the validator waited on other nodes to start. In this case
// it should not wait for one of its votes to land to produce blocks,
// because if the whole network is waiting, then it will stall.
//
// An error indicates that a bad hash was encountered or some other
// unrecoverable condition was hit, and the validator should exit.
fn wait_for_supermajority(
config: &ValidatorConfig,
bank: &Bank,
cluster_info: &ClusterInfo,
rpc_override_health_check: Arc<AtomicBool>,
start_progress: &Arc<RwLock<ValidatorStartProgress>>,
) -> bool {
) -> Result<bool, ValidatorError> {
if let Some(wait_for_supermajority) = config.wait_for_supermajority {
match wait_for_supermajority.cmp(&bank.slot()) {
std::cmp::Ordering::Less => return false,
std::cmp::Ordering::Less => return Ok(false),
std::cmp::Ordering::Greater => {
error!(
"Ledger does not have enough data to wait for supermajority, \
@@ -1304,12 +1335,12 @@ fn wait_for_supermajority(
bank.slot(),
wait_for_supermajority
);
return true;
return Err(ValidatorError::NotEnoughLedgerData);
}
_ => {}
}
} else {
return false;
return Ok(false);
}
if let Some(expected_bank_hash) = config.expected_bank_hash {
@@ -1319,7 +1350,7 @@ fn wait_for_supermajority(
bank.hash(),
expected_bank_hash
);
return true;
return Err(ValidatorError::BadExpectedBankHash);
}
}
@@ -1344,7 +1375,7 @@ fn wait_for_supermajority(
sleep(Duration::new(1, 0));
}
rpc_override_health_check.store(false, Ordering::Relaxed);
false
Ok(true)
}
fn report_target_features() {
@@ -1635,17 +1666,21 @@ mod tests {
&cluster_info,
rpc_override_health_check.clone(),
&start_progress,
));
)
.unwrap());
// bank=0, wait=1, should fail
config.wait_for_supermajority = Some(1);
assert!(wait_for_supermajority(
&config,
&bank,
&cluster_info,
rpc_override_health_check.clone(),
&start_progress,
));
assert_eq!(
wait_for_supermajority(
&config,
&bank,
&cluster_info,
rpc_override_health_check.clone(),
&start_progress,
),
Err(ValidatorError::NotEnoughLedgerData)
);
// bank=1, wait=0, should pass, bank is past the wait slot
let bank = Bank::new_from_parent(&bank, &Pubkey::default(), 1);
@@ -1656,18 +1691,22 @@ mod tests {
&cluster_info,
rpc_override_health_check.clone(),
&start_progress,
));
)
.unwrap());
// bank=1, wait=1, equal, but bad hash provided
config.wait_for_supermajority = Some(1);
config.expected_bank_hash = Some(hash(&[1]));
assert!(wait_for_supermajority(
&config,
&bank,
&cluster_info,
rpc_override_health_check,
&start_progress,
));
assert_eq!(
wait_for_supermajority(
&config,
&bank,
&cluster_info,
rpc_override_health_check,
&start_progress,
),
Err(ValidatorError::BadExpectedBankHash)
);
}
#[test]

View File

@@ -1,4 +1,3 @@
use solana_runtime::commitment::VOTE_THRESHOLD_SIZE;
use solana_sdk::pubkey::Pubkey;
use std::collections::HashSet;
@@ -9,29 +8,33 @@ pub struct VoteStakeTracker {
}
impl VoteStakeTracker {
// Returns tuple (is_confirmed, is_new) where
// `is_confirmed` is true if the stake that has voted has just crossed the supermajority
// of stake
// Returns tuple (reached_threshold_results, is_new) where
// Each index in `reached_threshold_results` is true if the corresponding threshold in the input
// `thresholds_to_check` was newly reached by adding the stake of the input `vote_pubkey`
// `is_new` is true if the vote has not been seen before
pub fn add_vote_pubkey(
&mut self,
vote_pubkey: Pubkey,
stake: u64,
total_stake: u64,
) -> (bool, bool) {
thresholds_to_check: &[f64],
) -> (Vec<bool>, bool) {
let is_new = !self.voted.contains(&vote_pubkey);
if is_new {
self.voted.insert(vote_pubkey);
let supermajority_stake = (total_stake as f64 * VOTE_THRESHOLD_SIZE) as u64;
let old_stake = self.stake;
let new_stake = self.stake + stake;
self.stake = new_stake;
(
old_stake <= supermajority_stake && supermajority_stake < new_stake,
is_new,
)
let reached_threshold_results: Vec<bool> = thresholds_to_check
.iter()
.map(|threshold| {
let threshold_stake = (total_stake as f64 * threshold) as u64;
old_stake <= threshold_stake && threshold_stake < new_stake
})
.collect();
(reached_threshold_results, is_new)
} else {
(false, is_new)
(vec![false; thresholds_to_check.len()], is_new)
}
}
@@ -47,6 +50,7 @@ impl VoteStakeTracker {
#[cfg(test)]
mod test {
use super::*;
use solana_runtime::commitment::VOTE_THRESHOLD_SIZE;
#[test]
fn test_add_vote_pubkey() {
@@ -54,24 +58,43 @@ mod test {
let mut vote_stake_tracker = VoteStakeTracker::default();
for i in 0..10 {
let pubkey = solana_sdk::pubkey::new_rand();
let (is_confirmed, is_new) =
vote_stake_tracker.add_vote_pubkey(pubkey, 1, total_epoch_stake);
let (is_confirmed_thresholds, is_new) = vote_stake_tracker.add_vote_pubkey(
pubkey,
1,
total_epoch_stake,
&[VOTE_THRESHOLD_SIZE, 0.0],
);
let stake = vote_stake_tracker.stake();
let (is_confirmed2, is_new2) =
vote_stake_tracker.add_vote_pubkey(pubkey, 1, total_epoch_stake);
let (is_confirmed_thresholds2, is_new2) = vote_stake_tracker.add_vote_pubkey(
pubkey,
1,
total_epoch_stake,
&[VOTE_THRESHOLD_SIZE, 0.0],
);
let stake2 = vote_stake_tracker.stake();
// Stake should not change from adding same pubkey twice
assert_eq!(stake, stake2);
assert!(!is_confirmed2);
assert!(!is_confirmed_thresholds2[0]);
assert!(!is_confirmed_thresholds2[1]);
assert!(!is_new2);
assert_eq!(is_confirmed_thresholds.len(), 2);
assert_eq!(is_confirmed_thresholds2.len(), 2);
// at i == 6, the voted stake is 70%, which is the first time crossing
// the supermajority threshold
if i == 6 {
assert!(is_confirmed);
assert!(is_confirmed_thresholds[0]);
} else {
assert!(!is_confirmed);
assert!(!is_confirmed_thresholds[0]);
}
// at i == 0, the voted stake is 10%, which is the first time crossing
// the 0% threshold
if i == 0 {
assert!(is_confirmed_thresholds[1]);
} else {
assert!(!is_confirmed_thresholds[1]);
}
assert!(is_new);
}

View File

@@ -9,18 +9,18 @@ use std::ops::Div;
/// Returns a list of indexes shuffled based on the input weights
/// Note - The sum of all weights must not exceed `u64::MAX`
pub fn weighted_shuffle<T>(weights: Vec<T>, seed: [u8; 32]) -> Vec<usize>
pub fn weighted_shuffle<T>(weights: &[T], seed: [u8; 32]) -> Vec<usize>
where
T: Copy + PartialOrd + iter::Sum + Div<T, Output = T> + FromPrimitive + ToPrimitive,
{
let total_weight: T = weights.clone().into_iter().sum();
let total_weight: T = weights.iter().copied().sum();
let mut rng = ChaChaRng::from_seed(seed);
weights
.into_iter()
.iter()
.enumerate()
.map(|(i, v)| {
// This generates an "inverse" weight but it avoids floating point math
let x = (total_weight / v)
let x = (total_weight / *v)
.to_u64()
.expect("values > u64::max are not supported");
(
@@ -71,7 +71,7 @@ mod tests {
fn test_weighted_shuffle_iterator() {
let mut test_set = [0; 6];
let mut count = 0;
let shuffle = weighted_shuffle(vec![50, 10, 2, 1, 1, 1], [0x5a; 32]);
let shuffle = weighted_shuffle(&[50, 10, 2, 1, 1, 1], [0x5a; 32]);
shuffle.into_iter().for_each(|x| {
assert_eq!(test_set[x], 0);
test_set[x] = 1;
@@ -86,7 +86,7 @@ mod tests {
let mut test_weights = vec![0; 100];
(0..100).for_each(|i| test_weights[i] = (i + 1) as u64);
let mut count = 0;
let shuffle = weighted_shuffle(test_weights, [0xa5; 32]);
let shuffle = weighted_shuffle(&test_weights, [0xa5; 32]);
shuffle.into_iter().for_each(|x| {
assert_eq!(test_set[x], 0);
test_set[x] = 1;
@@ -97,9 +97,9 @@ mod tests {
#[test]
fn test_weighted_shuffle_compare() {
let shuffle = weighted_shuffle(vec![50, 10, 2, 1, 1, 1], [0x5a; 32]);
let shuffle = weighted_shuffle(&[50, 10, 2, 1, 1, 1], [0x5a; 32]);
let shuffle1 = weighted_shuffle(vec![50, 10, 2, 1, 1, 1], [0x5a; 32]);
let shuffle1 = weighted_shuffle(&[50, 10, 2, 1, 1, 1], [0x5a; 32]);
shuffle1
.into_iter()
.zip(shuffle.into_iter())
@@ -112,7 +112,7 @@ mod tests {
fn test_weighted_shuffle_imbalanced() {
let mut weights = vec![std::u32::MAX as u64; 3];
weights.push(1);
let shuffle = weighted_shuffle(weights.clone(), [0x5a; 32]);
let shuffle = weighted_shuffle(&weights, [0x5a; 32]);
shuffle.into_iter().for_each(|x| {
if x == weights.len() - 1 {
assert_eq!(weights[x], 1);

View File

@@ -26,7 +26,7 @@ use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
use solana_perf::packet::Packets;
use solana_rayon_threadlimit::get_thread_count;
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms};
use solana_sdk::{clock::Slot, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms};
use solana_streamer::streamer::PacketSender;
use std::{
net::{SocketAddr, UdpSocket},
@@ -36,6 +36,9 @@ use std::{
time::{Duration, Instant},
};
pub type DuplicateSlotSender = CrossbeamSender<Slot>;
pub type DuplicateSlotReceiver = CrossbeamReceiver<Slot>;
fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
if shred.is_data() {
// Only data shreds have parent information
@@ -86,21 +89,25 @@ fn run_check_duplicate(
cluster_info: &ClusterInfo,
blockstore: &Blockstore,
shred_receiver: &CrossbeamReceiver<Shred>,
duplicate_slot_sender: &DuplicateSlotSender,
) -> Result<()> {
let check_duplicate = |shred: Shred| -> Result<()> {
if !blockstore.has_duplicate_shreds_in_slot(shred.slot()) {
let shred_slot = shred.slot();
if !blockstore.has_duplicate_shreds_in_slot(shred_slot) {
if let Some(existing_shred_payload) = blockstore.is_shred_duplicate(
shred.slot(),
shred_slot,
shred.index(),
&shred.payload,
shred.is_data(),
) {
cluster_info.push_duplicate_shred(&shred, &existing_shred_payload)?;
blockstore.store_duplicate_slot(
shred.slot(),
shred_slot,
existing_shred_payload,
shred.payload,
)?;
duplicate_slot_sender.send(shred_slot)?;
}
}
@@ -319,6 +326,7 @@ impl WindowService {
cluster_slots: Arc<ClusterSlots>,
verified_vote_receiver: VerifiedVoteReceiver,
completed_data_sets_sender: CompletedDataSetsSender,
duplicate_slots_sender: DuplicateSlotSender,
) -> WindowService
where
F: 'static
@@ -346,6 +354,7 @@ impl WindowService {
exit.clone(),
blockstore.clone(),
duplicate_receiver,
duplicate_slots_sender,
);
let t_insert = Self::start_window_insert_thread(
@@ -381,6 +390,7 @@ impl WindowService {
exit: Arc<AtomicBool>,
blockstore: Arc<Blockstore>,
duplicate_receiver: CrossbeamReceiver<Shred>,
duplicate_slot_sender: DuplicateSlotSender,
) -> JoinHandle<()> {
let handle_error = || {
inc_new_counter_error!("solana-check-duplicate-error", 1, 1);
@@ -393,8 +403,12 @@ impl WindowService {
}
let mut noop = || {};
if let Err(e) = run_check_duplicate(&cluster_info, &blockstore, &duplicate_receiver)
{
if let Err(e) = run_check_duplicate(
&cluster_info,
&blockstore,
&duplicate_receiver,
&duplicate_slot_sender,
) {
if Self::should_exit_on_error(e, &mut noop, &handle_error) {
break;
}
@@ -408,7 +422,7 @@ impl WindowService {
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
insert_receiver: CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
duplicate_sender: CrossbeamSender<Shred>,
check_duplicate_sender: CrossbeamSender<Shred>,
completed_data_sets_sender: CompletedDataSetsSender,
) -> JoinHandle<()> {
let exit = exit.clone();
@@ -423,7 +437,7 @@ impl WindowService {
.name("solana-window-insert".to_string())
.spawn(move || {
let handle_duplicate = |shred| {
let _ = duplicate_sender.send(shred);
let _ = check_duplicate_sender.send(shred);
};
let mut metrics = BlockstoreInsertionMetrics::default();
let mut last_print = Instant::now();
@@ -538,6 +552,7 @@ impl WindowService {
handle_timeout();
false
}
Error::CrossbeamSendError => true,
_ => {
handle_error();
error!("thread {:?} error {:?}", thread::current().name(), e);
@@ -566,7 +581,6 @@ mod test {
shred::{DataShredHeader, Shredder},
};
use solana_sdk::{
clock::Slot,
epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
hash::Hash,
signature::{Keypair, Signer},
@@ -680,6 +694,7 @@ mod test {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
let (sender, receiver) = unbounded();
let (duplicate_slot_sender, duplicate_slot_receiver) = unbounded();
let (shreds, _) = make_many_slot_entries(5, 5, 10);
blockstore
.insert_shreds(shreds.clone(), None, false)
@@ -692,7 +707,17 @@ mod test {
let keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), timestamp());
let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair));
run_check_duplicate(&cluster_info, &blockstore, &receiver).unwrap();
run_check_duplicate(
&cluster_info,
&blockstore,
&receiver,
&duplicate_slot_sender,
)
.unwrap();
assert!(blockstore.has_duplicate_shreds_in_slot(duplicate_shred_slot));
assert_eq!(
duplicate_slot_receiver.try_recv().unwrap(),
duplicate_shred_slot
);
}
}

View File

@@ -199,9 +199,9 @@ pub fn cluster_info_retransmit() {
assert!(done);
let mut p = Packet::default();
p.meta.size = 10;
let peers = c1.retransmit_peers();
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
ClusterInfo::retransmit_to(&retransmit_peers, &mut p, None, &tn1, false).unwrap();
ClusterInfo::retransmit_to(&retransmit_peers, &mut p, &tn1, false).unwrap();
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.6.0"
version = "1.6.2"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,7 +19,7 @@ lazy_static = { version = "1.4.0", features = ["spin", "spin_no_std"] }
libc = { version = "0.2.62", features = ["extra_traits"] }
rand_chacha = { version = "0.2.2" }
regex-syntax = { version = "0.6.12" }
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde = { version = "1.0.100", features = ["rc"] }
ed25519-dalek = { version = "=1.0.1", features = ["serde"] }
syn_0_15 = { package = "syn", version = "0.15.42", features = ["extra-traits", "fold", "full"] }

View File

@@ -124,20 +124,25 @@ call to `deploy`.
Deployment failures will print an error message specifying the seed phrase
needed to recover the generated intermediate buffer's keypair:
```bash
=======================================================================
To resume a failed deploy, recover the ephemeral keypair file with
`solana-keygen recover` and the following 12-word seed phrase,
then pass it as the [BUFFER_SIGNER] argument to `solana deploy` or `solana write-buffer`
=======================================================================
spy axis cream equip bonus daring muffin fish noise churn broken diesel
=======================================================================
```
==================================================================================
Recover the intermediate account's ephemeral keypair file with
`solana-keygen recover` and the following 12-word seed phrase:
==================================================================================
valley flat great hockey share token excess clever benefit traffic avocado athlete
==================================================================================
To resume a deploy, pass the recovered keypair as
the [PROGRAM_ADDRESS_SIGNER] argument to `solana deploy` or
as the [BUFFER_SIGNER] to `solana program deploy` or `solana write-buffer`.
Or to recover the account's lamports, pass it as the
[BUFFER_ACCOUNT_ADDRESS] argument to `solana program drain`.
==================================================================================
```
To recover the keypair:
```bash
$ solana-keypair recover -o <KEYPAIR_PATH>
solana-keygen recover -o <KEYPAIR_PATH>
```
When asked, enter the 12-word seed phrase.
@@ -145,7 +150,57 @@ When asked, enter the 12-word seed phrase.
Then issue a new `deploy` command and specify the buffer:
```bash
$ solana program deploy --buffer <KEYPAIR_PATH> <PROGRAM_FILEPATH>
solana program deploy --buffer <KEYPAIR_PATH> <PROGRAM_FILEPATH>
```
### Closing buffer accounts and reclaiming their lamports
If deployment fails, there will be a leftover buffer account that holds
lamports. The buffer account can either be used to [resume a
deploy](#resuming-a-failed-deploy) or closed. When closed, the full balance of
the buffer account will be transferred to the recipient's account.
The buffer account's authority must be present to close a buffer account. To
list all the open buffer accounts that match the default authority:
```bash
solana program show --buffers
```
To specify a different authority:
```bash
solana program show --buffers --buffer-authority <AUTHORITY_ADDRESS>
```
To close a single account:
```bash
solana program close <BUFFER_ADDRESS>
```
To close a single account and specify a different authority than the default:
```bash
solana program close <BUFFER_ADDRESS> --buffer-authority <KEYPAIR_FILEPATH>
```
To close a single account and specify a different recipient than the default:
```bash
solana program close <BUFFER_ADDRESS> --recipient <RECIPIENT_ADDRESS>
```
To close all the buffer accounts associated with the current authority:
```bash
solana program close --buffers
```
To show all buffer accounts regardless of the authority:
```bash
solana program show --buffers --all
```
### Set a program's upgrade authority

View File

@@ -71,7 +71,7 @@ with the private keypair corresponding to the sender's public key in the
transaction.
```bash
solana transfer --from <KEYPAIR> <RECIPIENT_ACCOUNT_ADDRESS> 5 --url https://devnet.solana.com --fee-payer <KEYPAIR>
solana transfer --from <KEYPAIR> <RECIPIENT_ACCOUNT_ADDRESS> 5 --allow-unfunded-recipient --url https://devnet.solana.com --fee-payer <KEYPAIR>
```
where you replace `<KEYPAIR>` with the path to a keypair in your first wallet,
@@ -118,7 +118,7 @@ Save this seed phrase to recover your new keypair:
clump panic cousin hurt coast charge engage fall eager urge win love # If this was a real wallet, never share these words on the internet like this!
====================================================================
$ solana transfer --from my_solana_wallet.json 7S3P4HxJpyyigGzodYwHtCxZyUQe9JiBMHyRWXArAaKv 5 --url https://devnet.solana.com --fee-payer my_solana_wallet.json # Transferring tokens to the public address of the paper wallet
$ solana transfer --from my_solana_wallet.json 7S3P4HxJpyyigGzodYwHtCxZyUQe9JiBMHyRWXArAaKv 5 --allow-unfunded-recipient --url https://devnet.solana.com --fee-payer my_solana_wallet.json # Transferring tokens to the public address of the paper wallet
3gmXvykAd1nCQQ7MjosaHLf69Xyaqyq1qw2eu1mgPyYXd5G4v1rihhg1CiRw35b9fHzcftGKKEu4mbUeXY2pEX2z # This is the transaction signature
$ solana balance DYw8jCTfwHNRJhhmFcbXvVDTqWMEVFBX6ZKUmG5CNSKK --url https://devnet.solana.com

View File

@@ -40,11 +40,11 @@ solana config set --url https://devnet.solana.com
```bash
$ solana-validator \
--identity ~/validator-keypair.json \
--vote-account ~/vote-account-keypair.json \
--identity validator-keypair.json \
--vote-account vote-account-keypair.json \
--trusted-validator dv1LfzJvDF7S1fBKpFgKoKXK5yoSosmkAdfbxBo1GqJ \
--no-untrusted-rpc \
--ledger ~/validator-ledger \
--ledger ledger \
--rpc-port 8899 \
--dynamic-port-range 8000-8010 \
--entrypoint entrypoint.devnet.solana.com:8001 \
@@ -85,14 +85,14 @@ solana config set --url https://testnet.solana.com
```bash
$ solana-validator \
--identity ~/validator-keypair.json \
--vote-account ~/vote-account-keypair.json \
--identity validator-keypair.json \
--vote-account vote-account-keypair.json \
--trusted-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \
--trusted-validator 7XSY3MrYnK8vq693Rju17bbPkCN3Z7KvvfvJx4kdrsSY \
--trusted-validator Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN \
--trusted-validator 9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv \
--no-untrusted-rpc \
--ledger ~/validator-ledger \
--ledger ledger \
--rpc-port 8899 \
--dynamic-port-range 8000-8010 \
--entrypoint entrypoint.testnet.solana.com:8001 \
@@ -143,7 +143,7 @@ $ solana-validator \
--trusted-validator DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ \
--trusted-validator CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S \
--no-untrusted-rpc \
--ledger ~/validator-ledger \
--ledger ledger \
--rpc-port 8899 \
--private-rpc \
--dynamic-port-range 8000-8010 \

View File

@@ -52,6 +52,7 @@ gives a convenient interface for the RPC methods.
- [getSignatureStatuses](jsonrpc-api.md#getsignaturestatuses)
- [getSlot](jsonrpc-api.md#getslot)
- [getSlotLeader](jsonrpc-api.md#getslotleader)
- [getSlotLeaders](jsonrpc-api.md#getslotleaders)
- [getStakeActivation](jsonrpc-api.md#getstakeactivation)
- [getSupply](jsonrpc-api.md#getsupply)
- [getTokenAccountBalance](jsonrpc-api.md#gettokenaccountbalance)
@@ -376,7 +377,7 @@ Result:
### getBlockTime
Returns the estimated production time of a confirmed block.
Returns the estimated production time of a block.
Each validator reports their UTC time to the ledger on a regular interval by
intermittently adding a timestamp to a Vote for a particular block. A requested
@@ -460,6 +461,9 @@ Returns identity and transaction information about a confirmed block in the ledg
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) `encoding: <string>` - encoding for each returned Transaction, either "json", "jsonParsed", "base58" (*slow*), "base64". If parameter not provided, the default encoding is "json".
"jsonParsed" encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If "jsonParsed" is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
- (optional) `transactionDetails: <string>` - level of transaction detail to return, either "full", "signatures", or "none". If parameter not provided, the default detail level is "full".
- (optional) `rewards: <bool>` - whether to populate the `rewards` array. If parameter not provided, the default includes rewards.
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment); "processed" is not supported. If parameter not provided, the default is "finalized".
#### Results:
@@ -470,7 +474,7 @@ The result field will be an object with the following fields:
- `blockhash: <string>` - the blockhash of this block, as base-58 encoded string
- `previousBlockhash: <string>` - the blockhash of this block's parent, as base-58 encoded string; if the parent block is not available due to ledger cleanup, this field will return "11111111111111111111111111111111"
- `parentSlot: <u64>` - the slot index of this block's parent
- `transactions: <array>` - an array of JSON objects containing:
- `transactions: <array>` - present if "full" transaction details are requested; an array of JSON objects containing:
- `transaction: <object|[string,encoding]>` - [Transaction](#transaction-structure) object, either in JSON format or encoded binary data, depending on encoding parameter
- `meta: <object>` - transaction status metadata object, containing `null` or:
- `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24)
@@ -484,7 +488,8 @@ The result field will be an object with the following fields:
- DEPRECATED: `status: <object>` - Transaction status
- `"Ok": <null>` - Transaction was successful
- `"Err": <ERR>` - Transaction failed with TransactionError
- `rewards: <array>` - an array of JSON objects containing:
- `signatures: <array>` - present if "signatures" are requested for transaction details; an array of signature strings, corresponding to the transaction order in the block
- `rewards: <array>` - present if rewards are requested; an array of JSON objects containing:
- `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
- `lamports: <i64>` - number of reward lamports credited or debited by the account, as an i64
- `postBalance: <u64>` - account balance in lamports after the reward was applied
@@ -496,7 +501,7 @@ The result field will be an object with the following fields:
Request:
```bash
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, {"encoding": "json"}]}
{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, {"encoding": "json","transactionDetails":"full","rewards":false}]}
'
```
@@ -509,7 +514,6 @@ Result:
"blockhash": "3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA",
"parentSlot": 429,
"previousBlockhash": "mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B",
"rewards": [],
"transactions": [
{
"meta": {
@@ -683,6 +687,7 @@ Returns a list of confirmed blocks between two slots
- `<u64>` - start_slot, as u64 integer
- `<u64>` - (optional) end_slot, as u64 integer
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment); "processed" is not supported. If parameter not provided, the default is "finalized".
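As a hedged illustration (the slot numbers are arbitrary, and passing the commitment config as the trailing parameter object is assumed from the parameter list above), a request for confirmed-but-not-yet-finalized blocks could look like:
```bash
# Hypothetical request: confirmed (not yet finalized) blocks in slots 5 through 10
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[5, 10, {"commitment":"confirmed"}]}
'
```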
#### Results:
@@ -713,6 +718,7 @@ Returns a list of confirmed blocks starting at the given slot
- `<u64>` - start_slot, as u64 integer
- `<u64>` - limit, as u64 integer
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment); "processed" is not supported. If parameter not provided, the default is "finalized".
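Similarly, a hedged sketch of a request capped at three confirmed blocks (slot and limit values are illustrative, and the trailing commitment object is assumed from the parameter list above):
```bash
# Hypothetical request: at most 3 confirmed blocks starting at slot 5
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocksWithLimit","params":[5, 3, {"commitment":"confirmed"}]}
'
```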
#### Results:
@@ -853,6 +859,7 @@ Returns transaction details for a confirmed transaction
- `<object>` - (optional) Configuration object containing the following optional fields:
- (optional) `encoding: <string>` - encoding for each returned Transaction, either "json", "jsonParsed", "base58" (*slow*), "base64". If parameter not provided, the default encoding is "json".
"jsonParsed" encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If "jsonParsed" is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment); "processed" is not supported. If parameter not provided, the default is "finalized".
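A hedged example request, assuming the configuration object accepts both the `encoding` and `commitment` fields listed above and using a placeholder signature:
```bash
# Hypothetical request: fetch a transaction at "confirmed" commitment
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {"jsonrpc":"2.0","id":1,"method":"getConfirmedTransaction","params":["<TRANSACTION_SIGNATURE>", {"encoding":"json","commitment":"confirmed"}]}
'
```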
#### Results:
@@ -2250,6 +2257,53 @@ Result:
{"jsonrpc":"2.0","result":"ENvAW7JScgYq6o4zKZwewtkzzJgDzuJAFxYasvmEQdpS","id":1}
```
### getSlotLeaders
Returns the slot leaders for a given slot range
#### Parameters:
- `<u64>` - Start slot, as u64 integer
- `<u64>` - Limit, as u64 integer
#### Results:
- `<array<string>>` - Node identity public keys as base-58 encoded strings
#### Example:
If the current slot is #99, query the next 10 leaders with the following request:
Request:
```bash
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
{"jsonrpc":"2.0","id":1, "method":"getSlotLeaders", "params":[100, 10]}
'
```
Result:
The first leader returned is the leader for slot #100:
```json
{
"jsonrpc": "2.0",
"result": [
"ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n",
"ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n",
"ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n",
"ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n",
"Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM",
"Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM",
"Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM",
"Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM",
"DWvDTSh3qfn88UoQTEKRV2JnLt5jtJAVoiCo3ivtMwXP",
"DWvDTSh3qfn88UoQTEKRV2JnLt5jtJAVoiCo3ivtMwXP"
],
"id": 1
}
```
### getStakeActivation
Returns epoch activation information for a stake account
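For reference, a minimal request sketch (the stake account address is a placeholder):
```bash
# Hypothetical request: activation state of a stake account
curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {"jsonrpc":"2.0","id":1,"method":"getStakeActivation","params":["<STAKE_ACCOUNT_ADDRESS>"]}
'
```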
@@ -2742,7 +2796,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
Result:
```json
{"jsonrpc":"2.0","result":{"solana-core": "1.6.0"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "1.6.2"},"id":1}
```
### getVoteAccounts

View File

@@ -388,7 +388,7 @@ will wait and track progress on stderr until the transaction has been finalized
by the cluster. If the transaction fails, it will report any transaction errors.
```bash
solana transfer <USER_ADDRESS> <AMOUNT> --keypair <KEYPAIR> --url http://localhost:8899
solana transfer <USER_ADDRESS> <AMOUNT> --allow-unfunded-recipient --keypair <KEYPAIR> --url http://localhost:8899
```
The [Solana Javascript SDK](https://github.com/solana-labs/solana-web3.js)
@@ -420,7 +420,7 @@ In the command-line tool, pass the `--no-wait` argument to send a transfer
asynchronously, and include your recent blockhash with the `--blockhash` argument:
```bash
solana transfer <USER_ADDRESS> <AMOUNT> --no-wait --blockhash <RECENT_BLOCKHASH> --keypair <KEYPAIR> --url http://localhost:8899
solana transfer <USER_ADDRESS> <AMOUNT> --no-wait --allow-unfunded-recipient --blockhash <RECENT_BLOCKHASH> --keypair <KEYPAIR> --url http://localhost:8899
```
You can also build, sign, and serialize the transaction manually, and fire it off to

View File

@@ -157,6 +157,15 @@ You can generate a custom vanity keypair using solana-keygen. For instance:
solana-keygen grind --starts-with e1v1s:1
```
You may request that the generated vanity keypair be expressed as a seed
phrase, which allows the keypair to be recovered from that phrase plus an
optionally supplied passphrase (note that this is significantly slower than
grinding without a mnemonic):
```bash
solana-keygen grind --use-mnemonic --starts-with e1v1s:1
```
Depending on the string requested, it may take days to find a match...
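The printed seed phrase can later be turned back into the keypair with `solana-keygen recover`; a minimal sketch, where the output path is illustrative:
```bash
# Prompts for the 12-word seed phrase (and optional passphrase) printed by grind
solana-keygen recover -o vanity-keypair.json
```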
---
@@ -254,7 +263,6 @@ Connect to the cluster by running:
solana-validator \
--identity ~/validator-keypair.json \
--vote-account ~/vote-account-keypair.json \
--ledger ~/validator-ledger \
--rpc-port 8899 \
--entrypoint devnet.solana.com:8001 \
--limit-ledger-size \
@@ -264,6 +272,9 @@ solana-validator \
To force validator logging to the console, add a `--log -` argument; otherwise
the validator will automatically log to a file.
The ledger will be placed in the `ledger/` directory by default; use the
`--ledger` argument to specify a different location.
> Note: You can use a
> [paper wallet seed phrase](../wallet-guide/paper-wallet.md)
> for your `--identity` and/or
@@ -355,6 +366,11 @@ very large over time and it's recommended that log rotation be configured.
The validator will re-open its log file when it receives the `USR1` signal, which
is the basic primitive that enables log rotation.
If the validator is being started by a wrapper shell script, it is important to
launch the process with `exec` (`exec solana-validator ...`) when using logrotate.
Otherwise the `USR1` signal will be delivered to the script's process instead of
the validator's, which will kill them both.
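As a rough illustration (the flags and paths below are placeholders, not a recommended configuration), such a wrapper script would use `exec` like this:
```bash
#!/usr/bin/env bash
# exec replaces this shell with solana-validator, so logrotate's USR1
# signal reaches the validator process directly.
exec solana-validator \
  --identity ~/validator-keypair.json \
  --log ~/solana-validator.log
```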
#### Using logrotate
An example setup for the `logrotate`, which assumes that the validator is

View File

@@ -5,6 +5,13 @@ title: Mobile App Wallets
Solana is supported by multiple third-party apps which should provide a familiar
experience for most people who are new or experienced with using crypto wallets.
## Exodus
Send, receive, and exchange cryptocurrency with ease on the world's leading desktop, mobile, and hardware crypto wallets.
Download [Exodus](https://exodus.com/) to easily and securely manage your Solana tokens.
Exodus includes live charts, a built-in exchange, and 24/7 human support.
## Trust Wallet
[Trust Wallet](https://trustwallet.com/) is an app available for iOS and Android
and can be used to send and receive SOL tokens.

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-dos"
version = "1.6.0"
version = "1.6.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,15 +14,15 @@ clap = "2.33.1"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.0"
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-core = { path = "../core", version = "1.6.0" }
solana-ledger = { path = "../ledger", version = "1.6.0" }
solana-logger = { path = "../logger", version = "1.6.0" }
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
solana-runtime = { path = "../runtime", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-client = { path = "../client", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-core = { path = "../core", version = "=1.6.2" }
solana-ledger = { path = "../ledger", version = "=1.6.2" }
solana-logger = { path = "../logger", version = "=1.6.2" }
solana-net-utils = { path = "../net-utils", version = "=1.6.2" }
solana-runtime = { path = "../runtime", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
solana-client = { path = "../client", version = "=1.6.2" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-download-utils"
version = "1.6.0"
version = "1.6.2"
description = "Solana Download Utils"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,9 +14,9 @@ bzip2 = "0.3.3"
console = "0.11.3"
indicatif = "0.15.0"
log = "0.4.11"
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-runtime = { path = "../runtime", version = "1.6.0" }
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-runtime = { path = "../runtime", version = "=1.6.2" }
tar = "0.4.28"
[lib]

View File

@@ -34,9 +34,18 @@ pub fn download_file(
}
let download_start = Instant::now();
fs::create_dir_all(destination_file.parent().unwrap()).map_err(|err| err.to_string())?;
fs::create_dir_all(destination_file.parent().expect("parent"))
.map_err(|err| err.to_string())?;
let temp_destination_file = destination_file.with_extension("tmp");
let mut temp_destination_file = destination_file.to_path_buf();
temp_destination_file.set_file_name(format!(
"tmp-{}",
destination_file
.file_name()
.expect("file_name")
.to_str()
.expect("to_str")
));
let progress_bar = new_spinner_progress_bar();
if use_progress_bar {
@@ -169,11 +178,11 @@ pub fn download_genesis_if_missing(
pub fn download_snapshot(
rpc_addr: &SocketAddr,
ledger_path: &Path,
snapshot_output_dir: &Path,
desired_snapshot_hash: (Slot, Hash),
use_progress_bar: bool,
) -> Result<(), String> {
snapshot_utils::purge_old_snapshot_archives(ledger_path);
snapshot_utils::purge_old_snapshot_archives(snapshot_output_dir);
for compression in &[
ArchiveFormat::TarZstd,
@@ -181,7 +190,7 @@ pub fn download_snapshot(
ArchiveFormat::TarBzip2,
] {
let desired_snapshot_package = snapshot_utils::get_snapshot_archive_path(
ledger_path.to_path_buf(),
snapshot_output_dir.to_path_buf(),
&desired_snapshot_hash,
*compression,
);

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "1.6.0"
version = "1.6.2"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,12 +16,12 @@ clap = "2.33"
log = "0.4.11"
serde = "1.0.122"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
solana-cli-config = { path = "../cli-config", version = "1.6.0" }
solana-logger = { path = "../logger", version = "1.6.0" }
solana-metrics = { path = "../metrics", version = "1.6.0" }
solana-sdk = { path = "../sdk", version = "1.6.0" }
solana-version = { path = "../version", version = "1.6.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.2" }
solana-cli-config = { path = "../cli-config", version = "=1.6.2" }
solana-logger = { path = "../logger", version = "=1.6.2" }
solana-metrics = { path = "../metrics", version = "=1.6.2" }
solana-sdk = { path = "../sdk", version = "=1.6.2" }
solana-version = { path = "../version", version = "=1.6.2" }
tokio = { version = "1.1", features = ["full"] }
[lib]

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-frozen-abi"
version = "1.6.0"
version = "1.6.2"
description = "Solana Frozen ABI"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,11 +16,11 @@ log = "0.4.11"
serde = "1.0.122"
serde_derive = "1.0.103"
sha2 = "0.9.2"
solana-frozen-abi-macro = { path = "macro", version = "1.6.0" }
solana-frozen-abi-macro = { path = "macro", version = "=1.6.2" }
thiserror = "1.0"
[target.'cfg(not(target_arch = "bpf"))'.dependencies]
solana-logger = { path = "../logger", version = "1.6.0" }
solana-logger = { path = "../logger", version = "=1.6.2" }
generic-array = { version = "0.14.3", default-features = false, features = ["serde", "more_lengths"]}
memmap2 = "0.1.0"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-frozen-abi-macro"
version = "1.6.0"
version = "1.6.2"
description = "Solana Frozen ABI Macro"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"

Some files were not shown because too many files have changed in this diff.