Compare commits

...

197 Commits

Author SHA1 Message Date
fa254ff18f Fuzzer test and fixes (#9853) (#9858) 2020-05-02 10:05:13 -07:00
f78df36363 Put empty accounts in the accounts list on load (#9840) (#9854)
Indexing into accounts array does not match account_keys otherwise.
Also enforce program accounts not at index 0
Enforce at least 1 Read-write signing fee-payer account.
2020-05-01 21:37:13 -07:00
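A minimal sketch of the sanitization invariants described in the commit above, using a hypothetical MessageLike struct rather than the actual runtime types: the fee payer at index 0 must be a read-write signer, and no program account may occupy index 0.

struct MessageLike {
    num_required_signatures: u8,
    num_readonly_signed_accounts: u8,
    // Hypothetical: one program-id account index per instruction.
    program_id_indexes: Vec<u8>,
}

fn sanitize(msg: &MessageLike) -> Result<(), &'static str> {
    // Require at least one read-write signing account (the fee payer) at index 0.
    if msg.num_required_signatures == 0
        || msg.num_readonly_signed_accounts >= msg.num_required_signatures
    {
        return Err("no read-write signing fee-payer account");
    }
    // Program accounts may not sit at index 0, which is reserved for the fee payer.
    if msg.program_id_indexes.contains(&0) {
        return Err("program account at index 0");
    }
    Ok(())
}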
bae6fe17e9 Watchtower can now emit a notification on all non-vote transactions (#9845) (#9852)

automerge
2020-05-01 19:45:32 -07:00
4b1d338e04 Add delay to keep RPC traffic down on error 2020-05-01 10:39:11 -07:00
8079359420 Add incinerator sysvar (#9815) (#9836)
(cherry picked from commit 8dfe0affd4)
2020-05-01 09:02:59 -07:00
3aa52f95a2 v1.1 backport custom error rename (#9826)
* Add program_error conversions (#9203)

* Rename CustomError to Custom (#9207)

* More custom error rename (#9227)

automerge

* Remove librapay conflicts

* Fix rebase

Co-authored-by: Jack May <jack@solana.com>
2020-04-30 23:54:11 -06:00
3d88b9ac22 Cleanup BPF helper symbols (bp #9804) (#9825)
automerge
2020-04-30 13:10:13 -07:00
948487d9a7 Bump Ledger Beta app version (#9822) (#9824)
automerge
2020-04-30 11:21:08 -07:00
d775855a23 Clarify Ledger security implications (#9820) (#9823)
automerge
2020-04-30 11:18:58 -07:00
3f41d60793 Add commitment Root variant, and add fleshed out --commitment arg to Cli (#9806) (#9813)
automerge
2020-04-30 10:40:07 -07:00
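A hedged usage sketch of the new --commitment argument, wired from the commitment_arg() and commitment_of() helpers that appear in the solana-clap-utils diff further down this compare; the binary name and argument values here are illustrative only.

use clap::App;
use solana_clap_utils::{commitment::commitment_arg, input_parsers::commitment_of};

fn main() {
    // Parse a pretend command line that selects the new Root commitment level.
    let matches = App::new("example")
        .arg(commitment_arg())
        .get_matches_from(vec!["example", "--commitment", "root"]);
    // commitment_of maps "default" | "max" | "recent" | "root" to a CommitmentConfig.
    let commitment = commitment_of(&matches, "commitment");
    println!("{:?}", commitment);
}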
7a6543eb5b thiserror, docs, remove general Failure case (#9741) (#9810)
automerge
2020-04-30 10:23:54 -07:00
892abd2a24 Make default programs static (bp #9717) (#9814)
automerge
2020-04-30 02:50:34 -07:00
8fef8eaed9 Remove old logging enabler artifacts (bp #9777) (#9818)
automerge
2020-04-30 01:52:01 -07:00
b7bd9d9fbb Nit: More informative error message (#9616) (#9816)
automerge
2020-04-30 00:54:47 -07:00
a8eb9357cb Upgrade to Rust 1.43.0 (#9754) (#9808)
automerge
2020-04-29 19:50:13 -07:00
87601facf1 Bump Rust-BPF version to be interoperable with latest Rust (#9783) (#9802)
automerge
2020-04-29 16:53:43 -07:00
4facdb25d0 Rpc Client: Prevent error out on get_num_blocks_since_signature_confirmation (#9792) (#9800)
automerge
2020-04-29 16:25:25 -07:00
bef59c3bd7 Rpc: remove unwraps (#9793) (#9797)
automerge
2020-04-29 15:03:05 -07:00
307064949a Fix BPF tool caching (#9781) (#9795)
automerge
2020-04-29 13:35:58 -07:00
40cb8d857b Don't divide by zero 2020-04-29 11:04:08 -07:00
972381efff catchup now estimates the time remaining (#9782) (#9785)
automerge
2020-04-29 01:26:20 -07:00
a2098c9ea9 Update dalek (v1.1 bp) (#9765)
* Disable Move/Libra components

* Update dalek version

Co-authored-by: Trent Nelson <trent@solana.com>
2020-04-28 18:37:20 -06:00
5af9963ea9 Docs: Fix linkcheck errors (#9743)
automerge
2020-04-28 18:37:20 -06:00
69736a792c Remove commented code 2020-04-28 15:49:07 -06:00
59446d5c50 v1.1: backport commitment max changes (#9775)
* Add largest_confirmed_root to BlockCommitmentCache (#9640)

* Add largest_confirmed_root to BlockCommitmentCache

* clippy

* Add blockstore to BlockCommitmentCache to check root

* Add rooted_stake helper fn and test

* Nodes that are behind should correctly id confirmed roots

* Simplify rooted_stake collector

* Cache banks in BankForks until optional largest_confirmed_root (#9678)

automerge

* Rpc: Use cluster largest_confirmed_root as commitment max (#9750)

automerge
2020-04-28 15:04:41 -06:00
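The backport above centers on a cluster-wide largest_confirmed_root. A minimal sketch of the idea (not the BlockCommitmentCache code itself): given each validator's current root and stake, the largest confirmed root is the highest slot already rooted by a supermajority of the stake.

fn largest_confirmed_root(mut rooted_stake: Vec<(u64, u64)>, total_stake: u64) -> Option<u64> {
    // (root_slot, stake) pairs, highest roots first.
    rooted_stake.sort_unstable_by(|a, b| b.0.cmp(&a.0));
    let mut stake_sum = 0u64;
    for (root, stake) in rooted_stake {
        stake_sum += stake;
        // The first root backed by more than 2/3 of the stake is the largest
        // slot the cluster as a whole can treat as confirmed.
        if stake_sum as f64 > total_stake as f64 * 2.0 / 3.0 {
            return Some(root);
        }
    }
    None
}

Per the last sub-commit, the RPC layer then uses this value as the bound for max-commitment queries.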
ac538d0395 Don't --use-move 2020-04-28 12:47:23 -07:00
e79c910c41 Reorder steps by relative priority for when there aren't enough agents 2020-04-28 12:46:06 -07:00
0a7ef32ec7 Disable move more 2020-04-28 12:39:16 -07:00
1f6a7c174a Report duration of last alarm in the All Clear message (#9766) (#9771)
automerge

(cherry picked from commit 6e42989309)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-04-28 12:30:54 -07:00
109bfc3e7a Use Blockstore lowest_slot to start root iterator (#9738) (#9768)
automerge
2020-04-28 11:36:17 -07:00
682b700ec8 Set HOME correctly (#9757) (#9762)
automerge
2020-04-28 03:07:16 -07:00
89bfe5fab0 sanitize lowest slots (#9747) (#9752)
automerge
2020-04-27 22:01:41 -07:00
fbcc107086 Clean up use to keep rust 1.43.0 from complaining (#9740) (#9749)
automerge
2020-04-27 18:49:58 -07:00
9c6f613f8c Input values are not sanitized after they are deserialized, making it far too easy for Leo to earn SOL (bp #9706) (#9736)
automerge
2020-04-27 16:23:59 -07:00
34f5f48e43 Fix broken doc link to anatomy of transaction (#9728) (#9730)
automerge
2020-04-27 01:28:00 -07:00
09cd6197c2 Update metrics dashboard 2020-04-26 09:53:38 -07:00
4a86a794ed Filter program ids to store (bp #9721) (#9724)
automerge
2020-04-26 01:49:15 -07:00
7cd1c06a50 validator: Add support for log rotation; sending SIGUSR1 will cause the log file to be re-opened (bp #9713) (#9716)
automerge
2020-04-24 16:22:03 -07:00
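A minimal sketch of the SIGUSR1-driven log re-open described above, assuming the signal_hook crate for signal handling; the validator's actual implementation may differ.

use signal_hook::{consts::SIGUSR1, iterator::Signals};
use std::fs::OpenOptions;
use std::thread;

fn spawn_log_reopener(path: &'static str) -> std::io::Result<()> {
    let mut signals = Signals::new(&[SIGUSR1])?;
    thread::spawn(move || {
        for _ in signals.forever() {
            // An external rotator renames the old file, then sends SIGUSR1;
            // re-opening here creates a fresh file at the original path.
            let _reopened = OpenOptions::new().create(true).append(true).open(path);
            // A real logger would swap this handle in as the active writer.
        }
    });
    Ok(())
}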
240433ef25 Add missing slash 2020-04-23 21:36:40 -07:00
0afb058616 Bump version to 1.1.8 2020-04-23 20:40:29 -07:00
f1cda98aeb Update testnet expected shred version 2020-04-23 20:37:17 -07:00
d5cbac41cb Fix vote listener passing bad transactions (#9694) (#9696)
automerge
2020-04-23 18:43:07 -07:00
f19209b23d Update solana-user-authorized_keys.sh 2020-04-23 16:32:05 -06:00
fd670d0ae0 Exit cleanly on panic! so the process doesn't limp along in a half-dead state (#9690) (#9693)
automerge
2020-04-23 13:52:34 -07:00
948d869c49 Update to rocksdb 0.14 and set max wal size (#9668) (#9688) 2020-04-23 10:42:31 -07:00
2bab4ace8b Remove stray 'v' (#9679) (#9681)
automerge
2020-04-23 01:04:26 -07:00
1ddff68642 Bump version to 1.1.7 2020-04-22 22:13:46 -07:00
c0b250285a Add custodian option to withdraw-stake command (bp #9662) (#9675)
* Merge stake::withdraw instructions (#9617)

* Add custodian option to withdraw-stake command (#9662)

automerge

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-04-22 20:30:46 -07:00
4509579e10 Clean up wallet doc URLs and validator/TdS docs (#9676) 2020-04-22 20:30:27 -07:00
9b8d59cc2b Add docs for installing the beta Ledger app (#9641) (#9677)
automerge
2020-04-22 19:38:11 -07:00
bd91fc2985 Don't attempt to rebase or move empty accounts (#9651) (#9674)
automerge
2020-04-22 19:35:20 -07:00
9a3ebe0b20 Remove validator-info publish command from net scripts 2020-04-22 18:05:32 -06:00
6c08dc9c9d Add getLowestNonpurgedBlock rpc; use blockstore api in getConfirmedBlocks (#9656) (#9664)
automerge
2020-04-22 15:13:23 -07:00
740c1df045 Extend snapshot interval in multinode demo (#9657) (#9661)
automerge
2020-04-22 14:39:25 -07:00
0a5905a02c Add single region TPS report testcases (#9609) (#9659) 2020-04-22 12:43:53 -06:00
237eceb6c1 Relax setting withdraw authority during lockup (#9644) (#9646)
automerge
2020-04-21 22:50:18 -07:00
dabbdcf988 Push down cluster_info lock (bp #9594) (#9637)
automerge
2020-04-21 16:42:07 -07:00
3fceaf694c Flag test_tvu_exit as serial to hopefully reduce CI flakiness (#9509) (#9639)
automerge
2020-04-21 14:43:29 -07:00
34df5ad364 cli: Add transaction-history (bp #9614) (#9623)
automerge
2020-04-21 10:49:24 -07:00
573aed2b4b RPC: Allow single slot address history queries (#9630) (#9635)
(cherry picked from commit 3023691487)

Co-authored-by: Justin Starry <justin@solana.com>
2020-04-21 10:23:34 -07:00
6ef65d8513 Document potential null responses in RPC API docs (#9629) (#9634)
automerge
2020-04-21 10:11:55 -07:00
7c6fb3d554 Wait for supermajority of cluster to have rooted a transaction to consider it finalized (#9618) (#9627)
automerge
2020-04-21 00:59:37 -07:00
886eaac211 Handle outdated and current ledger-solana-apps (#9605) (#9612)
automerge
2020-04-20 16:14:06 -07:00
6ca69a1525 Add decode-transaction (#9608)
automerge
2020-04-20 14:20:09 -07:00
1cc2f67391 test (#9601)
automerge
2020-04-20 10:09:49 -07:00
5d547130f0 Calculate distance between u64 without overflow (#9592) (#9599)
automerge
2020-04-20 01:25:41 -07:00
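The fix above boils down to computing an absolute difference without going through a signed subtraction. A minimal illustration:

// Subtracting the smaller value from the larger never underflows, and never
// overflows the way casting (a - b) to a signed delta could.
fn distance(a: u64, b: u64) -> u64 {
    if a > b { a - b } else { b - a }
}
// distance(3, u64::MAX) == u64::MAX - 3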
facb209720 Error for invalid shred. (#9588) (#9597)
automerge
2020-04-19 22:50:08 -07:00
5e215ac854 log proper slot (#9576) (#9589)
automerge
2020-04-19 15:52:03 -07:00
d3dd9ec6e2 Fix local-cluster test - archiver should wait for itself + 1 validator (#9577) (#9585)
automerge
2020-04-19 01:44:46 -07:00
12ed7c6845 Bump version to 1.1.6 2020-04-18 23:37:34 -07:00
f9d68b5d86 Budget for gossip traffic (#9550)
(cherry picked from commit 65a9658b13)
2020-04-18 23:09:16 -07:00
8f9f11e37f Tame wallet manager better (#9567) (#9575)
automerge
2020-04-18 13:37:40 -07:00
6a5f67f78c validator: Consider the activated stake of this node to be online again (#9573) (#9574)
automerge
2020-04-18 11:54:42 -07:00
a505e92487 Remove wait_for_majority (#9572)
automerge
2020-04-18 09:54:34 -07:00
701300334a Reduce metrics log output
(cherry picked from commit f142451a33)
2020-04-18 08:40:01 -07:00
b9cf02fd6a Report offline/wrong-shred nodes while waiting for a super majority in gossip
(cherry picked from commit 8509dcb8a0)
2020-04-17 16:21:43 -07:00
71cb8de0dd Reduce ReceiveUpdates log spam
(cherry picked from commit 7b5cdf6adf)
2020-04-17 16:21:43 -07:00
13e5b479eb confirm --verbose now displays failed transactions (#9562)
automerge
2020-04-17 14:56:58 -07:00
2ad435587a Increase the number of JSON RPC service threads (#9551) (#9561)
automerge
2020-04-17 14:48:32 -07:00
8f54d409e2 Consider config in check_for_usb (#9555) (#9557)
automerge
2020-04-17 13:03:45 -07:00
b4345c039a Make rpc_subscriptions.rs tests serial (#9556)
automerge

(cherry picked from commit b58338b066)
2020-04-17 11:40:12 -07:00
e61545ad18 Make rpc tests serial (#9537) (#9553)
automerge
2020-04-16 23:34:26 -07:00
961d1f0ee5 Simplify EpochSlots update (#9545) (#9548)
automerge
2020-04-16 21:02:28 -07:00
b260f686a3 Only build x86_64-unknown-linux-gnu on docs.rs 2020-04-16 19:07:08 -07:00
3cfc38850b Don't upload tarballs to buildkite to speed up build 2020-04-16 13:54:59 -07:00
f12a933a54 Write wallet key to explicit file
(cherry picked from commit 93669ab1fc)
2020-04-16 13:41:33 -07:00
135763e019 Bump version to 1.1.5 2020-04-16 13:23:21 -07:00
aaec7de881 Fix broadcast metrics (#9461)
* Rework broadcast metrics to support multiple threads

* Update dashboards

Co-authored-by: Carl <carl@solana.com>
2020-04-16 13:02:02 -07:00
420ea2f143 Reduce cluster-info metrics. (#9465) 2020-04-16 13:02:02 -07:00
cb2dd56317 Passing -v/--verbose to solana confirm now displays the full transaction (#9530)
automerge
2020-04-16 10:01:42 -07:00
a420d1e91e Don't unwrap on session new
(cherry picked from commit 30b3862770)
2020-04-16 09:46:05 -07:00
0073448afc Default to RUST_BACKTRACE=1 for more informative validator logs
(cherry picked from commit 4ac15e68cf)
2020-04-15 22:46:45 -07:00
086cdd8ef7 Rpc: Speed up getBlockTime (#9510) (#9514)
automerge
2020-04-15 19:12:09 -07:00
dd57cbd6a4 Pacify shellcheck
(cherry picked from commit a7ed33b552)
2020-04-15 17:50:53 -07:00
8937a1db3b Always run shellcheck
(cherry picked from commit 9cc7265b05)
2020-04-15 17:50:53 -07:00
89a914f7c1 Use $rust_stable (#9516)
automerge
2020-04-15 17:22:02 -07:00
cf9936a314 RPC: Add health check URI (bp #9499) (#9505)
automerge
2020-04-15 11:33:20 -07:00
6f95524be3 Fix race in multi_bind_in_range (#9493)
(cherry picked from commit ee72714c08)
2020-04-14 17:52:30 -07:00
8021d368fe limit test jobs to 16 to prevent OOM (#9500)
(cherry picked from commit 2b2b2cac1f)
2020-04-14 17:51:57 -07:00
d7c43f0c0b Cli: enable json output (#9478) (#9495)
automerge
2020-04-14 14:22:23 -07:00
6765453f8a validator: Improve --dynamic-port-range and sys-tuner error messages (bp #9494) (#9496)
automerge
2020-04-14 13:54:29 -07:00
adb0824da5 Use same max_age regardless of leader/not-leader (#9423) (#9487)
automerge
2020-04-14 02:50:42 -07:00
f86dcec94b Print signature as part of progress spinner (#9484) (#9485)
automerge
2020-04-14 01:06:22 -07:00
8f28989520 Fail coverage faster in CI 2020-04-13 21:09:55 -07:00
1823d7bdec Assume json_rpc_url can be upgraded to a websocket if no port is supplied
(cherry picked from commit bcfadd6085)
2020-04-13 20:32:49 -07:00
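A hedged sketch of the upgrade rule described above, assuming only the scheme needs to change when no explicit port is supplied; the CLI's real derivation may also adjust the port.

fn websocket_url_from_rpc(json_rpc_url: &str) -> String {
    // http://host -> ws://host, https://host -> wss://host; anything else is left alone.
    if json_rpc_url.starts_with("https://") {
        json_rpc_url.replacen("https://", "wss://", 1)
    } else if json_rpc_url.starts_with("http://") {
        json_rpc_url.replacen("http://", "ws://", 1)
    } else {
        json_rpc_url.to_string()
    }
}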
892a3b6dc4 Rename UpdateNode to UpdateValidatorIdentity (#9475)
automerge
2020-04-13 19:09:42 -07:00
cc987b8884 Unfold coverage test failures
(cherry picked from commit d4ea1ec6ad)
2020-04-13 18:08:37 -07:00
32d616da1e Reorder CI jobs to allow for more concurrent PRs
(cherry picked from commit ce027da236)
2020-04-13 13:00:43 -07:00
6d62d0cd42 Improve address in use error message for RPC pubsub
(cherry picked from commit 37b048effb)
2020-04-13 12:33:17 -07:00
c7d6e2b4a5 Update buildkite-tests.yml
(cherry picked from commit 92a5a51632)
2020-04-13 11:01:16 -07:00
d6f1e4b10a Bump version to v1.1.4 2020-04-12 18:00:06 -07:00
73dad25d74 Sort the output of solana validators by active stake (#9459)
automerge

(cherry picked from commit 3f33f4d3a9)
2020-04-12 17:54:13 -07:00
a895ce51ee Fix flaky new_archiver_external_ip_test (#9457) (#9458)
automerge
2020-04-12 12:48:49 -07:00
3f95e7f055 accounts subcommand now prints account balances in SOL instead of lamports
(cherry picked from commit 3f1399cb0d)
2020-04-12 10:40:06 -07:00
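The display change above is a unit conversion: 1 SOL is 1,000,000,000 lamports, so the sketch below shows the arithmetic involved.

const LAMPORTS_PER_SOL: u64 = 1_000_000_000;

fn lamports_to_sol(lamports: u64) -> f64 {
    lamports as f64 / LAMPORTS_PER_SOL as f64
}
// lamports_to_sol(2_500_000_000) == 2.5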
a54042fc11 Don't subject authorizing a new stake authority to lockup (#9434)
(cherry picked from commit 31ebdbc77f)
2020-04-12 10:09:31 -07:00
68525a961f Remove slot field, add test (#9444) (#9449)
automerge
2020-04-11 15:33:12 -07:00
45093c8092 Calculate account refs fix (#9447) (#9450)
automerge
2020-04-11 13:48:11 -07:00
c3227ab671 Simplify vote simulation (#9435) (#9439)
automerge
2020-04-10 18:50:11 -07:00
967c178f5d Safer cargo command (#9437) (#9440)
automerge
2020-04-10 16:54:33 -07:00
310aa1a63f ReceiveUpdates spams the log, adjust the threshold higher (#9429) (#9430)
automerge
2020-04-10 11:24:00 -07:00
d5ae850169 Search for ports sequentially instead of at random for more predictable port selection (bp #9411) (#9419)
automerge
2020-04-09 21:43:29 -07:00
89f5153316 Rpc: Add getConfirmedSignaturesForAddress (#9407) (#9418)
automerge
2020-04-09 21:15:33 -07:00
677008b6cc Allow lower shred count (#9410) (#9412)
automerge
2020-04-09 18:20:28 -07:00
7936f34df8 Use consistent vote account filename (#9414) (#9415)
automerge
2020-04-09 18:00:31 -07:00
65f0187324 Remove dead code (#9404) (#9409)
automerge
2020-04-09 14:08:15 -07:00
8dc5d10f9c Rpc: Add getConfirmedTransaction (#9381) (#9393)
automerge
2020-04-09 09:44:24 -07:00
58d8c3ad70 Remove Trust Wallet Beta install instructions (#9396) (#9398)
automerge
2020-04-09 08:56:50 -07:00
7df45cf58a Fix partition setup (#9386) (#9394)
automerge
2020-04-09 02:55:26 -07:00
3379a8470d Add --no-wait arg to transfer (#9388) (#9391)
automerge
2020-04-09 00:05:06 -07:00
0969e87b08 Moar vm.max_map_count (#9385)
automerge
2020-04-08 19:14:15 -07:00
7a0dcdd1a4 Add Metrics/Dashboards tracking block production (#9342) (#9380)
automerge
2020-04-08 15:54:04 -07:00
34893d2449 Add blockstore address-to-signature index (#9367) (#9379)
automerge
2020-04-08 14:05:54 -07:00
ec8d1c5e2b Improve ledger-tool/accounts for easier debugging (#9370) (#9372)
automerge
2020-04-08 11:43:00 -07:00
e1dbed25b6 Default to mainnet-beta (#9326) (#9368)
automerge
2020-04-07 21:40:04 -07:00
3b08a2a116 Add 1 SOL grace, to allow for a compliant system account to fund a reasonable number of transactions. (#9359) (#9364)
automerge
2020-04-07 14:39:16 -07:00
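A minimal sketch of the grace rule described above, using a hypothetical helper rather than the watchtower's actual check: a monitored account only alerts once it falls more than 1 SOL below its expected balance.

const LAMPORTS_PER_SOL: u64 = 1_000_000_000;

fn balance_needs_alert(current_lamports: u64, expected_lamports: u64) -> bool {
    // Staying within 1 SOL of the expected balance is still considered healthy,
    // so the account can fund a reasonable number of transactions without paging anyone.
    current_lamports + LAMPORTS_PER_SOL < expected_lamports
}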
7e42eca4b0 Cache solana-perf.tgz to speed up CI (#9360)
automerge

(cherry picked from commit dc91698b3a)
2020-04-07 13:31:57 -07:00
580304add4 Bump version to 1.1.3 2020-04-07 09:33:26 -07:00
b58ce6c740 Cache downloads to speed up CI (#9355)
automerge
2020-04-06 23:40:30 -07:00
0b27d0b363 Add support for monitoring system account balances (#9345)
automerge

(cherry picked from commit 03978ac5a5)
2020-04-06 22:58:10 -07:00
6ea74c3d29 RpcClient: include signature check in send_transaction, bump send retries in get_num_blocks_since_signature_confirmation (#9341) (#9346)
automerge
2020-04-06 20:24:27 -07:00
15631f8194 Fix docs (#9349) (#9350)
automerge
2020-04-06 20:22:11 -07:00
b87a1d2bc5 Optimize broadcast cluster_info critical section (#9327) (#9344)
automerge
2020-04-06 19:11:23 -07:00
eae98ad8ab Clean up paper/file system wallet docs (#9340) (#9347)
* Add filesystem wallet page

* Move validator paper wallet instructions to validator page

* Remove paper wallet staking section

* Add steps for multiple fs and paper wallets

* Add keypair convention page and better multi-wallet example
2020-04-06 19:33:59 -06:00
3a6c23e995 Make TestValidator mint_lamports configurable (#9337) (#9339)
automerge
2020-04-06 17:32:49 -07:00
2e3db6aba8 Add instructions for multiple trust wallet addresses (#9335) (#9336)
automerge
2020-04-06 15:04:57 -07:00
f1e635d088 Update choose cluster docs (#9328) (#9331)
automerge
2020-04-06 12:21:31 -07:00
cc07c86aab Reinstate commitment param to support old clients (#9324)
automerge
2020-04-06 11:10:06 -07:00
543b6016ea Remove write lock (#9311) (#9315)
automerge
2020-04-06 09:22:42 -07:00
f4e05909f7 Update getSignatureStatuses to return historical statuses (#9314) (#9322)
automerge
2020-04-06 04:54:12 -07:00
5da1466d08 Introduce background stale AppendVec shrink mechanism (#9219) (#9318)
automerge
2020-04-06 02:31:12 -07:00
7a8528793e Deprecate confirmTransaction, getSignatureStatus, and getSignatureConfirmation (#9298) (#9309)
automerge
2020-04-05 00:51:44 -07:00
4a0338c902 Rework TransactionStatus index in blockstore (#9281) (#9304)
automerge
2020-04-04 23:09:06 -07:00
11b4da4146 RPC: add err field to TransactionStatus, alongside the now deprecated status field (#9296) (#9303)
automerge
2020-04-04 21:58:44 -07:00
33c19130b5 Add log before opening database
(cherry picked from commit b557b3170e)
2020-04-03 15:02:16 -07:00
0c7689206c Advance if no blocks are available in the given range 2020-04-03 14:56:22 -07:00
756bc3b5bb vote-authorize-voter no longer fails if the current authorized voter is not the fee payer (#9288)
automerge
2020-04-03 13:57:15 -07:00
571b2eb807 Update set-solana-release-tag.sh 2020-04-03 11:21:54 -06:00
9819fe6684 Fix sed command for mac and linux (#9287) 2020-04-03 10:44:03 -06:00
ec7e44659d Minor doc fixup (#9285)
automerge
2020-04-03 09:32:57 -07:00
40d0f8da2d Bump version to 1.1.2 2020-04-02 22:35:56 -07:00
47ddb84078 cli: Add --follow option to catchup command to allow for easy ongoing monitoring between two nodes (bp #9260) (#9278)
automerge
2020-04-02 20:43:19 -07:00
4649378f95 ReplayStage fixes (#9271)
Co-authored-by: Carl <carl@solana.com>
2020-04-02 18:12:59 -07:00
3f6027055c Tame overeager wallet manager (#9262) (#9272)
automerge
2020-04-02 16:54:14 -07:00
d61a46476a Backport wallet doc changes to v1.1 (#9266)
* Add ledger live screenshots and reduce duplicate instructions (#9258)

automerge

* Add instructions for Trust Wallet Beta for Android (#9261)

automerge
2020-04-02 15:36:50 -06:00
c112f51f97 Add instructions for Trust Wallet Beta for Android (#9261) (#9265)
automerge
2020-04-02 12:25:46 -07:00
c1351d6b12 Set checks timeout to 20 minutes 2020-04-02 13:11:22 -06:00
c1acfe4843 Add epoch subcommand (#9249) (#9255)
automerge
2020-04-01 21:41:22 -07:00
68a4288078 Place AccountsHashes in same enum ordinal position as the v1.0 version (#9251) (#9253)
automerge

(cherry picked from commit 8b14eb9020)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-04-01 19:29:55 -07:00
c4c96e1460 Undo breaking rpc removal of getSignatureConfirmation (#9245) (#9250)
automerge
2020-04-01 17:57:41 -07:00
32ab57fa83 Do not trigger tests if only docs were modified (#9240) (#9242)
automerge
2020-04-01 14:30:36 -07:00
a33e8cc164 Do not trigger tests if only docs were modified (#9240) (#9243) 2020-04-01 14:46:38 -06:00
c8b4f616b0 Undo getSignatureStatus breaking change, add getSignatureStatuses (#9232)
automerge
2020-04-01 11:53:55 -07:00
380c3b0080 Add fee-payer option to docs (#9230) (#9237)
automerge
2020-04-01 11:29:39 -07:00
2d6847c27b Add a support page for wallet docs (#9229) (#9235)
automerge
2020-04-01 11:26:36 -07:00
d5b9899ac9 Clean up solana-stake-accounts (#9211) (#9213)
automerge
2020-04-01 10:37:27 -07:00
9817cd769a Fix solana-stake-accounts rebase/move (#9199) (#9210)
automerge
2020-04-01 09:11:54 -07:00
ec3d2fdbdc Fix repair dos (#9056) (#9221)
automerge
2020-04-01 07:47:15 -07:00
1f794fb1da Tune udp buffers and vmmap immediately (#9194) (#9217)
automerge
2020-04-01 01:19:19 -07:00
89e1d7300d Fix error with account hash list getting too big for gossip (#9197) (#9215)
automerge
2020-03-31 23:44:58 -07:00
d239550e68 Fix panic (#9195) (#9209)
automerge
2020-03-31 21:35:12 -07:00
3dc336e1f1 Remove unnecessary exception and add a new one (#9200) (#9206)
(cherry picked from commit 62e12e3af5)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-03-31 19:38:37 -07:00
220a369efa Enforce an executable's rent exemption in the runtime (#9134) (#9191)
(cherry picked from commit 130c0b484d)

Co-authored-by: Jack May <jack@solana.com>
2020-03-31 11:57:19 -07:00
b079564a13 Add more Ledger wallet documentation (#9182) (#9190)
automerge
2020-03-31 10:08:02 -07:00
e8935aa99e Fix links (#9184) (#9188)
automerge
2020-03-31 09:57:43 -07:00
016a342de0 solana-validator now supports multiple --authorized-voter arguments (#9174) (#9181)
automerge
2020-03-31 09:21:47 -07:00
47c6dfe1aa Bump version to v1.1.1 2020-03-30 23:15:07 -07:00
c66d528e85 Check ClusterSlots for confirmation of block propagation (#9115) (#9178)
(cherry picked from commit 66946a4680)

Co-authored-by: carllin <wumu727@gmail.com>
2020-03-30 23:09:00 -07:00
8ba8deb933 Ledger cleanup fixes (#9131) (#9176)
automerge
2020-03-30 20:41:48 -07:00
587342d5e3 Install solana-stake-accounts (#9169) (#9173)
automerge
2020-03-30 19:53:39 -07:00
f31d2d9cc4 Use cluster confirmations in rpc and pubsub (#9138) (#9170)
automerge
2020-03-30 18:11:45 -07:00
bc761c2c02 Add solana-stake-accounts CLI tool (bp #9164) (#9168)
automerge
2020-03-30 17:25:07 -07:00
6f4bc3aaff Store BlockCommitmentCache slot and root metadata (#9154) (#9162)
automerge
2020-03-30 11:40:11 -07:00
070664ff94 Make repair metrics less chatty (#9094) (#9156)
automerge
2020-03-29 16:18:48 -07:00
61c2883de6 Calculate ref counts earlier to prevent bad clean (#9147) (#9155)
automerge
2020-03-29 15:53:56 -07:00
e32f7dbe49 catchup now retries when the desired node is not yet online (#9148) (#9152)
automerge
2020-03-29 10:39:56 -07:00
c0b178db45 Sanitize zero lamport accounts in append vecs (#9083) (#9149)
automerge
2020-03-29 00:39:28 -07:00
1027b0681b Fix race in RPC subscriptions test (#9142) (#9145)
automerge
2020-03-28 12:00:20 -07:00
347 changed files with 21189 additions and 14655 deletions

View File

@ -3,3 +3,16 @@ root: ./docs/src
structure:
readme: introduction.md
summary: SUMMARY.md
redirects:
wallet: ./wallet-guide/README.md
wallet/app-wallets: ./wallet-guide/apps.md
wallet/app-wallets/trust-wallet: ./wallet-guide/trust-wallet.md
wallet/app-wallets/ledger-live: ./wallet-guide/ledger-live.md
wallet/cli-wallets: ./wallet-guide/cli.md
wallet/cli-wallets/paper-wallet: ./paper-wallet/README.md
wallet/cli-wallets/paper-wallet/paper-wallet-usage: ./paper-wallet/paper-wallet-usage.md
wallet/cli-wallets/remote-wallet: ./hardware-wallets/README.md
wallet/cli-wallets/remote-wallet/ledger: ./hardware-wallets/ledger.md
wallet/cli-wallets/file-system-wallet: ./file-system-wallet/README.md
wallet/support: ./wallet-guide/support.md

6576
Cargo.lock generated

File diff suppressed because it is too large

View File

@ -52,6 +52,7 @@ members = [
"sdk",
"sdk-c",
"scripts",
"stake-accounts",
"stake-monitor",
"sys-tuner",
"transaction-status",

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.1.0"
version = "1.1.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -10,10 +10,13 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-measure = { path = "../measure", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
rand = "0.6.5"
solana-logger = { path = "../logger", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-measure = { path = "../measure", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
rand = "0.7.0"
clap = "2.33.0"
crossbeam-channel = "0.4"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "1.1.0"
version = "1.1.8"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -11,30 +11,33 @@ edition = "2018"
[dependencies]
bincode = "1.2.1"
crossbeam-channel = "0.4"
ed25519-dalek = "=1.0.0-pre.1"
ed25519-dalek = "=1.0.0-pre.3"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-client = { path = "../client", version = "1.1.0" }
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-client = { path = "../client", version = "1.1.8" }
solana-storage-program = { path = "../programs/storage", version = "1.1.8" }
thiserror = "1.0"
serde = "1.0.105"
serde_json = "1.0.48"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-chacha = { path = "../chacha", version = "1.1.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-streamer = { path = "../streamer", version = "1.1.0" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-chacha = { path = "../chacha", version = "1.1.8" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-core = { path = "../core", version = "1.1.8" }
solana-streamer = { path = "../streamer", version = "1.1.8" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
[dev-dependencies]
hex = "0.4.2"
[lib]
name = "solana_archiver_lib"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -1,7 +1,7 @@
use crate::result::ArchiverError;
use crossbeam_channel::unbounded;
use rand::{thread_rng, Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use rand::{thread_rng, Rng};
use rand_chacha::{rand_core::SeedableRng, ChaChaRng};
use solana_archiver_utils::sample_file;
use solana_chacha::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
use solana_client::{
@ -14,7 +14,7 @@ use solana_core::{
contact_info::ContactInfo,
gossip_service::GossipService,
repair_service,
repair_service::{RepairService, RepairSlotRange, RepairStrategy},
repair_service::{RepairService, RepairSlotRange, RepairStats, RepairStrategy},
serve_repair::ServeRepair,
shred_fetch_stage::ShredFetchStage,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
@ -53,7 +53,7 @@ use std::{
result,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::{channel, Receiver, Sender},
sync::{Arc, RwLock},
sync::Arc,
thread::{sleep, spawn, JoinHandle},
time::Duration,
};
@ -185,9 +185,9 @@ impl Archiver {
info!("Archiver: id: {}", keypair.pubkey());
info!("Creating cluster info....");
let mut cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone());
let cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone());
cluster_info.set_entrypoint(cluster_entrypoint.clone());
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let cluster_slots = Arc::new(ClusterSlots::default());
// Note for now, this ledger will not contain any of the existing entries
// in the ledger located at ledger_path, and will only append on newly received
@ -200,7 +200,7 @@ impl Archiver {
info!("Connecting to the cluster via {:?}", cluster_entrypoint);
let (nodes, _) =
match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 1) {
match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 2) {
Ok(nodes_and_archivers) => nodes_and_archivers,
Err(e) => {
//shutdown services before exiting
@ -308,7 +308,7 @@ impl Archiver {
fn run(
meta: &mut ArchiverMeta,
blockstore: &Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>,
@ -365,12 +365,12 @@ impl Archiver {
}
fn redeem_rewards(
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
client_commitment: CommitmentConfig,
) {
let nodes = cluster_info.read().unwrap().tvu_peers();
let nodes = cluster_info.tvu_peers();
let client = solana_core::gossip_service::get_client(&nodes);
if let Ok(Some(account)) =
@ -405,7 +405,7 @@ impl Archiver {
#[allow(clippy::too_many_arguments)]
fn setup(
meta: &mut ArchiverMeta,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
node_info: &ContactInfo,
@ -491,7 +491,7 @@ impl Archiver {
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
node_info: &ContactInfo,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
) {
info!(
"window created, waiting for ledger download starting at slot {:?}",
@ -519,11 +519,8 @@ impl Archiver {
contact_info.tvu = "0.0.0.0:0".parse().unwrap();
contact_info.wallclock = timestamp();
// copy over the adopted shred_version from the entrypoint
contact_info.shred_version = cluster_info.read().unwrap().my_data().shred_version;
{
let mut cluster_info_w = cluster_info.write().unwrap();
cluster_info_w.insert_self(contact_info);
}
contact_info.shred_version = cluster_info.my_shred_version();
cluster_info.update_contact_info(|current| *current = contact_info);
}
fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc<Blockstore>) -> Result<()> {
@ -626,12 +623,12 @@ impl Archiver {
fn submit_mining_proof(
meta: &ArchiverMeta,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
) {
// No point if we've got no storage account...
let nodes = cluster_info.read().unwrap().tvu_peers();
let nodes = cluster_info.tvu_peers();
let client = solana_core::gossip_service::get_client(&nodes);
let storage_balance = client
.poll_get_balance_with_commitment(&storage_keypair.pubkey(), meta.client_commitment);
@ -689,13 +686,10 @@ impl Archiver {
}
fn get_segment_config(
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
client_commitment: CommitmentConfig,
) -> Result<u64> {
let rpc_peers = {
let cluster_info = cluster_info.read().unwrap();
cluster_info.all_rpc_peers()
};
let rpc_peers = cluster_info.all_rpc_peers();
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
let rpc_client = {
@ -721,7 +715,7 @@ impl Archiver {
/// Waits until the first segment is ready, and returns the current segment
fn poll_for_segment(
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
slots_per_segment: u64,
previous_blockhash: &Hash,
exit: &Arc<AtomicBool>,
@ -741,17 +735,14 @@ impl Archiver {
/// Poll for a different blockhash and associated max_slot than `previous_blockhash`
fn poll_for_blockhash_and_slot(
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
slots_per_segment: u64,
previous_blockhash: &Hash,
exit: &Arc<AtomicBool>,
) -> Result<(Hash, u64)> {
info!("waiting for the next turn...");
loop {
let rpc_peers = {
let cluster_info = cluster_info.read().unwrap();
cluster_info.all_rpc_peers()
};
let rpc_peers = cluster_info.all_rpc_peers();
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
let rpc_client = {
@ -844,13 +835,14 @@ impl Archiver {
repair_service::MAX_REPAIR_LENGTH,
&repair_slot_range,
);
let mut repair_stats = RepairStats::default();
//iter over the repairs and send them
if let Ok(repairs) = repairs {
let reqs: Vec<_> = repairs
.into_iter()
.filter_map(|repair_request| {
serve_repair
.map_repair_request(&repair_request)
.map_repair_request(&repair_request, &mut repair_stats)
.map(|result| ((archiver_info.gossip, result), repair_request))
.ok()
})

View File

@ -1,4 +1,3 @@
use serde_json;
use solana_client::client_error;
use solana_ledger::blockstore;
use solana_sdk::transport;

View File

@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "1.1.0"
version = "1.1.8"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,16 +10,19 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
solana-chacha = { path = "../chacha", version = "1.1.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
rand = "0.7.0"
solana-chacha = { path = "../chacha", version = "1.1.8" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
[dev-dependencies]
hex = "0.4.2"
[lib]
name = "solana_archiver_utils"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.1.0"
version = "1.1.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -10,11 +10,14 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.10.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-core = { path = "../core", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.1.0"
version = "1.1.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -10,13 +10,16 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.0" }
solana-streamer = { path = "../streamer", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-measure = { path = "../measure", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
rand = "0.6.5"
solana-core = { path = "../core", version = "1.1.8" }
solana-streamer = { path = "../streamer", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-measure = { path = "../measure", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
rand = "0.7.0"
crossbeam-channel = "0.4"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -28,7 +28,7 @@ use solana_sdk::{
transaction::Transaction,
};
use std::{
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
thread::sleep,
time::{Duration, Instant},
};
@ -152,7 +152,7 @@ fn main() {
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
@ -253,7 +253,7 @@ fn main() {
poh_recorder.lock().unwrap().set_bank(&bank);
assert!(poh_recorder.lock().unwrap().bank().is_some());
if bank.slot() > 32 {
bank_forks.set_root(root, &None);
bank_forks.set_root(root, &None, None);
root += 1;
}
debug!(

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.1.0"
version = "1.1.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -14,21 +14,24 @@ itertools = "0.9.0"
log = "0.4.8"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.6.5"
rand = "0.7.0"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-genesis = { path = "../genesis", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.0" }
solana-faucet = { path = "../faucet", version = "1.1.0" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-core = { path = "../core", version = "1.1.8" }
solana-genesis = { path = "../genesis", version = "1.1.8" }
solana-client = { path = "../client", version = "1.1.8" }
solana-faucet = { path = "../faucet", version = "1.1.8" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.1.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.1.8" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -86,7 +86,7 @@ fn test_exchange_bank_client() {
solana_logger::setup();
let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
let mut bank = Bank::new(&genesis_config);
bank.add_instruction_processor(id(), process_instruction);
bank.add_static_program("exchange_program", id(), process_instruction);
let clients = vec![BankClient::new(bank)];
let mut config = Config::default();

View File

@ -2,14 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.1.0"
version = "1.1.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-streamer = { path = "../streamer", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-streamer = { path = "../streamer", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.1.0"
version = "1.1.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -14,24 +14,27 @@ log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.0" }
solana-genesis = { path = "../genesis", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.0" }
solana-faucet = { path = "../faucet", version = "1.1.0" }
solana-librapay = { path = "../programs/librapay", version = "1.1.0", optional = true }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-measure = { path = "../measure", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.0", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-core = { path = "../core", version = "1.1.8" }
solana-genesis = { path = "../genesis", version = "1.1.8" }
solana-client = { path = "../client", version = "1.1.8" }
solana-faucet = { path = "../faucet", version = "1.1.8" }
#solana-librapay = { path = "../programs/librapay", version = "1.1.8", optional = true }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
solana-measure = { path = "../measure", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
#solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.8", optional = true }
[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.1.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.1.8" }
[features]
move = ["solana-librapay", "solana-move-loader-program"]
#[features]
#move = ["solana-librapay", "solana-move-loader-program"]
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "1.1.0"
version = "1.1.8"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,15 +10,18 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.0" }
solana-chacha = { path = "../chacha", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.8" }
solana-chacha = { path = "../chacha", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
[dev-dependencies]
hex-literal = "0.2.1"
[lib]
name = "solana_chacha_cuda"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "1.1.0"
version = "1.1.8"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,3 +10,6 @@ edition = "2018"
[build-dependencies]
cc = "1.0.49"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "1.1.0"
version = "1.1.8"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,16 +10,19 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.0" }
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.8" }
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
[dev-dependencies]
hex-literal = "0.2.1"
[lib]
name = "solana_chacha"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -2,6 +2,16 @@
# Build steps that run after the primary pipeline on pushes and tags.
# Pull requests do not run these steps.
steps:
- command: "ci/publish-tarball.sh"
timeout_in_minutes: 60
name: "publish tarball"
- command: "ci/publish-docs.sh"
timeout_in_minutes: 15
name: "publish docs"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
- wait
- command: "sdk/docker-solana/build.sh"
timeout_in_minutes: 60
name: "publish docker"
@ -9,12 +19,6 @@ steps:
timeout_in_minutes: 240
name: "publish crate"
branches: "!master"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
- command: "ci/publish-tarball.sh"
timeout_in_minutes: 60
name: "publish tarball"
- command: "ci/publish-docs.sh"
timeout_in_minutes: 15
name: "publish docs"
# - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
# name: "move"
# timeout_in_minutes: 20

26
ci/buildkite-tests.yml Normal file
View File

@ -0,0 +1,26 @@
# These steps are conditionally triggered by ci/buildkite.yml when files
# other than those in docs/ are modified
steps:
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage"
timeout_in_minutes: 30
- wait
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 60
artifact_paths: "log-*.txt"
- wait
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 40
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
- command: "ci/test-bench.sh"
name: "bench"
timeout_in_minutes: 30
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
name: "local-cluster"
timeout_in_minutes: 45
artifact_paths: "log-*.txt"

View File

@ -1,42 +1,25 @@
# Build steps that run on pushes and pull requests.
# If files other than those in docs/ were modified, this will be followed up by
# ci/buildkite-tests.yml
#
# Release tags use buildkite-release.yml instead
steps:
- command: "ci/shellcheck.sh"
name: "shellcheck"
timeout_in_minutes: 5
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
name: "checks"
timeout_in_minutes: 20
- command: "ci/shellcheck.sh"
name: "shellcheck"
timeout_in_minutes: 5
- wait
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 40
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
- command: "ci/test-bench.sh"
name: "bench"
timeout_in_minutes: 30
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 60
artifact_paths: "log-*.txt"
agents:
- "queue=rpc-test-capable"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
name: "move"
timeout_in_minutes: 20
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
name: "local-cluster"
timeout_in_minutes: 45
artifact_paths: "log-*.txt"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage"
timeout_in_minutes: 30
agents:
- "queue=rpc-test-capable"
- command: "ci/maybe-trigger-tests.sh"
name: "maybe-trigger-tests"
timeout_in_minutes: 2
- wait
- trigger: "solana-secondary"
branches: "!pull/*"
async: true

View File

@ -49,7 +49,7 @@ else
# ~/.cargo
ARGS+=(--volume "$PWD:/home")
fi
ARGS+=(--env "CARGO_HOME=/home/.cargo")
ARGS+=(--env "HOME=/home" --env "CARGO_HOME=/home/.cargo")
# kcov tries to set the personality of the binary which docker
# doesn't allow by default.

View File

@ -1,4 +1,4 @@
FROM solanalabs/rust:1.42.0
FROM solanalabs/rust:1.43.0
ARG date
RUN set -x \

View File

@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.42.0
FROM rust:1.43.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0

21
ci/maybe-trigger-tests.sh Executable file
View File

@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
annotate() {
${BUILDKITE:-false} && {
buildkite-agent annotate "$@"
}
}
# Skip if only the docs have been modified
ci/affects-files.sh \
\!^docs/ \
|| {
annotate --style info \
"Skipping all further tests as only docs/ files were modified"
exit 0
}
annotate --style info "Triggering tests"
buildkite-agent pipeline upload ci/buildkite-tests.yml

View File

@ -71,7 +71,7 @@ echo --- Creating release tarball
export CHANNEL
source ci/rust-version.sh stable
scripts/cargo-install-all.sh +"$rust_stable" --use-move solana-release
scripts/cargo-install-all.sh +"$rust_stable" solana-release
tar cvf solana-release-$TARGET.tar solana-release
bzip2 solana-release-$TARGET.tar
@ -95,9 +95,8 @@ fi
source ci/upload-ci-artifact.sh
for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do
upload-ci-artifact "$file"
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
upload-ci-artifact "$file"
echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
continue
fi

View File

@ -1,28 +1,30 @@
#
# This file maintains the rust versions for use by CI.
#
# Build with stable rust, updating the stable toolchain if necessary:
# $ source ci/rust-version.sh stable
# $ cargo +"$rust_stable" build
#
# Build with nightly rust, updating the nightly toolchain if necessary:
# $ source ci/rust-version.sh nightly
# $ cargo +"$rust_nightly" build
#
# Obtain the environment variables without any automatic toolchain updating:
# $ source ci/rust-version.sh
#
# Obtain the environment variables updating both stable and nightly, only stable, or
# only nightly:
# $ source ci/rust-version.sh all
# $ source ci/rust-version.sh stable
# $ source ci/rust-version.sh nightly
# Then to build with either stable or nightly:
# $ cargo +"$rust_stable" build
# $ cargo +"$rust_nightly" build
#
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.42.0
stable_version=1.43.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2020-03-12
nightly_version=2020-04-23
fi
@ -51,6 +53,10 @@ export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"
nightly)
rustup_install "$rust_nightly"
;;
all)
rustup_install "$rust_stable"
rustup_install "$rust_nightly"
;;
*)
echo "Note: ignoring unknown argument: $1"
;;

View File

@ -25,7 +25,7 @@ source ci/_
source ci/upload-ci-artifact.sh
eval "$(ci/channel-info.sh)"
source ci/rust-version.sh nightly
source ci/rust-version.sh all
set -o pipefail
export RUST_BACKTRACE=1

View File

@ -22,7 +22,7 @@ _ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings
_ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0006
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ docs/build.sh

View File

@ -38,11 +38,16 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105
# Limit compiler jobs to reduce memory usage
# on machines with 1gb/thread of memory
NPROC=$(nproc)
NPROC=$((NPROC>16 ? 16 : NPROC))
echo "Executing $testName"
case $testName in
test-stable)
_ cargo +"$rust_stable" test --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
_ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
#_ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
;;
test-stable-perf)
ci/affects-files.sh \

View File

@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.1.0"
version = "1.1.8"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"
@ -20,3 +20,6 @@ chrono = "0.4"
[lib]
name = "solana_clap_utils"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -0,0 +1,17 @@
use crate::ArgConstant;
use clap::Arg;
pub const COMMITMENT_ARG: ArgConstant<'static> = ArgConstant {
name: "commitment",
long: "commitment",
help: "Return information at the selected commitment level",
};
pub fn commitment_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(COMMITMENT_ARG.name)
.long(COMMITMENT_ARG.long)
.takes_value(true)
.possible_values(&["default", "max", "recent", "root"])
.value_name("COMMITMENT_LEVEL")
.help(COMMITMENT_ARG.help)
}

View File

@ -7,6 +7,7 @@ use clap::ArgMatches;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
clock::UnixTimestamp,
commitment_config::CommitmentConfig,
native_token::sol_to_lamports,
pubkey::Pubkey,
signature::{read_keypair_file, Keypair, Signature, Signer},
@ -62,6 +63,21 @@ pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option<Keypair> {
}
}
pub fn keypairs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Keypair>> {
matches.values_of(name).map(|values| {
values
.filter_map(|value| {
if value == ASK_KEYWORD {
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
keypair_from_seed_phrase(name, skip_validation, true).ok()
} else {
read_keypair_file(value).ok()
}
})
.collect()
})
}
// Return a pubkey for an argument that can itself be parsed into a pubkey,
// or is a filename that can be read as a keypair
pub fn pubkey_of(matches: &ArgMatches<'_>, name: &str) -> Option<Pubkey> {
@ -101,7 +117,7 @@ pub fn pubkeys_sigs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<(Pubk
pub fn signer_of(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<(Option<Box<dyn Signer>>, Option<Pubkey>), Box<dyn std::error::Error>> {
if let Some(location) = matches.value_of(name) {
let signer = signer_from_path(matches, location, name, wallet_manager)?;
@ -115,7 +131,7 @@ pub fn signer_of(
pub fn pubkey_of_signer(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<Pubkey>, Box<dyn std::error::Error>> {
if let Some(location) = matches.value_of(name) {
Ok(Some(pubkey_from_path(
@ -132,7 +148,7 @@ pub fn pubkey_of_signer(
pub fn pubkeys_of_multiple_signers(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<Vec<Pubkey>>, Box<dyn std::error::Error>> {
if let Some(pubkey_matches) = matches.values_of(name) {
let mut pubkeys: Vec<Pubkey> = vec![];
@ -148,7 +164,7 @@ pub fn pubkeys_of_multiple_signers(
pub fn resolve_signer(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn std::error::Error>> {
Ok(resolve_signer_from_path(
matches,
@ -162,6 +178,15 @@ pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option<u64> {
value_of(matches, name).map(sol_to_lamports)
}
pub fn commitment_of(matches: &ArgMatches<'_>, name: &str) -> Option<CommitmentConfig> {
matches.value_of(name).map(|value| match value {
"max" => CommitmentConfig::max(),
"recent" => CommitmentConfig::recent(),
"root" => CommitmentConfig::root(),
_ => CommitmentConfig::default(),
})
}
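Together with the commitment_arg constant added in commitment.rs above, commitment_of gives every subcommand a uniform --commitment flag. A minimal sketch of the round trip (the app name is illustrative); values outside default/max/recent/root are rejected by clap, and "default" or an absent flag falls back to CommitmentConfig::default():

use clap::App;
use solana_clap_utils::{
    commitment::{commitment_arg, COMMITMENT_ARG},
    input_parsers::commitment_of,
};
use solana_sdk::commitment_config::CommitmentConfig;

fn main() {
    // Illustrative only: register the shared arg and parse it back.
    let matches = App::new("sketch")
        .arg(commitment_arg())
        .get_matches_from(vec!["sketch", "--commitment", "recent"]);

    let commitment = commitment_of(&matches, COMMITMENT_ARG.name)
        .unwrap_or_else(CommitmentConfig::default);
    println!("{:?}", commitment); // the recent commitment level, in this case
}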
#[cfg(test)]
mod tests {
use super::*;

View File

@ -8,7 +8,7 @@ use clap::ArgMatches;
use rpassword::prompt_password_stderr;
use solana_remote_wallet::{
remote_keypair::generate_remote_keypair,
remote_wallet::{RemoteWalletError, RemoteWalletManager},
remote_wallet::{maybe_wallet_manager, RemoteWalletError, RemoteWalletManager},
};
use solana_sdk::{
pubkey::Pubkey,
@ -64,7 +64,7 @@ pub fn signer_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Box<dyn Signer>, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Ask => {
@ -88,6 +88,9 @@ pub fn signer_from_path(
Ok(Box::new(read_keypair(&mut stdin)?))
}
KeypairUrl::Usb(path) => {
if wallet_manager.is_none() {
*wallet_manager = maybe_wallet_manager()?;
}
if let Some(wallet_manager) = wallet_manager {
Ok(Box::new(generate_remote_keypair(
path,
@ -122,7 +125,7 @@ pub fn pubkey_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Pubkey, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Pubkey(pubkey) => Ok(pubkey),
@ -134,7 +137,7 @@ pub fn resolve_signer_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Ask => {
@ -158,6 +161,9 @@ pub fn resolve_signer_from_path(
read_keypair(&mut stdin).map(|_| None)
}
KeypairUrl::Usb(path) => {
if wallet_manager.is_none() {
*wallet_manager = maybe_wallet_manager()?;
}
if let Some(wallet_manager) = wallet_manager {
let path = generate_remote_keypair(
path,
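The recurring signature change in this file and input_parsers.rs, from Option<&Arc<RemoteWalletManager>> to &mut Option<Arc<RemoteWalletManager>>, lets the usb:// paths connect to a hardware wallet lazily on first use while the caller keeps the connection alive for subsequent calls. A self-contained sketch of that &mut Option pattern with placeholder types (nothing below is from the crate):

struct Device; // stands in for RemoteWalletManager

fn maybe_connect() -> Result<Option<Device>, String> {
    // Stand-in for maybe_wallet_manager(): may succeed yet find no device.
    Ok(Some(Device))
}

fn use_device(slot: &mut Option<Device>) -> Result<(), String> {
    if slot.is_none() {
        *slot = maybe_connect()?; // connect at most once; the caller owns the result
    }
    if let Some(_device) = slot {
        Ok(()) // use the device here
    } else {
        Err("no device found".to_string())
    }
}

fn main() -> Result<(), String> {
    let mut wallet_manager = None;
    use_device(&mut wallet_manager)?; // connects on first use
    use_device(&mut wallet_manager)?; // reuses the cached connection
    Ok(())
}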

View File

@ -42,6 +42,7 @@ impl std::fmt::Debug for DisplayError {
}
}
pub mod commitment;
pub mod input_parsers;
pub mod input_validators;
pub mod keypair;

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.0"
version = "1.1.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -15,3 +15,6 @@ serde = "1.0.105"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
url = "2.1.1"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -26,7 +26,7 @@ impl Default for Config {
keypair_path.extend(&[".config", "solana", "id.json"]);
keypair_path.to_str().unwrap().to_string()
};
let json_rpc_url = "http://127.0.0.1:8899".to_string();
let json_rpc_url = "https://api.mainnet-beta.solana.com".to_string();
// An empty websocket_url string indicates the client should call
// `Config::compute_websocket_url(&json_rpc_url)`
@ -60,17 +60,38 @@ impl Config {
ws_url
.set_scheme(if is_secure { "wss" } else { "ws" })
.expect("unable to set scheme");
let ws_port = match json_rpc_url.port() {
Some(port) => port + 1,
None => {
if is_secure {
8901
} else {
8900
}
}
};
ws_url.set_port(Some(ws_port)).expect("unable to set port");
if let Some(port) = json_rpc_url.port() {
ws_url.set_port(Some(port + 1)).expect("unable to set port");
}
ws_url.to_string()
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn compute_websocket_url() {
assert_eq!(
Config::compute_websocket_url(&"http://devnet.solana.com"),
"ws://devnet.solana.com/".to_string()
);
assert_eq!(
Config::compute_websocket_url(&"https://devnet.solana.com"),
"wss://devnet.solana.com/".to_string()
);
assert_eq!(
Config::compute_websocket_url(&"http://example.com:8899"),
"ws://example.com:8900/".to_string()
);
assert_eq!(
Config::compute_websocket_url(&"https://example.com:1234"),
"wss://example.com:1235/".to_string()
);
assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
}
}
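The behavior pinned down by the tests above can be reproduced with the url crate directly: the scheme becomes its websocket counterpart, and only an explicitly specified RPC port is mapped to port + 1; the old hard-coded 8900/8901 fallbacks are gone. A standalone sketch, assuming url = "2":

use url::Url;

fn ws_url_for(json_rpc_url: &str) -> String {
    let rpc = match Url::parse(json_rpc_url) {
        Ok(url) => url,
        Err(_) => return String::new(), // unparsable input ("garbage") yields ""
    };
    let is_secure = rpc.scheme() == "https";
    let mut ws = rpc.clone();
    ws.set_scheme(if is_secure { "wss" } else { "ws" })
        .expect("unable to set scheme");
    // Url::port() is None for scheme-default ports, so only explicit ports are bumped.
    if let Some(port) = rpc.port() {
        ws.set_port(Some(port + 1)).expect("unable to set port");
    }
    ws.to_string()
}

fn main() {
    assert_eq!(ws_url_for("http://devnet.solana.com"), "ws://devnet.solana.com/");
    assert_eq!(ws_url_for("https://example.com:1234"), "wss://example.com:1235/");
}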

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.0"
version = "1.1.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -18,6 +18,7 @@ ctrlc = { version = "3.1.4", features = ["termination"] }
console = "0.10.0"
dirs = "2.0.2"
log = "0.4.8"
Inflector = "0.11.4"
indicatif = "0.14.0"
humantime = "2.0.0"
num-traits = "0.2"
@ -26,30 +27,33 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-cli-config = { path = "../cli-config", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.0" }
solana-config-program = { path = "../programs/config", version = "1.1.0" }
solana-faucet = { path = "../faucet", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-stake-program = { path = "../programs/stake", version = "1.1.0" }
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.0" }
titlecase = "1.1.0"
solana-budget-program = { path = "../programs/budget", version = "1.1.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-cli-config = { path = "../cli-config", version = "1.1.8" }
solana-client = { path = "../client", version = "1.1.8" }
solana-config-program = { path = "../programs/config", version = "1.1.8" }
solana-faucet = { path = "../faucet", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-stake-program = { path = "../programs/stake", version = "1.1.8" }
solana-storage-program = { path = "../programs/storage", version = "1.1.8" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.8" }
solana-vote-program = { path = "../programs/vote", version = "1.1.8" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.8" }
thiserror = "1.0.13"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.1.0" }
solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
solana-core = { path = "../core", version = "1.1.8" }
solana-budget-program = { path = "../programs/budget", version = "1.1.8" }
tempfile = "3.1.0"
[[bin]]
name = "solana"
path = "src/main.rs"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -1,4 +1,5 @@
use crate::{
cli_output::{CliAccount, OutputFormat},
cluster_query::*,
display::{println_name_value, println_signers},
nonce::{self, *},
@ -21,6 +22,7 @@ use solana_clap_utils::{
use solana_client::{
client_error::{ClientErrorKind, Result as ClientResult},
rpc_client::RpcClient,
rpc_response::{RpcAccount, RpcKeyedAccount},
};
#[cfg(not(test))]
use solana_faucet::faucet::request_airdrop_transaction;
@ -49,6 +51,7 @@ use solana_stake_program::{
stake_state::{Lockup, StakeAuthorize},
};
use solana_storage_program::storage_instruction::StorageAccountType;
use solana_transaction_status::{EncodedTransaction, TransactionEncoding};
use solana_vote_program::vote_state::VoteAuthorize;
use std::{
error,
@ -84,7 +87,7 @@ pub(crate) fn generate_unique_signers(
bulk_signers: Vec<Option<Box<dyn Signer>>>,
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliSignerInfo, Box<dyn error::Error>> {
let mut unique_signers = vec![];
@ -173,6 +176,8 @@ pub enum CliCommand {
Catchup {
node_pubkey: Pubkey,
node_json_rpc_url: Option<String>,
commitment_config: CommitmentConfig,
follow: bool,
},
ClusterVersion,
CreateAddressWithSeed {
@ -188,6 +193,9 @@ pub enum CliCommand {
commitment_config: CommitmentConfig,
},
GetGenesisHash,
GetEpoch {
commitment_config: CommitmentConfig,
},
GetSlot {
commitment_config: CommitmentConfig,
},
@ -219,6 +227,11 @@ pub enum CliCommand {
use_lamports_unit: bool,
commitment_config: CommitmentConfig,
},
TransactionHistory {
address: Pubkey,
end_slot: Option<Slot>, // None == latest slot
slot_limit: u64,
},
// Nonce commands
AuthorizeNonceAccount {
nonce_account: Pubkey,
@ -326,6 +339,7 @@ pub enum CliCommand {
destination_account_pubkey: Pubkey,
lamports: u64,
withdraw_authority: SignerIndex,
custodian: Option<SignerIndex>,
sign_only: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
@ -392,6 +406,7 @@ pub enum CliCommand {
},
Cancel(Pubkey),
Confirm(Signature),
DecodeTransaction(Transaction),
Pay(PayCommand),
ResolveSigner(Option<String>),
ShowAccount {
@ -405,6 +420,7 @@ pub enum CliCommand {
to: Pubkey,
from: SignerIndex,
sign_only: bool,
no_wait: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
nonce_authority: SignerIndex,
@ -466,6 +482,7 @@ pub struct CliConfig<'a> {
pub keypair_path: String,
pub rpc_client: Option<RpcClient>,
pub verbose: bool,
pub output_format: OutputFormat,
}
impl CliConfig<'_> {
@ -537,7 +554,7 @@ impl CliConfig<'_> {
if !self.signers.is_empty() {
self.signers[0].try_pubkey()
} else {
Err(SignerError::CustomError(
Err(SignerError::Custom(
"Default keypair must be set if pubkey arg not provided".to_string(),
))
}
@ -557,6 +574,7 @@ impl Default for CliConfig<'_> {
keypair_path: Self::default_keypair_path(),
rpc_client: None,
verbose: false,
output_format: OutputFormat::Display,
}
}
}
@ -564,7 +582,7 @@ impl Default for CliConfig<'_> {
pub fn parse_command(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, Box<dyn error::Error>> {
let response = match matches.subcommand() {
// Cluster Query Commands
@ -586,6 +604,7 @@ pub fn parse_command(
command: CliCommand::GetGenesisHash,
signers: vec![],
}),
("epoch", Some(matches)) => parse_get_epoch(matches),
("slot", Some(matches)) => parse_get_slot(matches),
("total-supply", Some(matches)) => parse_total_supply(matches),
("transaction-count", Some(matches)) => parse_get_transaction_count(matches),
@ -605,6 +624,9 @@ pub fn parse_command(
}),
("stakes", Some(matches)) => parse_show_stakes(matches, wallet_manager),
("validators", Some(matches)) => parse_show_validators(matches),
("transaction-history", Some(matches)) => {
parse_transaction_history(matches, wallet_manager)
}
// Nonce Commands
("authorize-nonce-account", Some(matches)) => {
parse_authorize_nonce_account(matches, default_signer_path, wallet_manager)
@ -787,11 +809,23 @@ pub fn parse_command(
command: CliCommand::Confirm(signature),
signers: vec![],
}),
_ => {
eprintln!("{}", matches.usage());
Err(CliError::BadParameter("Invalid signature".to_string()))
}
_ => Err(CliError::BadParameter("Invalid signature".to_string())),
},
("decode-transaction", Some(matches)) => {
let encoded_transaction = EncodedTransaction::Binary(
matches.value_of("base85_transaction").unwrap().to_string(),
);
if let Some(transaction) = encoded_transaction.decode() {
Ok(CliCommandInfo {
command: CliCommand::DecodeTransaction(transaction),
signers: vec![],
})
} else {
Err(CliError::BadParameter(
"Unable to decode transaction".to_string(),
))
}
}
("pay", Some(matches)) => {
let lamports = lamports_of_sol(matches, "amount").unwrap();
let to = pubkey_of_signer(matches, "to", wallet_manager)?.unwrap();
@ -902,6 +936,7 @@ pub fn parse_command(
let lamports = lamports_of_sol(matches, "amount").unwrap();
let to = pubkey_of_signer(matches, "to", wallet_manager)?.unwrap();
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let no_wait = matches.is_present("no_wait");
let blockhash_query = BlockhashQuery::new_from_matches(matches);
let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?;
let (nonce_authority, nonce_authority_pubkey) =
@ -927,6 +962,7 @@ pub fn parse_command(
lamports,
to,
sign_only,
no_wait,
blockhash_query,
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
@ -1041,7 +1077,7 @@ pub fn return_signers(tx: &Transaction) -> ProcessResult {
pub fn parse_create_address_with_seed(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let from_pubkey = pubkey_of_signer(matches, "from", wallet_manager)?;
let signers = if from_pubkey.is_some() {
@ -1152,13 +1188,47 @@ fn process_balance(
}
}
fn process_confirm(rpc_client: &RpcClient, signature: &Signature) -> ProcessResult {
match rpc_client.get_signature_status(&signature) {
fn process_confirm(
rpc_client: &RpcClient,
config: &CliConfig,
signature: &Signature,
) -> ProcessResult {
match rpc_client.get_signature_status_with_commitment_and_history(
&signature,
CommitmentConfig::max(),
true,
) {
Ok(status) => {
if let Some(result) = status {
match result {
if let Some(transaction_status) = status {
if config.verbose {
match rpc_client
.get_confirmed_transaction(signature, TransactionEncoding::Binary)
{
Ok(confirmed_transaction) => {
println!(
"\nTransaction executed in slot {}:",
confirmed_transaction.slot
);
crate::display::println_transaction(
&confirmed_transaction
.transaction
.transaction
.decode()
.expect("Successful decode"),
&confirmed_transaction.transaction.meta,
" ",
);
}
Err(err) => {
println!("Unable to get confirmed transaction details: {}", err)
}
}
println!();
}
match transaction_status {
Ok(_) => Ok("Confirmed".to_string()),
Err(err) => Ok(format!("Transaction failed with error: {}", err)),
Err(err) => Ok(format!("Transaction failed: {}", err)),
}
} else {
Ok("Not found".to_string())
@ -1168,33 +1238,40 @@ fn process_confirm(rpc_client: &RpcClient, signature: &Signature) -> ProcessResu
}
}
fn process_decode_transaction(transaction: &Transaction) -> ProcessResult {
crate::display::println_transaction(transaction, &None, "");
Ok("".to_string())
}
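The decode step behind the new decode-transaction subcommand is the same one shown in the parse arm earlier in this file: the argument string is wrapped in EncodedTransaction::Binary and decoded, yielding None for anything that is not a valid encoded transaction. A minimal sketch (the function name is illustrative):

use solana_sdk::transaction::Transaction;
use solana_transaction_status::EncodedTransaction;

fn decode_cli_transaction(encoded: &str) -> Option<Transaction> {
    // None when the string does not decode to a Transaction.
    EncodedTransaction::Binary(encoded.to_string()).decode()
}

fn main() {
    // An arbitrary, non-transaction string decodes to None.
    assert!(decode_cli_transaction("not-a-transaction").is_none());
}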
fn process_show_account(
rpc_client: &RpcClient,
_config: &CliConfig,
config: &CliConfig,
account_pubkey: &Pubkey,
output_file: &Option<String>,
use_lamports_unit: bool,
) -> ProcessResult {
let account = rpc_client.get_account(account_pubkey)?;
let data = account.data.clone();
let cli_account = CliAccount {
keyed_account: RpcKeyedAccount {
pubkey: account_pubkey.to_string(),
account: RpcAccount::encode(account),
},
use_lamports_unit,
};
println!();
println_name_value("Public Key:", &account_pubkey.to_string());
println_name_value(
"Balance:",
&build_balance_message(account.lamports, use_lamports_unit, true),
);
println_name_value("Owner:", &account.owner.to_string());
println_name_value("Executable:", &account.executable.to_string());
println_name_value("Rent Epoch:", &account.rent_epoch.to_string());
config.output_format.formatted_print(&cli_account);
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&account.data)?;
println!();
println!("Wrote account data to {}", output_file);
} else if !account.data.is_empty() {
use pretty_hex::*;
println!("{:?}", account.data.hex_dump());
if config.output_format == OutputFormat::Display {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
println!();
println!("Wrote account data to {}", output_file);
} else if !data.is_empty() {
use pretty_hex::*;
println!("{:?}", data.hex_dump());
}
}
Ok("".to_string())
@ -1272,8 +1349,8 @@ fn process_deploy(
trace!("Finalizing program account");
rpc_client
.send_and_confirm_transaction_with_spinner(&mut finalize_tx, &signers)
.map_err(|_| {
CliError::DynamicProgramError("Program finalize transaction failed".to_string())
.map_err(|e| {
CliError::DynamicProgramError(format!("Program finalize transaction failed: {}", e))
})?;
Ok(json!({
@ -1488,6 +1565,7 @@ fn process_transfer(
to: &Pubkey,
from: SignerIndex,
sign_only: bool,
no_wait: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<&Pubkey>,
nonce_authority: SignerIndex,
@ -1534,7 +1612,11 @@ fn process_transfer(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
let result = if no_wait {
rpc_client.send_transaction(&tx)
} else {
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers)
};
log_instruction_custom_error::<SystemError>(result)
}
}
@ -1563,7 +1645,7 @@ fn process_witness(
}
pub fn process_command(config: &CliConfig) -> ProcessResult {
if config.verbose {
if config.verbose && config.output_format == OutputFormat::Display {
println_name_value("RPC URL:", &config.json_rpc_url);
println_name_value("Default Signer Path:", &config.keypair_path);
if config.keypair_path.starts_with("usb://") {
@ -1589,7 +1671,15 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::Catchup {
node_pubkey,
node_json_rpc_url,
} => process_catchup(&rpc_client, node_pubkey, node_json_rpc_url),
commitment_config,
follow,
} => process_catchup(
&rpc_client,
node_pubkey,
node_json_rpc_url,
*commitment_config,
*follow,
),
CliCommand::ClusterVersion => process_cluster_version(&rpc_client),
CliCommand::CreateAddressWithSeed {
from_pubkey,
@ -1600,7 +1690,10 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::GetBlockTime { slot } => process_get_block_time(&rpc_client, *slot),
CliCommand::GetGenesisHash => process_get_genesis_hash(&rpc_client),
CliCommand::GetEpochInfo { commitment_config } => {
process_get_epoch_info(&rpc_client, *commitment_config)
process_get_epoch_info(&rpc_client, config, *commitment_config)
}
CliCommand::GetEpoch { commitment_config } => {
process_get_epoch(&rpc_client, *commitment_config)
}
CliCommand::GetSlot { commitment_config } => {
process_get_slot(&rpc_client, *commitment_config)
@ -1637,13 +1730,19 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
vote_account_pubkeys,
} => process_show_stakes(
&rpc_client,
config,
*use_lamports_unit,
vote_account_pubkeys.as_deref(),
),
CliCommand::ShowValidators {
use_lamports_unit,
commitment_config,
} => process_show_validators(&rpc_client, *use_lamports_unit, *commitment_config),
} => process_show_validators(&rpc_client, config, *use_lamports_unit, *commitment_config),
CliCommand::TransactionHistory {
address,
end_slot,
slot_limit,
} => process_transaction_history(&rpc_client, address, *end_slot, *slot_limit),
// Nonce Commands
@ -1686,7 +1785,12 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::ShowNonceAccount {
nonce_account_pubkey,
use_lamports_unit,
} => process_show_nonce_account(&rpc_client, &nonce_account_pubkey, *use_lamports_unit),
} => process_show_nonce_account(
&rpc_client,
config,
&nonce_account_pubkey,
*use_lamports_unit,
),
// Withdraw lamports from a nonce account
CliCommand::WithdrawFromNonceAccount {
nonce_account,
@ -1865,6 +1969,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
destination_account_pubkey,
lamports,
withdraw_authority,
custodian,
sign_only,
blockhash_query,
ref nonce_account,
@ -1877,6 +1982,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&destination_account_pubkey,
*lamports,
*withdraw_authority,
*custodian,
*sign_only,
blockhash_query,
nonce_account.as_ref(),
@ -1915,7 +2021,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
// Return all or single validator info
CliCommand::GetValidatorInfo(info_pubkey) => {
process_get_validator_info(&rpc_client, *info_pubkey)
process_get_validator_info(&rpc_client, config, *info_pubkey)
}
// Publish validator info
CliCommand::SetValidatorInfo {
@ -2026,7 +2132,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
// Cancel a contract by contract Pubkey
CliCommand::Cancel(pubkey) => process_cancel(&rpc_client, config, &pubkey),
// Confirm the last client transaction by signature
CliCommand::Confirm(signature) => process_confirm(&rpc_client, signature),
CliCommand::Confirm(signature) => process_confirm(&rpc_client, config, signature),
CliCommand::DecodeTransaction(transaction) => process_decode_transaction(transaction),
// If client has positive balance, pay lamports to another address
CliCommand::Pay(PayCommand {
lamports,
@ -2080,6 +2187,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
to,
from,
sign_only,
no_wait,
ref blockhash_query,
ref nonce_account,
nonce_authority,
@ -2091,6 +2199,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
to,
*from,
*sign_only,
*no_wait,
blockhash_query,
nonce_account.as_ref(),
*nonce_authority,
@ -2173,7 +2282,7 @@ where
Err(err) => {
if let ClientErrorKind::TransactionError(TransactionError::InstructionError(
_,
InstructionError::CustomError(code),
InstructionError::Custom(code),
)) = err.kind()
{
if let Some(specific_error) = E::decode_custom_error_to_enum(*code) {
@ -2306,6 +2415,18 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.help("The transaction signature to confirm"),
),
)
.subcommand(
SubCommand::with_name("decode-transaction")
.about("Decode a base-85 binary transaction")
.arg(
Arg::with_name("base85_transaction")
.index(1)
.value_name("BASE58_TRANSACTION")
.takes_value(true)
.required(true)
.help("The transaction to decode"),
),
)
.subcommand(
SubCommand::with_name("create-address-with-seed")
.about("Generate a derived account address with a seed")
@ -2497,6 +2618,12 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.validator(is_valid_signer)
.help("Source account of funds (if different from client local account)"),
)
.arg(
Arg::with_name("no_wait")
.long("no-wait")
.takes_value(false)
.help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"),
)
.offline_args()
.arg(nonce_arg())
.arg(nonce_authority_arg())
@ -2517,7 +2644,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
)
.arg(
Arg::with_name("output_file")
.long("output")
.long("output-file")
.short("o")
.value_name("FILEPATH")
.takes_value(true)
@ -2571,11 +2698,11 @@ mod tests {
write_keypair_file(&default_keypair, &default_keypair_file).unwrap();
let signer_info =
generate_unique_signers(vec![], &matches, &default_keypair_file, None).unwrap();
generate_unique_signers(vec![], &matches, &default_keypair_file, &mut None).unwrap();
assert_eq!(signer_info.signers.len(), 0);
let signer_info =
generate_unique_signers(vec![None, None], &matches, &default_keypair_file, None)
generate_unique_signers(vec![None, None], &matches, &default_keypair_file, &mut None)
.unwrap();
assert_eq!(signer_info.signers.len(), 1);
assert_eq!(signer_info.index_of(None), Some(0));
@ -2587,7 +2714,7 @@ mod tests {
let keypair0_clone_pubkey = keypair0.pubkey();
let signers = vec![None, Some(keypair0.into()), Some(keypair0_clone.into())];
let signer_info =
generate_unique_signers(signers, &matches, &default_keypair_file, None).unwrap();
generate_unique_signers(signers, &matches, &default_keypair_file, &mut None).unwrap();
assert_eq!(signer_info.signers.len(), 2);
assert_eq!(signer_info.index_of(None), Some(0));
assert_eq!(signer_info.index_of(Some(keypair0_pubkey)), Some(1));
@ -2598,7 +2725,7 @@ mod tests {
let keypair0_clone = keypair_from_seed(&[1u8; 32]).unwrap();
let signers = vec![Some(keypair0.into()), Some(keypair0_clone.into())];
let signer_info =
generate_unique_signers(signers, &matches, &default_keypair_file, None).unwrap();
generate_unique_signers(signers, &matches, &default_keypair_file, &mut None).unwrap();
assert_eq!(signer_info.signers.len(), 1);
assert_eq!(signer_info.index_of(Some(keypair0_pubkey)), Some(0));
@ -2619,7 +2746,7 @@ mod tests {
Some(keypair1.into()),
];
let signer_info =
generate_unique_signers(signers, &matches, &default_keypair_file, None).unwrap();
generate_unique_signers(signers, &matches, &default_keypair_file, &mut None).unwrap();
assert_eq!(signer_info.signers.len(), 2);
assert_eq!(signer_info.index_of(Some(keypair0_pubkey)), Some(0));
assert_eq!(signer_info.index_of(Some(keypair1_pubkey)), Some(1));
@ -2645,7 +2772,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "airdrop", "50", &pubkey_string]);
assert_eq!(
parse_command(&test_airdrop, "", None).unwrap(),
parse_command(&test_airdrop, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Airdrop {
faucet_host: None,
@ -2668,7 +2795,7 @@ mod tests {
&keypair.pubkey().to_string(),
]);
assert_eq!(
parse_command(&test_balance, "", None).unwrap(),
parse_command(&test_balance, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Balance {
pubkey: Some(keypair.pubkey()),
@ -2684,7 +2811,7 @@ mod tests {
"--lamports",
]);
assert_eq!(
parse_command(&test_balance, "", None).unwrap(),
parse_command(&test_balance, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Balance {
pubkey: Some(keypair.pubkey()),
@ -2698,7 +2825,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "balance", "--lamports"]);
assert_eq!(
parse_command(&test_balance, &keypair_file, None).unwrap(),
parse_command(&test_balance, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Balance {
pubkey: None,
@ -2714,7 +2841,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "cancel", &pubkey_string]);
assert_eq!(
parse_command(&test_cancel, &keypair_file, None).unwrap(),
parse_command(&test_cancel, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Cancel(pubkey),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
@ -2729,7 +2856,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "confirm", &signature_string]);
assert_eq!(
parse_command(&test_confirm, "", None).unwrap(),
parse_command(&test_confirm, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Confirm(signature),
signers: vec![],
@ -2738,7 +2865,7 @@ mod tests {
let test_bad_signature = test_commands
.clone()
.get_matches_from(vec!["test", "confirm", "deadbeef"]);
assert!(parse_command(&test_bad_signature, "", None).is_err());
assert!(parse_command(&test_bad_signature, "", &mut None).is_err());
// Test CreateAddressWithSeed
let from_pubkey = Some(Pubkey::new_rand());
@ -2758,7 +2885,7 @@ mod tests {
&from_str,
]);
assert_eq!(
parse_command(&test_create_address_with_seed, "", None).unwrap(),
parse_command(&test_create_address_with_seed, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateAddressWithSeed {
from_pubkey,
@ -2776,7 +2903,7 @@ mod tests {
"STAKE",
]);
assert_eq!(
parse_command(&test_create_address_with_seed, &keypair_file, None).unwrap(),
parse_command(&test_create_address_with_seed, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateAddressWithSeed {
from_pubkey: None,
@ -2793,7 +2920,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "deploy", "/Users/test/program.o"]);
assert_eq!(
parse_command(&test_deploy, &keypair_file, None).unwrap(),
parse_command(&test_deploy, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Deploy("/Users/test/program.o".to_string()),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
@ -2806,7 +2933,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "resolve-signer", &keypair_file]);
assert_eq!(
parse_command(&test_resolve_signer, "", None).unwrap(),
parse_command(&test_resolve_signer, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::ResolveSigner(Some(keypair_file.clone())),
signers: vec![],
@ -2818,7 +2945,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "resolve-signer", &pubkey_string]);
assert_eq!(
parse_command(&test_resolve_signer, "", None).unwrap(),
parse_command(&test_resolve_signer, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::ResolveSigner(Some(pubkey.to_string())),
signers: vec![],
@ -2831,7 +2958,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "pay", &pubkey_string, "50"]);
assert_eq!(
parse_command(&test_pay, &keypair_file, None).unwrap(),
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50_000_000_000,
@ -2854,7 +2981,7 @@ mod tests {
&witness1_string,
]);
assert_eq!(
parse_command(&test_pay_multiple_witnesses, &keypair_file, None).unwrap(),
parse_command(&test_pay_multiple_witnesses, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50_000_000_000,
@ -2874,7 +3001,7 @@ mod tests {
&witness0_string,
]);
assert_eq!(
parse_command(&test_pay_single_witness, &keypair_file, None).unwrap(),
parse_command(&test_pay_single_witness, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50_000_000_000,
@ -2898,7 +3025,7 @@ mod tests {
&witness0_string,
]);
assert_eq!(
parse_command(&test_pay_timestamp, &keypair_file, None).unwrap(),
parse_command(&test_pay_timestamp, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50_000_000_000,
@ -2924,7 +3051,7 @@ mod tests {
"--sign-only",
]);
assert_eq!(
parse_command(&test_pay, &keypair_file, None).unwrap(),
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50_000_000_000,
@ -2947,7 +3074,7 @@ mod tests {
&blockhash_string,
]);
assert_eq!(
parse_command(&test_pay, &keypair_file, None).unwrap(),
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50_000_000_000,
@ -2976,7 +3103,7 @@ mod tests {
&pubkey_string,
]);
assert_eq!(
parse_command(&test_pay, &keypair_file, None).unwrap(),
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50_000_000_000,
@ -3009,7 +3136,7 @@ mod tests {
&keypair_file,
]);
assert_eq!(
parse_command(&test_pay, &keypair_file, None).unwrap(),
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50_000_000_000,
@ -3047,7 +3174,7 @@ mod tests {
&signer_arg,
]);
assert_eq!(
parse_command(&test_pay, &keypair_file, None).unwrap(),
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50_000_000_000,
@ -3085,7 +3212,7 @@ mod tests {
"--signer",
&signer_arg,
]);
assert!(parse_command(&test_pay, &keypair_file, None).is_err());
assert!(parse_command(&test_pay, &keypair_file, &mut None).is_err());
// Test Send-Signature Subcommand
let test_send_signature = test_commands.clone().get_matches_from(vec![
@ -3095,7 +3222,7 @@ mod tests {
&pubkey_string,
]);
assert_eq!(
parse_command(&test_send_signature, &keypair_file, None).unwrap(),
parse_command(&test_send_signature, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Witness(pubkey, pubkey),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
@ -3116,7 +3243,7 @@ mod tests {
&witness1_string,
]);
assert_eq!(
parse_command(&test_pay_multiple_witnesses, &keypair_file, None).unwrap(),
parse_command(&test_pay_multiple_witnesses, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50_000_000_000,
@ -3140,7 +3267,7 @@ mod tests {
"2018-09-19T17:30:59",
]);
assert_eq!(
parse_command(&test_send_timestamp, &keypair_file, None).unwrap(),
parse_command(&test_send_timestamp, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::TimeElapsed(pubkey, pubkey, dt),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
@ -3154,7 +3281,7 @@ mod tests {
"--date",
"20180919T17:30:59",
]);
assert!(parse_command(&test_bad_timestamp, &keypair_file, None).is_err());
assert!(parse_command(&test_bad_timestamp, &keypair_file, &mut None).is_err());
}
#[test]
@ -3183,7 +3310,7 @@ mod tests {
let process_id = Pubkey::new_rand();
config.command = CliCommand::Cancel(process_id);
assert_eq!(process_command(&config).unwrap(), SIGNATURE);
assert!(process_command(&config).is_ok());
let good_signature = Signature::new(&bs58::decode(SIGNATURE).into_vec().unwrap());
config.command = CliCommand::Confirm(good_signature);
@ -3200,8 +3327,8 @@ mod tests {
commission: 0,
};
config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
let new_authorized_pubkey = Pubkey::new_rand();
config.signers = vec![&bob_keypair];
@ -3210,8 +3337,8 @@ mod tests {
new_authorized_pubkey,
vote_authorize: VoteAuthorize::Voter,
};
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
let new_identity_keypair = Keypair::new();
config.signers = vec![&keypair, &bob_keypair, &new_identity_keypair];
@ -3219,8 +3346,8 @@ mod tests {
vote_account_pubkey: bob_pubkey,
new_identity_account: 2,
};
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
let bob_keypair = Keypair::new();
let bob_pubkey = bob_keypair.pubkey();
@ -3244,8 +3371,8 @@ mod tests {
from: 0,
};
config.signers = vec![&keypair, &bob_keypair];
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
let stake_pubkey = Pubkey::new_rand();
let to_pubkey = Pubkey::new_rand();
@ -3254,6 +3381,7 @@ mod tests {
destination_account_pubkey: to_pubkey,
lamports: 100,
withdraw_authority: 0,
custodian: None,
sign_only: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@ -3261,8 +3389,8 @@ mod tests {
fee_payer: 0,
};
config.signers = vec![&keypair];
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
let stake_pubkey = Pubkey::new_rand();
config.command = CliCommand::DeactivateStake {
@ -3274,8 +3402,8 @@ mod tests {
nonce_authority: 0,
fee_payer: 0,
};
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
let stake_pubkey = Pubkey::new_rand();
let split_stake_account = Keypair::new();
@ -3292,8 +3420,8 @@ mod tests {
fee_payer: 0,
};
config.signers = vec![&keypair, &split_stake_account];
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
config.command = CliCommand::GetSlot {
commitment_config: CommitmentConfig::default(),
@ -3311,8 +3439,8 @@ mod tests {
to: bob_pubkey,
..PayCommand::default()
});
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
let date_string = "\"2018-09-19T17:30:59Z\"";
let dt: DateTime<Utc> = serde_json::from_str(&date_string).unwrap();
@ -3324,16 +3452,7 @@ mod tests {
..PayCommand::default()
});
let result = process_command(&config);
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
assert_eq!(
json.as_object()
.unwrap()
.get("signature")
.unwrap()
.as_str()
.unwrap(),
SIGNATURE.to_string()
);
assert!(result.is_ok());
let witness = Pubkey::new_rand();
config.command = CliCommand::Pay(PayCommand {
@ -3344,27 +3463,18 @@ mod tests {
..PayCommand::default()
});
let result = process_command(&config);
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
assert_eq!(
json.as_object()
.unwrap()
.get("signature")
.unwrap()
.as_str()
.unwrap(),
SIGNATURE.to_string()
);
assert!(result.is_ok());
let process_id = Pubkey::new_rand();
config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt);
config.signers = vec![&keypair];
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
let witness = Pubkey::new_rand();
config.command = CliCommand::Witness(bob_pubkey, witness);
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
// CreateAddressWithSeed
let from_pubkey = Pubkey::new_rand();
@ -3391,13 +3501,13 @@ mod tests {
assert!(process_command(&config).is_ok());
config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt);
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
let witness = Pubkey::new_rand();
config.command = CliCommand::Witness(bob_pubkey, witness);
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let result = process_command(&config);
assert!(result.is_ok());
// sig_not_found case
config.rpc_client = Some(RpcClient::new_mock("sig_not_found".to_string()));
@ -3411,10 +3521,7 @@ mod tests {
config.command = CliCommand::Confirm(any_signature);
assert_eq!(
process_command(&config).unwrap(),
format!(
"Transaction failed with error: {}",
TransactionError::AccountInUse
)
format!("Transaction failed: {}", TransactionError::AccountInUse)
);
// Failure cases
@ -3550,13 +3657,40 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "transfer", &to_string, "42"]);
assert_eq!(
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
parse_command(&test_transfer, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Transfer {
lamports: 42_000_000_000,
to: to_pubkey,
from: 0,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
},
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
}
);
// Test Transfer no-wait
let test_transfer = test_commands.clone().get_matches_from(vec![
"test",
"transfer",
"--no-wait",
&to_string,
"42",
]);
assert_eq!(
parse_command(&test_transfer, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Transfer {
lamports: 42_000_000_000,
to: to_pubkey,
from: 0,
sign_only: false,
no_wait: true,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@ -3579,13 +3713,14 @@ mod tests {
"--sign-only",
]);
assert_eq!(
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
parse_command(&test_transfer, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Transfer {
lamports: 42_000_000_000,
to: to_pubkey,
from: 0,
sign_only: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@ -3613,13 +3748,14 @@ mod tests {
&blockhash_string,
]);
assert_eq!(
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
parse_command(&test_transfer, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Transfer {
lamports: 42_000_000_000,
to: to_pubkey,
from: 0,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::Cluster,
blockhash
@ -3651,13 +3787,14 @@ mod tests {
&nonce_authority_file,
]);
assert_eq!(
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
parse_command(&test_transfer, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Transfer {
lamports: 42_000_000_000,
to: to_pubkey,
from: 0,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_address),
blockhash

cli/src/cli_output.rs (new file, 841 lines)
View File

@ -0,0 +1,841 @@
use crate::{cli::build_balance_message, display::writeln_name_value};
use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};
use console::{style, Emoji};
use inflector::cases::titlecase::to_title_case;
use serde::Serialize;
use serde_json::{Map, Value};
use solana_client::rpc_response::{RpcEpochInfo, RpcKeyedAccount, RpcVoteAccountInfo};
use solana_sdk::{
clock::{self, Epoch, Slot, UnixTimestamp},
stake_history::StakeHistoryEntry,
};
use solana_stake_program::stake_state::{Authorized, Lockup};
use solana_vote_program::{
authorized_voters::AuthorizedVoters,
vote_state::{BlockTimestamp, Lockout},
};
use std::{collections::BTreeMap, fmt, time::Duration};
static WARNING: Emoji = Emoji("⚠️", "!");
#[derive(PartialEq)]
pub enum OutputFormat {
Display,
Json,
JsonCompact,
}
impl OutputFormat {
pub fn formatted_print<T>(&self, item: &T)
where
T: Serialize + fmt::Display,
{
match self {
OutputFormat::Display => {
println!("{}", item);
}
OutputFormat::Json => {
println!("{}", serde_json::to_string_pretty(item).unwrap());
}
OutputFormat::JsonCompact => {
println!("{}", serde_json::to_value(item).unwrap());
}
}
}
}
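formatted_print accepts any type that is both Serialize and Display, which is what lets each Cli* type below serve the human-readable view and both JSON output modes from a single definition. A minimal sketch with a made-up type (Greeting is not part of the crate):

use serde::Serialize;
use std::fmt;

#[derive(Serialize)]
struct Greeting {
    message: String,
}

impl fmt::Display for Greeting {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Message: {}", self.message)
    }
}

fn print_all_formats() {
    let item = Greeting { message: "hello".to_string() };
    OutputFormat::Display.formatted_print(&item); // "Message: hello"
    OutputFormat::Json.formatted_print(&item); // pretty-printed {"message": "hello"}
    OutputFormat::JsonCompact.formatted_print(&item); // {"message":"hello"}
}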
#[derive(Serialize, Deserialize)]
pub struct CliAccount {
#[serde(flatten)]
pub keyed_account: RpcKeyedAccount,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Public Key:", &self.keyed_account.pubkey)?;
writeln_name_value(
f,
"Balance:",
&build_balance_message(
self.keyed_account.account.lamports,
self.use_lamports_unit,
true,
),
)?;
writeln_name_value(f, "Owner:", &self.keyed_account.account.owner)?;
writeln_name_value(
f,
"Executable:",
&self.keyed_account.account.executable.to_string(),
)?;
writeln_name_value(
f,
"Rent Epoch:",
&self.keyed_account.account.rent_epoch.to_string(),
)?;
Ok(())
}
}
#[derive(Default, Serialize, Deserialize)]
pub struct CliBlockProduction {
pub epoch: Epoch,
pub start_slot: Slot,
pub end_slot: Slot,
pub total_slots: usize,
pub total_blocks_produced: usize,
pub total_slots_skipped: usize,
pub leaders: Vec<CliBlockProductionEntry>,
pub individual_slot_status: Vec<CliSlotStatus>,
#[serde(skip_serializing)]
pub verbose: bool,
}
impl fmt::Display for CliBlockProduction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln!(
f,
"{}",
style(format!(
" {:<44} {:>15} {:>15} {:>15} {:>23}",
"Identity Pubkey",
"Leader Slots",
"Blocks Produced",
"Skipped Slots",
"Skipped Slot Percentage",
))
.bold()
)?;
for leader in &self.leaders {
writeln!(
f,
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
leader.identity_pubkey,
leader.leader_slots,
leader.blocks_produced,
leader.skipped_slots,
leader.skipped_slots as f64 / leader.leader_slots as f64 * 100.
)?;
}
writeln!(f)?;
writeln!(
f,
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
format!("Epoch {} total:", self.epoch),
self.total_slots,
self.total_blocks_produced,
self.total_slots_skipped,
self.total_slots_skipped as f64 / self.total_slots as f64 * 100.
)?;
writeln!(
f,
" (using data from {} slots: {} to {})",
self.total_slots, self.start_slot, self.end_slot
)?;
if self.verbose {
writeln!(f)?;
writeln!(f)?;
writeln!(
f,
"{}",
style(format!(" {:<15} {:<44}", "Slot", "Identity Pubkey")).bold(),
)?;
for status in &self.individual_slot_status {
if status.skipped {
writeln!(
f,
"{}",
style(format!(
" {:<15} {:<44} SKIPPED",
status.slot, status.leader
))
.red()
)?;
} else {
writeln!(
f,
"{}",
style(format!(" {:<15} {:<44}", status.slot, status.leader))
)?;
}
}
}
Ok(())
}
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliBlockProductionEntry {
pub identity_pubkey: String,
pub leader_slots: u64,
pub blocks_produced: u64,
pub skipped_slots: u64,
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliSlotStatus {
pub slot: Slot,
pub leader: String,
pub skipped: bool,
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliEpochInfo {
#[serde(flatten)]
pub epoch_info: RpcEpochInfo,
}
impl From<RpcEpochInfo> for CliEpochInfo {
fn from(epoch_info: RpcEpochInfo) -> Self {
Self { epoch_info }
}
}
impl fmt::Display for CliEpochInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Slot:", &self.epoch_info.absolute_slot.to_string())?;
writeln_name_value(f, "Epoch:", &self.epoch_info.epoch.to_string())?;
let start_slot = self.epoch_info.absolute_slot - self.epoch_info.slot_index;
let end_slot = start_slot + self.epoch_info.slots_in_epoch;
writeln_name_value(
f,
"Epoch Slot Range:",
&format!("[{}..{})", start_slot, end_slot),
)?;
writeln_name_value(
f,
"Epoch Completed Percent:",
&format!(
"{:>3.3}%",
self.epoch_info.slot_index as f64 / self.epoch_info.slots_in_epoch as f64 * 100_f64
),
)?;
let remaining_slots_in_epoch = self.epoch_info.slots_in_epoch - self.epoch_info.slot_index;
writeln_name_value(
f,
"Epoch Completed Slots:",
&format!(
"{}/{} ({} remaining)",
self.epoch_info.slot_index,
self.epoch_info.slots_in_epoch,
remaining_slots_in_epoch
),
)?;
writeln_name_value(
f,
"Epoch Completed Time:",
&format!(
"{}/{} ({} remaining)",
slot_to_human_time(self.epoch_info.slot_index),
slot_to_human_time(self.epoch_info.slots_in_epoch),
slot_to_human_time(remaining_slots_in_epoch)
),
)
}
}
fn slot_to_human_time(slot: Slot) -> String {
humantime::format_duration(Duration::from_secs(
slot * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
))
.to_string()
}
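slot_to_human_time converts a slot count into wall-clock time via the default tick constants. Assuming the commonly used defaults of 64 ticks per slot and 160 ticks per second (i.e. 400 ms slots, stated here as an assumption rather than read from this diff), 216,000 slots come out as one day:

use std::time::Duration;

fn main() {
    // Assumed defaults: 64 ticks/slot, 160 ticks/second => 0.4 s per slot.
    let (ticks_per_slot, ticks_per_second) = (64u64, 160u64);
    let slots: u64 = 216_000;
    let secs = slots * ticks_per_slot / ticks_per_second; // 86_400
    assert_eq!(secs, 86_400);
    println!("{}", humantime::format_duration(Duration::from_secs(secs))); // "1day"
}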
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliValidators {
pub total_active_stake: u64,
pub total_current_stake: u64,
pub total_deliquent_stake: u64,
pub current_validators: Vec<CliValidator>,
pub delinquent_validators: Vec<CliValidator>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliValidators {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn write_vote_account(
f: &mut fmt::Formatter,
validator: &CliValidator,
total_active_stake: u64,
use_lamports_unit: bool,
delinquent: bool,
) -> fmt::Result {
fn non_zero_or_dash(v: u64) -> String {
if v == 0 {
"-".into()
} else {
format!("{}", v)
}
}
writeln!(
f,
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
if delinquent {
WARNING.to_string()
} else {
" ".to_string()
},
validator.identity_pubkey,
validator.vote_account_pubkey,
validator.commission,
non_zero_or_dash(validator.last_vote),
non_zero_or_dash(validator.root_slot),
validator.credits,
if validator.activated_stake > 0 {
format!(
"{} ({:.2}%)",
build_balance_message(validator.activated_stake, use_lamports_unit, true),
100. * validator.activated_stake as f64 / total_active_stake as f64
)
} else {
"-".into()
},
)
}
writeln_name_value(
f,
"Active Stake:",
&build_balance_message(self.total_active_stake, self.use_lamports_unit, true),
)?;
if self.total_deliquent_stake > 0 {
writeln_name_value(
f,
"Current Stake:",
&format!(
"{} ({:0.2}%)",
&build_balance_message(self.total_current_stake, self.use_lamports_unit, true),
100. * self.total_current_stake as f64 / self.total_active_stake as f64
),
)?;
writeln_name_value(
f,
"Delinquent Stake:",
&format!(
"{} ({:0.2}%)",
&build_balance_message(
self.total_deliquent_stake,
self.use_lamports_unit,
true
),
100. * self.total_deliquent_stake as f64 / self.total_active_stake as f64
),
)?;
}
writeln!(f)?;
writeln!(
f,
"{}",
style(format!(
" {:<44} {:<44} {} {} {} {:>7} {}",
"Identity Pubkey",
"Vote Account Pubkey",
"Commission",
"Last Vote",
"Root Block",
"Credits",
"Active Stake",
))
.bold()
)?;
for validator in &self.current_validators {
write_vote_account(
f,
validator,
self.total_active_stake,
self.use_lamports_unit,
false,
)?;
}
for validator in &self.delinquent_validators {
write_vote_account(
f,
validator,
self.total_active_stake,
self.use_lamports_unit,
true,
)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliValidator {
pub identity_pubkey: String,
pub vote_account_pubkey: String,
pub commission: u8,
pub last_vote: u64,
pub root_slot: u64,
pub credits: u64,
pub activated_stake: u64,
}
impl CliValidator {
pub fn new(vote_account: &RpcVoteAccountInfo, current_epoch: Epoch) -> Self {
Self {
identity_pubkey: vote_account.node_pubkey.to_string(),
vote_account_pubkey: vote_account.vote_pubkey.to_string(),
commission: vote_account.commission,
last_vote: vote_account.last_vote,
root_slot: vote_account.root_slot,
credits: vote_account
.epoch_credits
.iter()
.find_map(|(epoch, credits, _)| {
if *epoch == current_epoch {
Some(*credits)
} else {
None
}
})
.unwrap_or(0),
activated_stake: vote_account.activated_stake,
}
}
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliNonceAccount {
pub balance: u64,
pub minimum_balance_for_rent_exemption: u64,
pub nonce: Option<String>,
pub lamports_per_signature: Option<u64>,
pub authority: Option<String>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliNonceAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Balance: {}",
build_balance_message(self.balance, self.use_lamports_unit, true)
)?;
writeln!(
f,
"Minimum Balance Required: {}",
build_balance_message(
self.minimum_balance_for_rent_exemption,
self.use_lamports_unit,
true
)
)?;
let nonce = self.nonce.as_deref().unwrap_or("uninitialized");
writeln!(f, "Nonce: {}", nonce)?;
if let Some(fees) = self.lamports_per_signature {
writeln!(f, "Fee: {} lamports per signature", fees)?;
} else {
writeln!(f, "Fees: uninitialized")?;
}
let authority = self.authority.as_deref().unwrap_or("uninitialized");
writeln!(f, "Authority: {}", authority)
}
}
#[derive(Serialize, Deserialize)]
pub struct CliStakeVec(Vec<CliKeyedStakeState>);
impl CliStakeVec {
pub fn new(list: Vec<CliKeyedStakeState>) -> Self {
Self(list)
}
}
impl fmt::Display for CliStakeVec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for state in &self.0 {
writeln!(f)?;
write!(f, "{}", state)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliKeyedStakeState {
pub stake_pubkey: String,
#[serde(flatten)]
pub stake_state: CliStakeState,
}
impl fmt::Display for CliKeyedStakeState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Stake Pubkey: {}", self.stake_pubkey)?;
write!(f, "{}", self.stake_state)
}
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliStakeState {
pub stake_type: CliStakeType,
pub total_stake: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub delegated_stake: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub delegated_vote_account_address: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub activation_epoch: Option<Epoch>,
#[serde(skip_serializing_if = "Option::is_none")]
pub deactivation_epoch: Option<Epoch>,
#[serde(flatten, skip_serializing_if = "Option::is_none")]
pub authorized: Option<CliAuthorized>,
#[serde(flatten, skip_serializing_if = "Option::is_none")]
pub lockup: Option<CliLockup>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliStakeState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn show_authorized(f: &mut fmt::Formatter, authorized: &CliAuthorized) -> fmt::Result {
writeln!(f, "Stake Authority: {}", authorized.staker)?;
writeln!(f, "Withdraw Authority: {}", authorized.withdrawer)?;
Ok(())
}
fn show_lockup(f: &mut fmt::Formatter, lockup: &CliLockup) -> fmt::Result {
writeln!(
f,
"Lockup Timestamp: {} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(
NaiveDateTime::from_timestamp(lockup.unix_timestamp, 0),
Utc
)
.to_rfc3339_opts(SecondsFormat::Secs, true),
lockup.unix_timestamp
)?;
writeln!(f, "Lockup Epoch: {}", lockup.epoch)?;
writeln!(f, "Lockup Custodian: {}", lockup.custodian)?;
Ok(())
}
match self.stake_type {
CliStakeType::RewardsPool => writeln!(f, "Stake account is a rewards pool")?,
CliStakeType::Uninitialized => writeln!(f, "Stake account is uninitialized")?,
CliStakeType::Initialized => {
writeln!(
f,
"Total Stake: {}",
build_balance_message(self.total_stake, self.use_lamports_unit, true)
)?;
writeln!(f, "Stake account is undelegated")?;
show_authorized(f, self.authorized.as_ref().unwrap())?;
show_lockup(f, self.lockup.as_ref().unwrap())?;
}
CliStakeType::Stake => {
writeln!(
f,
"Total Stake: {}",
build_balance_message(self.total_stake, self.use_lamports_unit, true)
)?;
writeln!(
f,
"Delegated Stake: {}",
build_balance_message(
self.delegated_stake.unwrap(),
self.use_lamports_unit,
true
)
)?;
if let Some(delegated_vote_account_address) = &self.delegated_vote_account_address {
writeln!(
f,
"Delegated Vote Account Address: {}",
delegated_vote_account_address
)?;
}
writeln!(
f,
"Stake activates starting from epoch: {}",
self.activation_epoch.unwrap()
)?;
if let Some(deactivation_epoch) = self.deactivation_epoch {
writeln!(
f,
"Stake deactivates starting from epoch: {}",
deactivation_epoch
)?;
}
show_authorized(f, self.authorized.as_ref().unwrap())?;
show_lockup(f, self.lockup.as_ref().unwrap())?;
}
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub enum CliStakeType {
Stake,
RewardsPool,
Uninitialized,
Initialized,
}
impl Default for CliStakeType {
fn default() -> Self {
Self::Uninitialized
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliStakeHistory {
pub entries: Vec<CliStakeHistoryEntry>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliStakeHistory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln!(
f,
"{}",
style(format!(
" {:<5} {:>20} {:>20} {:>20}",
"Epoch", "Effective Stake", "Activating Stake", "Deactivating Stake",
))
.bold()
)?;
for entry in &self.entries {
writeln!(
f,
" {:>5} {:>20} {:>20} {:>20} {}",
entry.epoch,
build_balance_message(entry.effective_stake, self.use_lamports_unit, false),
build_balance_message(entry.activating_stake, self.use_lamports_unit, false),
build_balance_message(entry.deactivating_stake, self.use_lamports_unit, false),
if self.use_lamports_unit {
"lamports"
} else {
"SOL"
}
)?;
}
Ok(())
}
}
impl From<&(Epoch, StakeHistoryEntry)> for CliStakeHistoryEntry {
fn from((epoch, entry): &(Epoch, StakeHistoryEntry)) -> Self {
Self {
epoch: *epoch,
effective_stake: entry.effective,
activating_stake: entry.activating,
deactivating_stake: entry.deactivating,
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliStakeHistoryEntry {
pub epoch: Epoch,
pub effective_stake: u64,
pub activating_stake: u64,
pub deactivating_stake: u64,
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliAuthorized {
pub staker: String,
pub withdrawer: String,
}
impl From<&Authorized> for CliAuthorized {
fn from(authorized: &Authorized) -> Self {
Self {
staker: authorized.staker.to_string(),
withdrawer: authorized.withdrawer.to_string(),
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliLockup {
pub unix_timestamp: UnixTimestamp,
pub epoch: Epoch,
pub custodian: String,
}
impl From<&Lockup> for CliLockup {
fn from(lockup: &Lockup) -> Self {
Self {
unix_timestamp: lockup.unix_timestamp,
epoch: lockup.epoch,
custodian: lockup.custodian.to_string(),
}
}
}
#[derive(Serialize, Deserialize)]
pub struct CliValidatorInfoVec(Vec<CliValidatorInfo>);
impl CliValidatorInfoVec {
pub fn new(list: Vec<CliValidatorInfo>) -> Self {
Self(list)
}
}
impl fmt::Display for CliValidatorInfoVec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.0.is_empty() {
writeln!(f, "No validator info accounts found")?;
}
for validator_info in &self.0 {
writeln!(f)?;
write!(f, "{}", validator_info)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliValidatorInfo {
pub identity_pubkey: String,
pub info_pubkey: String,
pub info: Map<String, Value>,
}
impl fmt::Display for CliValidatorInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Validator Identity Pubkey:", &self.identity_pubkey)?;
writeln_name_value(f, " Info Pubkey:", &self.info_pubkey)?;
for (key, value) in self.info.iter() {
writeln_name_value(
f,
&format!(" {}:", to_title_case(key)),
&value.as_str().unwrap_or("?"),
)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliVoteAccount {
pub account_balance: u64,
pub validator_identity: String,
#[serde(flatten)]
pub authorized_voters: CliAuthorizedVoters,
pub authorized_withdrawer: String,
pub credits: u64,
pub commission: u8,
pub root_slot: Option<Slot>,
pub recent_timestamp: BlockTimestamp,
pub votes: Vec<CliLockout>,
pub epoch_voting_history: Vec<CliEpochVotingHistory>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliVoteAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Account Balance: {}",
build_balance_message(self.account_balance, self.use_lamports_unit, true)
)?;
writeln!(f, "Validator Identity: {}", self.validator_identity)?;
writeln!(f, "Authorized Voters: {}", self.authorized_voters)?;
writeln!(f, "Authorized Withdrawer: {}", self.authorized_withdrawer)?;
writeln!(f, "Credits: {}", self.credits)?;
writeln!(f, "Commission: {}%", self.commission)?;
writeln!(
f,
"Root Slot: {}",
match self.root_slot {
Some(slot) => slot.to_string(),
None => "~".to_string(),
}
)?;
writeln!(f, "Recent Timestamp: {:?}", self.recent_timestamp)?;
if !self.votes.is_empty() {
writeln!(f, "Recent Votes:")?;
for vote in &self.votes {
writeln!(
f,
"- slot: {}\n confirmation count: {}",
vote.slot, vote.confirmation_count
)?;
}
writeln!(f, "Epoch Voting History:")?;
for epoch_info in &self.epoch_voting_history {
writeln!(
f,
"- epoch: {}\n slots in epoch: {}\n credits earned: {}",
epoch_info.epoch, epoch_info.slots_in_epoch, epoch_info.credits_earned,
)?;
}
}
Ok(())
}
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliAuthorizedVoters {
authorized_voters: BTreeMap<Epoch, String>,
}
impl fmt::Display for CliAuthorizedVoters {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.authorized_voters)
}
}
impl From<&AuthorizedVoters> for CliAuthorizedVoters {
fn from(authorized_voters: &AuthorizedVoters) -> Self {
let mut voter_map: BTreeMap<Epoch, String> = BTreeMap::new();
for (epoch, voter) in authorized_voters.iter() {
voter_map.insert(*epoch, voter.to_string());
}
Self {
authorized_voters: voter_map,
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliEpochVotingHistory {
pub epoch: Epoch,
pub slots_in_epoch: u64,
pub credits_earned: u64,
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliLockout {
pub slot: Slot,
pub confirmation_count: u32,
}
impl From<&Lockout> for CliLockout {
fn from(lockout: &Lockout) -> Self {
Self {
slot: lockout.slot,
confirmation_count: lockout.confirmation_count,
}
}
}
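
Each of the output types above follows the same pattern: derive Serialize (camelCase field names, skip_serializing for display-only flags) and implement Display by hand, so one value can be printed either as human-readable text or as JSON. A minimal self-contained sketch of that pattern, with a hypothetical ExampleState standing in for types like CliStakeState, assuming the serde (with derive) and serde_json crates:

use serde::Serialize;
use std::fmt;

// Hypothetical stand-in for types like CliStakeState or CliNonceAccount.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct ExampleState {
    total_stake: u64,
    // Presentation-only flag: kept out of the JSON form.
    #[serde(skip_serializing)]
    use_lamports_unit: bool,
}

impl fmt::Display for ExampleState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let unit = if self.use_lamports_unit { "lamports" } else { "SOL" };
        writeln!(f, "Total Stake: {} {}", self.total_stake, unit)
    }
}

fn main() {
    let state = ExampleState { total_stake: 42, use_lamports_unit: true };
    print!("{}", state); // human-readable path
    println!("{}", serde_json::to_string_pretty(&state).unwrap()); // JSON path
}
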


@ -1,7 +1,8 @@
use crate::{
cli::{
build_balance_message, check_account_for_fee, CliCommand, CliCommandInfo, CliConfig,
CliError, ProcessResult,
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
cli_output::{
CliBlockProduction, CliBlockProductionEntry, CliEpochInfo, CliKeyedStakeState,
CliSlotStatus, CliStakeVec, CliValidator, CliValidators,
},
display::println_name_value,
};
@ -9,11 +10,16 @@ use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};
use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::signer_from_path};
use solana_clap_utils::{
commitment::{commitment_arg, COMMITMENT_ARG},
input_parsers::*,
input_validators::*,
keypair::signer_from_path,
};
use solana_client::{
pubsub_client::{PubsubClient, SlotInfoMessage},
rpc_client::RpcClient,
rpc_response::RpcVoteAccountInfo,
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
@ -42,7 +48,6 @@ use std::{
static CHECK_MARK: Emoji = Emoji("", "");
static CROSS_MARK: Emoji = Emoji("", "");
static WARNING: Emoji = Emoji("⚠️", "!");
pub trait ClusterQuerySubCommands {
fn cluster_query_subcommands(self) -> Self;
@ -69,7 +74,14 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.takes_value(true)
.validator(is_url)
.help("JSON RPC URL for validator, which is useful for validators with a private RPC service")
),
)
.arg(
Arg::with_name("follow")
.long("follow")
.takes_value(false)
.help("Continue reporting progress even after the validator has caught up"),
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("cluster-version")
@ -93,14 +105,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
SubCommand::with_name("epoch-info")
.about("Get information about the current epoch")
.alias("get-epoch-info")
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Return information at maximum-lockout commitment level",
),
),
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("genesis-hash")
@ -110,37 +115,20 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.subcommand(
SubCommand::with_name("slot").about("Get current slot")
.alias("get-slot")
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Return slot at maximum-lockout commitment level",
),
),
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("epoch").about("Get current epoch")
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("total-supply").about("Get total number of SOL")
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Return count at maximum-lockout commitment level",
),
),
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("transaction-count").about("Get current transaction count")
.alias("get-transaction-count")
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Return count at maximum-lockout commitment level",
),
),
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("ping")
@ -181,12 +169,12 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.help("Wait up to timeout seconds for transaction confirmation"),
)
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Wait until the transaction is confirmed at maximum-lockout commitment level",
),
Arg::with_name(COMMITMENT_ARG.name)
.long(COMMITMENT_ARG.long)
.takes_value(true)
.possible_values(&["default", "max", "recent", "root"])
.value_name("COMMITMENT_LEVEL")
.help("Wait until the transaction is confirmed at selected commitment level"),
),
)
.subcommand(
@ -238,19 +226,46 @@ impl ClusterQuerySubCommands for App<'_, '_> {
SubCommand::with_name("validators")
.about("Show summary information about the current validators")
.alias("show-validators")
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Return information at maximum-lockout commitment level",
),
)
.arg(
Arg::with_name("lamports")
.long("lamports")
.takes_value(false)
.help("Display balance in lamports instead of SOL"),
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("transaction-history")
.about("Show historical transactions affecting the given address, \
ordered based on the slot in which they were confirmed in \
from lowest to highest slot")
.arg(
Arg::with_name("address")
.index(1)
.value_name("ADDRESS")
.required(true)
.validator(is_valid_pubkey)
.help("Account address"),
)
.arg(
Arg::with_name("end_slot")
.takes_value(false)
.value_name("SLOT")
.index(2)
.validator(is_slot)
.help(
"Slot to start from [default: latest slot at maximum commitment]"
),
)
.arg(
Arg::with_name("limit")
.long("limit")
.takes_value(true)
.value_name("NUMBER OF SLOTS")
.validator(is_slot)
.help(
"Limit the search to this many slots"
),
),
)
}
@ -258,14 +273,19 @@ impl ClusterQuerySubCommands for App<'_, '_> {
pub fn parse_catchup(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let node_pubkey = pubkey_of_signer(matches, "node_pubkey", wallet_manager)?.unwrap();
let node_json_rpc_url = value_t!(matches, "node_json_rpc_url", String).ok();
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
let follow = matches.is_present("follow");
Ok(CliCommandInfo {
command: CliCommand::Catchup {
node_pubkey,
node_json_rpc_url,
commitment_config,
follow,
},
signers: vec![],
})
@ -274,7 +294,7 @@ pub fn parse_catchup(
pub fn parse_cluster_ping(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let lamports = value_t_or_exit!(matches, "lamports", u64);
let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64));
@ -284,11 +304,8 @@ pub fn parse_cluster_ping(
None
};
let timeout = Duration::from_secs(value_t_or_exit!(matches, "timeout", u64));
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
Ok(CliCommandInfo {
command: CliCommand::Ping {
lamports,
@ -315,11 +332,8 @@ pub fn parse_get_block_time(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
}
pub fn parse_get_epoch_info(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
Ok(CliCommandInfo {
command: CliCommand::GetEpochInfo { commitment_config },
signers: vec![],
@ -327,23 +341,26 @@ pub fn parse_get_epoch_info(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
}
pub fn parse_get_slot(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
Ok(CliCommandInfo {
command: CliCommand::GetSlot { commitment_config },
signers: vec![],
})
}
pub fn parse_get_epoch(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
Ok(CliCommandInfo {
command: CliCommand::GetEpoch { commitment_config },
signers: vec![],
})
}
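
The parsers above all share the new commitment handling: read the optional commitment level from the matches and fall back to recent when the flag is absent. A self-contained sketch of that flow using clap 2.x directly, with a hypothetical Commitment enum and commitment_of helper standing in for the solana_clap_utils and solana_sdk versions:

use clap::{App, Arg};

#[derive(Debug, PartialEq)]
enum Commitment { Default, Max, Recent, Root }

fn commitment_of(value: Option<&str>) -> Option<Commitment> {
    match value {
        Some("default") => Some(Commitment::Default),
        Some("max") => Some(Commitment::Max),
        Some("recent") => Some(Commitment::Recent),
        Some("root") => Some(Commitment::Root),
        _ => None,
    }
}

fn main() {
    let matches = App::new("example")
        .arg(
            Arg::with_name("commitment")
                .long("commitment")
                .takes_value(true)
                .possible_values(&["default", "max", "recent", "root"])
                .value_name("COMMITMENT_LEVEL"),
        )
        .get_matches_from(vec!["example", "--commitment", "max"]);
    // Fall back to recent when the flag is omitted, as the parsers above do.
    let commitment = commitment_of(matches.value_of("commitment"))
        .unwrap_or(Commitment::Recent);
    assert_eq!(commitment, Commitment::Max);
}
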
pub fn parse_total_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
Ok(CliCommandInfo {
command: CliCommand::TotalSupply { commitment_config },
signers: vec![],
@ -351,11 +368,8 @@ pub fn parse_total_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Cl
}
pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
Ok(CliCommandInfo {
command: CliCommand::GetTransactionCount { commitment_config },
signers: vec![],
@ -364,7 +378,7 @@ pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliComman
pub fn parse_show_stakes(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let use_lamports_unit = matches.is_present("lamports");
let vote_account_pubkeys =
@ -381,11 +395,8 @@ pub fn parse_show_stakes(
pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let use_lamports_unit = matches.is_present("lamports");
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
Ok(CliCommandInfo {
command: CliCommand::ShowValidators {
@ -396,6 +407,25 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
})
}
pub fn parse_transaction_history(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
let end_slot = value_t!(matches, "end_slot", Slot).ok();
let slot_limit = value_t!(matches, "limit", u64)
.unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE);
Ok(CliCommandInfo {
command: CliCommand::TransactionHistory {
address,
end_slot,
slot_limit,
},
signers: vec![],
})
}
/// Creates a new progress bar for processing that will take an unknown amount of time
fn new_spinner_progress_bar() -> ProgressBar {
let progress_bar = ProgressBar::new(42);
@ -409,20 +439,37 @@ pub fn process_catchup(
rpc_client: &RpcClient,
node_pubkey: &Pubkey,
node_json_rpc_url: &Option<String>,
commitment_config: CommitmentConfig,
follow: bool,
) -> ProcessResult {
let cluster_nodes = rpc_client.get_cluster_nodes()?;
let sleep_interval = 5;
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Connecting...");
let node_client = if let Some(node_json_rpc_url) = node_json_rpc_url {
RpcClient::new(node_json_rpc_url.to_string())
} else {
RpcClient::new_socket(
cluster_nodes
let rpc_addr = loop {
let cluster_nodes = rpc_client.get_cluster_nodes()?;
if let Some(contact_info) = cluster_nodes
.iter()
.find(|contact_info| contact_info.pubkey == node_pubkey.to_string())
.ok_or_else(|| format!("Contact information not found for {}", node_pubkey))?
.rpc
.ok_or_else(|| format!("RPC service not found for {}", node_pubkey))?,
)
{
if let Some(rpc_addr) = contact_info.rpc {
break rpc_addr;
}
progress_bar.set_message(&format!("RPC service not found for {}", node_pubkey));
} else {
progress_bar.set_message(&format!(
"Contact information not found for {}",
node_pubkey
));
}
sleep(Duration::from_secs(sleep_interval as u64));
};
RpcClient::new_socket(rpc_addr)
};
let reported_node_pubkey = node_client.get_identity()?;
@ -438,16 +485,12 @@ pub fn process_catchup(
return Err("Both RPC URLs reference the same node, unable to monitor for catchup. Try a different --url".into());
}
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Connecting...");
let mut previous_rpc_slot = std::u64::MAX;
let mut previous_slot_distance = 0;
let sleep_interval = 5;
loop {
let rpc_slot = rpc_client.get_slot_with_commitment(CommitmentConfig::recent())?;
let node_slot = node_client.get_slot_with_commitment(CommitmentConfig::recent())?;
if node_slot > std::cmp::min(previous_rpc_slot, rpc_slot) {
let rpc_slot = rpc_client.get_slot_with_commitment(commitment_config)?;
let node_slot = node_client.get_slot_with_commitment(commitment_config)?;
if !follow && node_slot > std::cmp::min(previous_rpc_slot, rpc_slot) {
progress_bar.finish_and_clear();
return Ok(format!(
"{} has caught up (us:{} them:{})",
@ -456,25 +499,36 @@ pub fn process_catchup(
}
let slot_distance = rpc_slot as i64 - node_slot as i64;
let slots_per_second =
(previous_slot_distance - slot_distance) as f64 / f64::from(sleep_interval);
let time_remaining = if slots_per_second <= 0.0 {
"".to_string()
} else {
format!(
". Time remaining: {}",
humantime::format_duration(Duration::from_secs_f64(
(slot_distance as f64 / slots_per_second).round()
))
)
};
progress_bar.set_message(&format!(
"Validator is {} slots away (us:{} them:{}){}",
"{} slots behind (us:{} them:{}){}",
slot_distance,
node_slot,
rpc_slot,
if previous_rpc_slot == std::u64::MAX {
if slot_distance == 0 || previous_rpc_slot == std::u64::MAX {
"".to_string()
} else {
let slots_per_second =
(previous_slot_distance - slot_distance) as f64 / f64::from(sleep_interval);
format!(
" and {} at {:.1} slots/second",
", {} at {:.1} slots/second{}",
if slots_per_second < 0.0 {
"falling behind"
} else {
"gaining"
},
slots_per_second,
time_remaining
)
}
));
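
The time-remaining figure in this message comes from simple rate arithmetic: the closing rate is the change in slot distance over one polling interval, and the estimate is the current distance divided by that rate, suppressed when the node is not gaining. A self-contained sketch, assuming the fixed 5-second poll used above:

use std::time::Duration;

fn time_remaining(previous_distance: i64, distance: i64, sleep_interval_secs: u64) -> Option<Duration> {
    let slots_per_second =
        (previous_distance - distance) as f64 / sleep_interval_secs as f64;
    if slots_per_second <= 0.0 {
        None // not gaining on the cluster; no meaningful estimate
    } else {
        Some(Duration::from_secs_f64(
            (distance as f64 / slots_per_second).round(),
        ))
    }
}

fn main() {
    // Closed 25 slots over a 5-second poll while 500 slots behind: about 100 s left.
    assert_eq!(time_remaining(525, 500, 5), Some(Duration::from_secs(100)));
}
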
@ -545,51 +599,15 @@ pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResu
Ok(result)
}
fn slot_to_human_time(slot: Slot) -> String {
humantime::format_duration(Duration::from_secs(
slot * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
))
.to_string()
}
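
slot_to_human_time converts a slot count into wall-clock time via the tick constants. A worked sketch of the same arithmetic, assuming the SDK defaults of 64 ticks per slot and 160 ticks per second (0.4 s per slot), under which 9,000 slots is one hour:

const DEFAULT_TICKS_PER_SLOT: u64 = 64;
const DEFAULT_TICKS_PER_SECOND: u64 = 160;

fn slot_to_secs(slot: u64) -> u64 {
    slot * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND
}

fn main() {
    assert_eq!(slot_to_secs(9000), 3600); // one hour of slots at 0.4 s/slot
}
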
pub fn process_get_epoch_info(
rpc_client: &RpcClient,
config: &CliConfig,
commitment_config: CommitmentConfig,
) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
println!();
println_name_value("Slot:", &epoch_info.absolute_slot.to_string());
println_name_value("Epoch:", &epoch_info.epoch.to_string());
let start_slot = epoch_info.absolute_slot - epoch_info.slot_index;
let end_slot = start_slot + epoch_info.slots_in_epoch;
println_name_value(
"Epoch Slot Range:",
&format!("[{}..{})", start_slot, end_slot),
);
println_name_value(
"Epoch Completed Percent:",
&format!(
"{:>3.3}%",
epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100_f64
),
);
let remaining_slots_in_epoch = epoch_info.slots_in_epoch - epoch_info.slot_index;
println_name_value(
"Epoch Completed Slots:",
&format!(
"{}/{} ({} remaining)",
epoch_info.slot_index, epoch_info.slots_in_epoch, remaining_slots_in_epoch
),
);
println_name_value(
"Epoch Completed Time:",
&format!(
"{}/{} ({} remaining)",
slot_to_human_time(epoch_info.slot_index),
slot_to_human_time(epoch_info.slots_in_epoch),
slot_to_human_time(remaining_slots_in_epoch)
),
);
let epoch_info: CliEpochInfo = rpc_client
.get_epoch_info_with_commitment(commitment_config.clone())?
.into();
config.output_format.formatted_print(&epoch_info);
Ok("".to_string())
}
@ -606,6 +624,14 @@ pub fn process_get_slot(
Ok(slot.to_string())
}
pub fn process_get_epoch(
rpc_client: &RpcClient,
commitment_config: CommitmentConfig,
) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
Ok(epoch_info.epoch.to_string())
}
pub fn parse_show_block_production(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let epoch = value_t!(matches, "epoch", Epoch).ok();
let slot_limit = value_t!(matches, "slot_limit", u64).ok();
@ -674,9 +700,9 @@ pub fn process_show_block_production(
let start_slot_index = (start_slot - first_slot_in_epoch) as usize;
let end_slot_index = (end_slot - first_slot_in_epoch) as usize;
let total_slots = end_slot_index - start_slot_index + 1;
let total_blocks = confirmed_blocks.len();
assert!(total_blocks <= total_slots);
let total_slots_skipped = total_slots - total_blocks;
let total_blocks_produced = confirmed_blocks.len();
assert!(total_blocks_produced <= total_slots);
let total_slots_skipped = total_slots - total_blocks_produced;
let mut leader_slot_count = HashMap::new();
let mut leader_skipped_slots = HashMap::new();
@ -700,7 +726,7 @@ pub fn process_show_block_production(
progress_bar.set_message(&format!(
"Processing {} slots containing {} blocks and {} empty slots...",
total_slots, total_blocks, total_slots_skipped
total_slots, total_blocks_produced, total_slots_skipped
));
let mut confirmed_blocks_index = 0;
@ -719,71 +745,52 @@ pub fn process_show_block_production(
continue;
}
if slot_of_next_confirmed_block == slot {
individual_slot_status
.push(style(format!(" {:<15} {:<44}", slot, leader)).to_string());
individual_slot_status.push(CliSlotStatus {
slot,
leader: (*leader).to_string(),
skipped: false,
});
break;
}
}
*skipped_slots += 1;
individual_slot_status.push(
style(format!(" {:<15} {:<44} SKIPPED", slot, leader))
.red()
.to_string(),
);
individual_slot_status.push(CliSlotStatus {
slot,
leader: (*leader).to_string(),
skipped: true,
});
break;
}
}
progress_bar.finish_and_clear();
println!(
"\n{}",
style(format!(
" {:<44} {:>15} {:>15} {:>15} {:>23}",
"Identity Pubkey",
"Leader Slots",
"Blocks Produced",
"Skipped Slots",
"Skipped Slot Percentage",
))
.bold()
);
let mut table = vec![];
for (leader, leader_slots) in leader_slot_count.iter() {
let skipped_slots = leader_skipped_slots.get(leader).unwrap();
let blocks_produced = leader_slots - skipped_slots;
table.push(format!(
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
leader,
leader_slots,
blocks_produced,
skipped_slots,
*skipped_slots as f64 / *leader_slots as f64 * 100.
));
}
table.sort();
println!(
"{}\n\n {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
table.join("\n"),
format!("Epoch {} total:", epoch),
let mut leaders: Vec<CliBlockProductionEntry> = leader_slot_count
.iter()
.map(|(leader, leader_slots)| {
let skipped_slots = leader_skipped_slots.get(leader).unwrap();
let blocks_produced = leader_slots - skipped_slots;
CliBlockProductionEntry {
identity_pubkey: (**leader).to_string(),
leader_slots: *leader_slots,
blocks_produced,
skipped_slots: *skipped_slots,
}
})
.collect();
leaders.sort_by(|a, b| a.identity_pubkey.partial_cmp(&b.identity_pubkey).unwrap());
let block_production = CliBlockProduction {
epoch,
start_slot,
end_slot,
total_slots,
total_blocks,
total_blocks_produced,
total_slots_skipped,
total_slots_skipped as f64 / total_slots as f64 * 100.
);
println!(
" (using data from {} slots: {} to {})",
total_slots, start_slot, end_slot
);
if config.verbose {
println!(
"\n\n{}\n{}",
style(format!(" {:<15} {:<44}", "Slot", "Identity Pubkey")).bold(),
individual_slot_status.join("\n")
);
}
leaders,
individual_slot_status,
verbose: config.verbose,
};
config.output_format.formatted_print(&block_production);
Ok("".to_string())
}
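
The skipped-slot percentage that the removed table computed inline can still be derived from the leader_slots and skipped_slots fields carried by each CliBlockProductionEntry. A worked sketch of that arithmetic:

// Skip rate as a percentage of a leader's assigned slots.
fn skip_rate(skipped_slots: u64, leader_slots: u64) -> f64 {
    skipped_slots as f64 / leader_slots as f64 * 100.0
}

fn main() {
    // A leader assigned 40 slots who skipped 4 of them has a 10% skip rate.
    assert!((skip_rate(4, 40) - 10.0).abs() < 1e-9);
}
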
@ -1065,10 +1072,11 @@ pub fn process_show_gossip(rpc_client: &RpcClient) -> ProcessResult {
pub fn process_show_stakes(
rpc_client: &RpcClient,
config: &CliConfig,
use_lamports_unit: bool,
vote_account_pubkeys: Option<&[Pubkey]>,
) -> ProcessResult {
use crate::stake::print_stake_state;
use crate::stake::build_stake_state;
use solana_stake_program::stake_state::StakeState;
let progress_bar = new_spinner_progress_bar();
@ -1076,13 +1084,20 @@ pub fn process_show_stakes(
let all_stake_accounts = rpc_client.get_program_accounts(&solana_stake_program::id())?;
progress_bar.finish_and_clear();
let mut stake_accounts: Vec<CliKeyedStakeState> = vec![];
for (stake_pubkey, stake_account) in all_stake_accounts {
if let Ok(stake_state) = stake_account.state() {
match stake_state {
StakeState::Initialized(_) => {
if vote_account_pubkeys.is_none() {
println!("\nstake pubkey: {}", stake_pubkey);
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
stake_accounts.push(CliKeyedStakeState {
stake_pubkey: stake_pubkey.to_string(),
stake_state: build_stake_state(
stake_account.lamports,
&stake_state,
use_lamports_unit,
),
});
}
}
StakeState::Stake(_, stake) => {
@ -1091,19 +1106,29 @@ pub fn process_show_stakes(
.unwrap()
.contains(&stake.delegation.voter_pubkey)
{
println!("\nstake pubkey: {}", stake_pubkey);
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
stake_accounts.push(CliKeyedStakeState {
stake_pubkey: stake_pubkey.to_string(),
stake_state: build_stake_state(
stake_account.lamports,
&stake_state,
use_lamports_unit,
),
});
}
}
_ => {}
}
}
}
config
.output_format
.formatted_print(&CliStakeVec::new(stake_accounts));
Ok("".to_string())
}
pub fn process_show_validators(
rpc_client: &RpcClient,
config: &CliConfig,
use_lamports_unit: bool,
commitment_config: CommitmentConfig,
) -> ProcessResult {
@ -1113,123 +1138,64 @@ pub fn process_show_validators(
.current
.iter()
.chain(vote_accounts.delinquent.iter())
.fold(0, |acc, vote_account| acc + vote_account.activated_stake)
as f64;
.fold(0, |acc, vote_account| acc + vote_account.activated_stake);
let total_deliquent_stake = vote_accounts
.delinquent
.iter()
.fold(0, |acc, vote_account| acc + vote_account.activated_stake)
as f64;
.fold(0, |acc, vote_account| acc + vote_account.activated_stake);
let total_current_stake = total_active_stake - total_deliquent_stake;
println_name_value(
"Active Stake:",
&build_balance_message(total_active_stake as u64, use_lamports_unit, true),
);
if total_deliquent_stake > 0. {
println_name_value(
"Current Stake:",
&format!(
"{} ({:0.2}%)",
&build_balance_message(total_current_stake as u64, use_lamports_unit, true),
100. * total_current_stake / total_active_stake
),
);
println_name_value(
"Delinquent Stake:",
&format!(
"{} ({:0.2}%)",
&build_balance_message(total_deliquent_stake as u64, use_lamports_unit, true),
100. * total_deliquent_stake / total_active_stake
),
);
}
println!();
let mut current = vote_accounts.current;
current.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
let current_validators: Vec<CliValidator> = current
.iter()
.map(|vote_account| CliValidator::new(vote_account, epoch_info.epoch))
.collect();
let mut delinquent = vote_accounts.delinquent;
delinquent.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
let delinquent_validators: Vec<CliValidator> = delinquent
.iter()
.map(|vote_account| CliValidator::new(vote_account, epoch_info.epoch))
.collect();
let cli_validators = CliValidators {
total_active_stake,
total_current_stake,
total_deliquent_stake,
current_validators,
delinquent_validators,
use_lamports_unit,
};
config.output_format.formatted_print(&cli_validators);
Ok("".to_string())
}
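
The three stake totals computed at the top of this function relate as total active = total current + total delinquent, where activated stake is summed over both current and delinquent vote accounts. A small worked sketch in lamports, with hypothetical values:

fn main() {
    // Activated stake, in lamports, for current and delinquent vote accounts.
    let current: [u64; 2] = [700, 200];
    let delinquent: [u64; 1] = [100];
    let total_delinquent: u64 = delinquent.iter().sum();
    let total_active: u64 = current.iter().sum::<u64>() + total_delinquent;
    let total_current = total_active - total_delinquent;
    assert_eq!((total_active, total_current, total_delinquent), (1000, 900, 100));
}
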
pub fn process_transaction_history(
rpc_client: &RpcClient,
address: &Pubkey,
end_slot: Option<Slot>, // None == use latest slot
slot_limit: u64,
) -> ProcessResult {
let end_slot = {
if let Some(end_slot) = end_slot {
end_slot
} else {
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
}
};
let start_slot = end_slot.saturating_sub(slot_limit);
println!(
"{}",
style(format!(
" {:<44} {:<44} {} {} {} {:>7} {}",
"Identity Pubkey",
"Vote Account Pubkey",
"Commission",
"Last Vote",
"Root Block",
"Credits",
"Active Stake",
))
.bold()
"Transactions affecting {} within slots [{},{}]",
address, start_slot, end_slot
);
fn print_vote_account(
vote_account: RpcVoteAccountInfo,
current_epoch: Epoch,
total_active_stake: f64,
use_lamports_unit: bool,
delinquent: bool,
) {
fn non_zero_or_dash(v: u64) -> String {
if v == 0 {
"-".into()
} else {
format!("{}", v)
}
}
println!(
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
if delinquent {
WARNING.to_string()
} else {
" ".to_string()
},
vote_account.node_pubkey,
vote_account.vote_pubkey,
vote_account.commission,
non_zero_or_dash(vote_account.last_vote),
non_zero_or_dash(vote_account.root_slot),
vote_account
.epoch_credits
.iter()
.find_map(|(epoch, credits, _)| if *epoch == current_epoch {
Some(*credits)
} else {
None
})
.unwrap_or(0),
if vote_account.activated_stake > 0 {
format!(
"{} ({:.2}%)",
build_balance_message(vote_account.activated_stake, use_lamports_unit, true),
100. * vote_account.activated_stake as f64 / total_active_stake
)
} else {
"-".into()
},
);
let signatures =
rpc_client.get_confirmed_signatures_for_address(address, start_slot, end_slot)?;
for signature in &signatures {
println!("{}", signature);
}
for vote_account in vote_accounts.current.into_iter() {
print_vote_account(
vote_account,
epoch_info.epoch,
total_active_stake,
use_lamports_unit,
false,
);
}
for vote_account in vote_accounts.delinquent.into_iter() {
print_vote_account(
vote_account,
epoch_info.epoch,
total_active_stake,
use_lamports_unit,
true,
);
}
Ok("".to_string())
Ok(format!("{} transactions found", signatures.len(),))
}
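
The slot window searched here runs from end_slot minus slot_limit up to end_slot, with the start clamped at zero near genesis. A small sketch of that arithmetic:

fn slot_window(end_slot: u64, slot_limit: u64) -> (u64, u64) {
    (end_slot.saturating_sub(slot_limit), end_slot)
}

fn main() {
    assert_eq!(slot_window(75_000, 10_000), (65_000, 75_000));
    // Near genesis, saturating_sub keeps the start of the window at slot 0.
    assert_eq!(slot_window(3_000, 10_000), (0, 3_000));
}
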
#[cfg(test)]
@ -1255,7 +1221,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "cluster-version"]);
assert_eq!(
parse_command(&test_cluster_version, &default_keypair_file, None).unwrap(),
parse_command(&test_cluster_version, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::ClusterVersion,
signers: vec![],
@ -1264,7 +1230,7 @@ mod tests {
let test_fees = test_commands.clone().get_matches_from(vec!["test", "fees"]);
assert_eq!(
parse_command(&test_fees, &default_keypair_file, None).unwrap(),
parse_command(&test_fees, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Fees,
signers: vec![],
@ -1277,7 +1243,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "block-time", &slot.to_string()]);
assert_eq!(
parse_command(&test_get_block_time, &default_keypair_file, None).unwrap(),
parse_command(&test_get_block_time, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::GetBlockTime { slot },
signers: vec![],
@ -1288,7 +1254,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "epoch-info"]);
assert_eq!(
parse_command(&test_get_epoch_info, &default_keypair_file, None).unwrap(),
parse_command(&test_get_epoch_info, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::GetEpochInfo {
commitment_config: CommitmentConfig::recent(),
@ -1301,7 +1267,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "genesis-hash"]);
assert_eq!(
parse_command(&test_get_genesis_hash, &default_keypair_file, None).unwrap(),
parse_command(&test_get_genesis_hash, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::GetGenesisHash,
signers: vec![],
@ -1310,7 +1276,7 @@ mod tests {
let test_get_slot = test_commands.clone().get_matches_from(vec!["test", "slot"]);
assert_eq!(
parse_command(&test_get_slot, &default_keypair_file, None).unwrap(),
parse_command(&test_get_slot, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::GetSlot {
commitment_config: CommitmentConfig::recent(),
@ -1319,11 +1285,24 @@ mod tests {
}
);
let test_get_epoch = test_commands
.clone()
.get_matches_from(vec!["test", "epoch"]);
assert_eq!(
parse_command(&test_get_epoch, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::GetEpoch {
commitment_config: CommitmentConfig::recent(),
},
signers: vec![],
}
);
let test_total_supply = test_commands
.clone()
.get_matches_from(vec!["test", "total-supply"]);
assert_eq!(
parse_command(&test_total_supply, &default_keypair_file, None).unwrap(),
parse_command(&test_total_supply, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::TotalSupply {
commitment_config: CommitmentConfig::recent(),
@ -1336,7 +1315,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "transaction-count"]);
assert_eq!(
parse_command(&test_transaction_count, &default_keypair_file, None).unwrap(),
parse_command(&test_transaction_count, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::GetTransactionCount {
commitment_config: CommitmentConfig::recent(),
@ -1354,17 +1333,18 @@ mod tests {
"2",
"-t",
"3",
"--confirmed",
"--commitment",
"max",
]);
assert_eq!(
parse_command(&test_ping, &default_keypair_file, None).unwrap(),
parse_command(&test_ping, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Ping {
lamports: 1,
interval: Duration::from_secs(1),
count: Some(2),
timeout: Duration::from_secs(3),
commitment_config: CommitmentConfig::default(),
commitment_config: CommitmentConfig::max(),
},
signers: vec![default_keypair.into()],
}


@ -1,6 +1,11 @@
use crate::cli::SettingType;
use console::style;
use solana_sdk::hash::Hash;
use solana_sdk::{
hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
transaction::Transaction,
};
use solana_transaction_status::RpcTransactionStatusMeta;
use std::{fmt, io};
// Pretty print a "name value"
pub fn println_name_value(name: &str, value: &str) {
@ -12,6 +17,15 @@ pub fn println_name_value(name: &str, value: &str) {
println!("{} {}", style(name).bold(), styled_value);
}
pub fn writeln_name_value(f: &mut fmt::Formatter, name: &str, value: &str) -> fmt::Result {
let styled_value = if value == "" {
style("(not set)").italic()
} else {
style(value)
};
writeln!(f, "{} {}", style(name).bold(), styled_value)
}
pub fn println_name_value_or(name: &str, value: &str, setting_type: SettingType) {
let description = match setting_type {
SettingType::Explicit => "",
@ -49,3 +63,135 @@ pub fn println_signers(
}
println!();
}
pub fn write_transaction<W: io::Write>(
w: &mut W,
transaction: &Transaction,
transaction_status: &Option<RpcTransactionStatusMeta>,
prefix: &str,
) -> io::Result<()> {
let message = &transaction.message;
writeln!(
w,
"{}Recent Blockhash: {:?}",
prefix, message.recent_blockhash
)?;
for (signature_index, signature) in transaction.signatures.iter().enumerate() {
writeln!(
w,
"{}Signature {}: {:?}",
prefix, signature_index, signature
)?;
}
writeln!(w, "{}{:?}", prefix, message.header)?;
for (account_index, account) in message.account_keys.iter().enumerate() {
writeln!(w, "{}Account {}: {:?}", prefix, account_index, account)?;
}
for (instruction_index, instruction) in message.instructions.iter().enumerate() {
let program_pubkey = message.account_keys[instruction.program_id_index as usize];
writeln!(w, "{}Instruction {}", prefix, instruction_index)?;
writeln!(
w,
"{} Program: {} ({})",
prefix, program_pubkey, instruction.program_id_index
)?;
for (account_index, account) in instruction.accounts.iter().enumerate() {
let account_pubkey = message.account_keys[*account as usize];
writeln!(
w,
"{} Account {}: {} ({})",
prefix, account_index, account_pubkey, account
)?;
}
let mut raw = true;
if program_pubkey == solana_vote_program::id() {
if let Ok(vote_instruction) = limited_deserialize::<
solana_vote_program::vote_instruction::VoteInstruction,
>(&instruction.data)
{
writeln!(w, "{} {:?}", prefix, vote_instruction)?;
raw = false;
}
} else if program_pubkey == solana_stake_program::id() {
if let Ok(stake_instruction) = limited_deserialize::<
solana_stake_program::stake_instruction::StakeInstruction,
>(&instruction.data)
{
writeln!(w, "{} {:?}", prefix, stake_instruction)?;
raw = false;
}
} else if program_pubkey == solana_sdk::system_program::id() {
if let Ok(system_instruction) = limited_deserialize::<
solana_sdk::system_instruction::SystemInstruction,
>(&instruction.data)
{
writeln!(w, "{} {:?}", prefix, system_instruction)?;
raw = false;
}
}
if raw {
writeln!(w, "{} Data: {:?}", prefix, instruction.data)?;
}
}
if let Some(transaction_status) = transaction_status {
writeln!(
w,
"{}Status: {}",
prefix,
match &transaction_status.status {
Ok(_) => "Ok".into(),
Err(err) => err.to_string(),
}
)?;
writeln!(w, "{} Fee: {}", prefix, transaction_status.fee)?;
assert_eq!(
transaction_status.pre_balances.len(),
transaction_status.post_balances.len()
);
for (i, (pre, post)) in transaction_status
.pre_balances
.iter()
.zip(transaction_status.post_balances.iter())
.enumerate()
{
if pre == post {
writeln!(
w,
"{} Account {} balance: {} SOL",
prefix,
i,
lamports_to_sol(*pre)
)?;
} else {
writeln!(
w,
"{} Account {} balance: {} SOL -> {} SOL",
prefix,
i,
lamports_to_sol(*pre),
lamports_to_sol(*post)
)?;
}
}
} else {
writeln!(w, "{}Status: Unavailable", prefix)?;
}
Ok(())
}
pub fn println_transaction(
transaction: &Transaction,
transaction_status: &Option<RpcTransactionStatusMeta>,
prefix: &str,
) {
let mut w = Vec::new();
if write_transaction(&mut w, transaction, transaction_status, prefix).is_ok() {
if let Ok(s) = String::from_utf8(w) {
print!("{}", s);
}
}
}


@ -1,4 +1,8 @@
#[macro_use]
extern crate serde_derive;
pub mod cli;
pub mod cli_output;
pub mod cluster_query;
pub mod display;
pub mod nonce;


@ -7,10 +7,11 @@ use solana_clap_utils::{
};
use solana_cli::{
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliSigners},
cli_output::OutputFormat,
display::{println_name_value, println_name_value_or},
};
use solana_cli_config::{Config, CONFIG_FILE};
use solana_remote_wallet::remote_wallet::{maybe_wallet_manager, RemoteWalletManager};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use std::{error, sync::Arc};
fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error>> {
@ -102,7 +103,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
pub fn parse_args<'a>(
matches: &ArgMatches<'_>,
wallet_manager: Option<Arc<RemoteWalletManager>>,
mut wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<(CliConfig<'a>, CliSigners), Box<dyn error::Error>> {
let config = if let Some(config_file) = matches.value_of("config_file") {
Config::load(config_file).unwrap_or_default()
@ -125,7 +126,16 @@ pub fn parse_args<'a>(
);
let CliCommandInfo { command, signers } =
parse_command(&matches, &default_signer_path, wallet_manager.as_ref())?;
parse_command(&matches, &default_signer_path, &mut wallet_manager)?;
let output_format = matches
.value_of("output_format")
.map(|value| match value {
"json" => OutputFormat::Json,
"json-compact" => OutputFormat::JsonCompact,
_ => unreachable!(),
})
.unwrap_or(OutputFormat::Display);
Ok((
CliConfig {
@ -136,6 +146,7 @@ pub fn parse_args<'a>(
keypair_path: default_signer_path,
rpc_client: None,
verbose: matches.is_present("verbose"),
output_format,
},
signers,
))
@ -197,6 +208,14 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.global(true)
.help("Show additional information"),
)
.arg(
Arg::with_name("output_format")
.long("output")
.global(true)
.takes_value(true)
.possible_values(&["json", "json-compact"])
.help("Return information in specified output format. Supports: json, json-compact"),
)
.arg(
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
.long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
@ -238,9 +257,9 @@ fn main() -> Result<(), Box<dyn error::Error>> {
fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
if parse_settings(&matches)? {
let wallet_manager = maybe_wallet_manager()?;
let mut wallet_manager = None;
let (mut config, signers) = parse_args(&matches, wallet_manager)?;
let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?;
config.signers = signers.iter().map(|s| s.as_ref()).collect();
let result = process_command(&config)?;
let (_, submatches) = matches.subcommand();


@ -1,7 +1,10 @@
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
SignerIndex,
use crate::{
cli::{
check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult, SignerIndex,
},
cli_output::CliNonceAccount,
};
use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{
@ -276,7 +279,7 @@ pub fn data_from_state(state: &State) -> Result<&Data, CliNonceError> {
pub fn parse_authorize_nonce_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let new_authority = pubkey_of_signer(matches, "new_authority", wallet_manager)?.unwrap();
@ -304,7 +307,7 @@ pub fn parse_authorize_nonce_account(
pub fn parse_nonce_create_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let (nonce_account, nonce_account_pubkey) =
signer_of(matches, "nonce_account_keypair", wallet_manager)?;
@ -333,7 +336,7 @@ pub fn parse_nonce_create_account(
pub fn parse_get_nonce(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account_pubkey =
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
@ -347,7 +350,7 @@ pub fn parse_get_nonce(
pub fn parse_new_nonce(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let (nonce_authority, nonce_authority_pubkey) =
@ -372,7 +375,7 @@ pub fn parse_new_nonce(
pub fn parse_show_nonce_account(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account_pubkey =
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
@ -390,7 +393,7 @@ pub fn parse_show_nonce_account(
pub fn parse_withdraw_from_nonce_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let destination_account_pubkey =
@ -584,38 +587,26 @@ pub fn process_new_nonce(
pub fn process_show_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account_pubkey: &Pubkey,
use_lamports_unit: bool,
) -> ProcessResult {
let nonce_account = get_account(rpc_client, nonce_account_pubkey)?;
let print_account = |data: Option<&nonce::state::Data>| {
println!(
"Balance: {}",
build_balance_message(nonce_account.lamports, use_lamports_unit, true)
);
println!(
"Minimum Balance Required: {}",
build_balance_message(
rpc_client.get_minimum_balance_for_rent_exemption(State::size())?,
use_lamports_unit,
true
)
);
match data {
Some(ref data) => {
println!("Nonce: {}", data.blockhash);
println!(
"Fee: {} lamports per signature",
data.fee_calculator.lamports_per_signature
);
println!("Authority: {}", data.authority);
}
None => {
println!("Nonce: uninitialized");
println!("Fees: uninitialized");
println!("Authority: uninitialized");
}
let mut nonce_account = CliNonceAccount {
balance: nonce_account.lamports,
minimum_balance_for_rent_exemption: rpc_client
.get_minimum_balance_for_rent_exemption(State::size())?,
use_lamports_unit,
..CliNonceAccount::default()
};
if let Some(ref data) = data {
nonce_account.nonce = Some(data.blockhash.to_string());
nonce_account.lamports_per_signature = Some(data.fee_calculator.lamports_per_signature);
nonce_account.authority = Some(data.authority.to_string());
}
config.output_format.formatted_print(&nonce_account);
Ok("".to_string())
};
match state_from_account(&nonce_account)? {
@ -697,7 +688,12 @@ mod tests {
&Pubkey::default().to_string(),
]);
assert_eq!(
parse_command(&test_authorize_nonce_account, &default_keypair_file, None).unwrap(),
parse_command(
&test_authorize_nonce_account,
&default_keypair_file,
&mut None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
nonce_account: nonce_account_pubkey,
@ -718,7 +714,12 @@ mod tests {
&authority_keypair_file,
]);
assert_eq!(
parse_command(&test_authorize_nonce_account, &default_keypair_file, None).unwrap(),
parse_command(
&test_authorize_nonce_account,
&default_keypair_file,
&mut None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
@ -740,7 +741,7 @@ mod tests {
"50",
]);
assert_eq!(
parse_command(&test_create_nonce_account, &default_keypair_file, None).unwrap(),
parse_command(&test_create_nonce_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: 1,
@ -765,7 +766,7 @@ mod tests {
&authority_keypair_file,
]);
assert_eq!(
parse_command(&test_create_nonce_account, &default_keypair_file, None).unwrap(),
parse_command(&test_create_nonce_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: 1,
@ -787,7 +788,7 @@ mod tests {
&nonce_account_string,
]);
assert_eq!(
parse_command(&test_get_nonce, &default_keypair_file, None).unwrap(),
parse_command(&test_get_nonce, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::GetNonce(nonce_account_keypair.pubkey()),
signers: vec![],
@ -801,7 +802,7 @@ mod tests {
.get_matches_from(vec!["test", "new-nonce", &keypair_file]);
let nonce_account = read_keypair_file(&keypair_file).unwrap();
assert_eq!(
parse_command(&test_new_nonce, &default_keypair_file, None).unwrap(),
parse_command(&test_new_nonce, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::NewNonce {
nonce_account: nonce_account.pubkey(),
@ -821,7 +822,7 @@ mod tests {
]);
let nonce_account = read_keypair_file(&keypair_file).unwrap();
assert_eq!(
parse_command(&test_new_nonce, &default_keypair_file, None).unwrap(),
parse_command(&test_new_nonce, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::NewNonce {
nonce_account: nonce_account.pubkey(),
@ -841,7 +842,7 @@ mod tests {
&nonce_account_string,
]);
assert_eq!(
parse_command(&test_show_nonce_account, &default_keypair_file, None).unwrap(),
parse_command(&test_show_nonce_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::ShowNonceAccount {
nonce_account_pubkey: nonce_account_keypair.pubkey(),
@ -863,7 +864,7 @@ mod tests {
parse_command(
&test_withdraw_from_nonce_account,
&default_keypair_file,
None
&mut None
)
.unwrap(),
CliCommandInfo {
@ -891,7 +892,7 @@ mod tests {
parse_command(
&test_withdraw_from_nonce_account,
&default_keypair_file,
None
&mut None
)
.unwrap(),
CliCommandInfo {


@ -1,15 +1,14 @@
use crate::{
cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys, fee_payer_arg,
generate_unique_signers, log_instruction_custom_error, nonce_authority_arg, return_signers,
CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, SignerIndex, FEE_PAYER_ARG,
check_account_for_fee, check_unique_pubkeys, fee_payer_arg, generate_unique_signers,
log_instruction_custom_error, nonce_authority_arg, return_signers, CliCommand,
CliCommandInfo, CliConfig, CliError, ProcessResult, SignerIndex, FEE_PAYER_ARG,
},
cli_output::{CliStakeHistory, CliStakeHistoryEntry, CliStakeState, CliStakeType},
nonce::{check_nonce_account, nonce_arg, NONCE_ARG, NONCE_AUTHORITY_ARG},
offline::{blockhash_query::BlockhashQuery, *},
};
use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};
use clap::{App, Arg, ArgGroup, ArgMatches, SubCommand};
use console::style;
use solana_clap_utils::{input_parsers::*, input_validators::*, offline::*, ArgConstant};
use solana_client::rpc_client::RpcClient;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
@ -314,6 +313,14 @@ impl StakeSubCommands for App<'_, '_> {
.arg(nonce_arg())
.arg(nonce_authority_arg())
.arg(fee_payer_arg())
.arg(
Arg::with_name("custodian")
.long("custodian")
.takes_value(true)
.value_name("KEYPAIR")
.validator(is_valid_signer)
.help("Authority to override account lockup")
)
)
.subcommand(
SubCommand::with_name("stake-set-lockup")
@ -403,7 +410,7 @@ impl StakeSubCommands for App<'_, '_> {
pub fn parse_stake_create_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let seed = matches.value_of("seed").map(|s| s.to_string());
let epoch = value_of(matches, "lockup_epoch").unwrap_or(0);
@ -455,7 +462,7 @@ pub fn parse_stake_create_account(
pub fn parse_stake_delegate_stake(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@ -497,7 +504,7 @@ pub fn parse_stake_delegate_stake(
pub fn parse_stake_authorize(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@ -580,7 +587,7 @@ pub fn parse_stake_authorize(
pub fn parse_split_stake(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@ -625,7 +632,7 @@ pub fn parse_split_stake(
pub fn parse_stake_deactivate_stake(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@ -662,7 +669,7 @@ pub fn parse_stake_deactivate_stake(
pub fn parse_stake_withdraw_stake(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@ -677,11 +684,15 @@ pub fn parse_stake_withdraw_stake(
let (nonce_authority, nonce_authority_pubkey) =
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?;
let (custodian, custodian_pubkey) = signer_of(matches, "custodian", wallet_manager)?;
let mut bulk_signers = vec![withdraw_authority, fee_payer];
if nonce_account.is_some() {
bulk_signers.push(nonce_authority);
}
if custodian.is_some() {
bulk_signers.push(custodian);
}
let signer_info =
generate_unique_signers(bulk_signers, matches, default_signer_path, wallet_manager)?;
@ -696,6 +707,7 @@ pub fn parse_stake_withdraw_stake(
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(),
custodian: custodian_pubkey.and_then(|_| signer_info.index_of(custodian_pubkey)),
},
signers: signer_info.signers,
})
@ -704,7 +716,7 @@ pub fn parse_stake_withdraw_stake(
pub fn parse_stake_set_lockup(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@ -749,7 +761,7 @@ pub fn parse_stake_set_lockup(
pub fn parse_show_stake_account(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@ -1014,6 +1026,7 @@ pub fn process_withdraw_stake(
destination_account_pubkey: &Pubkey,
lamports: u64,
withdraw_authority: SignerIndex,
custodian: Option<SignerIndex>,
sign_only: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<&Pubkey>,
@ -1023,12 +1036,14 @@ pub fn process_withdraw_stake(
let (recent_blockhash, fee_calculator) =
blockhash_query.get_blockhash_and_fee_calculator(rpc_client)?;
let withdraw_authority = config.signers[withdraw_authority];
let custodian = custodian.map(|index| config.signers[index]);
let ixs = vec![stake_instruction::withdraw(
stake_account_pubkey,
&withdraw_authority.pubkey(),
destination_account_pubkey,
lamports,
custodian.map(|signer| signer.pubkey()).as_ref(),
)];
let fee_payer = config.signers[fee_payer];
@ -1257,79 +1272,61 @@ pub fn process_stake_set_lockup(
}
}
pub fn print_stake_state(stake_lamports: u64, stake_state: &StakeState, use_lamports_unit: bool) {
fn show_authorized(authorized: &Authorized) {
println!("Stake Authority: {}", authorized.staker);
println!("Withdraw Authority: {}", authorized.withdrawer);
}
fn show_lockup(lockup: &Lockup) {
println!(
"Lockup Timestamp: {} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(lockup.unix_timestamp, 0), Utc)
.to_rfc3339_opts(SecondsFormat::Secs, true),
lockup.unix_timestamp
);
println!("Lockup Epoch: {}", lockup.epoch);
println!("Lockup Custodian: {}", lockup.custodian);
}
pub fn build_stake_state(
stake_lamports: u64,
stake_state: &StakeState,
use_lamports_unit: bool,
) -> CliStakeState {
match stake_state {
StakeState::Stake(
Meta {
authorized, lockup, ..
},
stake,
) => {
println!(
"Total Stake: {}",
build_balance_message(stake_lamports, use_lamports_unit, true)
);
println!("Credits Observed: {}", stake.credits_observed);
println!(
"Delegated Stake: {}",
build_balance_message(stake.delegation.stake, use_lamports_unit, true)
);
if stake.delegation.voter_pubkey != Pubkey::default() {
println!(
"Delegated Vote Account Address: {}",
stake.delegation.voter_pubkey
);
}
println!(
"Stake activates starting from epoch: {}",
if stake.delegation.activation_epoch < std::u64::MAX {
stake.delegation.activation_epoch
} else {
0
}
);
if stake.delegation.deactivation_epoch < std::u64::MAX {
println!(
"Stake deactivates starting from epoch: {}",
stake.delegation.deactivation_epoch
);
}
show_authorized(&authorized);
show_lockup(&lockup);
}
StakeState::RewardsPool => println!("Stake account is a rewards pool"),
StakeState::Uninitialized => println!("Stake account is uninitialized"),
) => CliStakeState {
stake_type: CliStakeType::Stake,
total_stake: stake_lamports,
delegated_stake: Some(stake.delegation.stake),
delegated_vote_account_address: if stake.delegation.voter_pubkey != Pubkey::default() {
Some(stake.delegation.voter_pubkey.to_string())
} else {
None
},
activation_epoch: Some(if stake.delegation.activation_epoch < std::u64::MAX {
stake.delegation.activation_epoch
} else {
0
}),
deactivation_epoch: if stake.delegation.deactivation_epoch < std::u64::MAX {
Some(stake.delegation.deactivation_epoch)
} else {
None
},
authorized: Some(authorized.into()),
lockup: Some(lockup.into()),
use_lamports_unit,
},
StakeState::RewardsPool => CliStakeState {
stake_type: CliStakeType::RewardsPool,
..CliStakeState::default()
},
StakeState::Uninitialized => CliStakeState::default(),
StakeState::Initialized(Meta {
authorized, lockup, ..
}) => {
println!(
"Total Stake: {}",
build_balance_message(stake_lamports, use_lamports_unit, true)
);
println!("Stake account is undelegated");
show_authorized(&authorized);
show_lockup(&lockup);
}
}) => CliStakeState {
stake_type: CliStakeType::Initialized,
total_stake: stake_lamports,
authorized: Some(authorized.into()),
lockup: Some(lockup.into()),
use_lamports_unit,
..CliStakeState::default()
},
}
}
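
build_stake_state maps the stake program's u64::MAX sentinels into CLI-friendly values: an activation epoch of u64::MAX becomes 0 (treated as active from the start) and a deactivation epoch of u64::MAX becomes None (no deactivation scheduled). A minimal sketch of that mapping, assuming those sentinel semantics:

fn activation_epoch(raw: u64) -> u64 {
    if raw < std::u64::MAX { raw } else { 0 }
}

fn deactivation_epoch(raw: u64) -> Option<u64> {
    if raw < std::u64::MAX { Some(raw) } else { None }
}

fn main() {
    assert_eq!(activation_epoch(std::u64::MAX), 0);
    assert_eq!(deactivation_epoch(std::u64::MAX), None);
    assert_eq!(deactivation_epoch(250), Some(250));
}
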
pub fn process_show_stake_account(
rpc_client: &RpcClient,
_config: &CliConfig,
config: &CliConfig,
stake_account_pubkey: &Pubkey,
use_lamports_unit: bool,
) -> ProcessResult {
@ -1343,7 +1340,8 @@ pub fn process_show_stake_account(
}
match stake_account.state() {
Ok(stake_state) => {
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
let state = build_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
config.output_format.formatted_print(&state);
Ok("".to_string())
}
Err(err) => Err(CliError::RpcRequestError(format!(
@ -1356,7 +1354,7 @@ pub fn process_show_stake_account(
pub fn process_show_stake_history(
rpc_client: &RpcClient,
_config: &CliConfig,
config: &CliConfig,
use_lamports_unit: bool,
) -> ProcessResult {
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
@ -1364,26 +1362,15 @@ pub fn process_show_stake_history(
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
})?;
println!();
println!(
"{}",
style(format!(
" {:<5} {:>20} {:>20} {:>20}",
"Epoch", "Effective Stake", "Activating Stake", "Deactivating Stake",
))
.bold()
);
for (epoch, entry) in stake_history.deref() {
println!(
" {:>5} {:>20} {:>20} {:>20} {}",
epoch,
build_balance_message(entry.effective, use_lamports_unit, false),
build_balance_message(entry.activating, use_lamports_unit, false),
build_balance_message(entry.deactivating, use_lamports_unit, false),
if use_lamports_unit { "lamports" } else { "SOL" }
);
let mut entries: Vec<CliStakeHistoryEntry> = vec![];
for entry in stake_history.deref() {
entries.push(entry.into());
}
let stake_history_output = CliStakeHistory {
entries,
use_lamports_unit,
};
config.output_format.formatted_print(&stake_history_output);
Ok("".to_string())
}
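The `entry.into()` call above implies a conversion from the `(Epoch, StakeHistoryEntry)` pairs that `StakeHistory` dereferences to. The real `CliStakeHistoryEntry` lives in `cli_output` and is not shown in this diff; this is a hedged sketch of one way that conversion could look, with field names chosen for illustration and assuming `StakeHistoryEntry` is exported from `solana_sdk::sysvar::stake_history` in this release.

use serde::Serialize;
use solana_sdk::{clock::Epoch, sysvar::stake_history::StakeHistoryEntry};

// Illustrative only: the real struct and From impl live in cli_output.rs and
// may use different names.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct CliStakeHistoryEntry {
    pub epoch: Epoch,
    pub effective_stake: u64,
    pub activating_stake: u64,
    pub deactivating_stake: u64,
}

impl From<&(Epoch, StakeHistoryEntry)> for CliStakeHistoryEntry {
    fn from((epoch, entry): &(Epoch, StakeHistoryEntry)) -> Self {
        Self {
            epoch: *epoch,
            effective_stake: entry.effective,
            activating_stake: entry.activating,
            deactivating_stake: entry.deactivating,
        }
    }
}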
@ -1525,6 +1512,9 @@ mod tests {
let (stake_authority_keypair_file, mut tmp_file) = make_tmp_file();
let stake_authority_keypair = Keypair::new();
write_keypair(&stake_authority_keypair, tmp_file.as_file_mut()).unwrap();
let (custodian_keypair_file, mut tmp_file) = make_tmp_file();
let custodian_keypair = Keypair::new();
write_keypair(&custodian_keypair, tmp_file.as_file_mut()).unwrap();
// stake-authorize subcommand
let stake_account_string = stake_account_pubkey.to_string();
@ -1542,7 +1532,7 @@ mod tests {
&new_withdraw_string,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1576,7 +1566,7 @@ mod tests {
&withdraw_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1614,7 +1604,7 @@ mod tests {
&withdraw_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1644,7 +1634,7 @@ mod tests {
&new_stake_string,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1668,7 +1658,7 @@ mod tests {
&stake_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1698,7 +1688,7 @@ mod tests {
&withdraw_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1725,7 +1715,7 @@ mod tests {
&new_withdraw_string,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1753,7 +1743,7 @@ mod tests {
&withdraw_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1791,7 +1781,7 @@ mod tests {
"--sign-only",
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1824,7 +1814,7 @@ mod tests {
&pubkey.to_string(),
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1870,7 +1860,7 @@ mod tests {
&pubkey2.to_string(),
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1902,7 +1892,7 @@ mod tests {
&blockhash_string,
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1939,7 +1929,7 @@ mod tests {
&nonce_keypair_file,
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -1975,7 +1965,7 @@ mod tests {
&fee_payer_keypair_file,
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -2009,7 +1999,7 @@ mod tests {
&signer,
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@ -2050,7 +2040,7 @@ mod tests {
"43",
]);
assert_eq!(
parse_command(&test_create_stake_account, &default_keypair_file, None).unwrap(),
parse_command(&test_create_stake_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateStakeAccount {
stake_account: 1,
@ -2091,7 +2081,12 @@ mod tests {
]);
assert_eq!(
parse_command(&test_create_stake_account2, &default_keypair_file, None).unwrap(),
parse_command(
&test_create_stake_account2,
&default_keypair_file,
&mut None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::CreateStakeAccount {
stake_account: 1,
@ -2144,7 +2139,12 @@ mod tests {
]);
assert_eq!(
parse_command(&test_create_stake_account2, &default_keypair_file, None).unwrap(),
parse_command(
&test_create_stake_account2,
&default_keypair_file,
&mut None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::CreateStakeAccount {
stake_account: 1,
@ -2180,7 +2180,7 @@ mod tests {
&vote_account_string,
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@ -2209,7 +2209,7 @@ mod tests {
&stake_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@ -2240,7 +2240,7 @@ mod tests {
&vote_account_string,
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@ -2269,7 +2269,7 @@ mod tests {
&blockhash_string,
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@ -2299,7 +2299,7 @@ mod tests {
"--sign-only",
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@ -2333,7 +2333,7 @@ mod tests {
&key1.to_string(),
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@ -2379,7 +2379,7 @@ mod tests {
&key2.to_string(),
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@ -2416,7 +2416,7 @@ mod tests {
&fee_payer_keypair_file,
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@ -2446,13 +2446,14 @@ mod tests {
]);
assert_eq!(
parse_command(&test_withdraw_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawStake {
stake_account_pubkey,
destination_account_pubkey: stake_account_pubkey,
lamports: 42_000_000_000,
withdraw_authority: 0,
custodian: None,
sign_only: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@ -2475,13 +2476,14 @@ mod tests {
]);
assert_eq!(
parse_command(&test_withdraw_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawStake {
stake_account_pubkey,
destination_account_pubkey: stake_account_pubkey,
lamports: 42_000_000_000,
withdraw_authority: 1,
custodian: None,
sign_only: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@ -2497,6 +2499,39 @@ mod tests {
}
);
// Test WithdrawStake Subcommand w/ custodian
let test_withdraw_stake = test_commands.clone().get_matches_from(vec![
"test",
"withdraw-stake",
&stake_account_string,
&stake_account_string,
"42",
"--custodian",
&custodian_keypair_file,
]);
assert_eq!(
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawStake {
stake_account_pubkey,
destination_account_pubkey: stake_account_pubkey,
lamports: 42_000_000_000,
withdraw_authority: 0,
custodian: Some(1),
sign_only: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
read_keypair_file(&custodian_keypair_file).unwrap().into()
],
}
);
// Test WithdrawStake Subcommand w/ authority and offline nonce
let test_withdraw_stake = test_commands.clone().get_matches_from(vec![
"test",
@ -2519,13 +2554,14 @@ mod tests {
]);
assert_eq!(
parse_command(&test_withdraw_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawStake {
stake_account_pubkey,
destination_account_pubkey: stake_account_pubkey,
lamports: 42_000_000_000,
withdraw_authority: 0,
custodian: None,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account),
@ -2551,7 +2587,7 @@ mod tests {
&stake_account_string,
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@ -2575,7 +2611,7 @@ mod tests {
&stake_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@ -2606,7 +2642,7 @@ mod tests {
&blockhash_string,
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@ -2633,7 +2669,7 @@ mod tests {
"--sign-only",
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@ -2664,7 +2700,7 @@ mod tests {
&key1.to_string(),
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@ -2707,7 +2743,7 @@ mod tests {
&key2.to_string(),
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@ -2738,7 +2774,7 @@ mod tests {
&fee_payer_keypair_file,
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@ -2772,7 +2808,7 @@ mod tests {
"50",
]);
assert_eq!(
parse_command(&test_split_stake_account, &default_keypair_file, None).unwrap(),
parse_command(&test_split_stake_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::SplitStake {
stake_account_pubkey: stake_account_keypair.pubkey(),
@ -2833,7 +2869,7 @@ mod tests {
&stake_signer,
]);
assert_eq!(
parse_command(&test_split_stake_account, &default_keypair_file, None).unwrap(),
parse_command(&test_split_stake_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::SplitStake {
stake_account_pubkey: stake_account_keypair.pubkey(),

View File

@ -102,7 +102,7 @@ impl StorageSubCommands for App<'_, '_> {
pub fn parse_storage_create_archiver_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let account_owner =
pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap();
@ -130,7 +130,7 @@ pub fn parse_storage_create_archiver_account(
pub fn parse_storage_create_validator_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let account_owner =
pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap();
@ -158,7 +158,7 @@ pub fn parse_storage_create_validator_account(
pub fn parse_storage_claim_reward(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let node_account_pubkey =
pubkey_of_signer(matches, "node_account_pubkey", wallet_manager)?.unwrap();
@ -180,7 +180,7 @@ pub fn parse_storage_claim_reward(
pub fn parse_storage_get_account_command(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let storage_account_pubkey =
pubkey_of_signer(matches, "storage_account_pubkey", wallet_manager)?.unwrap();
@ -330,7 +330,7 @@ mod tests {
parse_command(
&test_create_archiver_storage_account,
&default_keypair_file,
None
&mut None
)
.unwrap(),
CliCommandInfo {
@ -362,7 +362,7 @@ mod tests {
parse_command(
&test_create_validator_storage_account,
&default_keypair_file,
None
&mut None
)
.unwrap(),
CliCommandInfo {
@ -385,7 +385,7 @@ mod tests {
&storage_account_string,
]);
assert_eq!(
parse_command(&test_claim_storage_reward, &default_keypair_file, None).unwrap(),
parse_command(&test_claim_storage_reward, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::ClaimStorageReward {
node_account_pubkey: pubkey,

View File

@ -1,6 +1,6 @@
use crate::{
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
display::println_name_value,
cli_output::{CliValidatorInfo, CliValidatorInfoVec},
};
use bincode::deserialize;
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
@ -25,7 +25,6 @@ use solana_sdk::{
transaction::Transaction,
};
use std::{error, sync::Arc};
use titlecase::titlecase;
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
@ -229,7 +228,7 @@ impl ValidatorInfoSubCommands for App<'_, '_> {
pub fn parse_validator_info_command(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let info_pubkey = pubkey_of(matches, "info_pubkey");
// Prepare validator info
@ -375,7 +374,11 @@ pub fn process_set_validator_info(
Ok("".to_string())
}
pub fn process_get_validator_info(rpc_client: &RpcClient, pubkey: Option<Pubkey>) -> ProcessResult {
pub fn process_get_validator_info(
rpc_client: &RpcClient,
config: &CliConfig,
pubkey: Option<Pubkey>,
) -> ProcessResult {
let validator_info: Vec<(Pubkey, Account)> = if let Some(validator_info_pubkey) = pubkey {
vec![(
validator_info_pubkey,
@ -394,23 +397,22 @@ pub fn process_get_validator_info(rpc_client: &RpcClient, pubkey: Option<Pubkey>
.collect()
};
let mut validator_info_list: Vec<CliValidatorInfo> = vec![];
if validator_info.is_empty() {
println!("No validator info accounts found");
}
for (validator_info_pubkey, validator_info_account) in validator_info.iter() {
let (validator_pubkey, validator_info) =
parse_validator_info(&validator_info_pubkey, &validator_info_account)?;
println!();
println_name_value("Validator Identity Pubkey:", &validator_pubkey.to_string());
println_name_value(" Info Pubkey:", &validator_info_pubkey.to_string());
for (key, value) in validator_info.iter() {
println_name_value(
&format!(" {}:", titlecase(key)),
&value.as_str().unwrap_or("?"),
);
}
validator_info_list.push(CliValidatorInfo {
identity_pubkey: validator_pubkey.to_string(),
info_pubkey: validator_info_pubkey.to_string(),
info: validator_info,
});
}
config
.output_format
.formatted_print(&CliValidatorInfoVec::new(validator_info_list));
Ok("".to_string())
}

View File

@ -1,10 +1,17 @@
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, CliSignerInfo,
ProcessResult, SignerIndex,
use crate::{
cli::{
check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult, SignerIndex,
},
cli_output::{CliEpochVotingHistory, CliLockout, CliVoteAccount},
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*};
use solana_clap_utils::{
commitment::{commitment_arg, COMMITMENT_ARG},
input_parsers::*,
input_validators::*,
};
use solana_client::rpc_client::RpcClient;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
@ -89,8 +96,16 @@ impl VoteSubCommands for App<'_, '_> {
.help("Vote account in which to set the authorized voter"),
)
.arg(
Arg::with_name("new_authorized_pubkey")
Arg::with_name("authorized")
.index(2)
.value_name("AUTHORIZED_KEYPAIR")
.required(true)
.validator(is_valid_signer)
.help("Current authorized vote signer"),
)
.arg(
Arg::with_name("new_authorized_pubkey")
.index(3)
.value_name("AUTHORIZED_PUBKEY")
.takes_value(true)
.required(true)
@ -111,8 +126,16 @@ impl VoteSubCommands for App<'_, '_> {
.help("Vote account in which to set the authorized withdrawer"),
)
.arg(
Arg::with_name("new_authorized_pubkey")
Arg::with_name("authorized")
.index(2)
.value_name("AUTHORIZED_KEYPAIR")
.required(true)
.validator(is_valid_signer)
.help("Current authorized withdrawer"),
)
.arg(
Arg::with_name("new_authorized_pubkey")
.index(3)
.value_name("AUTHORIZED_PUBKEY")
.takes_value(true)
.required(true)
@ -155,14 +178,6 @@ impl VoteSubCommands for App<'_, '_> {
SubCommand::with_name("vote-account")
.about("Show the contents of a vote account")
.alias("show-vote-account")
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Return information at maximum-lockout commitment level",
),
)
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
@ -177,7 +192,8 @@ impl VoteSubCommands for App<'_, '_> {
.long("lamports")
.takes_value(false)
.help("Display balance in lamports instead of SOL"),
),
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("withdraw-from-vote-account")
@ -224,7 +240,7 @@ impl VoteSubCommands for App<'_, '_> {
pub fn parse_create_vote_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
let seed = matches.value_of("seed").map(|s| s.to_string());
@ -257,17 +273,18 @@ pub fn parse_create_vote_account(
pub fn parse_vote_authorize(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
vote_authorize: VoteAuthorize,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let new_authorized_pubkey =
pubkey_of_signer(matches, "new_authorized_pubkey", wallet_manager)?.unwrap();
let (authorized, _) = signer_of(matches, "authorized", wallet_manager)?;
let authorized_voter_provided = None;
let CliSignerInfo { signers } = generate_unique_signers(
vec![authorized_voter_provided],
let payer_provided = None;
let signer_info = generate_unique_signers(
vec![payer_provided, authorized],
matches,
default_signer_path,
wallet_manager,
@ -279,14 +296,14 @@ pub fn parse_vote_authorize(
new_authorized_pubkey,
vote_authorize,
},
signers,
signers: signer_info.signers,
})
}
pub fn parse_vote_update_validator(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
@ -313,16 +330,13 @@ pub fn parse_vote_update_validator(
pub fn parse_vote_get_account_command(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let use_lamports_unit = matches.is_present("lamports");
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
let commitment_config =
commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent);
Ok(CliCommandInfo {
command: CliCommand::ShowVoteAccount {
pubkey: vote_account_pubkey,
@ -336,7 +350,7 @@ pub fn parse_vote_get_account_command(
pub fn parse_withdraw_from_vote_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
@ -455,16 +469,24 @@ pub fn process_vote_authorize(
new_authorized_pubkey: &Pubkey,
vote_authorize: VoteAuthorize,
) -> ProcessResult {
// If the `authorized_account` is also the fee payer, `config.signers` will only have one
// keypair in it
let authorized = if config.signers.len() == 2 {
config.signers[1]
} else {
config.signers[0]
};
check_unique_pubkeys(
(vote_account_pubkey, "vote_account_pubkey".to_string()),
(&authorized.pubkey(), "authorized_account".to_string()),
(new_authorized_pubkey, "new_authorized_pubkey".to_string()),
)?;
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let ixs = vec![vote_instruction::authorize(
vote_account_pubkey, // vote account to update
&config.signers[0].pubkey(), // current authorized voter
new_authorized_pubkey, // new vote signer/withdrawer
vote_authorize, // vote or withdraw
vote_account_pubkey, // vote account to update
&authorized.pubkey(), // current authorized
new_authorized_pubkey, // new vote signer/withdrawer
vote_authorize, // vote or withdraw
)];
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
@ -495,7 +517,7 @@ pub fn process_vote_update_validator(
(&new_identity_pubkey, "new_identity_account".to_string()),
)?;
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let ixs = vec![vote_instruction::update_node(
let ixs = vec![vote_instruction::update_validator_identity(
vote_account_pubkey,
&authorized_withdrawer.pubkey(),
&new_identity_pubkey,
@ -544,7 +566,7 @@ fn get_vote_account(
pub fn process_show_vote_account(
rpc_client: &RpcClient,
_config: &CliConfig,
config: &CliConfig,
vote_account_pubkey: &Pubkey,
use_lamports_unit: bool,
commitment_config: CommitmentConfig,
@ -554,45 +576,38 @@ pub fn process_show_vote_account(
let epoch_schedule = rpc_client.get_epoch_schedule()?;
println!(
        "Account Balance: {}",
        build_balance_message(vote_account.lamports, use_lamports_unit, true)
    );
    println!("Validator Identity: {}", vote_state.node_pubkey);
    println!("Authorized Voter: {:?}", vote_state.authorized_voters());
    println!(
        "Authorized Withdrawer: {}",
        vote_state.authorized_withdrawer
    );
    println!("Credits: {}", vote_state.credits());
    println!("Commission: {}%", vote_state.commission);
    println!(
        "Root Slot: {}",
        match vote_state.root_slot {
            Some(slot) => slot.to_string(),
            None => "~".to_string(),
        }
    );
    println!("Recent Timestamp: {:?}", vote_state.last_timestamp);
    if !vote_state.votes.is_empty() {
        println!("recent votes:");
        for vote in &vote_state.votes {
            println!(
                "- slot: {}\n confirmation count: {}",
                vote.slot, vote.confirmation_count
            );
        }
        println!("Epoch Voting History:");
        for (epoch, credits, prev_credits) in vote_state.epoch_credits() {
            let credits_earned = credits - prev_credits;
            let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
            println!(
                "- epoch: {}\n slots in epoch: {}\n credits earned: {}",
                epoch, slots_in_epoch, credits_earned,
            );
        }
    }
    let mut votes: Vec<CliLockout> = vec![];
    let mut epoch_voting_history: Vec<CliEpochVotingHistory> = vec![];
    if !vote_state.votes.is_empty() {
        for vote in &vote_state.votes {
            votes.push(vote.into());
        }
        for (epoch, credits, prev_credits) in vote_state.epoch_credits() {
            let credits_earned = credits - prev_credits;
            let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
            epoch_voting_history.push(CliEpochVotingHistory {
                epoch: *epoch,
                slots_in_epoch,
                credits_earned,
            });
        }
    }
let vote_account_data = CliVoteAccount {
account_balance: vote_account.lamports,
validator_identity: vote_state.node_pubkey.to_string(),
authorized_voters: vote_state.authorized_voters().into(),
authorized_withdrawer: vote_state.authorized_withdrawer.to_string(),
credits: vote_state.credits(),
commission: vote_state.commission,
root_slot: vote_state.root_slot,
recent_timestamp: vote_state.last_timestamp.clone(),
votes,
epoch_voting_history,
use_lamports_unit,
};
config.output_format.formatted_print(&vote_account_data);
Ok("".to_string())
}
@ -658,10 +673,11 @@ mod tests {
"test",
"vote-authorize-voter",
&pubkey_string,
&default_keypair_file,
&pubkey2_string,
]);
assert_eq!(
parse_command(&test_authorize_voter, &default_keypair_file, None).unwrap(),
parse_command(&test_authorize_voter, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::VoteAuthorize {
vote_account_pubkey: pubkey,
@ -672,6 +688,32 @@ mod tests {
}
);
let authorized_keypair = Keypair::new();
let (authorized_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&authorized_keypair, tmp_file.as_file_mut()).unwrap();
let test_authorize_voter = test_commands.clone().get_matches_from(vec![
"test",
"vote-authorize-voter",
&pubkey_string,
&authorized_keypair_file,
&pubkey2_string,
]);
assert_eq!(
parse_command(&test_authorize_voter, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::VoteAuthorize {
vote_account_pubkey: pubkey,
new_authorized_pubkey: pubkey2,
vote_authorize: VoteAuthorize::Voter
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
read_keypair_file(&authorized_keypair_file).unwrap().into(),
],
}
);
let (keypair_file, mut tmp_file) = make_tmp_file();
let keypair = Keypair::new();
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
@ -688,7 +730,7 @@ mod tests {
"10",
]);
assert_eq!(
parse_command(&test_create_vote_account, &default_keypair_file, None).unwrap(),
parse_command(&test_create_vote_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
@ -716,7 +758,7 @@ mod tests {
&identity_keypair_file,
]);
assert_eq!(
parse_command(&test_create_vote_account2, &default_keypair_file, None).unwrap(),
parse_command(&test_create_vote_account2, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
@ -748,7 +790,7 @@ mod tests {
&authed.to_string(),
]);
assert_eq!(
parse_command(&test_create_vote_account3, &default_keypair_file, None).unwrap(),
parse_command(&test_create_vote_account3, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
@ -778,7 +820,7 @@ mod tests {
&authed.to_string(),
]);
assert_eq!(
parse_command(&test_create_vote_account4, &default_keypair_file, None).unwrap(),
parse_command(&test_create_vote_account4, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
@ -803,7 +845,7 @@ mod tests {
&keypair_file,
]);
assert_eq!(
parse_command(&test_update_validator, &default_keypair_file, None).unwrap(),
parse_command(&test_update_validator, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::VoteUpdateValidator {
vote_account_pubkey: pubkey,
@ -829,7 +871,7 @@ mod tests {
parse_command(
&test_withdraw_from_vote_account,
&default_keypair_file,
None
&mut None
)
.unwrap(),
CliCommandInfo {
@ -860,7 +902,7 @@ mod tests {
parse_command(
&test_withdraw_from_vote_account,
&default_keypair_file,
None
&mut None
)
.unwrap(),
CliCommandInfo {

View File

@ -265,6 +265,7 @@ fn test_create_account_with_seed() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
@ -337,6 +338,7 @@ fn test_create_account_with_seed() {
to: to_address,
from: 0,
sign_only: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_address),
nonce_authority: 0,
@ -357,6 +359,7 @@ fn test_create_account_with_seed() {
to: to_address,
from: 0,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_address),
sign_only.blockhash,

View File

@ -811,6 +811,7 @@ fn test_stake_authorize_with_fee_payer() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: SIG_FEE,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
@ -938,6 +939,7 @@ fn test_stake_split() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
@ -1085,6 +1087,7 @@ fn test_stake_set_lockup() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
@ -1448,6 +1451,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
destination_account_pubkey: recipient_pubkey,
lamports: 42,
withdraw_authority: 0,
custodian: None,
sign_only: true,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_pubkey),
@ -1463,6 +1467,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
destination_account_pubkey: recipient_pubkey,
lamports: 42,
withdraw_authority: 0,
custodian: None,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_pubkey),

View File

@ -40,6 +40,7 @@ fn test_transfer() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
@ -68,6 +69,7 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
@ -95,6 +97,7 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@ -110,6 +113,7 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
@ -147,6 +151,7 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
nonce_hash,
@ -187,6 +192,7 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
@ -202,6 +208,7 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
sign_only.blockhash,
@ -229,6 +236,7 @@ fn test_transfer_multisession_signing() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
@ -269,6 +277,7 @@ fn test_transfer_multisession_signing() {
to: to_pubkey,
from: 1,
sign_only: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@ -293,6 +302,7 @@ fn test_transfer_multisession_signing() {
to: to_pubkey,
from: 1,
sign_only: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
@ -314,6 +324,7 @@ fn test_transfer_multisession_signing() {
to: to_pubkey,
from: 1,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,

View File

@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.1.0"
version = "1.1.8"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -19,10 +19,10 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-transaction-status = { path = "../transaction-status", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-vote-program = { path = "../programs/vote", version = "1.1.8" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@ -31,4 +31,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.0.5"
jsonrpc-http-server = "14.0.6"
solana-logger = { path = "../logger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.8" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -52,7 +52,6 @@ impl Into<TransportError> for ClientErrorKind {
pub struct ClientError {
command: Option<&'static str>,
#[source]
#[error(transparent)]
kind: ClientErrorKind,
}

View File

@ -8,7 +8,8 @@ use serde_json::{Number, Value};
use solana_sdk::{
fee_calculator::{FeeCalculator, FeeRateGovernor},
instruction::InstructionError,
transaction::{self, TransactionError},
signature::Signature,
transaction::{self, Transaction, TransactionError},
};
use solana_transaction_status::TransactionStatus;
use std::{collections::HashMap, sync::RwLock};
@ -50,17 +51,6 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
return Ok(Value::Null);
}
let val = match request {
RpcRequest::ConfirmTransaction => {
if let Some(params_array) = params.as_array() {
if let Value::String(param_string) = &params_array[0] {
Value::Bool(param_string == SIGNATURE)
} else {
Value::Null
}
} else {
Value::Null
}
}
RpcRequest::GetBalance => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: Value::Number(Number::from(50)),
@ -87,7 +77,7 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
context: RpcResponseContext { slot: 1 },
value: serde_json::to_value(FeeRateGovernor::default()).unwrap(),
})?,
RpcRequest::GetSignatureStatus => {
RpcRequest::GetSignatureStatuses => {
let status: transaction::Result<()> = if self.url == "account_in_use" {
Err(TransactionError::AccountInUse)
} else if self.url == "instruction_error" {
@ -101,10 +91,12 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
let status = if self.url == "sig_not_found" {
None
} else {
let err = status.clone().err();
Some(TransactionStatus {
status,
slot: 1,
confirmations: Some(0),
confirmations: None,
err,
})
};
serde_json::to_value(Response {
@ -114,7 +106,17 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
}
RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)),
RpcRequest::GetSlot => Value::Number(Number::from(0)),
RpcRequest::SendTransaction => Value::String(SIGNATURE.to_string()),
RpcRequest::SendTransaction => {
let signature = if self.url == "malicious" {
Signature::new(&[8; 64]).to_string()
} else {
let tx_str = params.as_array().unwrap()[0].as_str().unwrap().to_string();
let data = bs58::decode(tx_str).into_vec().unwrap();
let tx: Transaction = bincode::deserialize(&data).unwrap();
tx.signatures[0].to_string()
};
Value::String(signature)
}
RpcRequest::GetMinimumBalanceForRentExemption => Value::Number(Number::from(1234)),
_ => Value::Null,
};

View File

@ -12,7 +12,10 @@ use log::*;
use serde_json::{json, Value};
use solana_sdk::{
account::Account,
clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT},
clock::{
Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT,
MAX_HASH_AGE_IN_SECONDS,
},
commitment_config::CommitmentConfig,
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
@ -23,7 +26,9 @@ use solana_sdk::{
signers::Signers,
transaction::{self, Transaction, TransactionError},
};
use solana_transaction_status::{ConfirmedBlock, TransactionEncoding, TransactionStatus};
use solana_transaction_status::{
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
error,
@ -77,17 +82,16 @@ impl RpcClient {
signature: &Signature,
commitment_config: CommitmentConfig,
) -> RpcResult<bool> {
let response = self
.client
.send(
&RpcRequest::ConfirmTransaction,
json!([signature.to_string(), commitment_config]),
0,
)
.map_err(|err| err.into_with_command("ConfirmTransaction"))?;
let Response { context, value } = self.get_signature_statuses(&[*signature])?;
serde_json::from_value::<Response<bool>>(response)
.map_err(|err| ClientError::new_with_command(err.into(), "ConfirmTransaction"))
Ok(Response {
context,
value: value[0]
.as_ref()
.filter(|result| result.satisfies_commitment(commitment_config))
.map(|result| result.status.is_ok())
.unwrap_or_default(),
})
}
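With the change above, `confirm_transaction_with_commitment` resolves through `getSignatureStatuses` instead of the removed `confirmTransaction` request, and its `value` is only `true` when the returned status both satisfies the requested commitment and succeeded. A small usage sketch, not part of this diff; the endpoint and error handling are illustrative:

use solana_client::rpc_client::RpcClient;
use solana_sdk::{commitment_config::CommitmentConfig, signature::Signature};

fn is_confirmed(
    rpc_url: &str, // placeholder endpoint, e.g. a local test validator
    signature: &Signature,
) -> Result<bool, Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new(rpc_url.to_string());
    // `value` is false when the signature is unknown, has not yet reached the
    // requested commitment, or the transaction failed.
    let response = rpc_client
        .confirm_transaction_with_commitment(signature, CommitmentConfig::default())?;
    println!("evaluated at slot {}", response.context.slot);
    Ok(response.value)
}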
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
@ -100,9 +104,24 @@ impl RpcClient {
None => {
Err(RpcError::ForUser("Received result of an unexpected type".to_string()).into())
}
Some(signature_base58_str) => signature_base58_str
.parse::<Signature>()
.map_err(|err| RpcError::ParseError(err.to_string()).into()),
Some(signature_base58_str) => {
let signature = signature_base58_str.parse::<Signature>().map_err(|err| {
Into::<ClientError>::into(RpcError::ParseError(err.to_string()))
})?;
// A mismatching RPC response signature indicates an issue with the RPC node, and
// should not be passed along to confirmation methods. The transaction may or may
// not have been submitted to the cluster, so callers should verify the success of
// the correct transaction signature independently.
if signature != transaction.signatures[0] {
Err(RpcError::RpcRequestError(format!(
"RPC node returned mismatched signature {:?}, expected {:?}",
signature, transaction.signatures[0]
))
.into())
} else {
Ok(transaction.signatures[0])
}
}
}
}
@ -113,20 +132,56 @@ impl RpcClient {
self.get_signature_status_with_commitment(signature, CommitmentConfig::default())
}
pub fn get_signature_statuses(
&self,
signatures: &[Signature],
) -> RpcResult<Vec<Option<TransactionStatus>>> {
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
let signature_status =
self.client
.send(&RpcRequest::GetSignatureStatuses, json!([signatures]), 5)?;
Ok(serde_json::from_value(signature_status)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?)
}
pub fn get_signature_status_with_commitment(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
) -> ClientResult<Option<transaction::Result<()>>> {
let signature_status = self.client.send(
&RpcRequest::GetSignatureStatus,
json!([[signature.to_string()], commitment_config]),
&RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
5,
)?;
let result: Response<Vec<Option<TransactionStatus>>> =
serde_json::from_value(signature_status).unwrap();
serde_json::from_value(signature_status)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
Ok(result.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment_config))
.map(|status_meta| status_meta.status))
}
pub fn get_signature_status_with_commitment_and_history(
&self,
signature: &Signature,
commitment_config: CommitmentConfig,
search_transaction_history: bool,
) -> ClientResult<Option<transaction::Result<()>>> {
let signature_status = self.client.send(
&RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()], {
"searchTransactionHistory": search_transaction_history
}]),
5,
)?;
let result: Response<Vec<Option<TransactionStatus>>> =
serde_json::from_value(signature_status)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
Ok(result.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment_config))
.map(|status_meta| status_meta.status))
}
@ -227,6 +282,55 @@ impl RpcClient {
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedBlocks"))
}
pub fn get_confirmed_signatures_for_address(
&self,
address: &Pubkey,
start_slot: Slot,
end_slot: Slot,
) -> ClientResult<Vec<Signature>> {
let response = self
.client
.send(
&RpcRequest::GetConfirmedSignaturesForAddress,
json!([address.to_string(), start_slot, end_slot]),
0,
)
.map_err(|err| err.into_with_command("GetConfirmedSignaturesForAddress"))?;
let signatures_base58_str: Vec<String> =
serde_json::from_value(response).map_err(|err| {
ClientError::new_with_command(err.into(), "GetConfirmedSignaturesForAddress")
})?;
let mut signatures = vec![];
for signature_base58_str in signatures_base58_str {
signatures.push(
signature_base58_str.parse::<Signature>().map_err(|err| {
Into::<ClientError>::into(RpcError::ParseError(err.to_string()))
})?,
);
}
Ok(signatures)
}
pub fn get_confirmed_transaction(
&self,
signature: &Signature,
encoding: TransactionEncoding,
) -> ClientResult<ConfirmedTransaction> {
let response = self
.client
.send(
&RpcRequest::GetConfirmedTransaction,
json!([signature.to_string(), encoding]),
0,
)
.map_err(|err| err.into_with_command("GetConfirmedTransaction"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedTransaction"))
}
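The two new client methods compose naturally: `get_confirmed_signatures_for_address` is bounded by `MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE` (10,000 slots per request), and each returned signature can be fed to `get_confirmed_transaction`. A usage sketch, not part of this diff; the helper name and the JSON encoding choice are illustrative:

use solana_client::{client_error::ClientError, rpc_client::RpcClient};
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use solana_transaction_status::{ConfirmedTransaction, TransactionEncoding};

// Fetch every confirmed transaction that references `address` in the given
// slot range (the range must respect the server-side slot-range limit).
fn transactions_for_address(
    rpc_client: &RpcClient,
    address: &Pubkey,
    start_slot: Slot,
    end_slot: Slot,
) -> Result<Vec<ConfirmedTransaction>, ClientError> {
    let signatures =
        rpc_client.get_confirmed_signatures_for_address(address, start_slot, end_slot)?;
    signatures
        .iter()
        .map(|signature| {
            rpc_client.get_confirmed_transaction(signature, TransactionEncoding::Json)
        })
        .collect()
}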
pub fn get_block_time(&self, slot: Slot) -> ClientResult<UnixTimestamp> {
let response = self
.client
@ -855,14 +959,13 @@ impl RpcClient {
trace!("check_signature: {:?}", signature);
for _ in 0..30 {
let response = self.client.send(
&RpcRequest::ConfirmTransaction,
json!([signature.to_string(), CommitmentConfig::recent()]),
0,
);
let response =
self.confirm_transaction_with_commitment(signature, CommitmentConfig::recent());
match response {
Ok(Value::Bool(signature_status)) => {
Ok(Response {
value: signature_status,
..
}) => {
if signature_status {
trace!("Response found signature");
} else {
@ -871,12 +974,6 @@ impl RpcClient {
return signature_status;
}
Ok(other) => {
debug!(
"check_signature request failed, expected bool, got: {:?}",
other
);
}
Err(err) => {
debug!("check_signature request failed: {:?}", err);
}
@ -948,20 +1045,20 @@ impl RpcClient {
let response = self
.client
.send(
&RpcRequest::GetSignatureStatus,
json!([[signature.to_string()], CommitmentConfig::recent().ok()]),
1,
&RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
5,
)
.map_err(|err| err.into_with_command("GetSignatureStatus"))?;
let result: Response<Vec<Option<TransactionStatus>>> =
serde_json::from_value(response).unwrap();
.map_err(|err| err.into_with_command("GetSignatureStatuses"))?;
let result: Response<Vec<Option<TransactionStatus>>> = serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
let confirmations = result.value[0]
.clone()
.ok_or_else(|| {
ClientError::new_with_command(
ClientErrorKind::Custom("signature not found".to_string()),
"GetSignatureStatus",
"GetSignatureStatuses",
)
})?
.confirmations
@ -977,14 +1074,15 @@ impl RpcClient {
let mut confirmations = 0;
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message(&format!(
"[{}/{}] Waiting for confirmations",
confirmations,
MAX_LOCKOUT_HISTORY + 1,
));
let mut send_retries = 20;
let signature = loop {
progress_bar.set_message(&format!(
"[{}/{}] Finalizing transaction {}",
confirmations,
MAX_LOCKOUT_HISTORY + 1,
transaction.signatures[0],
));
let mut status_retries = 15;
let (signature, status) = loop {
let signature = self.send_transaction(transaction)?;
@ -1036,6 +1134,7 @@ impl RpcClient {
}
}
};
let now = Instant::now();
loop {
// Return when default (max) commitment is reached
// Failed transactions have already been eliminated, `is_some` check is sufficient
@ -1045,12 +1144,20 @@ impl RpcClient {
return Ok(signature);
}
progress_bar.set_message(&format!(
"[{}/{}] Waiting for confirmations",
"[{}/{}] Finalizing transaction {}",
confirmations + 1,
MAX_LOCKOUT_HISTORY + 1,
signature,
));
sleep(Duration::from_millis(500));
confirmations = self.get_num_blocks_since_signature_confirmation(&signature)?;
confirmations = self
.get_num_blocks_since_signature_confirmation(&signature)
.unwrap_or(confirmations);
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
return Err(
RpcError::ForUser("transaction not finalized. This can happen when a transaction lands in an abandoned fork. Please retry.".to_string()).into(),
);
}
}
}
@ -1088,10 +1195,7 @@ pub fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
#[cfg(test)]
mod tests {
use super::*;
use crate::{
client_error::ClientErrorKind,
mock_rpc_client_request::{PUBKEY, SIGNATURE},
};
use crate::{client_error::ClientErrorKind, mock_rpc_client_request::PUBKEY};
use assert_matches::assert_matches;
use jsonrpc_core::{Error, IoHandler, Params};
use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder};
@ -1204,12 +1308,17 @@ mod tests {
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let signature = rpc_client.send_transaction(&tx);
assert_eq!(signature.unwrap(), SIGNATURE.parse().unwrap());
assert_eq!(signature.unwrap(), tx.signatures[0]);
let rpc_client = RpcClient::new_mock("fails".to_string());
let signature = rpc_client.send_transaction(&tx);
assert!(signature.is_err());
// Test bad signature returned from rpc node
let rpc_client = RpcClient::new_mock("malicious".to_string());
let signature = rpc_client.send_transaction(&tx);
assert!(signature.is_err());
}
#[test]
fn test_get_recent_blockhash() {

View File

@ -3,7 +3,6 @@ use thiserror::Error;
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum RpcRequest {
ConfirmTransaction,
DeregisterNode,
ValidatorExit,
GetAccountInfo,
@ -12,6 +11,8 @@ pub enum RpcRequest {
GetClusterNodes,
GetConfirmedBlock,
GetConfirmedBlocks,
GetConfirmedSignaturesForAddress,
GetConfirmedTransaction,
GetEpochInfo,
GetEpochSchedule,
GetGenesisHash,
@ -22,7 +23,7 @@ pub enum RpcRequest {
GetRecentBlockhash,
GetFeeCalculatorForBlockhash,
GetFeeRateGovernor,
GetSignatureStatus,
GetSignatureStatuses,
GetSlot,
GetSlotLeader,
GetStorageTurn,
@ -41,11 +42,13 @@ pub enum RpcRequest {
MinimumLedgerSlot,
}
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
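`MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS` caps a single `getSignatureStatuses` request at 256 signatures, so a caller holding more than that has to batch. A hedged sketch built on the `get_signature_statuses` client method added above; the helper itself is not part of this diff:

use solana_client::{
    client_error::ClientError, rpc_client::RpcClient,
    rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
};
use solana_sdk::signature::Signature;
use solana_transaction_status::TransactionStatus;

// Query statuses for an arbitrary number of signatures by splitting the slice
// into chunks no larger than the per-request limit.
fn statuses_in_batches(
    rpc_client: &RpcClient,
    signatures: &[Signature],
) -> Result<Vec<Option<TransactionStatus>>, ClientError> {
    let mut statuses = Vec::with_capacity(signatures.len());
    for chunk in signatures.chunks(MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS) {
        statuses.extend(rpc_client.get_signature_statuses(chunk)?.value);
    }
    Ok(statuses)
}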
impl RpcRequest {
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
let jsonrpc = "2.0";
let method = match self {
RpcRequest::ConfirmTransaction => "confirmTransaction",
RpcRequest::DeregisterNode => "deregisterNode",
RpcRequest::ValidatorExit => "validatorExit",
RpcRequest::GetAccountInfo => "getAccountInfo",
@ -54,6 +57,8 @@ impl RpcRequest {
RpcRequest::GetClusterNodes => "getClusterNodes",
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
RpcRequest::GetEpochInfo => "getEpochInfo",
RpcRequest::GetEpochSchedule => "getEpochSchedule",
RpcRequest::GetGenesisHash => "getGenesisHash",
@ -64,7 +69,7 @@ impl RpcRequest {
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
RpcRequest::GetSignatureStatus => "getSignatureStatus",
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
RpcRequest::GetSlot => "getSlot",
RpcRequest::GetSlotLeader => "getSlotLeader",
RpcRequest::GetStorageTurn => "getStorageTurn",

View File

@ -4,7 +4,7 @@ use solana_sdk::{
clock::{Epoch, Slot},
fee_calculator::{FeeCalculator, FeeRateGovernor},
pubkey::Pubkey,
transaction::Result,
transaction::{Result, TransactionError},
};
use std::{collections::HashMap, net::SocketAddr, str::FromStr};
@ -54,6 +54,12 @@ pub struct RpcKeyedAccount {
pub account: RpcAccount,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureResult {
pub err: Option<TransactionError>,
}
/// A duplicate representation of a Message for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]

View File

@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.0"
version = "1.1.8"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@ -34,42 +34,43 @@ jsonrpc-ws-server = "14.0.6"
log = "0.4.8"
num_cpus = "1.0.0"
num-traits = "0.2"
rand = "0.6.5"
rand_chacha = "0.1.1"
rand = "0.7.0"
rand_chacha = "0.2.2"
rayon = "1.3.0"
regex = "1.3.6"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-budget-program = { path = "../programs/budget", version = "1.1.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.0" }
solana-faucet = { path = "../faucet", version = "1.1.0" }
ed25519-dalek = "=1.0.0-pre.1"
solana-ledger = { path = "../ledger", version = "1.1.0" }
solana-logger = { path = "../logger", version = "1.1.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.0" }
solana-metrics = { path = "../metrics", version = "1.1.0" }
solana-measure = { path = "../measure", version = "1.1.0" }
solana-net-utils = { path = "../net-utils", version = "1.1.0" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.0" }
solana-perf = { path = "../perf", version = "1.1.0" }
solana-runtime = { path = "../runtime", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-stake-program = { path = "../programs/stake", version = "1.1.0" }
solana-storage-program = { path = "../programs/storage", version = "1.1.0" }
solana-streamer = { path = "../streamer", version = "1.1.0" }
solana-vote-program = { path = "../programs/vote", version = "1.1.0" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.0" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.8" }
solana-budget-program = { path = "../programs/budget", version = "1.1.8" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.8" }
solana-client = { path = "../client", version = "1.1.8" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.8" }
solana-faucet = { path = "../faucet", version = "1.1.8" }
ed25519-dalek = "=1.0.0-pre.3"
solana-ledger = { path = "../ledger", version = "1.1.8" }
solana-logger = { path = "../logger", version = "1.1.8" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.8" }
solana-metrics = { path = "../metrics", version = "1.1.8" }
solana-measure = { path = "../measure", version = "1.1.8" }
solana-net-utils = { path = "../net-utils", version = "1.1.8" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.8" }
solana-perf = { path = "../perf", version = "1.1.8" }
solana-runtime = { path = "../runtime", version = "1.1.8" }
solana-sdk = { path = "../sdk", version = "1.1.8" }
solana-stake-program = { path = "../programs/stake", version = "1.1.8" }
solana-storage-program = { path = "../programs/storage", version = "1.1.8" }
solana-streamer = { path = "../streamer", version = "1.1.8" }
solana-vote-program = { path = "../programs/vote", version = "1.1.8" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.8" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.8" }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.8" }
trees = "0.2.1"
[dev-dependencies]
@ -103,3 +104,6 @@ name = "cluster_info"
[[bench]]
name = "chacha"
required-features = ["chacha"]
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -190,7 +190,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let _banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
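The hunk above is the first of many in this compare that drop the external RwLock around ClusterInfo in favor of a plain Arc, with locking moved inside the struct. A minimal sketch of that refactor pattern, using made-up NodeInfo/SharedClusterInfo types rather than the real ClusterInfo internals:

use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::{Arc, RwLock};

struct NodeInfo {
    tpu_forwards: SocketAddr,
}

struct SharedClusterInfo {
    // interior mutability: the struct guards its own state,
    // so callers share it as Arc<SharedClusterInfo> with no outer lock
    nodes: RwLock<HashMap<u64, NodeInfo>>,
}

impl SharedClusterInfo {
    // closure-based accessor, in the spirit of the lookup_contact_info call seen later
    fn lookup<T>(&self, id: u64, f: impl FnOnce(&NodeInfo) -> T) -> Option<T> {
        self.nodes.read().unwrap().get(&id).map(f)
    }
}

fn main() {
    let info = Arc::new(SharedClusterInfo {
        nodes: RwLock::new(HashMap::new()),
    });
    // before: callers did info.read().unwrap().lookup(..); after: call the method directly
    let addr = info.lookup(42, |node| node.tpu_forwards);
    assert!(addr.is_none()); // no node 42 was inserted
}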

View File

@ -3,13 +3,18 @@
extern crate test;
use rand::{thread_rng, Rng};
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
use solana_ledger::shred::Shred;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::collections::HashMap;
use std::net::UdpSocket;
use std::sync::Arc;
use std::sync::RwLock;
use std::{
collections::HashMap,
net::UdpSocket,
sync::{atomic::AtomicU64, Arc},
};
use test::Bencher;
#[bench]
@ -20,10 +25,8 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
const SHRED_SIZE: usize = 1024;
const NUM_SHREDS: usize = 32;
let shreds = vec![vec![0; SHRED_SIZE]; NUM_SHREDS];
let seeds = vec![[0u8; 32]; NUM_SHREDS];
let shreds = vec![Shred::new_empty_data_shred(); NUM_SHREDS];
let mut stakes = HashMap::new();
const NUM_PEERS: usize = 200;
for _ in 0..NUM_PEERS {
@ -33,10 +36,20 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);
}
let stakes = Arc::new(stakes);
let cluster_info = Arc::new(cluster_info);
let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes.clone()));
let shreds = Arc::new(shreds);
let last_datapoint = Arc::new(AtomicU64::new(0));
bencher.iter(move || {
let shreds = shreds.clone();
cluster_info
.broadcast_shreds(&socket, shreds, &seeds, Some(stakes.clone()))
.unwrap();
broadcast_shreds(
&socket,
&shreds,
&peers_and_stakes,
&peers,
&last_datapoint,
&mut 0,
)
.unwrap();
});
}

View File

@ -45,7 +45,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
peer_sockets.push(socket);
}
let peer_sockets = Arc::new(peer_sockets);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
let bank0 = Bank::new(&genesis_config);

View File

@ -10,29 +10,36 @@ use std::sync::{
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Duration;
pub struct AccountsCleanupService {
t_cleanup: JoinHandle<()>,
pub struct AccountsBackgroundService {
t_background: JoinHandle<()>,
}
impl AccountsCleanupService {
const INTERVAL_MS: u64 = 100;
impl AccountsBackgroundService {
pub fn new(bank_forks: Arc<RwLock<BankForks>>, exit: &Arc<AtomicBool>) -> Self {
info!("AccountsCleanupService active");
info!("AccountsBackgroundService active");
let exit = exit.clone();
let t_cleanup = Builder::new()
.name("solana-accounts-cleanup".to_string())
let t_background = Builder::new()
.name("solana-accounts-background".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
let bank = bank_forks.read().unwrap().working_bank();
bank.clean_dead_slots();
sleep(Duration::from_millis(100));
bank.process_dead_slots();
// Currently, given INTERVAL_MS, we process 1 slot/100 ms
bank.process_stale_slot();
sleep(Duration::from_millis(INTERVAL_MS));
})
.unwrap();
Self { t_cleanup }
Self { t_background }
}
pub fn join(self) -> thread::Result<()> {
self.t_cleanup.join()
self.t_background.join()
}
}
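For reference, a compact sketch of the background-service shape used by AccountsBackgroundService above: a named thread that polls an AtomicBool exit flag and does periodic work. The service name and the work closure are stand-ins, not the crate's actual API:

use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Duration;

const INTERVAL_MS: u64 = 100;

struct BackgroundService {
    handle: JoinHandle<()>,
}

impl BackgroundService {
    fn new(exit: &Arc<AtomicBool>, mut work: impl FnMut() + Send + 'static) -> Self {
        let exit = exit.clone();
        let handle = Builder::new()
            .name("background-service".to_string())
            .spawn(move || loop {
                if exit.load(Ordering::Relaxed) {
                    break;
                }
                work(); // stand-in for process_dead_slots()/process_stale_slot()
                sleep(Duration::from_millis(INTERVAL_MS));
            })
            .unwrap();
        Self { handle }
    }

    fn join(self) -> thread::Result<()> {
        self.handle.join()
    }
}

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    let svc = BackgroundService::new(&exit, || println!("tick"));
    exit.store(true, Ordering::Relaxed);
    svc.join().unwrap();
}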

View File

@ -4,7 +4,7 @@
// hash on gossip. Monitor gossip for messages from validators in the --trusted-validators
// set and halt the node if a mismatch is detected.
use crate::cluster_info::ClusterInfo;
use crate::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES};
use solana_ledger::{
snapshot_package::SnapshotPackage, snapshot_package::SnapshotPackageReceiver,
snapshot_package::SnapshotPackageSender,
@ -15,7 +15,7 @@ use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::RecvTimeoutError,
Arc, RwLock,
Arc,
},
thread::{self, Builder, JoinHandle},
time::Duration,
@ -30,7 +30,7 @@ impl AccountsHashVerifier {
snapshot_package_receiver: SnapshotPackageReceiver,
snapshot_package_sender: Option<SnapshotPackageSender>,
exit: &Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
halt_on_trusted_validators_accounts_hash_mismatch: bool,
fault_injection_rate_slots: u64,
@ -72,7 +72,7 @@ impl AccountsHashVerifier {
fn process_snapshot(
snapshot_package: SnapshotPackage,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
trusted_validators: &Option<HashSet<Pubkey>>,
halt_on_trusted_validator_accounts_hash_mismatch: bool,
snapshot_package_sender: &Option<SnapshotPackageSender>,
@ -94,6 +94,10 @@ impl AccountsHashVerifier {
hashes.push((snapshot_package.root, snapshot_package.hash));
}
while hashes.len() > MAX_SNAPSHOT_HASHES {
hashes.remove(0);
}
if halt_on_trusted_validator_accounts_hash_mismatch {
let mut slot_to_hash = HashMap::new();
for (slot, hash) in hashes.iter() {
@ -107,25 +111,21 @@ impl AccountsHashVerifier {
if sender.send(snapshot_package).is_err() {}
}
cluster_info
.write()
.unwrap()
.push_accounts_hashes(hashes.clone());
cluster_info.push_accounts_hashes(hashes.clone());
}
fn should_halt(
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
trusted_validators: &Option<HashSet<Pubkey>>,
slot_to_hash: &mut HashMap<Slot, Hash>,
) -> bool {
let mut verified_count = 0;
let mut highest_slot = 0;
if let Some(trusted_validators) = trusted_validators.as_ref() {
for trusted_validator in trusted_validators {
let cluster_info_r = cluster_info.read().unwrap();
if let Some(accounts_hashes) =
cluster_info_r.get_accounts_hash_for_node(trusted_validator)
let is_conflicting = cluster_info.get_accounts_hash_for_node(trusted_validator, |accounts_hashes|
{
for (slot, hash) in accounts_hashes {
accounts_hashes.iter().any(|(slot, hash)| {
if let Some(reference_hash) = slot_to_hash.get(slot) {
if *hash != *reference_hash {
error!("Trusted validator {} produced conflicting hashes for slot: {} ({} != {})",
@ -134,19 +134,29 @@ impl AccountsHashVerifier {
hash,
reference_hash,
);
return true;
true
} else {
verified_count += 1;
false
}
} else {
highest_slot = std::cmp::max(*slot, highest_slot);
slot_to_hash.insert(*slot, *hash);
false
}
}
})
}).unwrap_or(false);
if is_conflicting {
return true;
}
}
}
inc_new_counter_info!("accounts_hash_verifier-hashes_verified", verified_count);
datapoint_info!(
"accounts_hash_verifier",
("highest_slot_verified", highest_slot, i64),
);
false
}
@ -171,7 +181,7 @@ mod tests {
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let mut trusted_validators = HashSet::new();
let mut slot_to_hash = HashMap::new();
@ -186,8 +196,7 @@ mod tests {
let hash2 = hash(&[2]);
{
let message = make_accounts_hashes_message(&validator1, vec![(0, hash1)]).unwrap();
let mut cluster_info_w = cluster_info.write().unwrap();
cluster_info_w.push_message(message);
cluster_info.push_message(message);
}
slot_to_hash.insert(0, hash2);
trusted_validators.insert(validator1.pubkey());
@ -197,4 +206,56 @@ mod tests {
&mut slot_to_hash,
));
}
#[test]
fn test_max_hashes() {
solana_logger::setup();
use std::path::PathBuf;
use tempfile::TempDir;
let keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = Arc::new(cluster_info);
let trusted_validators = HashSet::new();
let exit = Arc::new(AtomicBool::new(false));
let mut hashes = vec![];
for i in 0..MAX_SNAPSHOT_HASHES + 1 {
let snapshot_links = TempDir::new().unwrap();
let snapshot_package = SnapshotPackage {
hash: hash(&[i as u8]),
root: 100 + i as u64,
slot_deltas: vec![],
snapshot_links,
tar_output_file: PathBuf::from("."),
storages: vec![],
};
AccountsHashVerifier::process_snapshot(
snapshot_package,
&cluster_info,
&Some(trusted_validators.clone()),
false,
&None,
&mut hashes,
&exit,
0,
);
}
let cluster_hashes = cluster_info
.get_accounts_hash_for_node(&keypair.pubkey(), |c| c.clone())
.unwrap();
info!("{:?}", cluster_hashes);
assert_eq!(hashes.len(), MAX_SNAPSHOT_HASHES);
assert_eq!(cluster_hashes.len(), MAX_SNAPSHOT_HASHES);
assert_eq!(cluster_hashes[0], (101, hash(&[1])));
assert_eq!(
cluster_hashes[MAX_SNAPSHOT_HASHES - 1],
(
100 + MAX_SNAPSHOT_HASHES as u64,
hash(&[MAX_SNAPSHOT_HASHES as u8])
)
);
}
}
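A small, self-contained illustration of the bounded-history idiom process_snapshot now uses for gossiped hashes (push, then drop the oldest while over the cap). The constant value below is illustrative, not necessarily the crate's MAX_SNAPSHOT_HASHES:

const MAX_SNAPSHOT_HASHES: usize = 16; // illustrative bound

fn push_bounded(hashes: &mut Vec<(u64, u64)>, entry: (u64, u64)) {
    hashes.push(entry);
    while hashes.len() > MAX_SNAPSHOT_HASHES {
        hashes.remove(0); // O(n) shift; fine for a small, fixed bound
    }
}

fn main() {
    let mut hashes = Vec::new();
    for i in 0..(MAX_SNAPSHOT_HASHES as u64 + 1) {
        push_bounded(&mut hashes, (100 + i, i));
    }
    assert_eq!(hashes.len(), MAX_SNAPSHOT_HASHES);
    assert_eq!(hashes[0].0, 101); // the oldest entry was evicted
}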

View File

@ -41,7 +41,7 @@ use std::{
net::UdpSocket,
sync::atomic::AtomicBool,
sync::mpsc::Receiver,
sync::{Arc, Mutex, RwLock},
sync::{Arc, Mutex},
thread::{self, Builder, JoinHandle},
time::Duration,
time::Instant,
@ -76,7 +76,7 @@ impl BankingStage {
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
#[allow(clippy::new_ret_no_self)]
pub fn new(
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &Arc<ClusterInfo>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
@ -93,7 +93,7 @@ impl BankingStage {
}
fn new_num_threads(
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &Arc<ClusterInfo>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
@ -104,7 +104,7 @@ impl BankingStage {
// Single thread to generate entries from many banks.
// This thread talks to poh_service and broadcasts the entries once they have been recorded.
// Once an entry has been recorded, its blockhash is registered with the bank.
let my_pubkey = cluster_info.read().unwrap().id();
let my_pubkey = cluster_info.id();
// Many banks that process transactions in parallel.
let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
.map(|i| {
@ -287,7 +287,7 @@ impl BankingStage {
my_pubkey: &Pubkey,
socket: &std::net::UdpSocket,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
buffered_packets: &mut Vec<PacketsAndOffsets>,
enable_forwarding: bool,
batch_limit: usize,
@ -331,10 +331,7 @@ impl BankingStage {
next_leader.map_or((), |leader_pubkey| {
let leader_addr = {
cluster_info
.read()
.unwrap()
.lookup(&leader_pubkey)
.map(|leader| leader.tpu_forwards)
.lookup_contact_info(&leader_pubkey, |leader| leader.tpu_forwards)
};
leader_addr.map_or((), |leader_addr| {
@ -358,7 +355,7 @@ impl BankingStage {
my_pubkey: Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
recv_start: &mut Instant,
enable_forwarding: bool,
id: u32,
@ -1049,7 +1046,7 @@ mod tests {
let (exit, poh_recorder, poh_service, _entry_receiever) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
@ -1089,7 +1086,7 @@ mod tests {
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
@ -1152,7 +1149,7 @@ mod tests {
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
@ -1293,7 +1290,7 @@ mod tests {
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info =
ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let _banking_stage = BankingStage::new_num_threads(
&cluster_info,
&poh_recorder,
@ -1982,13 +1979,23 @@ mod tests {
{
if let EncodedTransaction::Json(transaction) = transaction {
if transaction.signatures[0] == success_signature.to_string() {
assert_eq!(meta.unwrap().status, Ok(()));
let meta = meta.unwrap();
assert_eq!(meta.err, None);
assert_eq!(meta.status, Ok(()));
} else if transaction.signatures[0] == ix_error_signature.to_string() {
let meta = meta.unwrap();
assert_eq!(
meta.unwrap().status,
meta.err,
Some(TransactionError::InstructionError(
0,
InstructionError::Custom(1)
))
);
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
InstructionError::Custom(1)
))
);
} else {
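The test changes above exercise the new `err` field alongside the legacy `status` field of the transaction metadata, with CustomError renamed to Custom. A simplified, hypothetical model of that err/status duality (the real enums live in the SDK and transaction-status crates):

#[derive(Debug, Clone, PartialEq)]
enum InstructionError {
    Custom(u32), // renamed from CustomError in this release line
}

#[derive(Debug, Clone, PartialEq)]
enum TransactionError {
    InstructionError(u8, InstructionError),
}

struct TransactionStatusMeta {
    err: Option<TransactionError>,
    status: Result<(), TransactionError>,
}

impl TransactionStatusMeta {
    // the two fields carry the same outcome in different shapes and must agree
    fn from_result(status: Result<(), TransactionError>) -> Self {
        Self {
            err: status.clone().err(),
            status,
        }
    }
}

fn main() {
    let ok = TransactionStatusMeta::from_result(Ok(()));
    assert_eq!(ok.err, None);

    let failed = TransactionStatusMeta::from_result(Err(TransactionError::InstructionError(
        0,
        InstructionError::Custom(1),
    )));
    assert_eq!(
        failed.err,
        Some(TransactionError::InstructionError(0, InstructionError::Custom(1)))
    );
}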

View File

@ -1,9 +1,12 @@
//! A stage to broadcast data from a leader node to validators
use self::{
broadcast_fake_shreds_run::BroadcastFakeShredsRun,
broadcast_fake_shreds_run::BroadcastFakeShredsRun, broadcast_metrics::*,
fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
standard_broadcast_run::StandardBroadcastRun,
};
use crate::contact_info::ContactInfo;
use crate::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use crate::weighted_shuffle::weighted_best;
use crate::{
cluster_info::{ClusterInfo, ClusterInfoError},
poh_recorder::WorkingBankEntry,
@ -14,27 +17,34 @@ use crossbeam_channel::{
Sender as CrossbeamSender,
};
use solana_ledger::{blockstore::Blockstore, shred::Shred, staking_utils};
use solana_measure::measure::Measure;
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
use solana_runtime::bank::Bank;
use solana_sdk::timing::timestamp;
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use solana_streamer::sendmmsg::send_mmsg;
use std::sync::atomic::AtomicU64;
use std::{
collections::HashMap,
net::UdpSocket,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::{channel, Receiver, RecvError, RecvTimeoutError, Sender},
sync::{Arc, Mutex, RwLock},
sync::{Arc, Mutex},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
mod broadcast_fake_shreds_run;
pub(crate) mod broadcast_metrics;
pub(crate) mod broadcast_utils;
mod fail_entry_verification_broadcast_run;
mod standard_broadcast_run;
pub const NUM_INSERT_THREADS: usize = 2;
pub type RetransmitSlotsSender = CrossbeamSender<HashMap<Slot, Arc<Bank>>>;
pub type RetransmitSlotsReceiver = CrossbeamReceiver<HashMap<Slot, Arc<Bank>>>;
pub(crate) const NUM_INSERT_THREADS: usize = 2;
pub(crate) type RetransmitSlotsSender = CrossbeamSender<HashMap<Slot, Arc<Bank>>>;
pub(crate) type RetransmitSlotsReceiver = CrossbeamReceiver<HashMap<Slot, Arc<Bank>>>;
pub(crate) type RecordReceiver = Receiver<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>;
pub(crate) type TransmitReceiver = Receiver<(TransmitShreds, Option<BroadcastShredBatchInfo>)>;
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum BroadcastStageReturnType {
@ -52,14 +62,14 @@ impl BroadcastStageType {
pub fn new_broadcast_stage(
&self,
sock: Vec<UdpSocket>,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
receiver: Receiver<WorkingBankEntry>,
retransmit_slots_receiver: RetransmitSlotsReceiver,
exit_sender: &Arc<AtomicBool>,
blockstore: &Arc<Blockstore>,
shred_version: u16,
) -> BroadcastStage {
let keypair = cluster_info.read().unwrap().keypair.clone();
let keypair = cluster_info.keypair.clone();
match self {
BroadcastStageType::Standard => BroadcastStage::new(
sock,
@ -100,18 +110,18 @@ trait BroadcastRun {
&mut self,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
) -> Result<()>;
fn transmit(
&self,
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
&mut self,
receiver: &Arc<Mutex<TransmitReceiver>>,
cluster_info: &ClusterInfo,
sock: &UdpSocket,
) -> Result<()>;
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
&mut self,
receiver: &Arc<Mutex<RecordReceiver>>,
blockstore: &Arc<Blockstore>,
) -> Result<()>;
}
@ -143,8 +153,8 @@ impl BroadcastStage {
fn run(
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
mut broadcast_stage_run: impl BroadcastRun,
) -> BroadcastStageReturnType {
loop {
@ -195,7 +205,7 @@ impl BroadcastStage {
#[allow(clippy::too_many_arguments)]
fn new(
socks: Vec<UdpSocket>,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
receiver: Receiver<WorkingBankEntry>,
retransmit_slots_receiver: RetransmitSlotsReceiver,
exit_sender: &Arc<AtomicBool>,
@ -226,7 +236,7 @@ impl BroadcastStage {
let socket_receiver = Arc::new(Mutex::new(socket_receiver));
for sock in socks.into_iter() {
let socket_receiver = socket_receiver.clone();
let bs_transmit = broadcast_stage_run.clone();
let mut bs_transmit = broadcast_stage_run.clone();
let cluster_info = cluster_info.clone();
let t = Builder::new()
.name("solana-broadcaster-transmit".to_string())
@ -243,7 +253,7 @@ impl BroadcastStage {
let blockstore_receiver = Arc::new(Mutex::new(blockstore_receiver));
for _ in 0..NUM_INSERT_THREADS {
let blockstore_receiver = blockstore_receiver.clone();
let bs_record = broadcast_stage_run.clone();
let mut bs_record = broadcast_stage_run.clone();
let btree = blockstore.clone();
let t = Builder::new()
.name("solana-broadcaster-record".to_string())
@ -282,7 +292,7 @@ impl BroadcastStage {
fn check_retransmit_signals(
blockstore: &Blockstore,
retransmit_slots_receiver: &RetransmitSlotsReceiver,
socket_sender: &Sender<TransmitShreds>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
) -> Result<()> {
let timer = Duration::from_millis(100);
@ -303,7 +313,7 @@ impl BroadcastStage {
);
if !data_shreds.is_empty() {
socket_sender.send((stakes.clone(), data_shreds))?;
socket_sender.send(((stakes.clone(), data_shreds), None))?;
}
let coding_shreds = Arc::new(
@ -313,7 +323,7 @@ impl BroadcastStage {
);
if !coding_shreds.is_empty() {
socket_sender.send((stakes.clone(), coding_shreds))?;
socket_sender.send(((stakes.clone(), coding_shreds), None))?;
}
}
@ -328,6 +338,99 @@ impl BroadcastStage {
}
}
fn update_peer_stats(
num_live_peers: i64,
broadcast_len: i64,
last_datapoint_submit: &Arc<AtomicU64>,
) {
let now = timestamp();
let last = last_datapoint_submit.load(Ordering::Relaxed);
if now - last > 1000
&& last_datapoint_submit.compare_and_swap(last, now, Ordering::Relaxed) == last
{
datapoint_info!(
"cluster_info-num_nodes",
("live_count", num_live_peers, i64),
("broadcast_count", broadcast_len, i64)
);
}
}
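update_peer_stats above rate-limits datapoint submission with a compare_and_swap on a shared timestamp: the thread that wins the swap reports, everyone else skips. A standalone sketch of that once-per-second throttle; it uses compare_exchange (the equivalent API in current Rust) and a local timestamp helper rather than solana_sdk::timing::timestamp:

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

fn timestamp_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_millis() as u64
}

fn maybe_report(last_submit: &Arc<AtomicU64>, report: impl FnOnce()) {
    let now = timestamp_ms();
    let last = last_submit.load(Ordering::Relaxed);
    // only the thread that successfully advances the timestamp gets to report
    if now.saturating_sub(last) > 1000
        && last_submit
            .compare_exchange(last, now, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    {
        report();
    }
}

fn main() {
    let last = Arc::new(AtomicU64::new(0));
    maybe_report(&last, || println!("datapoint submitted"));
    maybe_report(&last, || println!("suppressed within the same second")); // not printed
}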
pub fn get_broadcast_peers<S: std::hash::BuildHasher>(
cluster_info: &ClusterInfo,
stakes: Option<Arc<HashMap<Pubkey, u64, S>>>,
) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
use crate::cluster_info;
let mut peers = cluster_info.tvu_peers();
let peers_and_stakes = cluster_info::stake_weight_peers(&mut peers, stakes);
(peers, peers_and_stakes)
}
/// broadcast messages from the leader to layer 1 nodes
/// # Remarks
pub fn broadcast_shreds(
s: &UdpSocket,
shreds: &Arc<Vec<Shred>>,
peers_and_stakes: &[(u64, usize)],
peers: &[ContactInfo],
last_datapoint_submit: &Arc<AtomicU64>,
send_mmsg_total: &mut u64,
) -> Result<()> {
let broadcast_len = peers_and_stakes.len();
if broadcast_len == 0 {
update_peer_stats(1, 1, last_datapoint_submit);
return Ok(());
}
let packets: Vec<_> = shreds
.iter()
.map(|shred| {
let broadcast_index = weighted_best(&peers_and_stakes, shred.seed());
(&shred.payload, &peers[broadcast_index].tvu)
})
.collect();
let mut sent = 0;
let mut send_mmsg_time = Measure::start("send_mmsg");
while sent < packets.len() {
match send_mmsg(s, &packets[sent..]) {
Ok(n) => sent += n,
Err(e) => {
return Err(Error::IO(e));
}
}
}
send_mmsg_time.stop();
*send_mmsg_total += send_mmsg_time.as_us();
let num_live_peers = num_live_peers(&peers);
update_peer_stats(
num_live_peers,
broadcast_len as i64 + 1,
last_datapoint_submit,
);
Ok(())
}
fn distance(a: u64, b: u64) -> u64 {
if a > b {
a - b
} else {
b - a
}
}
fn num_live_peers(peers: &[ContactInfo]) -> i64 {
let mut num_live_peers = 1i64;
peers.iter().for_each(|p| {
// A peer is considered live if they generated their contact info recently
if distance(timestamp(), p.wallclock) <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
num_live_peers += 1;
}
});
num_live_peers
}
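A runnable illustration of the liveness heuristic in num_live_peers: a peer counts as live if its gossiped wallclock is within the pull timeout of the local clock in either direction (distance is symmetric, so skew both ways is tolerated), and the local node always counts itself. The timeout constant below is a placeholder for CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS:

const TIMEOUT_MS: u64 = 15_000; // illustrative placeholder

fn distance(a: u64, b: u64) -> u64 {
    if a > b { a - b } else { b - a }
}

fn num_live(now_ms: u64, peer_wallclocks: &[u64]) -> i64 {
    let mut live = 1i64; // start at 1: the local node counts itself
    for &wc in peer_wallclocks {
        if distance(now_ms, wc) <= TIMEOUT_MS {
            live += 1;
        }
    }
    live
}

fn main() {
    let now = 1_000_000;
    // one fresh peer, one stale peer, one peer with a nonsense clock
    assert_eq!(num_live(now, &[now - 1, now - TIMEOUT_MS - 1, u64::MAX]), 2);
}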
#[cfg(test)]
pub mod test {
use super::*;
@ -347,11 +450,7 @@ pub mod test {
signature::{Keypair, Signer},
};
use std::{
path::Path,
sync::atomic::AtomicBool,
sync::mpsc::channel,
sync::{Arc, RwLock},
thread::sleep,
path::Path, sync::atomic::AtomicBool, sync::mpsc::channel, sync::Arc, thread::sleep,
};
pub fn make_transmit_shreds(
@ -386,13 +485,13 @@ pub mod test {
}
fn check_all_shreds_received(
transmit_receiver: &Receiver<TransmitShreds>,
transmit_receiver: &TransmitReceiver,
mut data_index: u64,
mut coding_index: u64,
num_expected_data_shreds: u64,
num_expected_coding_shreds: u64,
) {
while let Ok(new_retransmit_slots) = transmit_receiver.try_recv() {
while let Ok((new_retransmit_slots, _)) = transmit_receiver.try_recv() {
if new_retransmit_slots.1[0].is_data() {
for data_shred in new_retransmit_slots.1.iter() {
assert_eq!(data_shred.index() as u64, data_index);
@ -411,6 +510,17 @@ pub mod test {
assert_eq!(num_expected_coding_shreds, coding_index);
}
#[test]
fn test_num_live_peers() {
let mut ci = ContactInfo::default();
ci.wallclock = std::u64::MAX;
assert_eq!(num_live_peers(&[ci.clone()]), 1);
ci.wallclock = timestamp() - 1;
assert_eq!(num_live_peers(&[ci.clone()]), 2);
ci.wallclock = timestamp() - CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS - 1;
assert_eq!(num_live_peers(&[ci]), 1);
}
#[test]
fn test_duplicate_retransmit_signal() {
// Setup
@ -484,16 +594,16 @@ pub mod test {
let broadcast_buddy = Node::new_localhost_with_pubkey(&buddy_keypair.pubkey());
// Fill the cluster_info with the buddy's info
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
cluster_info.insert_info(broadcast_buddy.info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let exit_sender = Arc::new(AtomicBool::new(false));
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let leader_keypair = cluster_info.read().unwrap().keypair.clone();
let leader_keypair = cluster_info.keypair.clone();
// Start up the broadcast stage
let broadcast_service = BroadcastStage::new(
leader_info.sockets.broadcast,

View File

@ -28,8 +28,8 @@ impl BroadcastRun for BroadcastFakeShredsRun {
&mut self,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
) -> Result<()> {
// 1) Pull entries from banking stage
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
@ -83,26 +83,32 @@ impl BroadcastRun for BroadcastFakeShredsRun {
}
let data_shreds = Arc::new(data_shreds);
blockstore_sender.send(data_shreds.clone())?;
blockstore_sender.send((data_shreds.clone(), None))?;
// 3) Start broadcast step
//some indicates fake shreds
socket_sender.send((Some(Arc::new(HashMap::new())), Arc::new(fake_data_shreds)))?;
socket_sender.send((Some(Arc::new(HashMap::new())), Arc::new(fake_coding_shreds)))?;
socket_sender.send((
(Some(Arc::new(HashMap::new())), Arc::new(fake_data_shreds)),
None,
))?;
socket_sender.send((
(Some(Arc::new(HashMap::new())), Arc::new(fake_coding_shreds)),
None,
))?;
//none indicates real shreds
socket_sender.send((None, data_shreds))?;
socket_sender.send((None, Arc::new(coding_shreds)))?;
socket_sender.send(((None, data_shreds), None))?;
socket_sender.send(((None, Arc::new(coding_shreds)), None))?;
Ok(())
}
fn transmit(
&self,
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
&mut self,
receiver: &Arc<Mutex<TransmitReceiver>>,
cluster_info: &ClusterInfo,
sock: &UdpSocket,
) -> Result<()> {
for (stakes, data_shreds) in receiver.lock().unwrap().iter() {
let peers = cluster_info.read().unwrap().tvu_peers();
for ((stakes, data_shreds), _) in receiver.lock().unwrap().iter() {
let peers = cluster_info.tvu_peers();
peers.iter().enumerate().for_each(|(i, peer)| {
if i <= self.partition && stakes.is_some() {
// Send fake shreds to the first N peers
@ -119,11 +125,11 @@ impl BroadcastRun for BroadcastFakeShredsRun {
Ok(())
}
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
&mut self,
receiver: &Arc<Mutex<RecordReceiver>>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
for data_shreds in receiver.lock().unwrap().iter() {
for (data_shreds, _) in receiver.lock().unwrap().iter() {
blockstore.insert_shreds(data_shreds.to_vec(), None, true)?;
}
Ok(())
@ -139,7 +145,7 @@ mod tests {
#[test]
fn test_tvu_peers_ordering() {
let mut cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
));

View File

@ -0,0 +1,288 @@
use super::*;
pub(crate) trait BroadcastStats {
fn update(&mut self, new_stats: &Self);
fn report_stats(&mut self, slot: Slot, slot_start: Instant);
}
#[derive(Clone)]
pub(crate) struct BroadcastShredBatchInfo {
pub(crate) slot: Slot,
pub(crate) num_expected_batches: Option<usize>,
pub(crate) slot_start_ts: Instant,
}
#[derive(Default, Clone)]
pub(crate) struct ProcessShredsStats {
// Per-slot elapsed time
pub(crate) shredding_elapsed: u64,
pub(crate) receive_elapsed: u64,
}
impl ProcessShredsStats {
pub(crate) fn update(&mut self, new_stats: &ProcessShredsStats) {
self.shredding_elapsed += new_stats.shredding_elapsed;
self.receive_elapsed += new_stats.receive_elapsed;
}
pub(crate) fn reset(&mut self) {
*self = Self::default();
}
}
#[derive(Default, Clone)]
pub(crate) struct TransmitShredsStats {
pub(crate) transmit_elapsed: u64,
pub(crate) send_mmsg_elapsed: u64,
pub(crate) get_peers_elapsed: u64,
pub(crate) num_shreds: usize,
}
impl BroadcastStats for TransmitShredsStats {
fn update(&mut self, new_stats: &TransmitShredsStats) {
self.transmit_elapsed += new_stats.transmit_elapsed;
self.send_mmsg_elapsed += new_stats.send_mmsg_elapsed;
self.get_peers_elapsed += new_stats.get_peers_elapsed;
self.num_shreds += new_stats.num_shreds;
}
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
datapoint_info!(
"broadcast-transmit-shreds-stats",
("slot", slot as i64, i64),
(
"end_to_end_elapsed",
// `slot_start` signals when the first batch of shreds was
// received, used to measure duration of broadcast
slot_start.elapsed().as_micros() as i64,
i64
),
("transmit_elapsed", self.transmit_elapsed as i64, i64),
("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
("num_shreds", self.num_shreds as i64, i64),
);
}
}
#[derive(Default, Clone)]
pub(crate) struct InsertShredsStats {
pub(crate) insert_shreds_elapsed: u64,
pub(crate) num_shreds: usize,
}
impl BroadcastStats for InsertShredsStats {
fn update(&mut self, new_stats: &InsertShredsStats) {
self.insert_shreds_elapsed += new_stats.insert_shreds_elapsed;
self.num_shreds += new_stats.num_shreds;
}
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
datapoint_info!(
"broadcast-insert-shreds-stats",
("slot", slot as i64, i64),
(
"end_to_end_elapsed",
// `slot_start` signals when the first batch of shreds was
// received, used to measure duration of broadcast
slot_start.elapsed().as_micros() as i64,
i64
),
(
"insert_shreds_elapsed",
self.insert_shreds_elapsed as i64,
i64
),
("num_shreds", self.num_shreds as i64, i64),
);
}
}
// Tracks metrics of type `T` across multiple threads
#[derive(Default)]
pub(crate) struct BatchCounter<T: BroadcastStats + Default> {
// The number of batches processed across all threads so far
num_batches: usize,
// Filled in when the last batch of shreds is received,
// signals how many batches of shreds to expect
num_expected_batches: Option<usize>,
broadcast_shred_stats: T,
}
impl<T: BroadcastStats + Default> BatchCounter<T> {
#[cfg(test)]
pub(crate) fn num_batches(&self) -> usize {
self.num_batches
}
}
#[derive(Default)]
pub(crate) struct SlotBroadcastStats<T: BroadcastStats + Default>(HashMap<Slot, BatchCounter<T>>);
impl<T: BroadcastStats + Default> SlotBroadcastStats<T> {
#[cfg(test)]
pub(crate) fn get(&self, slot: Slot) -> Option<&BatchCounter<T>> {
self.0.get(&slot)
}
pub(crate) fn update(&mut self, new_stats: &T, batch_info: &Option<BroadcastShredBatchInfo>) {
if let Some(batch_info) = batch_info {
let mut should_delete = false;
{
let slot_batch_counter = self.0.entry(batch_info.slot).or_default();
slot_batch_counter.broadcast_shred_stats.update(new_stats);
// Only count the ones where `broadcast_shred_batch_info`.is_some(), because
// there could potentially be other `retransmit` slots inserted into the
// transmit pipeline (signaled by ReplayStage) that are not created by the
// main shredding/broadcast pipeline
slot_batch_counter.num_batches += 1;
if let Some(num_expected_batches) = batch_info.num_expected_batches {
slot_batch_counter.num_expected_batches = Some(num_expected_batches);
}
if let Some(num_expected_batches) = slot_batch_counter.num_expected_batches {
if slot_batch_counter.num_batches == num_expected_batches {
slot_batch_counter
.broadcast_shred_stats
.report_stats(batch_info.slot, batch_info.slot_start_ts);
should_delete = true;
}
}
}
if should_delete {
self.0
.remove(&batch_info.slot)
.expect("delete should be successful");
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[derive(Default)]
struct TestStats {
sender: Option<Sender<(usize, Slot, Instant)>>,
count: usize,
}
impl BroadcastStats for TestStats {
fn update(&mut self, new_stats: &TestStats) {
self.count += new_stats.count;
self.sender = new_stats.sender.clone();
}
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
self.sender
.as_ref()
.unwrap()
.send((self.count, slot, slot_start))
.unwrap()
}
}
#[test]
fn test_update() {
let start = Instant::now();
let mut slot_broadcast_stats = SlotBroadcastStats::default();
slot_broadcast_stats.update(
&TransmitShredsStats {
transmit_elapsed: 1,
get_peers_elapsed: 1,
send_mmsg_elapsed: 1,
num_shreds: 1,
},
&Some(BroadcastShredBatchInfo {
slot: 0,
num_expected_batches: Some(2),
slot_start_ts: start.clone(),
}),
);
// Singular update
let slot_0_stats = slot_broadcast_stats.0.get(&0).unwrap();
assert_eq!(slot_0_stats.num_batches, 1);
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
slot_broadcast_stats.update(
&TransmitShredsStats {
transmit_elapsed: 1,
get_peers_elapsed: 1,
send_mmsg_elapsed: 1,
num_shreds: 1,
},
&None,
);
// If BroadcastShredBatchInfo == None, then update should be ignored
let slot_0_stats = slot_broadcast_stats.0.get(&0).unwrap();
assert_eq!(slot_0_stats.num_batches, 1);
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
// If another batch is given, then total number of batches == num_expected_batches == 2,
// so the batch should be purged from the HashMap
slot_broadcast_stats.update(
&TransmitShredsStats {
transmit_elapsed: 1,
get_peers_elapsed: 1,
send_mmsg_elapsed: 1,
num_shreds: 1,
},
&Some(BroadcastShredBatchInfo {
slot: 0,
num_expected_batches: None,
slot_start_ts: start.clone(),
}),
);
assert!(slot_broadcast_stats.0.get(&0).is_none());
}
#[test]
fn test_update_multi_threaded() {
for round in 0..50 {
let start = Instant::now();
let slot_broadcast_stats = Arc::new(Mutex::new(SlotBroadcastStats::default()));
let num_threads = 5;
let slot = 0;
let (sender, receiver) = channel();
let thread_handles: Vec<_> = (0..num_threads)
.into_iter()
.map(|i| {
let slot_broadcast_stats = slot_broadcast_stats.clone();
let sender = Some(sender.clone());
let test_stats = TestStats { sender, count: 1 };
let mut broadcast_batch_info = BroadcastShredBatchInfo {
slot,
num_expected_batches: None,
slot_start_ts: start.clone(),
};
if i == round % num_threads {
broadcast_batch_info.num_expected_batches = Some(num_threads);
}
Builder::new()
.name("test_update_multi_threaded".to_string())
.spawn(move || {
slot_broadcast_stats
.lock()
.unwrap()
.update(&test_stats, &Some(broadcast_batch_info))
})
.unwrap()
})
.collect();
for t in thread_handles {
t.join().unwrap();
}
assert!(slot_broadcast_stats.lock().unwrap().0.get(&slot).is_none());
let (returned_count, returned_slot, returned_instant) = receiver.recv().unwrap();
assert_eq!(returned_count, num_threads);
assert_eq!(returned_slot, slot);
assert_eq!(returned_instant, start);
}
}
}
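A condensed sketch of the per-slot bookkeeping this new broadcast_metrics module introduces: batch counters accumulate until the observed batch count equals num_expected_batches, then the slot is reported and evicted. Types and the report side effect are simplified stand-ins for BatchCounter/SlotBroadcastStats:

use std::collections::HashMap;

#[derive(Default)]
struct BatchCounter {
    num_batches: usize,
    num_expected_batches: Option<usize>,
    total: u64, // stand-in for the aggregated BroadcastStats
}

#[derive(Default)]
struct SlotStats(HashMap<u64, BatchCounter>);

impl SlotStats {
    fn update(&mut self, slot: u64, value: u64, num_expected_batches: Option<usize>) {
        let done = {
            let counter = self.0.entry(slot).or_default();
            counter.total += value;
            counter.num_batches += 1;
            if num_expected_batches.is_some() {
                counter.num_expected_batches = num_expected_batches;
            }
            counter.num_expected_batches == Some(counter.num_batches)
        };
        if done {
            // report_stats stand-in, then purge the slot from the map
            let counter = self.0.remove(&slot).unwrap();
            println!("slot {} complete: total={}", slot, counter.total);
        }
    }
}

fn main() {
    let mut stats = SlotStats::default();
    stats.update(0, 1, Some(2)); // first batch announces two expected batches
    stats.update(0, 1, None);    // second batch completes the slot and evicts it
    assert!(stats.0.get(&0).is_none());
}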

View File

@ -23,8 +23,8 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
&mut self,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
) -> Result<()> {
// 1) Pull entries from banking stage
let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?;
@ -61,38 +61,44 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
);
let data_shreds = Arc::new(data_shreds);
blockstore_sender.send(data_shreds.clone())?;
blockstore_sender.send((data_shreds.clone(), None))?;
// 3) Start broadcast step
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
let stakes = stakes.map(Arc::new);
socket_sender.send((stakes.clone(), data_shreds))?;
socket_sender.send((stakes, Arc::new(coding_shreds)))?;
socket_sender.send(((stakes.clone(), data_shreds), None))?;
socket_sender.send(((stakes, Arc::new(coding_shreds)), None))?;
Ok(())
}
fn transmit(
&self,
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
&mut self,
receiver: &Arc<Mutex<TransmitReceiver>>,
cluster_info: &ClusterInfo,
sock: &UdpSocket,
) -> Result<()> {
let (stakes, shreds) = receiver.lock().unwrap().recv()?;
let all_seeds: Vec<[u8; 32]> = shreds.iter().map(|s| s.seed()).collect();
let ((stakes, shreds), _) = receiver.lock().unwrap().recv()?;
// Broadcast data
let all_shred_bufs: Vec<Vec<u8>> = shreds.to_vec().into_iter().map(|s| s.payload).collect();
cluster_info
.write()
.unwrap()
.broadcast_shreds(sock, all_shred_bufs, &all_seeds, stakes)?;
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
let mut send_mmsg_total = 0;
broadcast_shreds(
sock,
&shreds,
&peers_and_stakes,
&peers,
&Arc::new(AtomicU64::new(0)),
&mut send_mmsg_total,
)?;
Ok(())
}
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
&mut self,
receiver: &Arc<Mutex<RecordReceiver>>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
let all_shreds = receiver.lock().unwrap().recv()?;
let (all_shreds, _) = receiver.lock().unwrap().recv()?;
blockstore
.insert_shreds(all_shreds.to_vec(), None, true)
.expect("Failed to insert shreds in blockstore");

View File

@ -1,5 +1,7 @@
use super::broadcast_utils::{self, ReceiveResults};
use super::*;
use super::{
broadcast_utils::{self, ReceiveResults},
*,
};
use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
use solana_ledger::{
entry::Entry,
@ -9,45 +11,33 @@ use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
use std::collections::HashMap;
use std::time::Duration;
#[derive(Default)]
struct BroadcastStats {
// Per-slot elapsed time
shredding_elapsed: u64,
insert_shreds_elapsed: u64,
broadcast_elapsed: u64,
receive_elapsed: u64,
seed_elapsed: u64,
}
impl BroadcastStats {
fn reset(&mut self) {
self.insert_shreds_elapsed = 0;
self.shredding_elapsed = 0;
self.broadcast_elapsed = 0;
self.receive_elapsed = 0;
self.seed_elapsed = 0;
}
}
#[derive(Clone)]
pub(super) struct StandardBroadcastRun {
stats: Arc<RwLock<BroadcastStats>>,
pub struct StandardBroadcastRun {
process_shreds_stats: ProcessShredsStats,
transmit_shreds_stats: Arc<Mutex<SlotBroadcastStats<TransmitShredsStats>>>,
insert_shreds_stats: Arc<Mutex<SlotBroadcastStats<InsertShredsStats>>>,
unfinished_slot: Option<UnfinishedSlotInfo>,
current_slot_and_parent: Option<(u64, u64)>,
slot_broadcast_start: Option<Instant>,
keypair: Arc<Keypair>,
shred_version: u16,
last_datapoint_submit: Arc<AtomicU64>,
num_batches: usize,
}
impl StandardBroadcastRun {
pub(super) fn new(keypair: Arc<Keypair>, shred_version: u16) -> Self {
Self {
stats: Arc::new(RwLock::new(BroadcastStats::default())),
process_shreds_stats: ProcessShredsStats::default(),
transmit_shreds_stats: Arc::new(Mutex::new(SlotBroadcastStats::default())),
insert_shreds_stats: Arc::new(Mutex::new(SlotBroadcastStats::default())),
unfinished_slot: None,
current_slot_and_parent: None,
slot_broadcast_start: None,
keypair,
shred_version,
last_datapoint_submit: Arc::new(AtomicU64::new(0)),
num_batches: 0,
}
}
@ -130,7 +120,7 @@ impl StandardBroadcastRun {
#[cfg(test)]
fn test_process_receive_results(
&mut self,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
sock: &UdpSocket,
blockstore: &Arc<Blockstore>,
receive_results: ReceiveResults,
@ -142,6 +132,7 @@ impl StandardBroadcastRun {
let brecv = Arc::new(Mutex::new(brecv));
//data
let _ = self.transmit(&srecv, cluster_info, sock);
let _ = self.record(&brecv, blockstore);
//coding
let _ = self.transmit(&srecv, cluster_info, sock);
let _ = self.record(&brecv, blockstore);
@ -151,8 +142,8 @@ impl StandardBroadcastRun {
fn process_receive_results(
&mut self,
blockstore: &Arc<Blockstore>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
receive_results: ReceiveResults,
) -> Result<()> {
let mut receive_elapsed = receive_results.time_elapsed;
@ -160,11 +151,13 @@ impl StandardBroadcastRun {
let bank = receive_results.bank.clone();
let last_tick_height = receive_results.last_tick_height;
inc_new_counter_info!("broadcast_service-entries_received", num_entries);
let old_broadcast_start = self.slot_broadcast_start;
let old_num_batches = self.num_batches;
if self.current_slot_and_parent.is_none()
|| bank.slot() != self.current_slot_and_parent.unwrap().0
{
self.slot_broadcast_start = Some(Instant::now());
self.num_batches = 0;
let slot = bank.slot();
let parent_slot = bank.parent_slot();
@ -179,19 +172,19 @@ impl StandardBroadcastRun {
self.check_for_interrupted_slot(bank.ticks_per_slot() as u8);
// 2) Convert entries to shreds and coding shreds
let (shredder, next_shred_index) = self.init_shredder(
blockstore,
(bank.tick_height() % bank.ticks_per_slot()) as u8,
);
let mut data_shreds = self.entries_to_data_shreds(
let is_last_in_slot = last_tick_height == bank.max_tick_height();
let data_shreds = self.entries_to_data_shreds(
&shredder,
next_shred_index,
&receive_results.entries,
last_tick_height == bank.max_tick_height(),
is_last_in_slot,
);
//Insert the first shred so blockstore stores that the leader started this block
//This must be done before the blocks are sent out over the wire.
// Insert the first shred so blockstore stores that the leader started this block
// This must be done before the blocks are sent out over the wire.
if !data_shreds.is_empty() && data_shreds[0].index() == 0 {
let first = vec![data_shreds[0].clone()];
blockstore
@ -199,27 +192,56 @@ impl StandardBroadcastRun {
.expect("Failed to insert shreds in blockstore");
}
let last_data_shred = data_shreds.len();
if let Some(last_shred) = last_unfinished_slot_shred {
data_shreds.push(last_shred);
}
let to_shreds_elapsed = to_shreds_start.elapsed();
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
let stakes = stakes.map(Arc::new);
let data_shreds = Arc::new(data_shreds);
socket_sender.send((stakes.clone(), data_shreds.clone()))?;
blockstore_sender.send(data_shreds.clone())?;
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
let coding_shreds = Arc::new(coding_shreds);
socket_sender.send((stakes, coding_shreds.clone()))?;
blockstore_sender.send(coding_shreds)?;
self.update_broadcast_stats(BroadcastStats {
shredding_elapsed: duration_as_us(&to_shreds_elapsed),
receive_elapsed: duration_as_us(&receive_elapsed),
..BroadcastStats::default()
// Broadcast the last shred of the interrupted slot if necessary
if let Some(last_shred) = last_unfinished_slot_shred {
let batch_info = Some(BroadcastShredBatchInfo {
slot: last_shred.slot(),
num_expected_batches: Some(old_num_batches + 1),
slot_start_ts: old_broadcast_start.expect(
"Old broadcast start time for previous slot must exist if the previous slot
was interrupted",
),
});
let last_shred = Arc::new(vec![last_shred]);
socket_sender.send(((stakes.clone(), last_shred.clone()), batch_info.clone()))?;
blockstore_sender.send((last_shred, batch_info))?;
}
// Increment by two batches, one for the data batch, one for the coding batch.
self.num_batches += 2;
let num_expected_batches = {
if is_last_in_slot {
Some(self.num_batches)
} else {
None
}
};
let batch_info = Some(BroadcastShredBatchInfo {
slot: bank.slot(),
num_expected_batches,
slot_start_ts: self
.slot_broadcast_start
.clone()
.expect("Start timestamp must exist for a slot if we're broadcasting the slot"),
});
let data_shreds = Arc::new(data_shreds);
socket_sender.send(((stakes.clone(), data_shreds.clone()), batch_info.clone()))?;
blockstore_sender.send((data_shreds.clone(), batch_info.clone()))?;
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
let coding_shreds = Arc::new(coding_shreds);
socket_sender.send(((stakes, coding_shreds.clone()), batch_info.clone()))?;
blockstore_sender.send((coding_shreds, batch_info))?;
self.process_shreds_stats.update(&ProcessShredsStats {
shredding_elapsed: duration_as_us(&to_shreds_elapsed),
receive_elapsed: duration_as_us(&receive_elapsed),
});
if last_tick_height == bank.max_tick_height() {
self.report_and_reset_stats();
self.unfinished_slot = None;
@ -228,10 +250,15 @@ impl StandardBroadcastRun {
Ok(())
}
fn insert(&self, blockstore: &Arc<Blockstore>, shreds: Arc<Vec<Shred>>) -> Result<()> {
fn insert(
&mut self,
blockstore: &Arc<Blockstore>,
shreds: Arc<Vec<Shred>>,
broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
) -> Result<()> {
// Insert shreds into blockstore
let insert_shreds_start = Instant::now();
//The first shred is inserted synchronously
// The first shred is inserted synchronously
let data_shreds = if !shreds.is_empty() && shreds[0].index() == 0 {
shreds[1..].to_vec()
} else {
@ -241,66 +268,80 @@ impl StandardBroadcastRun {
.insert_shreds(data_shreds, None, true)
.expect("Failed to insert shreds in blockstore");
let insert_shreds_elapsed = insert_shreds_start.elapsed();
self.update_broadcast_stats(BroadcastStats {
let new_insert_shreds_stats = InsertShredsStats {
insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed),
..BroadcastStats::default()
});
num_shreds: shreds.len(),
};
self.update_insertion_metrics(&new_insert_shreds_stats, &broadcast_shred_batch_info);
Ok(())
}
fn update_insertion_metrics(
&mut self,
new_insertion_shreds_stats: &InsertShredsStats,
broadcast_shred_batch_info: &Option<BroadcastShredBatchInfo>,
) {
let mut insert_shreds_stats = self.insert_shreds_stats.lock().unwrap();
insert_shreds_stats.update(new_insertion_shreds_stats, broadcast_shred_batch_info);
}
fn broadcast(
&self,
&mut self,
sock: &UdpSocket,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
shreds: Arc<Vec<Shred>>,
broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
) -> Result<()> {
let seed_start = Instant::now();
let seeds: Vec<[u8; 32]> = shreds.iter().map(|s| s.seed()).collect();
let seed_elapsed = seed_start.elapsed();
trace!("Broadcasting {:?} shreds", shreds.len());
// Get the list of peers to broadcast to
let get_peers_start = Instant::now();
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
let get_peers_elapsed = get_peers_start.elapsed();
// Broadcast the shreds
let broadcast_start = Instant::now();
let shred_bufs: Vec<Vec<u8>> = shreds.to_vec().into_iter().map(|s| s.payload).collect();
trace!("Broadcasting {:?} shreds", shred_bufs.len());
let transmit_start = Instant::now();
let mut send_mmsg_total = 0;
broadcast_shreds(
sock,
&shreds,
&peers_and_stakes,
&peers,
&self.last_datapoint_submit,
&mut send_mmsg_total,
)?;
let transmit_elapsed = transmit_start.elapsed();
let new_transmit_shreds_stats = TransmitShredsStats {
transmit_elapsed: duration_as_us(&transmit_elapsed),
get_peers_elapsed: duration_as_us(&get_peers_elapsed),
send_mmsg_elapsed: send_mmsg_total,
num_shreds: shreds.len(),
};
cluster_info
.write()
.unwrap()
.broadcast_shreds(sock, shred_bufs, &seeds, stakes)?;
let broadcast_elapsed = broadcast_start.elapsed();
self.update_broadcast_stats(BroadcastStats {
broadcast_elapsed: duration_as_us(&broadcast_elapsed),
seed_elapsed: duration_as_us(&seed_elapsed),
..BroadcastStats::default()
});
// Process metrics
self.update_transmit_metrics(&new_transmit_shreds_stats, &broadcast_shred_batch_info);
Ok(())
}
fn update_broadcast_stats(&self, stats: BroadcastStats) {
let mut wstats = self.stats.write().unwrap();
wstats.receive_elapsed += stats.receive_elapsed;
wstats.shredding_elapsed += stats.shredding_elapsed;
wstats.insert_shreds_elapsed += stats.insert_shreds_elapsed;
wstats.broadcast_elapsed += stats.broadcast_elapsed;
wstats.seed_elapsed += stats.seed_elapsed;
fn update_transmit_metrics(
&mut self,
new_transmit_shreds_stats: &TransmitShredsStats,
broadcast_shred_batch_info: &Option<BroadcastShredBatchInfo>,
) {
let mut transmit_shreds_stats = self.transmit_shreds_stats.lock().unwrap();
transmit_shreds_stats.update(new_transmit_shreds_stats, broadcast_shred_batch_info);
}
fn report_and_reset_stats(&mut self) {
let stats = self.stats.read().unwrap();
let stats = &self.process_shreds_stats;
assert!(self.unfinished_slot.is_some());
datapoint_info!(
"broadcast-bank-stats",
"broadcast-process-shreds-stats",
("slot", self.unfinished_slot.unwrap().slot as i64, i64),
("shredding_time", stats.shredding_elapsed as i64, i64),
("insertion_time", stats.insert_shreds_elapsed as i64, i64),
("broadcast_time", stats.broadcast_elapsed as i64, i64),
("receive_time", stats.receive_elapsed as i64, i64),
("seed", stats.seed_elapsed as i64, i64),
(
"num_shreds",
"num_data_shreds",
i64::from(self.unfinished_slot.unwrap().next_shred_index),
i64
),
@ -310,8 +351,7 @@ impl StandardBroadcastRun {
i64
),
);
drop(stats);
self.stats.write().unwrap().reset();
self.process_shreds_stats.reset();
}
}
@ -320,8 +360,8 @@ impl BroadcastRun for StandardBroadcastRun {
&mut self,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
) -> Result<()> {
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
self.process_receive_results(
@ -332,21 +372,21 @@ impl BroadcastRun for StandardBroadcastRun {
)
}
fn transmit(
&self,
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
&mut self,
receiver: &Arc<Mutex<TransmitReceiver>>,
cluster_info: &ClusterInfo,
sock: &UdpSocket,
) -> Result<()> {
let (stakes, shreds) = receiver.lock().unwrap().recv()?;
self.broadcast(sock, cluster_info, stakes, shreds)
let ((stakes, shreds), slot_start_ts) = receiver.lock().unwrap().recv()?;
self.broadcast(sock, cluster_info, stakes, shreds, slot_start_ts)
}
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
&mut self,
receiver: &Arc<Mutex<RecordReceiver>>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
let shreds = receiver.lock().unwrap().recv()?;
self.insert(blockstore, shreds)
let (shreds, slot_start_ts) = receiver.lock().unwrap().recv()?;
self.insert(blockstore, shreds, slot_start_ts)
}
}
@ -364,7 +404,7 @@ mod test {
genesis_config::GenesisConfig,
signature::{Keypair, Signer},
};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::time::Duration;
fn setup(
@ -372,7 +412,7 @@ mod test {
) -> (
Arc<Blockstore>,
GenesisConfig,
Arc<RwLock<ClusterInfo>>,
Arc<ClusterInfo>,
Arc<Bank>,
Arc<Keypair>,
UdpSocket,
@ -385,9 +425,9 @@ mod test {
let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
leader_info.info.clone(),
)));
));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut genesis_config = create_genesis_config(10_000).genesis_config;
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
@ -462,25 +502,40 @@ mod test {
// Make sure the slot is not complete
assert!(!blockstore.is_full(0));
// Modify the stats, should reset later
standard_broadcast_run
.stats
.write()
.unwrap()
.receive_elapsed = 10;
// Try to fetch ticks from blockstore, nothing should break
assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
standard_broadcast_run.process_shreds_stats.receive_elapsed = 10;
// Broadcast stats should exist, and 2 batches should have been sent,
// one for data, one for coding
assert_eq!(
blockstore
.get_slot_entries(0, num_shreds_per_slot, None)
.unwrap(),
standard_broadcast_run
.transmit_shreds_stats
.lock()
.unwrap()
.get(unfinished_slot.slot)
.unwrap()
.num_batches(),
2
);
assert_eq!(
standard_broadcast_run
.insert_shreds_stats
.lock()
.unwrap()
.get(unfinished_slot.slot)
.unwrap()
.num_batches(),
2
);
// Try to fetch ticks from blockstore, nothing should break
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), ticks0);
assert_eq!(
blockstore.get_slot_entries(0, num_shreds_per_slot).unwrap(),
vec![],
);
// Step 2: Make a transmission for another bank that interrupts the transmission for
// slot 0
let bank2 = Arc::new(Bank::new_from_parent(&bank0, &leader_keypair.pubkey(), 2));
let interrupted_slot = unfinished_slot.slot;
// Interrupting the slot should cause the unfinished_slot and stats to reset
let num_shreds = 1;
assert!(num_shreds < num_shreds_per_slot);
@ -504,16 +559,28 @@ mod test {
// Check that the stats were reset as well
assert_eq!(
standard_broadcast_run.stats.read().unwrap().receive_elapsed,
standard_broadcast_run.process_shreds_stats.receive_elapsed,
0
);
// Broadcast stats for interrupted slot should be cleared
assert!(standard_broadcast_run
.transmit_shreds_stats
.lock()
.unwrap()
.get(interrupted_slot)
.is_none());
assert!(standard_broadcast_run
.insert_shreds_stats
.lock()
.unwrap()
.get(interrupted_slot)
.is_none());
// Try to fetch the incomplete ticks from blockstore, should succeed
assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), ticks0);
assert_eq!(
blockstore
.get_slot_entries(0, num_shreds_per_slot, None)
.unwrap(),
blockstore.get_slot_entries(0, num_shreds_per_slot).unwrap(),
vec![],
);
}

File diff suppressed because it is too large

View File

@ -197,7 +197,7 @@ pub struct ClusterInfoVoteListener {
impl ClusterInfoVoteListener {
pub fn new(
exit: &Arc<AtomicBool>,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
sigverify_disabled: bool,
sender: CrossbeamSender<Vec<Packets>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
@ -262,7 +262,7 @@ impl ClusterInfoVoteListener {
fn recv_loop(
exit: Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
sigverify_disabled: bool,
verified_vote_packets_sender: VerifiedVotePacketsSender,
verified_vote_transactions_sender: VerifiedVoteTransactionsSender,
@ -272,40 +272,12 @@ impl ClusterInfoVoteListener {
if exit.load(Ordering::Relaxed) {
return Ok(());
}
let (labels, votes, new_ts) = cluster_info.read().unwrap().get_votes(last_ts);
let (labels, votes, new_ts) = cluster_info.get_votes(last_ts);
inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len());
last_ts = new_ts;
let msgs = packet::to_packets(&votes);
if !msgs.is_empty() {
let r = if sigverify_disabled {
sigverify::ed25519_verify_disabled(&msgs)
} else {
sigverify::ed25519_verify_cpu(&msgs)
};
assert_eq!(
r.iter()
.map(|packets_results| packets_results.len())
.sum::<usize>(),
votes.len()
);
let (vote_txs, packets) = izip!(
labels.into_iter(),
votes.into_iter(),
r.iter().flatten(),
msgs
)
.filter_map(|(label, vote, verify_result, packet)| {
if *verify_result != 0 {
Some((vote, (label, packet)))
} else {
None
}
})
.unzip();
if !votes.is_empty() {
let (vote_txs, packets) = Self::verify_votes(votes, labels, sigverify_disabled);
verified_vote_transactions_sender.send(vote_txs)?;
verified_vote_packets_sender.send(packets)?;
}
@ -314,6 +286,42 @@ impl ClusterInfoVoteListener {
}
}
fn verify_votes(
votes: Vec<Transaction>,
labels: Vec<CrdsValueLabel>,
sigverify_disabled: bool,
) -> (Vec<Transaction>, Vec<(CrdsValueLabel, Packets)>) {
let msgs = packet::to_packets_chunked(&votes, 1);
let r = if sigverify_disabled {
sigverify::ed25519_verify_disabled(&msgs)
} else {
sigverify::ed25519_verify_cpu(&msgs)
};
assert_eq!(
r.iter()
.map(|packets_results| packets_results.len())
.sum::<usize>(),
votes.len()
);
let (vote_txs, packets) = izip!(
labels.into_iter(),
votes.into_iter(),
r.iter().flatten(),
msgs,
)
.filter_map(|(label, vote, verify_result, packet)| {
if *verify_result != 0 {
Some((vote, (label, packet)))
} else {
None
}
})
.unzip();
(vote_txs, packets)
}
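The new verify_votes helper zips each vote with its sigverify result and keeps only those that passed, unzipping into parallel vectors. A simplified, self-contained version of that pattern, with u8 results standing in for the sigverify output (non-zero means verified):

fn split_verified<T: Clone, L>(
    items: Vec<T>,
    labels: Vec<L>,
    results: Vec<u8>,
) -> (Vec<T>, Vec<(L, T)>) {
    items
        .into_iter()
        .zip(labels)
        .zip(results)
        .filter_map(|((item, label), ok)| {
            if ok != 0 {
                // keep the verified item once on its own and once paired with its label
                Some((item.clone(), (label, item)))
            } else {
                None
            }
        })
        .unzip()
}

fn main() {
    let (passed, labeled) = split_verified(vec!["a", "b", "c"], vec![1, 2, 3], vec![1, 0, 1]);
    assert_eq!(passed, vec!["a", "c"]);
    assert_eq!(labeled, vec![(1, "a"), (3, "c")]);
}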
fn bank_send_loop(
exit: Arc<AtomicBool>,
verified_vote_packets_receiver: VerifiedVotePacketsReceiver,
@ -525,6 +533,7 @@ mod tests {
genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs},
};
use solana_sdk::hash::Hash;
use solana_sdk::signature::Signature;
use solana_sdk::signature::{Keypair, Signer};
use solana_vote_program::vote_transaction;
@ -971,4 +980,61 @@ mod tests {
validator_voting_keypairs,
)
}
#[test]
fn test_verify_votes_empty() {
solana_logger::setup();
let votes = vec![];
let labels = vec![];
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels, false);
assert!(vote_txs.is_empty());
assert!(packets.is_empty());
}
fn verify_packets_len(packets: &Vec<(CrdsValueLabel, Packets)>, ref_value: usize) {
let num_packets: usize = packets.iter().map(|p| p.1.packets.len()).sum();
assert_eq!(num_packets, ref_value);
}
fn test_vote_tx() -> Transaction {
let node_keypair = Keypair::new();
let vote_keypair = Keypair::new();
let auth_voter_keypair = Keypair::new();
let vote_tx = vote_transaction::new_vote_transaction(
vec![0],
Hash::default(),
Hash::default(),
&node_keypair,
&vote_keypair,
&auth_voter_keypair,
);
vote_tx
}
#[test]
fn test_verify_votes_1_pass() {
let vote_tx = test_vote_tx();
let votes = vec![vote_tx.clone()];
let labels = vec![CrdsValueLabel::Vote(0, Pubkey::new_rand())];
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels, false);
assert_eq!(vote_txs.len(), 1);
verify_packets_len(&packets, 1);
}
#[test]
fn test_bad_vote() {
let vote_tx = test_vote_tx();
let mut bad_vote = vote_tx.clone();
bad_vote.signatures[0] = Signature::default();
let votes = vec![vote_tx.clone(), bad_vote, vote_tx];
let label = CrdsValueLabel::Vote(0, Pubkey::new_rand());
let labels: Vec<_> = (0..votes.len())
.into_iter()
.map(|_| label.clone())
.collect();
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels, false);
assert_eq!(vote_txs.len(), 2);
verify_packets_len(&packets, 2);
}
}


@ -27,15 +27,10 @@ impl ClusterSlots {
pub fn lookup(&self, slot: Slot) -> Option<Arc<RwLock<SlotPubkeys>>> {
self.cluster_slots.read().unwrap().get(&slot).cloned()
}
pub fn update(
&self,
root: Slot,
cluster_info: &RwLock<ClusterInfo>,
bank_forks: &RwLock<BankForks>,
) {
pub fn update(&self, root: Slot, cluster_info: &ClusterInfo, bank_forks: &RwLock<BankForks>) {
self.update_peers(cluster_info, bank_forks);
let since = *self.since.read().unwrap();
let epoch_slots = cluster_info.read().unwrap().get_epoch_slots_since(since);
let epoch_slots = cluster_info.get_epoch_slots_since(since);
self.update_internal(root, epoch_slots);
}
fn update_internal(&self, root: Slot, epoch_slots: (Vec<EpochSlots>, Option<u64>)) {
@ -95,7 +90,7 @@ impl ClusterSlots {
.collect()
}
fn update_peers(&self, cluster_info: &RwLock<ClusterInfo>, bank_forks: &RwLock<BankForks>) {
fn update_peers(&self, cluster_info: &ClusterInfo, bank_forks: &RwLock<BankForks>) {
let root_bank = bank_forks.read().unwrap().root_bank().clone();
let root_epoch = root_bank.epoch();
let my_epoch = *self.epoch.read().unwrap();
@ -111,7 +106,7 @@ impl ClusterSlots {
.clone();
*self.validator_stakes.write().unwrap() = validator_stakes;
let id = cluster_info.read().unwrap().id();
let id = cluster_info.id();
*self.self_id.write().unwrap() = id;
*self.epoch.write().unwrap() = Some(root_epoch);
}


@ -1,3 +1,7 @@
use crate::consensus::VOTE_THRESHOLD_SIZE;
use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
@ -10,9 +14,11 @@ use std::{
time::Duration,
};
pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];
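// Layout of BlockCommitmentArray, as used by the methods below: index i holds
// the stake currently voting with confirmation count i + 1, and the extra final
// element (index MAX_LOCKOUT_HISTORY) accumulates stake that has rooted the slot.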
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub struct BlockCommitment {
pub commitment: [u64; MAX_LOCKOUT_HISTORY],
pub commitment: BlockCommitmentArray,
}
impl BlockCommitment {
@ -25,23 +31,71 @@ impl BlockCommitment {
assert!(confirmation_count > 0 && confirmation_count <= MAX_LOCKOUT_HISTORY);
self.commitment[confirmation_count - 1]
}
pub fn increase_rooted_stake(&mut self, stake: u64) {
self.commitment[MAX_LOCKOUT_HISTORY] += stake;
}
pub fn get_rooted_stake(&self) -> u64 {
self.commitment[MAX_LOCKOUT_HISTORY]
}
#[cfg(test)]
pub(crate) fn new(commitment: [u64; MAX_LOCKOUT_HISTORY]) -> Self {
pub(crate) fn new(commitment: BlockCommitmentArray) -> Self {
Self { commitment }
}
}
#[derive(Debug, Default)]
pub struct BlockCommitmentCache {
block_commitment: HashMap<Slot, BlockCommitment>,
largest_confirmed_root: Slot,
total_stake: u64,
bank: Arc<Bank>,
blockstore: Arc<Blockstore>,
root: Slot,
}
impl std::fmt::Debug for BlockCommitmentCache {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("BlockCommitmentCache")
.field("block_commitment", &self.block_commitment)
.field("total_stake", &self.total_stake)
.field(
"bank",
&format_args!("Bank({{current_slot: {:?}}})", self.bank.slot()),
)
.field("root", &self.root)
.finish()
}
}
impl BlockCommitmentCache {
pub fn new(block_commitment: HashMap<Slot, BlockCommitment>, total_stake: u64) -> Self {
pub fn new(
block_commitment: HashMap<Slot, BlockCommitment>,
largest_confirmed_root: Slot,
total_stake: u64,
bank: Arc<Bank>,
blockstore: Arc<Blockstore>,
root: Slot,
) -> Self {
Self {
block_commitment,
largest_confirmed_root,
total_stake,
bank,
blockstore,
root,
}
}
pub fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
Self {
block_commitment: HashMap::default(),
largest_confirmed_root: Slot::default(),
total_stake: u64::default(),
bank: Arc::new(Bank::default()),
blockstore,
root: Slot::default(),
}
}
@ -49,45 +103,99 @@ impl BlockCommitmentCache {
self.block_commitment.get(&slot)
}
pub fn largest_confirmed_root(&self) -> Slot {
self.largest_confirmed_root
}
pub fn total_stake(&self) -> u64 {
self.total_stake
}
pub fn get_block_with_depth_commitment(
&self,
minimum_depth: usize,
minimum_stake_percentage: f64,
) -> Option<Slot> {
self.block_commitment
.iter()
.filter(|&(_, block_commitment)| {
let fork_stake_minimum_depth: u64 = block_commitment.commitment[minimum_depth..]
.iter()
.cloned()
.sum();
fork_stake_minimum_depth as f64 / self.total_stake as f64
>= minimum_stake_percentage
})
.map(|(slot, _)| *slot)
.max()
pub fn bank(&self) -> Arc<Bank> {
self.bank.clone()
}
pub fn get_rooted_block_with_commitment(&self, minimum_stake_percentage: f64) -> Option<u64> {
self.get_block_with_depth_commitment(MAX_LOCKOUT_HISTORY - 1, minimum_stake_percentage)
pub fn slot(&self) -> Slot {
self.bank.slot()
}
pub fn root(&self) -> Slot {
self.root
}
pub fn get_confirmation_count(&self, slot: Slot) -> Option<usize> {
self.get_lockout_count(slot, VOTE_THRESHOLD_SIZE)
}
// Returns the lowest level at which at least `minimum_stake_percentage` of the total epoch
// stake is locked out
fn get_lockout_count(&self, slot: Slot, minimum_stake_percentage: f64) -> Option<usize> {
self.get_block_commitment(slot).map(|block_commitment| {
let iterator = block_commitment.commitment.iter().enumerate().rev();
let mut sum = 0;
for (i, stake) in iterator {
sum += stake;
if (sum as f64 / self.total_stake as f64) > minimum_stake_percentage {
return i + 1;
}
}
0
})
}
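// Worked example for the scan in get_lockout_count, using the figures from
// test_get_confirmations below: with total_stake = 50 and a slot whose
// commitment holds 5 at confirmation count 1 and 40 at confirmation count 2,
// the reverse walk first exceeds VOTE_THRESHOLD_SIZE (2/3) at index 1
// (40/50 = 0.8), so the reported confirmation count is 2.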
pub fn is_confirmed_rooted(&self, slot: Slot) -> bool {
slot <= self.largest_confirmed_root()
&& (self.blockstore.is_root(slot) || self.bank.status_cache_ancestors().contains(&slot))
}
#[cfg(test)]
pub fn new_for_tests_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
block_commitment.insert(0, BlockCommitment::default());
Self {
block_commitment,
blockstore,
total_stake: 42,
largest_confirmed_root: Slot::default(),
bank: Arc::new(Bank::default()),
root: Slot::default(),
}
}
#[cfg(test)]
pub(crate) fn set_get_largest_confirmed_root(&mut self, root: Slot) {
self.largest_confirmed_root = root;
}
}
pub struct CommitmentAggregationData {
bank: Arc<Bank>,
root: Slot,
total_staked: u64,
}
impl CommitmentAggregationData {
pub fn new(bank: Arc<Bank>, total_staked: u64) -> Self {
Self { bank, total_staked }
pub fn new(bank: Arc<Bank>, root: Slot, total_staked: u64) -> Self {
Self {
bank,
root,
total_staked,
}
}
}
fn get_largest_confirmed_root(mut rooted_stake: Vec<(Slot, u64)>, total_stake: u64) -> Slot {
rooted_stake.sort_by(|a, b| a.0.cmp(&b.0).reverse());
let mut stake_sum = 0;
for (root, stake) in rooted_stake {
stake_sum += stake;
if (stake_sum as f64 / total_stake as f64) > VOTE_THRESHOLD_SIZE {
return root;
}
}
0
}
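// Worked example for get_largest_confirmed_root: with total_stake = 100 and
// rooted_stake = [(4, 40), (3, 30), (2, 20)], the descending-slot walk
// accumulates 40% at slot 4 and 70% at slot 3; 70% exceeds
// VOTE_THRESHOLD_SIZE (2/3), so slot 3 is returned as the largest root
// confirmed by a supermajority.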
pub struct AggregateCommitmentService {
t_commitment: JoinHandle<()>,
}
@ -144,18 +252,37 @@ impl AggregateCommitmentService {
continue;
}
let block_commitment = Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
let (block_commitment, rooted_stake) =
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
let mut new_block_commitment =
BlockCommitmentCache::new(block_commitment, aggregation_data.total_staked);
let largest_confirmed_root =
get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);
let mut new_block_commitment = BlockCommitmentCache::new(
block_commitment,
largest_confirmed_root,
aggregation_data.total_staked,
aggregation_data.bank,
block_commitment_cache.read().unwrap().blockstore.clone(),
aggregation_data.root,
);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
aggregate_commitment_time.stop();
inc_new_counter_info!(
"aggregate-commitment-ms",
aggregate_commitment_time.as_ms() as usize
);
}
}
pub fn aggregate_commitment(ancestors: &[Slot], bank: &Bank) -> HashMap<Slot, BlockCommitment> {
pub fn aggregate_commitment(
ancestors: &[Slot],
bank: &Bank,
) -> (HashMap<Slot, BlockCommitment>, Vec<(Slot, u64)>) {
assert!(!ancestors.is_empty());
// Check ancestors is sorted
@ -164,6 +291,7 @@ impl AggregateCommitmentService {
}
let mut commitment = HashMap::new();
let mut rooted_stake: Vec<(Slot, u64)> = Vec::new();
for (_, (lamports, account)) in bank.vote_accounts().into_iter() {
if lamports == 0 {
continue;
@ -176,17 +304,19 @@ impl AggregateCommitmentService {
let vote_state = vote_state.unwrap();
Self::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
ancestors,
lamports,
);
}
commitment
(commitment, rooted_stake)
}
fn aggregate_commitment_for_vote_account(
commitment: &mut HashMap<Slot, BlockCommitment>,
rooted_stake: &mut Vec<(Slot, u64)>,
vote_state: &VoteState,
ancestors: &[Slot],
lamports: u64,
@ -199,12 +329,13 @@ impl AggregateCommitmentService {
commitment
.entry(*a)
.or_insert_with(BlockCommitment::default)
.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
.increase_rooted_stake(lamports);
} else {
ancestors_index = i;
break;
}
}
rooted_stake.push((root, lamports));
}
for vote in &vote_state.votes {
@ -230,7 +361,10 @@ impl AggregateCommitmentService {
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_sdk::pubkey::Pubkey;
use solana_stake_program::stake_state;
use solana_vote_program::vote_state::{self, VoteStateVersions};
@ -246,97 +380,98 @@ mod tests {
}
#[test]
fn test_get_block_with_depth_commitment() {
fn test_get_confirmations() {
let bank = Arc::new(Bank::default());
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
// Build BlockCommitmentCache with votes at confirmation counts 1 and 2 for 3 slots
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 15);
cache0.increase_confirmation_stake(2, 25);
cache0.increase_confirmation_stake(1, 5);
cache0.increase_confirmation_stake(2, 40);
let mut cache1 = BlockCommitment::default();
cache1.increase_confirmation_stake(1, 10);
cache1.increase_confirmation_stake(2, 20);
cache1.increase_confirmation_stake(1, 40);
cache1.increase_confirmation_stake(2, 5);
let mut cache2 = BlockCommitment::default();
cache2.increase_confirmation_stake(1, 20);
cache2.increase_confirmation_stake(2, 5);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
block_commitment.entry(1).or_insert(cache1.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
block_commitment.entry(2).or_insert(cache2.clone());
let block_commitment_cache =
BlockCommitmentCache::new(block_commitment, 0, 50, bank, blockstore, 0);
// Neither slot has rooted votes
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.1),
None
);
// Neither slot meets the minimum level of commitment 0.6 at depth 1
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(1, 0.6),
None
);
// Only slot 0 meets the minimum level of commitment 0.5 at depth 1
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(1, 0.5),
Some(0)
);
// If multiple slots meet the minimum level of commitment, method should return the most recent
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(1, 0.4),
Some(1)
);
// If multiple slots meet the minimum level of commitment, method should return the most recent
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(0, 0.6),
Some(1)
);
// Neither slot meets the minimum level of commitment 0.9 at depth 0
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(0, 0.9),
None
);
assert_eq!(block_commitment_cache.get_confirmation_count(0), Some(2));
assert_eq!(block_commitment_cache.get_confirmation_count(1), Some(1));
assert_eq!(block_commitment_cache.get_confirmation_count(2), Some(0),);
assert_eq!(block_commitment_cache.get_confirmation_count(3), None,);
}
#[test]
fn test_get_rooted_block_with_commitment() {
// Build BlockCommitmentCache with rooted votes
let mut cache0 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 40);
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
let mut cache1 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 30);
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 2, 10);
fn test_is_confirmed_rooted() {
let bank = Arc::new(Bank::default());
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
blockstore.set_roots(&[0, 1]).unwrap();
// Build BlockCommitmentCache with rooted slots
let mut cache0 = BlockCommitment::default();
cache0.increase_rooted_stake(50);
let mut cache1 = BlockCommitment::default();
cache1.increase_rooted_stake(40);
let mut cache2 = BlockCommitment::default();
cache2.increase_rooted_stake(20);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
block_commitment.entry(1).or_insert(cache1.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
block_commitment.entry(1).or_insert(cache0.clone());
block_commitment.entry(2).or_insert(cache1.clone());
block_commitment.entry(3).or_insert(cache2.clone());
let largest_confirmed_root = 1;
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
largest_confirmed_root,
50,
bank,
blockstore,
0,
);
// Only slot 0 meets the minimum level of commitment 0.66 at root
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.66),
Some(0)
);
// If multiple slots meet the minimum level of commitment, method should return the most recent
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.6),
Some(1)
);
// Neither slot meets the minimum level of commitment 0.9 at root
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.9),
None
);
assert!(block_commitment_cache.is_confirmed_rooted(0));
assert!(block_commitment_cache.is_confirmed_rooted(1));
assert!(!block_commitment_cache.is_confirmed_rooted(2));
assert!(!block_commitment_cache.is_confirmed_rooted(3));
}
#[test]
fn test_get_largest_confirmed_root() {
assert_eq!(get_largest_confirmed_root(vec![], 10), 0);
let mut rooted_stake = vec![];
rooted_stake.push((0, 5));
rooted_stake.push((1, 5));
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 0);
let mut rooted_stake = vec![];
rooted_stake.push((1, 5));
rooted_stake.push((0, 10));
rooted_stake.push((2, 5));
rooted_stake.push((1, 4));
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 1);
}
#[test]
fn test_aggregate_commitment_for_vote_account_1() {
let ancestors = vec![3, 4, 5, 7, 9, 11];
let mut commitment = HashMap::new();
let mut rooted_stake = vec![];
let lamports = 5;
let mut vote_state = VoteState::default();
let root = ancestors.last().unwrap();
vote_state.root_slot = Some(*root);
let root = ancestors.last().unwrap().clone();
vote_state.root_slot = Some(root);
AggregateCommitmentService::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
&ancestors,
lamports,
@ -344,15 +479,17 @@ mod tests {
for a in ancestors {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
}
assert_eq!(rooted_stake[0], (root, lamports));
}
#[test]
fn test_aggregate_commitment_for_vote_account_2() {
let ancestors = vec![3, 4, 5, 7, 9, 11];
let mut commitment = HashMap::new();
let mut rooted_stake = vec![];
let lamports = 5;
let mut vote_state = VoteState::default();
@ -361,6 +498,7 @@ mod tests {
vote_state.process_slot_vote_unchecked(*ancestors.last().unwrap());
AggregateCommitmentService::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
&ancestors,
lamports,
@ -369,7 +507,7 @@ mod tests {
for a in ancestors {
if a <= root {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else {
let mut expected = BlockCommitment::default();
@ -377,12 +515,14 @@ mod tests {
assert_eq!(*commitment.get(&a).unwrap(), expected);
}
}
assert_eq!(rooted_stake[0], (root, lamports));
}
#[test]
fn test_aggregate_commitment_for_vote_account_3() {
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
let mut commitment = HashMap::new();
let mut rooted_stake = vec![];
let lamports = 5;
let mut vote_state = VoteState::default();
@ -393,6 +533,7 @@ mod tests {
vote_state.process_slot_vote_unchecked(ancestors[6]);
AggregateCommitmentService::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
&ancestors,
lamports,
@ -401,7 +542,7 @@ mod tests {
for (i, a) in ancestors.iter().enumerate() {
if *a <= root {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else if i <= 4 {
let mut expected = BlockCommitment::default();
@ -413,6 +554,7 @@ mod tests {
assert_eq!(*commitment.get(&a).unwrap(), expected);
}
}
assert_eq!(rooted_stake[0], (root, lamports));
}
#[test]
@ -422,6 +564,8 @@ mod tests {
mut genesis_config, ..
} = create_genesis_config(10_000);
let rooted_stake_amount = 40;
let sk1 = Pubkey::new_rand();
let pk1 = Pubkey::new_rand();
let mut vote_account1 = vote_state::create_account(&pk1, &Pubkey::new_rand(), 0, 100);
@ -432,12 +576,36 @@ mod tests {
let mut vote_account2 = vote_state::create_account(&pk2, &Pubkey::new_rand(), 0, 50);
let stake_account2 =
stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
let sk3 = Pubkey::new_rand();
let pk3 = Pubkey::new_rand();
let mut vote_account3 = vote_state::create_account(&pk3, &Pubkey::new_rand(), 0, 1);
let stake_account3 = stake_state::create_account(
&sk3,
&pk3,
&vote_account3,
&genesis_config.rent,
rooted_stake_amount,
);
let sk4 = Pubkey::new_rand();
let pk4 = Pubkey::new_rand();
let mut vote_account4 = vote_state::create_account(&pk4, &Pubkey::new_rand(), 0, 1);
let stake_account4 = stake_state::create_account(
&sk4,
&pk4,
&vote_account4,
&genesis_config.rent,
rooted_stake_amount,
);
genesis_config.accounts.extend(vec![
(pk1, vote_account1.clone()),
(sk1, stake_account1),
(pk2, vote_account2.clone()),
(sk2, stake_account2),
(pk3, vote_account3.clone()),
(sk3, stake_account3),
(pk4, vote_account4.clone()),
(sk4, stake_account4),
]);
// Create bank
@ -457,7 +625,20 @@ mod tests {
VoteState::to(&versioned, &mut vote_account2).unwrap();
bank.store_account(&pk2, &vote_account2);
let commitment = AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);
let mut vote_state3 = VoteState::from(&vote_account3).unwrap();
vote_state3.root_slot = Some(1);
let versioned = VoteStateVersions::Current(Box::new(vote_state3));
VoteState::to(&versioned, &mut vote_account3).unwrap();
bank.store_account(&pk3, &vote_account3);
let mut vote_state4 = VoteState::from(&vote_account4).unwrap();
vote_state4.root_slot = Some(2);
let versioned = VoteStateVersions::Current(Box::new(vote_state4));
VoteState::to(&versioned, &mut vote_account4).unwrap();
bank.store_account(&pk4, &vote_account4);
let (commitment, rooted_stake) =
AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);
for a in ancestors {
if a <= 3 {
@ -481,5 +662,7 @@ mod tests {
assert!(commitment.get(&a).is_none());
}
}
assert_eq!(rooted_stake.len(), 2);
assert_eq!(get_largest_confirmed_root(rooted_stake, 100), 1)
}
}


@ -321,7 +321,7 @@ impl Tower {
}
pub fn check_vote_stake_threshold(
&self,
slot: u64,
slot: Slot,
stake_lockouts: &HashMap<u64, StakeLockout>,
total_staked: u64,
) -> bool {
@ -332,11 +332,8 @@ impl Tower {
if let Some(fork_stake) = stake_lockouts.get(&vote.slot) {
let lockout = fork_stake.stake as f64 / total_staked as f64;
trace!(
"fork_stake slot: {} lockout: {} fork_stake: {} total_stake: {}",
slot,
lockout,
fork_stake.stake,
total_staked
"fork_stake slot: {}, vote slot: {}, lockout: {} fork_stake: {} total_stake: {}",
slot, vote.slot, lockout, fork_stake.stake, total_staked
);
if vote.confirmation_count as usize > self.threshold_depth {
for old_vote in &self.lockouts.votes {
@ -358,11 +355,12 @@ impl Tower {
pub(crate) fn check_switch_threshold(
&self,
_slot: u64,
_slot: Slot,
_ancestors: &HashMap<Slot, HashSet<u64>>,
_descendants: &HashMap<Slot, HashSet<u64>>,
_progress: &ProgressMap,
_total_stake: u64,
_total_epoch_stake: u64,
_epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
) -> bool {
true
}
@ -482,6 +480,7 @@ pub mod test {
use super::*;
use crate::{
cluster_info_vote_listener::VoteTracker,
cluster_slots::ClusterSlots,
progress_map::ForkProgress,
replay_stage::{HeaviestForkFailures, ReplayStage},
};
@ -497,107 +496,96 @@ pub mod test {
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
transaction::Transaction,
};
use solana_vote_program::{
vote_instruction,
vote_state::{Vote, VoteStateVersions},
vote_transaction,
};
use std::collections::{HashMap, VecDeque};
use std::collections::HashMap;
use std::sync::RwLock;
use std::{thread::sleep, time::Duration};
use trees::{tr, Node, Tree};
use trees::{tr, Tree, TreeWalk};
pub(crate) struct VoteSimulator<'a> {
searchable_nodes: HashMap<u64, &'a Node<u64>>,
pub(crate) struct VoteSimulator {
pub validator_keypairs: HashMap<Pubkey, ValidatorVoteKeypairs>,
pub node_pubkeys: Vec<Pubkey>,
pub vote_pubkeys: Vec<Pubkey>,
pub bank_forks: RwLock<BankForks>,
pub progress: ProgressMap,
}
impl<'a> VoteSimulator<'a> {
pub(crate) fn new(forks: &'a Tree<u64>) -> Self {
let mut searchable_nodes = HashMap::new();
let root = forks.root();
searchable_nodes.insert(root.data, root);
Self { searchable_nodes }
impl VoteSimulator {
pub(crate) fn new(num_keypairs: usize) -> Self {
let (validator_keypairs, node_pubkeys, vote_pubkeys, bank_forks, progress) =
Self::init_state(num_keypairs);
Self {
validator_keypairs,
node_pubkeys,
vote_pubkeys,
bank_forks: RwLock::new(bank_forks),
progress,
}
}
pub(crate) fn fill_bank_forks(
&mut self,
forks: Tree<u64>,
cluster_votes: &HashMap<Pubkey, Vec<u64>>,
) {
let root = forks.root().data;
assert!(self.bank_forks.read().unwrap().get(root).is_some());
let mut walk = TreeWalk::from(forks);
loop {
if let Some(visit) = walk.get() {
let slot = visit.node().data;
self.progress
.entry(slot)
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0));
if self.bank_forks.read().unwrap().get(slot).is_some() {
walk.forward();
continue;
}
let parent = walk.get_parent().unwrap().data;
let parent_bank = self.bank_forks.read().unwrap().get(parent).unwrap().clone();
let new_bank = Bank::new_from_parent(&parent_bank, &Pubkey::default(), slot);
for (pubkey, vote) in cluster_votes.iter() {
if vote.contains(&parent) {
let keypairs = self.validator_keypairs.get(pubkey).unwrap();
let last_blockhash = parent_bank.last_blockhash();
let vote_tx = vote_transaction::new_vote_transaction(
// Must vote > root to be processed
vec![parent],
parent_bank.hash(),
last_blockhash,
&keypairs.node_keypair,
&keypairs.vote_keypair,
&keypairs.vote_keypair,
);
info!("voting {} {}", parent_bank.slot(), parent_bank.hash());
new_bank.process_transaction(&vote_tx).unwrap();
}
}
new_bank.freeze();
self.bank_forks.write().unwrap().insert(new_bank);
walk.forward();
} else {
break;
}
}
}
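// fill_bank_forks walks the supplied fork tree via TreeWalk, creating a frozen
// child bank for every slot not already in BankForks and replaying into that
// child a vote for the parent slot from every validator whose cluster_votes
// entry contains the parent. simulate_vote and can_progress_on_fork below then
// drive the tower against those banks.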
pub(crate) fn simulate_vote(
&mut self,
vote_slot: Slot,
bank_forks: &RwLock<BankForks>,
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
validator_keypairs: &HashMap<Pubkey, ValidatorVoteKeypairs>,
my_keypairs: &ValidatorVoteKeypairs,
progress: &mut ProgressMap,
my_pubkey: &Pubkey,
tower: &mut Tower,
) -> Vec<HeaviestForkFailures> {
let node = self
.find_node_and_update_simulation(vote_slot)
.expect("Vote to simulate must be for a slot in the tree");
let mut missing_nodes = VecDeque::new();
let mut current = node;
loop {
let current_slot = current.data;
if bank_forks.read().unwrap().get(current_slot).is_some()
|| tower.root().map(|r| current_slot < r).unwrap_or(false)
{
break;
} else {
missing_nodes.push_front(current);
}
if let Some(parent) = current.parent() {
current = parent;
} else {
break;
}
}
// Create any missing banks along the path
for missing_node in missing_nodes {
let missing_slot = missing_node.data;
let parent = missing_node.parent().unwrap().data;
let parent_bank = bank_forks
.read()
.unwrap()
.get(parent)
.expect("parent bank must exist")
.clone();
info!("parent of {} is {}", missing_slot, parent_bank.slot(),);
progress
.entry(missing_slot)
.or_insert_with(|| ForkProgress::new(parent_bank.last_blockhash(), None, None));
// Create the missing bank
let new_bank =
Bank::new_from_parent(&parent_bank, &Pubkey::default(), missing_slot);
// Simulate ingesting the cluster's votes for the parent into this bank
for (pubkey, vote) in cluster_votes.iter() {
if vote.contains(&parent_bank.slot()) {
let keypairs = validator_keypairs.get(pubkey).unwrap();
let node_pubkey = keypairs.node_keypair.pubkey();
let vote_pubkey = keypairs.vote_keypair.pubkey();
let last_blockhash = parent_bank.last_blockhash();
let votes = Vote::new(vec![parent_bank.slot()], parent_bank.hash());
info!("voting {} {}", parent_bank.slot(), parent_bank.hash());
let vote_ix = vote_instruction::vote(&vote_pubkey, &vote_pubkey, votes);
let mut vote_tx =
Transaction::new_with_payer(vec![vote_ix], Some(&node_pubkey));
vote_tx.partial_sign(&[&keypairs.node_keypair], last_blockhash);
vote_tx.partial_sign(&[&keypairs.vote_keypair], last_blockhash);
new_bank.process_transaction(&vote_tx).unwrap();
}
}
new_bank.freeze();
bank_forks.write().unwrap().insert(new_bank);
}
// Now try to simulate the vote
let my_pubkey = my_keypairs.node_keypair.pubkey();
// Try to simulate the vote
let my_keypairs = self.validator_keypairs.get(&my_pubkey).unwrap();
let my_vote_pubkey = my_keypairs.vote_keypair.pubkey();
let ancestors = bank_forks.read().unwrap().ancestors();
let mut frozen_banks: Vec<_> = bank_forks
let ancestors = self.bank_forks.read().unwrap().ancestors();
let mut frozen_banks: Vec<_> = self
.bank_forks
.read()
.unwrap()
.frozen_banks()
@ -610,90 +598,119 @@ pub mod test {
&ancestors,
&mut frozen_banks,
tower,
progress,
&mut self.progress,
&VoteTracker::default(),
bank_forks,
&ClusterSlots::default(),
&self.bank_forks,
&mut HashSet::new(),
);
let bank = bank_forks
let vote_bank = self
.bank_forks
.read()
.unwrap()
.get(vote_slot)
.expect("Bank must have been created before vote simulation")
.clone();
// Try to vote on the given slot
let descendants = self.bank_forks.read().unwrap().descendants();
let (_, _, failure_reasons) = ReplayStage::select_vote_and_reset_forks(
&Some(vote_bank.clone()),
&None,
&ancestors,
&descendants,
&self.progress,
&tower,
);
// Make sure this slot isn't locked out or failing threshold
let fork_progress = progress
.get(&vote_slot)
.expect("Slot for vote must exist in progress map");
info!("Checking vote: {}", vote_slot);
info!("lockouts: {:?}", fork_progress.fork_stats.stake_lockouts);
let mut failures = vec![];
if fork_progress.fork_stats.is_locked_out {
failures.push(HeaviestForkFailures::LockedOut(vote_slot));
info!("Checking vote: {}", vote_bank.slot());
if !failure_reasons.is_empty() {
return failure_reasons;
}
if !fork_progress.fork_stats.vote_threshold {
failures.push(HeaviestForkFailures::FailedThreshold(vote_slot));
}
if !failures.is_empty() {
return failures;
}
let vote = tower.new_vote_from_bank(&bank, &my_vote_pubkey).0;
let vote = tower.new_vote_from_bank(&vote_bank, &my_vote_pubkey).0;
if let Some(new_root) = tower.record_bank_vote(vote) {
ReplayStage::handle_new_root(
new_root,
bank_forks,
progress,
&self.bank_forks,
&mut self.progress,
&None,
&mut 0,
&mut HashSet::new(),
None,
);
}
// Mark the vote for this bank under this node's pubkey so it will be
// integrated into any future child banks
cluster_votes.entry(my_pubkey).or_default().push(vote_slot);
vec![]
}
// Find a node representing the given slot
fn find_node_and_update_simulation(&mut self, slot: u64) -> Option<&'a Node<u64>> {
let mut successful_search_node: Option<&'a Node<u64>> = None;
let mut found_node = None;
for search_node in self.searchable_nodes.values() {
if let Some((target, new_searchable_nodes)) = Self::find_node(search_node, slot) {
successful_search_node = Some(search_node);
found_node = Some(target);
for node in new_searchable_nodes {
self.searchable_nodes.insert(node.data, node);
}
break;
fn can_progress_on_fork(
&mut self,
my_pubkey: &Pubkey,
tower: &mut Tower,
start_slot: u64,
num_slots: u64,
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
) -> bool {
// Check that, within some reasonable time, the validator can make a new
// root on this fork
let old_root = tower.root();
for i in 1..num_slots {
// The parent of the tip of the fork
let mut fork_tip_parent = tr(start_slot + i - 1);
// The tip of the fork
fork_tip_parent.push_front(tr(start_slot + i));
self.fill_bank_forks(fork_tip_parent, cluster_votes);
if self
.simulate_vote(i + start_slot, &my_pubkey, tower)
.is_empty()
{
cluster_votes
.entry(*my_pubkey)
.or_default()
.push(start_slot + i);
}
if old_root != tower.root() {
return true;
}
}
successful_search_node.map(|node| {
self.searchable_nodes.remove(&node.data);
});
found_node
false
}
fn find_node(
node: &'a Node<u64>,
slot: u64,
) -> Option<(&'a Node<u64>, Vec<&'a Node<u64>>)> {
if node.data == slot {
Some((node, node.iter().collect()))
} else {
let mut search_result: Option<(&'a Node<u64>, Vec<&'a Node<u64>>)> = None;
for child in node.iter() {
if let Some((_, ref mut new_searchable_nodes)) = search_result {
new_searchable_nodes.push(child);
continue;
}
search_result = Self::find_node(child, slot);
}
fn init_state(
num_keypairs: usize,
) -> (
HashMap<Pubkey, ValidatorVoteKeypairs>,
Vec<Pubkey>,
Vec<Pubkey>,
BankForks,
ProgressMap,
) {
let keypairs: HashMap<_, _> = std::iter::repeat_with(|| {
let node_keypair = Keypair::new();
let vote_keypair = Keypair::new();
let stake_keypair = Keypair::new();
let node_pubkey = node_keypair.pubkey();
(
node_pubkey,
ValidatorVoteKeypairs::new(node_keypair, vote_keypair, stake_keypair),
)
})
.take(num_keypairs)
.collect();
let node_pubkeys: Vec<_> = keypairs
.values()
.map(|keys| keys.node_keypair.pubkey())
.collect();
let vote_pubkeys: Vec<_> = keypairs
.values()
.map(|keys| keys.vote_keypair.pubkey())
.collect();
search_result
}
let (bank_forks, progress) = initialize_state(&keypairs, 10_000);
(keypairs, node_pubkeys, vote_pubkeys, bank_forks, progress)
}
}
@ -717,7 +734,10 @@ pub mod test {
bank0.freeze();
let mut progress = ProgressMap::default();
progress.insert(0, ForkProgress::new(bank0.last_blockhash(), None, None));
progress.insert(
0,
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
);
(BankForks::new(0, bank0), progress)
}
@ -741,84 +761,26 @@ pub mod test {
stakes
}
fn can_progress_on_fork(
my_pubkey: &Pubkey,
tower: &mut Tower,
start_slot: u64,
num_slots: u64,
bank_forks: &RwLock<BankForks>,
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
keypairs: &HashMap<Pubkey, ValidatorVoteKeypairs>,
progress: &mut ProgressMap,
) -> bool {
// Check that within some reasonable time, validator can make a new
// root on this fork
let old_root = tower.root();
let mut main_fork = tr(start_slot);
let mut tip = main_fork.root_mut();
for i in 1..num_slots {
tip.push_front(tr(start_slot + i));
tip = tip.first_mut().unwrap();
}
let mut voting_simulator = VoteSimulator::new(&main_fork);
for i in 1..num_slots {
voting_simulator.simulate_vote(
i + start_slot,
&bank_forks,
cluster_votes,
&keypairs,
keypairs.get(&my_pubkey).unwrap(),
progress,
tower,
);
if old_root != tower.root() {
return true;
}
}
false
}
#[test]
fn test_simple_votes() {
let node_keypair = Keypair::new();
let vote_keypair = Keypair::new();
let stake_keypair = Keypair::new();
let node_pubkey = node_keypair.pubkey();
let mut keypairs = HashMap::new();
keypairs.insert(
node_pubkey,
ValidatorVoteKeypairs::new(node_keypair, vote_keypair, stake_keypair),
);
// Initialize BankForks
let (bank_forks, mut progress) = initialize_state(&keypairs, 10_000);
let bank_forks = RwLock::new(bank_forks);
// Init state
let mut vote_simulator = VoteSimulator::new(1);
let node_pubkey = vote_simulator.node_pubkeys[0];
let mut tower = Tower::new_with_key(&node_pubkey);
// Create the tree of banks
let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
// Set the voting behavior
let mut voting_simulator = VoteSimulator::new(&forks);
let mut cluster_votes = HashMap::new();
let votes = vec![0, 1, 2, 3, 4, 5];
cluster_votes.insert(node_pubkey, votes.clone());
vote_simulator.fill_bank_forks(forks, &cluster_votes);
// Simulate the votes
let mut tower = Tower::new_with_key(&node_pubkey);
let mut cluster_votes = HashMap::new();
for vote in votes {
assert!(voting_simulator
.simulate_vote(
vote,
&bank_forks,
&mut cluster_votes,
&keypairs,
keypairs.get(&node_pubkey).unwrap(),
&mut progress,
&mut tower,
)
assert!(vote_simulator
.simulate_vote(vote, &node_pubkey, &mut tower,)
.is_empty());
}
@ -830,21 +792,14 @@ pub mod test {
#[test]
fn test_double_partition() {
solana_logger::setup();
let node_keypair = Keypair::new();
let vote_keypair = Keypair::new();
let stake_keypair = Keypair::new();
let node_pubkey = node_keypair.pubkey();
let vote_pubkey = vote_keypair.pubkey();
// Init state
let mut vote_simulator = VoteSimulator::new(2);
let node_pubkey = vote_simulator.node_pubkeys[0];
let vote_pubkey = vote_simulator.vote_pubkeys[0];
let mut tower = Tower::new_with_key(&node_pubkey);
let mut keypairs = HashMap::new();
info!("my_pubkey: {}", node_pubkey);
keypairs.insert(
node_pubkey,
ValidatorVoteKeypairs::new(node_keypair, vote_keypair, stake_keypair),
);
// Create the tree of banks in a BankForks object
let num_slots_to_try = 200;
// Create the tree of banks
let forks = tr(0)
/ (tr(1)
/ (tr(2)
@ -861,56 +816,37 @@ pub mod test {
/ (tr(44)
// Minor fork 2
/ (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
/ (tr(110)))))))))))));
/ (tr(110) / (tr(110 + 2 * num_slots_to_try))))))))))))));
// Set the voting behavior
let mut voting_simulator = VoteSimulator::new(&forks);
let mut votes: Vec<Slot> = vec![];
// Set the successful voting behavior
let mut cluster_votes = HashMap::new();
let mut my_votes: Vec<Slot> = vec![];
let next_unlocked_slot = 110;
// Vote on the first minor fork
votes.extend((0..=14).into_iter());
my_votes.extend((0..=14).into_iter());
// Come back to the main fork
votes.extend((43..=44).into_iter());
my_votes.extend((43..=44).into_iter());
// Vote on the second minor fork
votes.extend((45..=50).into_iter());
my_votes.extend((45..=50).into_iter());
// Vote to come back to main fork
my_votes.push(next_unlocked_slot);
cluster_votes.insert(node_pubkey, my_votes.clone());
// Make the other validator vote fork to pass the threshold checks
let other_votes = my_votes.clone();
cluster_votes.insert(vote_simulator.node_pubkeys[1], other_votes);
vote_simulator.fill_bank_forks(forks, &cluster_votes);
let mut cluster_votes: HashMap<Pubkey, Vec<Slot>> = HashMap::new();
let (bank_forks, mut progress) = initialize_state(&keypairs, 10_000);
let bank_forks = RwLock::new(bank_forks);
// Simulate the votes. Should fail on trying to come back to the main fork
// at 106 exclusively due to threshold failure
let mut tower = Tower::new_with_key(&node_pubkey);
for vote in &votes {
// Simulate the votes.
for vote in &my_votes {
// All these votes should be ok
assert!(voting_simulator
.simulate_vote(
*vote,
&bank_forks,
&mut cluster_votes,
&keypairs,
keypairs.get(&node_pubkey).unwrap(),
&mut progress,
&mut tower,
)
assert!(vote_simulator
.simulate_vote(*vote, &node_pubkey, &mut tower,)
.is_empty());
}
// Try to come back to main fork
let next_unlocked_slot = 110;
assert!(voting_simulator
.simulate_vote(
next_unlocked_slot,
&bank_forks,
&mut cluster_votes,
&keypairs,
keypairs.get(&node_pubkey).unwrap(),
&mut progress,
&mut tower,
)
.is_empty());
info!("local tower: {:#?}", tower.lockouts.votes);
let vote_accounts = bank_forks
let vote_accounts = vote_simulator
.bank_forks
.read()
.unwrap()
.get(next_unlocked_slot)
@ -920,15 +856,17 @@ pub mod test {
let state = VoteState::from(&observed.1).unwrap();
info!("observed tower: {:#?}", state.votes);
assert!(can_progress_on_fork(
let num_slots_to_try = 200;
cluster_votes
.get_mut(&vote_simulator.node_pubkeys[1])
.unwrap()
.extend(next_unlocked_slot + 1..next_unlocked_slot + num_slots_to_try);
assert!(vote_simulator.can_progress_on_fork(
&node_pubkey,
&mut tower,
next_unlocked_slot,
200,
&bank_forks,
num_slots_to_try,
&mut cluster_votes,
&keypairs,
&mut progress
));
}


@ -1,6 +1,8 @@
use crate::crds_value::MAX_WALLCLOCK;
use solana_sdk::pubkey::Pubkey;
#[cfg(test)]
use solana_sdk::rpc_port;
use solana_sdk::sanitize::{Sanitize, SanitizeError};
#[cfg(test)]
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::timing::timestamp;
@ -37,6 +39,15 @@ pub struct ContactInfo {
pub shred_version: u16,
}
impl Sanitize for ContactInfo {
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
Ok(())
}
}
impl Ord for ContactInfo {
fn cmp(&self, other: &Self) -> Ordering {
self.id.cmp(&other.id)
@ -314,4 +325,12 @@ mod tests {
ci.rpc = socketaddr!("127.0.0.1:234");
assert!(ci.valid_client_facing_addr().is_some());
}
#[test]
fn test_sanitize() {
let mut ci = ContactInfo::default();
assert_eq!(ci.sanitize(), Ok(()));
ci.wallclock = MAX_WALLCLOCK;
assert_eq!(ci.sanitize(), Err(SanitizeError::ValueOutOfBounds));
}
}


@ -14,7 +14,6 @@ use crate::crds::Crds;
use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
use rand;
use rand::distributions::{Distribution, WeightedIndex};
use rand::Rng;
use solana_runtime::bloom::Bloom;
@ -37,6 +36,13 @@ pub struct CrdsFilter {
mask_bits: u32,
}
impl solana_sdk::sanitize::Sanitize for CrdsFilter {
fn sanitize(&self) -> std::result::Result<(), solana_sdk::sanitize::SanitizeError> {
self.filter.sanitize()?;
Ok(())
}
}
impl CrdsFilter {
pub fn new_rand(num_items: usize, max_bytes: usize) -> Self {
let max_bits = (max_bytes * 8) as f64;
@ -278,7 +284,7 @@ impl CrdsGossipPull {
failed
}
// build a set of filters of the current crds table
// num_filters - used to increase the likely hood of a value in crds being added to some filter
// num_filters - used to increase the likelihood of a value in crds being added to some filter
pub fn build_crds_filters(&self, crds: &Crds, bloom_size: usize) -> Vec<CrdsFilter> {
let num = cmp::max(
CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS,


@ -2,6 +2,7 @@ use crate::contact_info::ContactInfo;
use crate::deprecated;
use crate::epoch_slots::EpochSlots;
use bincode::{serialize, serialized_size};
use solana_sdk::sanitize::{Sanitize, SanitizeError};
use solana_sdk::timing::timestamp;
use solana_sdk::{
clock::Slot,
@ -16,6 +17,9 @@ use std::{
fmt,
};
pub const MAX_WALLCLOCK: u64 = 1_000_000_000_000_000;
pub const MAX_SLOT: u64 = 1_000_000_000_000_000;
pub type VoteIndex = u8;
pub const MAX_VOTES: VoteIndex = 32;
@ -29,6 +33,13 @@ pub struct CrdsValue {
pub data: CrdsData,
}
impl Sanitize for CrdsValue {
fn sanitize(&self) -> Result<(), SanitizeError> {
self.signature.sanitize()?;
self.data.sanitize()
}
}
impl Signable for CrdsValue {
fn pubkey(&self) -> Pubkey {
self.pubkey()
@ -47,15 +58,8 @@ impl Signable for CrdsValue {
}
fn verify(&self) -> bool {
let sig_check = self
.get_signature()
.verify(&self.pubkey().as_ref(), self.signable_data().borrow());
let data_check = match &self.data {
CrdsData::Vote(ix, _) => *ix < MAX_VOTES,
CrdsData::EpochSlots(ix, _) => *ix < MAX_EPOCH_SLOTS,
_ => true,
};
sig_check && data_check
self.get_signature()
.verify(&self.pubkey().as_ref(), self.signable_data().borrow())
}
}
@ -69,8 +73,36 @@ pub enum CrdsData {
Vote(VoteIndex, Vote),
LowestSlot(u8, LowestSlot),
SnapshotHashes(SnapshotHash),
EpochSlots(EpochSlotsIndex, EpochSlots),
AccountsHashes(SnapshotHash),
EpochSlots(EpochSlotsIndex, EpochSlots),
}
impl Sanitize for CrdsData {
fn sanitize(&self) -> Result<(), SanitizeError> {
match self {
CrdsData::ContactInfo(val) => val.sanitize(),
CrdsData::Vote(ix, val) => {
if *ix >= MAX_VOTES {
return Err(SanitizeError::ValueOutOfBounds);
}
val.sanitize()
}
CrdsData::LowestSlot(ix, val) => {
if *ix as usize >= 1 {
return Err(SanitizeError::ValueOutOfBounds);
}
val.sanitize()
}
CrdsData::SnapshotHashes(val) => val.sanitize(),
CrdsData::AccountsHashes(val) => val.sanitize(),
CrdsData::EpochSlots(ix, val) => {
if *ix as usize >= MAX_EPOCH_SLOTS as usize {
return Err(SanitizeError::ValueOutOfBounds);
}
val.sanitize()
}
}
}
}
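// With these bounds enforced here, CrdsValue::verify above is reduced to the
// pure signature check: out-of-range indices (MAX_VOTES, MAX_EPOCH_SLOTS) are
// now rejected during sanitize before a gossip value is accepted, which is
// what the updated tests at the bottom of this file exercise.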
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@ -80,6 +112,20 @@ pub struct SnapshotHash {
pub wallclock: u64,
}
impl Sanitize for SnapshotHash {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
for (slot, _) in &self.hashes {
if *slot >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
}
self.from.sanitize()
}
}
impl SnapshotHash {
pub fn new(from: Pubkey, hashes: Vec<(Slot, Hash)>) -> Self {
Self {
@ -112,6 +158,27 @@ impl LowestSlot {
}
}
impl Sanitize for LowestSlot {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
if self.lowest >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
if self.root != 0 {
return Err(SanitizeError::InvalidValue);
}
if !self.slots.is_empty() {
return Err(SanitizeError::InvalidValue);
}
if !self.stash.is_empty() {
return Err(SanitizeError::InvalidValue);
}
self.from.sanitize()
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Vote {
pub from: Pubkey,
@ -119,6 +186,16 @@ pub struct Vote {
pub wallclock: u64,
}
impl Sanitize for Vote {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
self.from.sanitize()?;
self.transaction.sanitize()
}
}
impl Vote {
pub fn new(from: &Pubkey, transaction: Transaction, wallclock: u64) -> Self {
Self {
@ -189,8 +266,8 @@ impl CrdsValue {
CrdsData::Vote(_, vote) => vote.wallclock,
CrdsData::LowestSlot(_, obj) => obj.wallclock,
CrdsData::SnapshotHashes(hash) => hash.wallclock,
CrdsData::EpochSlots(_, p) => p.wallclock,
CrdsData::AccountsHashes(hash) => hash.wallclock,
CrdsData::EpochSlots(_, p) => p.wallclock,
}
}
pub fn pubkey(&self) -> Pubkey {
@ -199,8 +276,8 @@ impl CrdsValue {
CrdsData::Vote(_, vote) => vote.from,
CrdsData::LowestSlot(_, slots) => slots.from,
CrdsData::SnapshotHashes(hash) => hash.from,
CrdsData::EpochSlots(_, p) => p.from,
CrdsData::AccountsHashes(hash) => hash.from,
CrdsData::EpochSlots(_, p) => p.from,
}
}
pub fn label(&self) -> CrdsValueLabel {
@ -209,8 +286,8 @@ impl CrdsValue {
CrdsData::Vote(ix, _) => CrdsValueLabel::Vote(*ix, self.pubkey()),
CrdsData::LowestSlot(_, _) => CrdsValueLabel::LowestSlot(self.pubkey()),
CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()),
CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
CrdsData::AccountsHashes(_) => CrdsValueLabel::AccountsHashes(self.pubkey()),
CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
}
}
pub fn contact_info(&self) -> Option<&ContactInfo> {
@ -358,6 +435,32 @@ mod test {
assert_eq!(v.label(), CrdsValueLabel::LowestSlot(key));
}
#[test]
fn test_lowest_slot_sanitize() {
let ls = LowestSlot::new(Pubkey::default(), 0, 0);
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(0, ls.clone()));
assert_eq!(v.sanitize(), Ok(()));
let mut o = ls.clone();
o.root = 1;
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(0, o.clone()));
assert_eq!(v.sanitize(), Err(SanitizeError::InvalidValue));
let o = ls.clone();
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(1, o.clone()));
assert_eq!(v.sanitize(), Err(SanitizeError::ValueOutOfBounds));
let mut o = ls.clone();
o.slots.insert(1);
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(0, o.clone()));
assert_eq!(v.sanitize(), Err(SanitizeError::InvalidValue));
let mut o = ls.clone();
o.stash.push(deprecated::EpochIncompleteSlots::default());
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(0, o.clone()));
assert_eq!(v.sanitize(), Err(SanitizeError::InvalidValue));
}
#[test]
fn test_signature() {
let keypair = Keypair::new();
@ -389,7 +492,7 @@ mod test {
),
&keypair,
);
assert!(!vote.verify());
assert!(vote.sanitize().is_err());
}
#[test]
@ -402,7 +505,7 @@ mod test {
),
&keypair,
);
assert!(!item.verify());
assert_eq!(item.sanitize(), Err(SanitizeError::ValueOutOfBounds));
}
#[test]
fn test_compute_vote_index_empty() {


@ -1,10 +1,14 @@
use crate::cluster_info::MAX_CRDS_OBJECT_SIZE;
use crate::crds_value::MAX_SLOT;
use crate::crds_value::MAX_WALLCLOCK;
use bincode::serialized_size;
use bv::BitVec;
use flate2::{Compress, Compression, Decompress, FlushCompress, FlushDecompress};
use solana_sdk::clock::Slot;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::sanitize::{Sanitize, SanitizeError};
const MAX_SLOTS_PER_ENTRY: usize = 2048 * 8;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Uncompressed {
pub first_slot: Slot,
@ -12,6 +16,18 @@ pub struct Uncompressed {
pub slots: BitVec<u8>,
}
impl Sanitize for Uncompressed {
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
if self.first_slot >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
if self.num >= MAX_SLOTS_PER_ENTRY {
return Err(SanitizeError::ValueOutOfBounds);
}
Ok(())
}
}
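// Bounding first_slot and num here, together with the new length check in
// Uncompressed::to_slots and the MAX_SLOTS_PER_ENTRY cap in Uncompressed::add
// further down, helps keep an untrusted EpochSlots value from claiming an
// absurd slot range or driving the bit-vector reads out of bounds.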
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
pub struct Flate2 {
pub first_slot: Slot,
@ -19,6 +35,18 @@ pub struct Flate2 {
pub compressed: Vec<u8>,
}
impl Sanitize for Flate2 {
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
if self.first_slot >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
if self.num >= MAX_SLOTS_PER_ENTRY {
return Err(SanitizeError::ValueOutOfBounds);
}
Ok(())
}
}
#[derive(Debug, PartialEq)]
pub enum Error {
CompressError,
@ -84,6 +112,9 @@ impl Uncompressed {
(min_slot - self.first_slot) as usize
};
for i in start..self.num {
if i >= self.slots.len() as usize {
break;
}
if self.slots.get(i as u64) {
rv.push(self.first_slot + i as Slot);
}
@ -95,6 +126,9 @@ impl Uncompressed {
if self.num == 0 {
self.first_slot = *s;
}
if self.num >= MAX_SLOTS_PER_ENTRY {
return i;
}
if *s < self.first_slot {
return i;
}
@ -114,6 +148,15 @@ pub enum CompressedSlots {
Uncompressed(Uncompressed),
}
impl Sanitize for CompressedSlots {
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
match self {
CompressedSlots::Uncompressed(a) => a.sanitize(),
CompressedSlots::Flate2(b) => b.sanitize(),
}
}
}
impl Default for CompressedSlots {
fn default() -> Self {
CompressedSlots::new(0)
@ -168,13 +211,40 @@ impl CompressedSlots {
}
}
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
#[derive(Serialize, Deserialize, Clone, Default, PartialEq)]
pub struct EpochSlots {
pub from: Pubkey,
pub slots: Vec<CompressedSlots>,
pub wallclock: u64,
}
impl Sanitize for EpochSlots {
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
self.from.sanitize()?;
self.slots.sanitize()
}
}
use std::fmt;
impl fmt::Debug for EpochSlots {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let num_slots: usize = self.slots.iter().map(|s| s.num_slots()).sum();
let lowest_slot = self
.slots
.iter()
.map(|s| s.first_slot())
.fold(0, std::cmp::min);
write!(
f,
"EpochSlots {{ from: {} num_slots: {} lowest_slot: {} wallclock: {} }}",
self.from, num_slots, lowest_slot, self.wallclock
)
}
}
impl EpochSlots {
pub fn new(from: Pubkey, now: u64) -> Self {
Self {
@ -256,6 +326,14 @@ mod tests {
assert_eq!(slots.to_slots(1), vec![1]);
assert!(slots.to_slots(2).is_empty());
}
#[test]
fn test_epoch_slots_to_slots_overflow() {
let mut slots = Uncompressed::new(1);
slots.num = 100;
assert!(slots.to_slots(0).is_empty());
}
#[test]
fn test_epoch_slots_uncompressed_add_2() {
let mut slots = Uncompressed::new(1);
@ -299,6 +377,44 @@ mod tests {
assert_eq!(slots.num, 701);
assert_eq!(slots.to_slots(1), vec![1, 2, 701]);
}
#[test]
fn test_epoch_slots_sanitize() {
let mut slots = Uncompressed::new(100);
slots.add(&[1, 701, 2]);
assert_eq!(slots.num, 701);
assert!(slots.sanitize().is_ok());
let mut o = slots.clone();
o.first_slot = MAX_SLOT;
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
let mut o = slots.clone();
o.num = MAX_SLOTS_PER_ENTRY;
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
let compressed = Flate2::deflate(slots).unwrap();
assert!(compressed.sanitize().is_ok());
let mut o = compressed.clone();
o.first_slot = MAX_SLOT;
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
let mut o = compressed.clone();
o.num = MAX_SLOTS_PER_ENTRY;
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
let mut slots = EpochSlots::default();
let range: Vec<Slot> = (0..5000).into_iter().collect();
assert_eq!(slots.fill(&range, 1), 5000);
assert_eq!(slots.wallclock, 1);
assert!(slots.sanitize().is_ok());
let mut o = slots.clone();
o.wallclock = MAX_WALLCLOCK;
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
}
#[test]
fn test_epoch_slots_fill_range() {
let range: Vec<Slot> = (0..5000).into_iter().collect();


@ -22,7 +22,7 @@ pub struct GossipService {
impl GossipService {
pub fn new(
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &Arc<ClusterInfo>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
gossip_socket: UdpSocket,
exit: &Arc<AtomicBool>,
@ -31,7 +31,7 @@ impl GossipService {
let gossip_socket = Arc::new(gossip_socket);
trace!(
"GossipService: id: {}, listening on: {:?}",
&cluster_info.read().unwrap().my_data().id,
&cluster_info.id(),
gossip_socket.local_addr().unwrap()
);
let t_receiver = streamer::receiver(
@ -89,7 +89,7 @@ pub fn discover(
let exit = Arc::new(AtomicBool::new(false));
let (gossip_service, ip_echo, spy_ref) = make_gossip_node(entrypoint, &exit, my_gossip_addr);
let id = spy_ref.read().unwrap().keypair.pubkey();
let id = spy_ref.id();
info!("Entrypoint: {:?}", entrypoint);
info!("Node Id: {:?}", id);
if let Some(my_gossip_addr) = my_gossip_addr {
@ -113,7 +113,7 @@ pub fn discover(
info!(
"discover success in {}s...\n{}",
secs,
spy_ref.read().unwrap().contact_info_trace()
spy_ref.contact_info_trace()
);
return Ok((tvu_peers, storage_peers));
}
@ -121,15 +121,12 @@ pub fn discover(
if !tvu_peers.is_empty() {
info!(
"discover failed to match criteria by timeout...\n{}",
spy_ref.read().unwrap().contact_info_trace()
spy_ref.contact_info_trace()
);
return Ok((tvu_peers, storage_peers));
}
info!(
"discover failed...\n{}",
spy_ref.read().unwrap().contact_info_trace()
);
info!("discover failed...\n{}", spy_ref.contact_info_trace());
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"Discover failed",
@ -176,7 +173,7 @@ pub fn get_multi_client(nodes: &[ContactInfo]) -> (ThinClient, usize) {
}
fn spy(
spy_ref: Arc<RwLock<ClusterInfo>>,
spy_ref: Arc<ClusterInfo>,
num_nodes: Option<usize>,
timeout: Option<u64>,
find_node_by_pubkey: Option<Pubkey>,
@ -194,13 +191,8 @@ fn spy(
}
}
tvu_peers = spy_ref
.read()
.unwrap()
.all_tvu_peers()
.into_iter()
.collect::<Vec<_>>();
storage_peers = spy_ref.read().unwrap().all_storage_peers();
tvu_peers = spy_ref.all_tvu_peers().into_iter().collect::<Vec<_>>();
storage_peers = spy_ref.all_storage_peers();
let mut nodes: Vec<_> = tvu_peers.iter().chain(storage_peers.iter()).collect();
nodes.sort();
@ -232,10 +224,7 @@ fn spy(
met_criteria = true;
}
if i % 20 == 0 {
info!(
"discovering...\n{}",
spy_ref.read().unwrap().contact_info_trace()
);
info!("discovering...\n{}", spy_ref.contact_info_trace());
}
sleep(Duration::from_millis(
crate::cluster_info::GOSSIP_SLEEP_MILLIS,
@ -256,18 +245,18 @@ fn make_gossip_node(
entrypoint: Option<&SocketAddr>,
exit: &Arc<AtomicBool>,
gossip_addr: Option<&SocketAddr>,
) -> (GossipService, Option<TcpListener>, Arc<RwLock<ClusterInfo>>) {
) -> (GossipService, Option<TcpListener>, Arc<ClusterInfo>) {
let keypair = Arc::new(Keypair::new());
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr)
} else {
ClusterInfo::spy_node(&keypair.pubkey())
};
let mut cluster_info = ClusterInfo::new(node, keypair);
let cluster_info = ClusterInfo::new(node, keypair);
if let Some(entrypoint) = entrypoint {
cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
}
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let gossip_service = GossipService::new(&cluster_info.clone(), None, gossip_socket, &exit);
(gossip_service, ip_echo, cluster_info)
}
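// Note the signature shift throughout this file: ClusterInfo evidently manages
// its own locking internally now, so these helpers pass around Arc<ClusterInfo>
// and call id(), contact_info_trace(), all_tvu_peers(), etc. directly instead
// of going through read().unwrap() on an Arc<RwLock<ClusterInfo>>.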
@ -277,7 +266,7 @@ mod tests {
use super::*;
use crate::cluster_info::{ClusterInfo, Node};
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
#[test]
#[ignore]
@ -286,7 +275,7 @@ mod tests {
let exit = Arc::new(AtomicBool::new(false));
let tn = Node::new_localhost();
let cluster_info = ClusterInfo::new_with_invalid_keypair(tn.info.clone());
let c = Arc::new(RwLock::new(cluster_info));
let c = Arc::new(cluster_info);
let d = GossipService::new(&c, None, tn.sockets.gossip, &exit);
exit.store(true, Ordering::Relaxed);
d.join().unwrap();
@ -300,16 +289,16 @@ mod tests {
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let peer0_info = ContactInfo::new_localhost(&peer0, 0);
let peer1_info = ContactInfo::new_localhost(&peer1, 0);
let mut cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
let cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
cluster_info.insert_info(peer0_info.clone());
cluster_info.insert_info(peer1_info);
let spy_ref = Arc::new(RwLock::new(cluster_info));
let spy_ref = Arc::new(cluster_info);
let (met_criteria, secs, tvu_peers, _) = spy(spy_ref.clone(), None, Some(1), None, None);
assert_eq!(met_criteria, false);
assert_eq!(secs, 1);
assert_eq!(tvu_peers, spy_ref.read().unwrap().tvu_peers());
assert_eq!(tvu_peers, spy_ref.tvu_peers());
// Find num_nodes
let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, None, None);


@ -1,6 +1,8 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
@ -11,13 +13,25 @@ use std::thread;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
// - To try and keep the RocksDB size under 400GB:
// Seeing about 1600b/shred, using 2000b/shred for margin, so 200m shreds can be stored in 400gb.
// at 5k shreds/slot at 50k tps, this is 500k slots (~5 hours).
// At idle, 60 shreds/slot this is about 4m slots (18 days)
// This is chosen to allow enough time for
// - To try and keep the RocksDB size under 512GB at 50k tps (100 slots take ~2GB).
// - A validator to download a snapshot from a peer and boot from it
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
// to catch back up to where it was when it stopped
pub const DEFAULT_MAX_LEDGER_SLOTS: u64 = 270_000;
// Remove a fixed number of slots at a time, it's more efficient than doing it one-by-one
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 200_000_000;
// Allow down to 50m, or 3.5 days at idle, 1hr at 50k load, around ~100GB
pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// Check for removing slots at this interval so we don't purge too often
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
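// Rough arithmetic behind the limits above, as a standalone sketch. The
// per-shred byte margin and idle shred rate are the figures quoted in the
// comment at the top of this block, not constants defined elsewhere:
#[allow(dead_code)]
fn ledger_size_budget_sketch() {
    const BYTES_PER_SHRED: u64 = 2_000; // margin figure from the comment above
    const IDLE_SHREDS_PER_SLOT: u64 = 60; // idle rate from the comment above
    let max_bytes = DEFAULT_MAX_LEDGER_SHREDS * BYTES_PER_SHRED; // ~400 GB retained at most
    let min_bytes = DEFAULT_MIN_MAX_LEDGER_SHREDS * BYTES_PER_SHRED; // ~100 GB floor
    let idle_slots = DEFAULT_MAX_LEDGER_SHREDS / IDLE_SHREDS_PER_SLOT; // a few million slots at idle
    assert_eq!(max_bytes, 400_000_000_000);
    assert_eq!(min_bytes, 100_000_000_000);
    assert!(idle_slots > 3_000_000);
}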
pub struct LedgerCleanupService {
@ -36,7 +50,7 @@ impl LedgerCleanupService {
max_ledger_slots
);
let exit = exit.clone();
let mut next_purge_batch = max_ledger_slots;
let mut last_purge_slot = 0;
let t_cleanup = Builder::new()
.name("solana-ledger-cleanup".to_string())
.spawn(move || loop {
@ -47,7 +61,8 @@ impl LedgerCleanupService {
&new_root_receiver,
&blockstore,
max_ledger_slots,
&mut next_purge_batch,
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
) {
match e {
RecvTimeoutError::Disconnected => break,
@ -59,45 +74,123 @@ impl LedgerCleanupService {
Self { t_cleanup }
}
fn find_slots_to_clean(
blockstore: &Arc<Blockstore>,
root: Slot,
max_ledger_shreds: u64,
) -> (u64, Slot, Slot) {
let mut shreds = Vec::new();
let mut iterate_time = Measure::start("iterate_time");
let mut total_shreds = 0;
let mut first_slot = 0;
for (i, (slot, meta)) in blockstore.slot_meta_iterator(0).unwrap().enumerate() {
if i == 0 {
first_slot = slot;
debug!("purge: searching from slot: {}", slot);
}
// Not exact since non-full slots will have holes
total_shreds += meta.received;
shreds.push((slot, meta.received));
if slot > root {
break;
}
}
iterate_time.stop();
info!(
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
max_ledger_shreds,
shreds.len(),
total_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
return (0, 0, 0);
}
let mut cur_shreds = 0;
let mut lowest_slot_to_clean = shreds[0].0;
for (slot, num_shreds) in shreds.iter().rev() {
cur_shreds += *num_shreds as u64;
if cur_shreds > max_ledger_shreds {
lowest_slot_to_clean = *slot;
break;
}
}
(cur_shreds, lowest_slot_to_clean, first_slot)
}
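The selection logic in find_slots_to_clean above walks the fork from newest to oldest, accumulating shred counts; the first slot at which the running total exceeds the budget becomes the purge boundary. A self-contained sketch on toy data (the function name and sample numbers are made up; no Blockstore involved):

type Slot = u64;

// Everything at or below the returned slot is purgeable; None means the
// ledger is still under budget.
fn lowest_slot_to_clean(shreds_per_slot: &[(Slot, u64)], max_shreds: u64) -> Option<Slot> {
    let total: u64 = shreds_per_slot.iter().map(|(_, n)| *n).sum();
    if total < max_shreds {
        return None;
    }
    let mut kept = 0u64;
    for (slot, num_shreds) in shreds_per_slot.iter().rev() {
        kept += *num_shreds;
        if kept > max_shreds {
            return Some(*slot);
        }
    }
    Some(shreds_per_slot[0].0)
}

fn main() {
    let per_slot: Vec<(Slot, u64)> = vec![(10, 100), (11, 100), (12, 100), (13, 100)];
    // Budget of 250 shreds: slots 13 and 12 fit, slot 11 pushes past the budget.
    assert_eq!(lowest_slot_to_clean(&per_slot, 250), Some(11));
}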
fn cleanup_ledger(
new_root_receiver: &Receiver<Slot>,
blockstore: &Arc<Blockstore>,
max_ledger_slots: u64,
next_purge_batch: &mut u64,
max_ledger_shreds: u64,
last_purge_slot: &mut u64,
purge_interval: u64,
) -> Result<(), RecvTimeoutError> {
let disk_utilization_pre = blockstore.storage_size();
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Notify blockstore of impending purge
if root > *next_purge_batch {
//cleanup
let lowest_slot = root - max_ledger_slots;
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_slot;
blockstore.purge_slots(0, Some(lowest_slot));
*next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
let disk_utilization_post = blockstore.storage_size();
if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) =
(disk_utilization_pre, disk_utilization_post)
{
datapoint_debug!(
"ledger_disk_utilization",
("disk_utilization_pre", disk_utilization_pre as i64, i64),
("disk_utilization_post", disk_utilization_post as i64, i64),
(
"disk_utilization_delta",
(disk_utilization_pre as i64 - disk_utilization_post as i64),
i64
)
if root - *last_purge_slot > purge_interval {
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
);
*last_purge_slot = root;
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
if num_shreds_to_clean > 0 {
debug!(
"cleaning up to: {} shreds: {} first: {}",
lowest_slot_to_clean, num_shreds_to_clean, first_slot
);
loop {
let current_lowest =
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
slot_update_time.stop();
let mut clean_time = Measure::start("ledger_clean");
blockstore.purge_slots(first_slot, Some(current_lowest));
clean_time.stop();
debug!(
"ledger purge {} -> {}: {} {}",
first_slot, current_lowest, slot_update_time, clean_time
);
first_slot += DEFAULT_PURGE_BATCH_SIZE;
if current_lowest == lowest_slot_to_clean {
break;
}
thread::sleep(Duration::from_millis(500));
}
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
}
Ok(())
}
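The purge loop above advances in fixed-size batches (sleeping between them in the real code) so a single purge never blocks other blockstore users for long. A standalone sketch of just the batch-boundary arithmetic, with made-up numbers and a hypothetical helper name:

// Each iteration purges [first_slot, min(target, first_slot + batch)] and
// stops once the upper bound reaches the target slot.
fn purge_batches(mut first_slot: u64, target: u64, batch: u64) -> Vec<(u64, u64)> {
    let mut ranges = Vec::new();
    loop {
        let upper = std::cmp::min(target, first_slot + batch);
        ranges.push((first_slot, upper));
        first_slot += batch;
        if upper == target {
            break;
        }
    }
    ranges
}

fn main() {
    // With a batch size of 256 (DEFAULT_PURGE_BATCH_SIZE) and a target of 600:
    assert_eq!(purge_batches(0, 600, 256), vec![(0, 256), (256, 512), (512, 600)]);
}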
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
if let (Ok(pre), Ok(post)) = (pre, post) {
datapoint_debug!(
"ledger_disk_utilization",
("disk_utilization_pre", pre as i64, i64),
("disk_utilization_post", post as i64, i64),
("disk_utilization_delta", (pre as i64 - post as i64), i64)
);
}
}
pub fn join(self) -> thread::Result<()> {
self.t_cleanup.join()
}
@ -111,6 +204,7 @@ mod tests {
#[test]
fn test_cleanup() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 5);
@ -118,10 +212,10 @@ mod tests {
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
//send a signal to kill slots 0-40
let mut next_purge_slot = 0;
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot)
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
.unwrap();
//check that 0-40 don't exist
@ -134,6 +228,62 @@ mod tests {
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_cleanup_speed() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let mut blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore.set_no_compaction(true);
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
let mut first_insert = Measure::start("first_insert");
let initial_slots = 50;
let initial_entries = 5;
let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
blockstore.insert_shreds(shreds, None, false).unwrap();
first_insert.stop();
info!("{}", first_insert);
let mut last_purge_slot = 0;
let mut slot = initial_slots;
let mut num_slots = 6;
for _ in 0..5 {
let mut insert_time = Measure::start("insert time");
let batch_size = 2;
let batches = num_slots / batch_size;
for i in 0..batches {
let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
blockstore.insert_shreds(shreds, None, false).unwrap();
if i % 100 == 0 {
info!("inserting..{} of {}", i, batches);
}
}
insert_time.stop();
let mut time = Measure::start("purge time");
sender.send(slot + num_slots).unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
initial_slots,
&mut last_purge_slot,
10,
)
.unwrap();
time.stop();
info!(
"slot: {} size: {} {} {}",
slot, num_slots, insert_time, time
);
slot += num_slots;
num_slots *= 2;
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_compaction() {
let blockstore_path = get_tmp_ledger_path!();
@ -142,7 +292,7 @@ mod tests {
let n = 10_000;
let batch_size = 100;
let batches = n / batch_size;
let max_ledger_slots = 100;
let max_ledger_shreds = 100;
for i in 0..batches {
let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1);
@ -158,8 +308,9 @@ mod tests {
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
max_ledger_slots,
max_ledger_shreds,
&mut next_purge_batch,
10,
)
.unwrap();
@ -170,7 +321,7 @@ mod tests {
assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,);
// check that early slots don't exist
let max_slot = n - max_ledger_slots;
let max_slot = n - max_ledger_shreds - 1;
blockstore
.slot_meta_iterator(0)
.unwrap()

View File

@ -5,7 +5,7 @@
//! command-line tools to spin up validators and a Rust library
//!
pub mod accounts_cleanup_service;
pub mod accounts_background_service;
pub mod accounts_hash_verifier;
pub mod banking_stage;
pub mod broadcast_stage;
@ -58,6 +58,9 @@ pub mod verified_vote_packets;
pub mod weighted_shuffle;
pub mod window_service;
#[macro_use]
extern crate solana_bpf_loader_program;
#[macro_use]
extern crate solana_budget_program;

View File

@ -1,10 +1,8 @@
//! The `poh_service` module implements a service that records the passing of
//! "ticks", a measure of time in the PoH stream
use crate::poh_recorder::PohRecorder;
use core_affinity;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_sdk::poh_config::PohConfig;
use solana_sys_tuner;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::{self, sleep, Builder, JoinHandle};

View File

@ -1,13 +1,13 @@
use crate::{
cluster_info_vote_listener::SlotVoteTracker, consensus::StakeLockout,
replay_stage::SUPERMINORITY_THRESHOLD,
cluster_info_vote_listener::SlotVoteTracker, cluster_slots::SlotPubkeys,
consensus::StakeLockout, replay_stage::SUPERMINORITY_THRESHOLD,
};
use solana_ledger::{
bank_forks::BankForks,
blockstore_processor::{ConfirmationProgress, ConfirmationTiming},
};
use solana_runtime::bank::Bank;
use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey};
use solana_sdk::{account::Account, clock::Slot, hash::Hash, pubkey::Pubkey};
use std::{
collections::{HashMap, HashSet},
rc::Rc,
@ -85,6 +85,12 @@ pub(crate) struct ForkProgress {
pub(crate) propagated_stats: PropagatedStats,
pub(crate) replay_stats: ReplaySlotStats,
pub(crate) replay_progress: ConfirmationProgress,
// Note `num_blocks_on_fork` and `num_dropped_blocks_on_fork` only
// count new blocks replayed since last restart, which won't include
// blocks already existing in the ledger/before snapshot at start,
// so these stats do not span all of time
pub(crate) num_blocks_on_fork: u64,
pub(crate) num_dropped_blocks_on_fork: u64,
}
impl ForkProgress {
@ -92,6 +98,8 @@ impl ForkProgress {
last_entry: Hash,
prev_leader_slot: Option<Slot>,
validator_stake_info: Option<ValidatorStakeInfo>,
num_blocks_on_fork: u64,
num_dropped_blocks_on_fork: u64,
) -> Self {
let (
is_leader_slot,
@ -124,6 +132,8 @@ impl ForkProgress {
fork_stats: ForkStats::default(),
replay_stats: ReplaySlotStats::default(),
replay_progress: ConfirmationProgress::new(last_entry),
num_blocks_on_fork,
num_dropped_blocks_on_fork,
propagated_stats: PropagatedStats {
prev_leader_slot,
is_leader_slot,
@ -141,6 +151,8 @@ impl ForkProgress {
my_pubkey: &Pubkey,
voting_pubkey: &Pubkey,
prev_leader_slot: Option<Slot>,
num_blocks_on_fork: u64,
num_dropped_blocks_on_fork: u64,
) -> Self {
let validator_fork_info = {
if bank.collector_id() == my_pubkey {
@ -155,7 +167,13 @@ impl ForkProgress {
}
};
Self::new(bank.last_blockhash(), prev_leader_slot, validator_fork_info)
Self::new(
bank.last_blockhash(),
prev_leader_slot,
validator_fork_info,
num_blocks_on_fork,
num_dropped_blocks_on_fork,
)
}
}
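For illustration only, one plausible way to derive counters like num_blocks_on_fork / num_dropped_blocks_on_fork from the slots actually replayed along a fork since startup; this is not the replay_stage implementation, and the helper name is invented:

// Assumes the replayed slots are listed in increasing order along one fork.
fn fork_block_counters(replayed_slots: &[u64]) -> (u64, u64) {
    let num_blocks = replayed_slots.len() as u64;
    let num_dropped = replayed_slots
        .windows(2)
        .map(|w| w[1] - w[0] - 1) // slots skipped between consecutive replayed blocks
        .sum();
    (num_blocks, num_dropped)
}

fn main() {
    // Slots 12 and 13 were skipped on this fork.
    assert_eq!(fork_block_counters(&[10, 11, 14, 15]), (4, 2));
}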
@ -164,7 +182,6 @@ pub(crate) struct ForkStats {
pub(crate) weight: u128,
pub(crate) fork_weight: u128,
pub(crate) total_staked: u64,
pub(crate) slot: Slot,
pub(crate) block_height: u64,
pub(crate) has_voted: bool,
pub(crate) is_recent: bool,
@ -179,14 +196,84 @@ pub(crate) struct ForkStats {
#[derive(Clone, Default)]
pub(crate) struct PropagatedStats {
pub(crate) propagated_validators: HashSet<Rc<Pubkey>>,
pub(crate) propagated_node_ids: HashSet<Rc<Pubkey>>,
pub(crate) propagated_validators_stake: u64,
pub(crate) is_propagated: bool,
pub(crate) is_leader_slot: bool,
pub(crate) prev_leader_slot: Option<Slot>,
pub(crate) slot_vote_tracker: Option<Arc<RwLock<SlotVoteTracker>>>,
pub(crate) cluster_slot_pubkeys: Option<Arc<RwLock<SlotPubkeys>>>,
pub(crate) total_epoch_stake: u64,
}
impl PropagatedStats {
pub fn add_vote_pubkey(
&mut self,
vote_pubkey: &Pubkey,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
stake: u64,
) {
if !self.propagated_validators.contains(vote_pubkey) {
let mut cached_pubkey: Option<Rc<Pubkey>> = all_pubkeys.get(vote_pubkey).cloned();
if cached_pubkey.is_none() {
let new_pubkey = Rc::new(*vote_pubkey);
all_pubkeys.insert(new_pubkey.clone());
cached_pubkey = Some(new_pubkey);
}
let vote_pubkey = cached_pubkey.unwrap();
self.propagated_validators.insert(vote_pubkey);
self.propagated_validators_stake += stake;
}
}
pub fn add_node_pubkey(
&mut self,
node_pubkey: &Pubkey,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
bank: &Bank,
) {
if !self.propagated_node_ids.contains(node_pubkey) {
let node_vote_accounts = bank
.epoch_vote_accounts_for_node_id(&node_pubkey)
.map(|v| &v.vote_accounts);
if let Some(node_vote_accounts) = node_vote_accounts {
self.add_node_pubkey_internal(
node_pubkey,
all_pubkeys,
node_vote_accounts,
bank.epoch_vote_accounts(bank.epoch())
.expect("Epoch stakes for bank's own epoch must exist"),
);
}
}
}
fn add_node_pubkey_internal(
&mut self,
node_pubkey: &Pubkey,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
vote_account_pubkeys: &[Pubkey],
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
) {
let mut cached_pubkey: Option<Rc<Pubkey>> = all_pubkeys.get(node_pubkey).cloned();
if cached_pubkey.is_none() {
let new_pubkey = Rc::new(*node_pubkey);
all_pubkeys.insert(new_pubkey.clone());
cached_pubkey = Some(new_pubkey);
}
let node_pubkey = cached_pubkey.unwrap();
self.propagated_node_ids.insert(node_pubkey);
for vote_account_pubkey in vote_account_pubkeys.iter() {
let stake = epoch_vote_accounts
.get(vote_account_pubkey)
.map(|(stake, _)| *stake)
.unwrap_or(0);
self.add_vote_pubkey(vote_account_pubkey, all_pubkeys, stake);
}
}
}
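The caching pattern used by add_vote_pubkey and add_node_pubkey_internal above -- keep one Rc per key in a shared HashSet and hand out clones -- can be shown in isolation. A std-only sketch with plain u64 keys standing in for Pubkey and an invented helper name:

use std::collections::HashSet;
use std::rc::Rc;

// Return a shared Rc for `key`, inserting it into the cache on first sight,
// so every holder of the key shares one allocation instead of copying it.
fn cached_key(cache: &mut HashSet<Rc<u64>>, key: u64) -> Rc<u64> {
    if let Some(existing) = cache.get(&key) {
        return existing.clone();
    }
    let new = Rc::new(key);
    cache.insert(new.clone());
    new
}

fn main() {
    let mut cache = HashSet::new();
    let a = cached_key(&mut cache, 42);
    let b = cached_key(&mut cache, 42);
    // One entry in the cache, three strong references (cache + a + b).
    assert_eq!(cache.len(), 1);
    assert_eq!(Rc::strong_count(&a), 3);
    assert!(Rc::ptr_eq(&a, &b));
}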
#[derive(Default)]
pub(crate) struct ProgressMap {
progress_map: HashMap<Slot, ForkProgress>,
@ -282,17 +369,151 @@ impl ProgressMap {
self.progress_map
.retain(|k, _| bank_forks.get(*k).is_some());
}
pub fn log_propagated_stats(&self, slot: Slot, bank_forks: &RwLock<BankForks>) {
if let Some(stats) = self.get_propagated_stats(slot) {
info!(
"Propagated stats:
total staked: {},
observed staked: {},
vote pubkeys: {:?},
node_pubkeys: {:?},
slot: {},
epoch: {:?}",
stats.total_epoch_stake,
stats.propagated_validators_stake,
stats.propagated_validators,
stats.propagated_node_ids,
slot,
bank_forks.read().unwrap().get(slot).map(|x| x.epoch()),
);
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_add_vote_pubkey() {
let mut stats = PropagatedStats::default();
let mut all_pubkeys = HashSet::new();
let mut vote_pubkey = Pubkey::new_rand();
all_pubkeys.insert(Rc::new(vote_pubkey.clone()));
// Add a vote pubkey; the number of references in all_pubkeys
// should be 2
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 1);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 1);
assert_eq!(Rc::strong_count(all_pubkeys.get(&vote_pubkey).unwrap()), 2);
// Adding it again should change no state since the key already existed
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 1);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 1);
// Adding another pubkey should succeed
vote_pubkey = Pubkey::new_rand();
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 3);
assert_eq!(Rc::strong_count(all_pubkeys.get(&vote_pubkey).unwrap()), 2);
}
#[test]
fn test_add_node_pubkey_internal() {
let num_vote_accounts = 10;
let staked_vote_accounts = 5;
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(|| Pubkey::new_rand())
.take(num_vote_accounts)
.collect();
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
.iter()
.skip(num_vote_accounts - staked_vote_accounts)
.map(|pubkey| (*pubkey, (1, Account::default())))
.collect();
let mut stats = PropagatedStats::default();
let mut all_pubkeys = HashSet::new();
let mut node_pubkey = Pubkey::new_rand();
all_pubkeys.insert(Rc::new(node_pubkey.clone()));
// Add a node pubkey; the number of references in all_pubkeys
// should be 2
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
staked_vote_accounts as u64
);
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
// Adding it again should not change any state
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
staked_vote_accounts as u64
);
// Adding another pubkey with the same vote accounts should succeed, but stake
// shouldn't increase
node_pubkey = Pubkey::new_rand();
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
staked_vote_accounts as u64
);
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
// Adding another pubkey with different vote accounts should succeed
// and increase stake
node_pubkey = Pubkey::new_rand();
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(|| Pubkey::new_rand())
.take(num_vote_accounts)
.collect();
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
.iter()
.skip(num_vote_accounts - staked_vote_accounts)
.map(|pubkey| (*pubkey, (1, Account::default())))
.collect();
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
2 * staked_vote_accounts as u64
);
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
}
#[test]
fn test_is_propagated_status_on_construction() {
// If the given ValidatorStakeInfo == None, then this is not
// a leader slot and is_propagated == false
let progress = ForkProgress::new(Hash::default(), Some(9), None);
let progress = ForkProgress::new(Hash::default(), Some(9), None, 0, 0);
assert!(!progress.propagated_stats.is_propagated);
// If the stake is zero, then threshold is always achieved
@ -303,6 +524,8 @@ mod test {
total_epoch_stake: 0,
..ValidatorStakeInfo::default()
}),
0,
0,
);
assert!(progress.propagated_stats.is_propagated);
@ -315,6 +538,8 @@ mod test {
total_epoch_stake: 2,
..ValidatorStakeInfo::default()
}),
0,
0,
);
assert!(!progress.propagated_stats.is_propagated);
@ -327,6 +552,8 @@ mod test {
total_epoch_stake: 2,
..ValidatorStakeInfo::default()
}),
0,
0,
);
assert!(progress.propagated_stats.is_propagated);
@ -337,6 +564,8 @@ mod test {
Hash::default(),
Some(9),
Some(ValidatorStakeInfo::default()),
0,
0,
);
assert!(!progress.propagated_stats.is_propagated);
}
@ -347,10 +576,16 @@ mod test {
// Insert new ForkProgress for slot 10 (not a leader slot) and its
// previous leader slot 9 (leader slot)
progress_map.insert(10, ForkProgress::new(Hash::default(), Some(9), None));
progress_map.insert(10, ForkProgress::new(Hash::default(), Some(9), None, 0, 0));
progress_map.insert(
9,
ForkProgress::new(Hash::default(), None, Some(ValidatorStakeInfo::default())),
ForkProgress::new(
Hash::default(),
None,
Some(ValidatorStakeInfo::default()),
0,
0,
),
);
// None of these slots have parents which are confirmed
@ -361,7 +596,7 @@ mod test {
// The previous leader before 8, slot 7, does not exist in
// progress map, so is_propagated(8) should return true as
// this implies the parent is rooted
progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None));
progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None, 0, 0));
assert!(progress_map.is_propagated(8));
// If we set the is_propagated = true, is_propagated should return true

View File

@ -20,9 +20,31 @@ use std::{
sync::{Arc, RwLock},
thread::sleep,
thread::{self, Builder, JoinHandle},
time::Duration,
time::{Duration, Instant},
};
#[derive(Default)]
pub struct RepairStatsGroup {
pub count: u64,
pub min: u64,
pub max: u64,
}
impl RepairStatsGroup {
pub fn update(&mut self, slot: u64) {
self.count += 1;
self.min = std::cmp::min(self.min, slot);
self.max = std::cmp::max(self.max, slot);
}
}
#[derive(Default)]
pub struct RepairStats {
pub shred: RepairStatsGroup,
pub highest_shred: RepairStatsGroup,
pub orphan: RepairStatsGroup,
}
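A compact, self-contained illustration of the count/min/max aggregation these stats implement. The toy tracker below seeds min and max from the first observation purely so the example reports a meaningful minimum; struct and field names are invented for the sketch:

#[derive(Debug, Default)]
struct StatsGroup {
    count: u64,
    min: u64,
    max: u64,
}

impl StatsGroup {
    fn update(&mut self, slot: u64) {
        if self.count == 0 {
            self.min = slot; // first observation seeds both bounds
            self.max = slot;
        } else {
            self.min = self.min.min(slot);
            self.max = self.max.max(slot);
        }
        self.count += 1;
    }
}

fn main() {
    let mut g = StatsGroup::default();
    for &slot in &[7u64, 3, 9] {
        g.update(slot);
    }
    assert_eq!((g.count, g.min, g.max), (3, 3, 9));
}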
pub const MAX_REPAIR_LENGTH: usize = 512;
pub const REPAIR_MS: u64 = 100;
pub const MAX_ORPHANS: usize = 5;
@ -59,7 +81,7 @@ impl RepairService {
blockstore: Arc<Blockstore>,
exit: Arc<AtomicBool>,
repair_socket: Arc<UdpSocket>,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
repair_strategy: RepairStrategy,
cluster_slots: Arc<ClusterSlots>,
) -> Self {
@ -84,15 +106,24 @@ impl RepairService {
blockstore: &Blockstore,
exit: &AtomicBool,
repair_socket: &UdpSocket,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &Arc<ClusterInfo>,
repair_strategy: RepairStrategy,
cluster_slots: &Arc<ClusterSlots>,
) {
let serve_repair = ServeRepair::new(cluster_info.clone());
let id = cluster_info.read().unwrap().id();
let id = cluster_info.id();
if let RepairStrategy::RepairAll { .. } = repair_strategy {
Self::initialize_lowest_slot(id, blockstore, cluster_info);
}
let mut repair_stats = RepairStats::default();
let mut last_stats = Instant::now();
if let RepairStrategy::RepairAll {
ref completed_slots_receiver,
..
} = repair_strategy
{
Self::initialize_epoch_slots(blockstore, cluster_info, completed_slots_receiver);
}
loop {
if exit.load(Ordering::Relaxed) {
break;
@ -117,14 +148,7 @@ impl RepairService {
let new_root = blockstore.last_root();
let lowest_slot = blockstore.lowest_slot();
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
Self::update_completed_slots(
&id,
new_root,
&cluster_slots,
blockstore,
completed_slots_receiver,
&cluster_info,
);
Self::update_completed_slots(completed_slots_receiver, &cluster_info);
cluster_slots.update(new_root, cluster_info, bank_forks);
Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH)
}
@ -137,7 +161,12 @@ impl RepairService {
.into_iter()
.filter_map(|repair_request| {
serve_repair
.repair_request(&cluster_slots, &repair_request, &mut cache)
.repair_request(
&cluster_slots,
&repair_request,
&mut cache,
&mut repair_stats,
)
.map(|result| (result, repair_request))
.ok()
})
@ -150,6 +179,24 @@ impl RepairService {
});
}
}
if last_stats.elapsed().as_secs() > 1 {
let repair_total = repair_stats.shred.count
+ repair_stats.highest_shred.count
+ repair_stats.orphan.count;
if repair_total > 0 {
datapoint_info!(
"serve_repair-repair",
("repair-total", repair_total, i64),
("shred-count", repair_stats.shred.count, i64),
("highest-shred-count", repair_stats.highest_shred.count, i64),
("orphan-count", repair_stats.orphan.count, i64),
("repair-highest-slot", repair_stats.highest_shred.max, i64),
("repair-orphan", repair_stats.orphan.max, i64),
);
}
repair_stats = RepairStats::default();
last_stats = Instant::now();
}
sleep(Duration::from_millis(REPAIR_MS));
}
}
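The once-per-second reporting added above follows a common throttle pattern: accumulate into a stats struct, and when the elapsed window passes, emit and reset. A minimal std-only sketch of the same shape (the Stats struct and println stand in for RepairStats and datapoint_info!):

use std::time::{Duration, Instant};

#[derive(Default)]
struct Stats {
    count: u64,
}

fn main() {
    let mut stats = Stats::default();
    let mut last_report = Instant::now();
    for _ in 0..3 {
        stats.count += 1; // stand-in for repair_stats.*.update(slot)
        if last_report.elapsed() > Duration::from_secs(1) {
            if stats.count > 0 {
                println!("reporting {} events", stats.count); // stand-in for datapoint_info!
            }
            stats = Stats::default(); // reset the window
            last_report = Instant::now();
        }
        std::thread::sleep(Duration::from_millis(600));
    }
}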
@ -261,54 +308,57 @@ impl RepairService {
}
}
fn initialize_lowest_slot(
id: Pubkey,
blockstore: &Blockstore,
cluster_info: &RwLock<ClusterInfo>,
) {
fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) {
// Safe to set into gossip because by this time, the leader schedule cache should
// also be updated with the latest root (done in blockstore_processor) and thus
// will provide a schedule to window_service for any incoming shreds up to the
// last_confirmed_epoch.
cluster_info
.write()
.unwrap()
.push_lowest_slot(id, blockstore.lowest_slot());
cluster_info.push_lowest_slot(id, blockstore.lowest_slot());
}
fn update_completed_slots(
id: &Pubkey,
root: Slot,
cluster_slots: &ClusterSlots,
blockstore: &Blockstore,
completed_slots_receiver: &CompletedSlotsReceiver,
cluster_info: &RwLock<ClusterInfo>,
cluster_info: &ClusterInfo,
) {
let mine = cluster_slots.collect(id);
let mut slots: Vec<Slot> = vec![];
while let Ok(mut more) = completed_slots_receiver.try_recv() {
more.retain(|x| !mine.contains(x));
slots.append(&mut more);
}
blockstore
.live_slots_iterator(root)
.for_each(|(slot, slot_meta)| {
if slot_meta.is_full() && !mine.contains(&slot) {
slots.push(slot)
}
});
slots.sort();
slots.dedup();
if !slots.is_empty() {
cluster_info.write().unwrap().push_epoch_slots(&slots);
cluster_info.push_epoch_slots(&slots);
}
}
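The receiver-draining done by update_completed_slots above boils down to: pull every pending batch off the channel, sort, dedup, and push once if anything arrived. A standalone sketch with std's mpsc channel and an invented helper name:

use std::sync::mpsc::{channel, Receiver};

// Drain all pending batches of completed slots and return them sorted and
// deduplicated; an empty result means there is nothing to push to gossip.
fn drain_completed_slots(receiver: &Receiver<Vec<u64>>) -> Vec<u64> {
    let mut slots: Vec<u64> = Vec::new();
    while let Ok(mut more) = receiver.try_recv() {
        slots.append(&mut more);
    }
    slots.sort();
    slots.dedup();
    slots
}

fn main() {
    let (sender, receiver) = channel();
    sender.send(vec![5, 3]).unwrap();
    sender.send(vec![3, 8]).unwrap();
    assert_eq!(drain_completed_slots(&receiver), vec![3, 5, 8]);
}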
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &RwLock<ClusterInfo>) {
cluster_info
.write()
.unwrap()
.push_lowest_slot(*id, lowest_slot);
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &ClusterInfo) {
cluster_info.push_lowest_slot(*id, lowest_slot);
}
fn initialize_epoch_slots(
blockstore: &Blockstore,
cluster_info: &ClusterInfo,
completed_slots_receiver: &CompletedSlotsReceiver,
) {
let root = blockstore.last_root();
let mut slots: Vec<_> = blockstore
.live_slots_iterator(root)
.filter_map(|(slot, slot_meta)| {
if slot_meta.is_full() {
Some(slot)
} else {
None
}
})
.collect();
while let Ok(mut more) = completed_slots_receiver.try_recv() {
slots.append(&mut more);
}
slots.sort();
slots.dedup();
if !slots.is_empty() {
cluster_info.push_epoch_slots(&slots);
}
}
pub fn join(self) -> thread::Result<()> {
@ -542,17 +592,13 @@ mod test {
#[test]
pub fn test_update_lowest_slot() {
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
let cluster_info = RwLock::new(ClusterInfo::new_with_invalid_keypair(
node_info.info.clone(),
));
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info.clone());
RepairService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
let lowest = cluster_info
.read()
.unwrap()
.get_lowest_slot_for_node(&Pubkey::default(), None)
.unwrap()
.0
.clone();
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
lowest_slot.clone()
})
.unwrap();
assert_eq!(lowest.lowest, 5);
}
}

File diff suppressed because it is too large

View File

@ -38,7 +38,7 @@ const MAX_PACKET_BATCH_SIZE: usize = 100;
fn retransmit(
bank_forks: &Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
r: &Arc<Mutex<PacketReceiver>>,
sock: &UdpSocket,
id: u32,
@ -63,11 +63,8 @@ fn retransmit(
let mut peers_len = 0;
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
let stakes = stakes.map(Arc::new);
let (peers, stakes_and_index) = cluster_info
.read()
.unwrap()
.sorted_retransmit_peers_and_stakes(stakes);
let me = cluster_info.read().unwrap().my_data();
let (peers, stakes_and_index) = cluster_info.sorted_retransmit_peers_and_stakes(stakes);
let my_id = cluster_info.id();
let mut discard_total = 0;
let mut repair_total = 0;
let mut retransmit_total = 0;
@ -88,7 +85,7 @@ fn retransmit(
let mut compute_turbine_peers = Measure::start("turbine_start");
let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
&me.id,
&my_id,
&peers,
&stakes_and_index,
packet.meta.seed,
@ -154,7 +151,7 @@ pub fn retransmitter(
sockets: Arc<Vec<UdpSocket>>,
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
r: Arc<Mutex<PacketReceiver>>,
) -> Vec<JoinHandle<()>> {
(0..sockets.len())
@ -206,7 +203,7 @@ impl RetransmitStage {
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
blockstore: Arc<Blockstore>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &Arc<ClusterInfo>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
@ -316,11 +313,11 @@ mod tests {
.unwrap();
let other = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(other);
let cluster_info = ClusterInfo::new_with_invalid_keypair(other);
cluster_info.insert_info(me);
let retransmit_socket = Arc::new(vec![UdpSocket::bind("0.0.0.0:0").unwrap()]);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let cluster_info = Arc::new(cluster_info);
let (retransmit_sender, retransmit_receiver) = channel();
let t_retransmit = retransmitter(

File diff suppressed because it is too large

View File

@ -4,9 +4,16 @@ use crate::rpc_subscriptions::{Confirmations, RpcSubscriptions, SlotInfo};
use jsonrpc_core::{Error, ErrorCode, Result};
use jsonrpc_derive::rpc;
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
use solana_client::rpc_response::{Response as RpcResponse, RpcAccount, RpcKeyedAccount};
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature, transaction};
use std::sync::{atomic, Arc};
use solana_client::rpc_response::{
Response as RpcResponse, RpcAccount, RpcKeyedAccount, RpcSignatureResult,
};
#[cfg(test)]
use solana_ledger::blockstore::Blockstore;
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
use std::{
str::FromStr,
sync::{atomic, Arc},
};
// Suppress needless_return due to
// https://github.com/paritytech/jsonrpc/blob/2d38e6424d8461cdf72e78425ce67d51af9c6586/derive/src/lib.rs#L204
@ -74,7 +81,7 @@ pub trait RpcSolPubSub {
fn signature_subscribe(
&self,
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<transaction::Result<()>>>,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
confirmations: Option<Confirmations>,
);
@ -116,7 +123,6 @@ pub trait RpcSolPubSub {
fn root_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
}
#[derive(Default)]
pub struct RpcSolPubSubImpl {
uid: Arc<atomic::AtomicUsize>,
subscriptions: Arc<RpcSubscriptions>,
@ -127,9 +133,14 @@ impl RpcSolPubSubImpl {
let uid = Arc::new(atomic::AtomicUsize::default());
Self { uid, subscriptions }
}
}
use std::str::FromStr;
#[cfg(test)]
fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
let uid = Arc::new(atomic::AtomicUsize::default());
let subscriptions = Arc::new(RpcSubscriptions::default_with_blockstore(blockstore));
Self { uid, subscriptions }
}
}
fn param<T: FromStr>(param_str: &str, thing: &str) -> Result<T> {
param_str.parse::<T>().map_err(|_e| Error {
@ -225,7 +236,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
fn signature_subscribe(
&self,
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<transaction::Result<()>>>,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
confirmations: Option<Confirmations>,
) {
@ -312,12 +323,19 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
#[cfg(test)]
mod tests {
use super::*;
use crate::rpc_subscriptions::tests::robust_poll_or_panic;
use crate::{
commitment::{BlockCommitment, BlockCommitmentCache},
rpc_subscriptions::tests::robust_poll_or_panic,
};
use jsonrpc_core::{futures::sync::mpsc, Response};
use jsonrpc_pubsub::{PubSubHandler, Session};
use serial_test_derive::serial;
use solana_budget_program::{self, budget_instruction};
use solana_ledger::bank_forks::BankForks;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{
bank_forks::BankForks,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_runtime::bank::Bank;
use solana_sdk::{
pubkey::Pubkey,
@ -325,7 +343,12 @@ mod tests {
system_program, system_transaction,
transaction::{self, Transaction},
};
use std::{sync::RwLock, thread::sleep, time::Duration};
use std::{
collections::HashMap,
sync::{atomic::AtomicBool, RwLock},
thread::sleep,
time::Duration,
};
fn process_transaction_and_notify(
bank_forks: &Arc<RwLock<BankForks>>,
@ -347,6 +370,7 @@ mod tests {
}
#[test]
#[serial]
fn test_signature_subscribe() {
let GenesisConfigInfo {
genesis_config,
@ -358,8 +382,17 @@ mod tests {
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default();
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
)),
uid: Arc::new(atomic::AtomicUsize::default()),
};
// Test signature subscriptions
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
@ -372,7 +405,7 @@ mod tests {
// Test signature confirmation notification
let (response, _) = robust_poll_or_panic(receiver);
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
let expected_res = RpcSignatureResult { err: None };
let expected = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
@ -388,6 +421,7 @@ mod tests {
}
#[test]
#[serial]
fn test_signature_unsubscribe() {
let GenesisConfigInfo {
genesis_config,
@ -398,11 +432,13 @@ mod tests {
let bank = Bank::new(&genesis_config);
let arc_bank = Arc::new(bank);
let blockhash = arc_bank.last_blockhash();
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let session = create_session();
let mut io = PubSubHandler::default();
let rpc = RpcSolPubSubImpl::default();
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
io.extend_with(rpc.to_delegate());
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
@ -436,6 +472,7 @@ mod tests {
}
#[test]
#[serial]
fn test_account_subscribe() {
let GenesisConfigInfo {
mut genesis_config,
@ -456,8 +493,18 @@ mod tests {
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rpc = RpcSolPubSubImpl::default();
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
)),
uid: Arc::new(atomic::AtomicUsize::default()),
};
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(
@ -540,12 +587,15 @@ mod tests {
}
#[test]
#[serial]
fn test_account_unsubscribe() {
let bob_pubkey = Pubkey::new_rand();
let session = create_session();
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let mut io = PubSubHandler::default();
let rpc = RpcSolPubSubImpl::default();
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
io.extend_with(rpc.to_delegate());
@ -589,9 +639,19 @@ mod tests {
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bob = Keypair::new();
let rpc = RpcSolPubSubImpl::default();
let mut rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore.clone());
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
);
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
@ -620,9 +680,18 @@ mod tests {
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bob = Keypair::new();
let rpc = RpcSolPubSubImpl::default();
let mut rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore.clone());
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore.clone()),
));
let subscriptions = RpcSubscriptions::new(&exit, block_commitment_cache.clone());
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
@ -640,10 +709,39 @@ mod tests {
let bank0 = bank_forks.read().unwrap()[0].clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
rpc.subscriptions.notify_subscribers(1, &bank_forks);
let bank1 = bank_forks.read().unwrap()[1].clone();
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
let mut new_block_commitment = BlockCommitmentCache::new(
block_commitment,
0,
10,
bank1.clone(),
blockstore.clone(),
0,
);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
drop(w_block_commitment_cache);
rpc.subscriptions.notify_subscribers(1, &bank_forks);
let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2);
bank_forks.write().unwrap().insert(bank2);
let bank2 = bank_forks.read().unwrap()[2].clone();
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(2, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
let mut new_block_commitment =
BlockCommitmentCache::new(block_commitment, 0, 10, bank2, blockstore.clone(), 0);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
drop(w_block_commitment_cache);
rpc.subscriptions.notify_subscribers(2, &bank_forks);
let expected = json!({
"jsonrpc": "2.0",
@ -667,8 +765,11 @@ mod tests {
}
#[test]
#[serial]
fn test_slot_subscribe() {
let rpc = RpcSolPubSubImpl::default();
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
rpc.slot_subscribe(session, subscriber);
@ -691,8 +792,11 @@ mod tests {
}
#[test]
#[serial]
fn test_slot_unsubscribe() {
let rpc = RpcSolPubSubImpl::default();
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
rpc.slot_subscribe(session, subscriber);

View File

@ -1,14 +1,20 @@
//! The `pubsub` module implements a threaded subscription service on client RPC request
use crate::rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl};
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::{
rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl},
rpc_subscriptions::RpcSubscriptions,
};
use jsonrpc_pubsub::{PubSubHandler, Session};
use jsonrpc_ws_server::{RequestContext, ServerBuilder};
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Duration;
use std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{self, sleep, Builder, JoinHandle},
time::Duration,
};
pub struct PubSubService {
thread_hdl: JoinHandle<()>,
@ -66,13 +72,25 @@ impl PubSubService {
#[cfg(test)]
mod tests {
use super::*;
use std::net::{IpAddr, Ipv4Addr};
use crate::commitment::BlockCommitmentCache;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use std::{
net::{IpAddr, Ipv4Addr},
sync::RwLock,
};
#[test]
fn test_pubsub_new() {
let pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
));
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
let thread = pubsub_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-pubsub");

View File

@ -15,8 +15,9 @@ use solana_ledger::{
blockstore::Blockstore,
snapshot_utils,
};
use solana_sdk::hash::Hash;
use solana_sdk::{hash::Hash, pubkey::Pubkey};
use std::{
collections::HashSet,
net::SocketAddr,
path::{Path, PathBuf},
sync::{mpsc::channel, Arc, RwLock},
@ -24,6 +25,10 @@ use std::{
};
use tokio::prelude::Future;
// If trusted validators are specified, consider this validator healthy if its latest account hash
// is no further behind than this distance from the latest trusted validator account hash
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;
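A standalone sketch of the comparison health_check performs further down, using made-up slot numbers; my_slot and trusted_slot stand for the highest accounts-hash slots observed for this node and for the best trusted validator, and the helper name is invented:

// "ok" when both sides have reported a hash slot and this node is no more
// than HEALTH_CHECK_SLOT_DISTANCE slots behind the best trusted validator.
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;

fn health(my_slot: u64, trusted_slot: u64) -> &'static str {
    if my_slot > 0
        && trusted_slot > 0
        && my_slot > trusted_slot.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
    {
        "ok"
    } else {
        "behind"
    }
}

fn main() {
    assert_eq!(health(0, 0), "behind");    // no hashes observed yet
    assert_eq!(health(1000, 1100), "ok");  // slightly behind, within the window
    assert_eq!(health(1000, 1000 + HEALTH_CHECK_SLOT_DISTANCE), "behind"); // too far behind
}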
pub struct JsonRpcService {
thread_hdl: JoinHandle<()>,
@ -37,15 +42,24 @@ struct RpcRequestMiddleware {
ledger_path: PathBuf,
snapshot_archive_path_regex: Regex,
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
}
impl RpcRequestMiddleware {
pub fn new(ledger_path: PathBuf, snapshot_config: Option<SnapshotConfig>) -> Self {
pub fn new(
ledger_path: PathBuf,
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
) -> Self {
Self {
ledger_path,
snapshot_archive_path_regex: Regex::new(r"/snapshot-\d+-[[:alnum:]]+\.tar\.bz2$")
.unwrap(),
snapshot_config,
cluster_info,
trusted_validators,
}
}
@ -114,6 +128,63 @@ impl RpcRequestMiddleware {
),
}
}
fn health_check(&self) -> &'static str {
let response = if let Some(trusted_validators) = &self.trusted_validators {
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
(
self.cluster_info
.get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| {
hashes
.iter()
.max_by(|a, b| a.0.cmp(&b.0))
.map(|slot_hash| slot_hash.0)
})
.flatten()
.unwrap_or(0),
trusted_validators
.iter()
.map(|trusted_validator| {
self.cluster_info
.get_accounts_hash_for_node(&trusted_validator, |hashes| {
hashes
.iter()
.max_by(|a, b| a.0.cmp(&b.0))
.map(|slot_hash| slot_hash.0)
})
.flatten()
.unwrap_or(0)
})
.max()
.unwrap_or(0),
)
};
// This validator is considered healthy if its latest account hash slot is within
// `HEALTH_CHECK_SLOT_DISTANCE` of the latest trusted validator's account hash slot
if latest_account_hash_slot > 0
&& latest_trusted_validator_account_hash_slot > 0
&& latest_account_hash_slot
> latest_trusted_validator_account_hash_slot
.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
{
"ok"
} else {
warn!(
"health check: me={}, latest trusted_validator={}",
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
);
"behind"
}
} else {
// No trusted validator point of reference available, so this validator is healthy
// because it's running
"ok"
};
info!("health check: {}", response);
response
}
}
impl RequestMiddleware for RpcRequestMiddleware {
@ -148,6 +219,16 @@ impl RequestMiddleware for RpcRequestMiddleware {
}
if self.is_get_path(request.uri().path()) {
self.get(request.uri().path())
} else if request.uri().path() == "/health" {
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
response: Box::new(jsonrpc_core::futures::future::ok(
hyper::Response::builder()
.status(hyper::StatusCode::OK)
.body(hyper::Body::from(self.health_check()))
.unwrap(),
)),
}
} else {
RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors: false,
@ -166,11 +247,12 @@ impl JsonRpcService {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
blockstore: Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
genesis_hash: Hash,
ledger_path: &Path,
storage_state: StorageState,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
trusted_validators: Option<HashSet<Pubkey>>,
) -> Self {
info!("rpc bound to {:?}", rpc_addr);
info!("rpc configuration: {:?}", config);
@ -196,20 +278,35 @@ impl JsonRpcService {
let rpc = RpcSolImpl;
io.extend_with(rpc.to_delegate());
let server =
ServerBuilder::with_meta_extractor(io, move |_req: &hyper::Request<hyper::Body>| Meta {
let request_middleware = RpcRequestMiddleware::new(
ledger_path,
snapshot_config,
cluster_info.clone(),
trusted_validators,
);
let server = ServerBuilder::with_meta_extractor(
io,
move |_req: &hyper::Request<hyper::Body>| Meta {
request_processor: request_processor.clone(),
cluster_info: cluster_info.clone(),
genesis_hash
}).threads(4)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
.cors_max_age(86400)
.request_middleware(RpcRequestMiddleware::new(ledger_path, snapshot_config))
.start_http(&rpc_addr);
genesis_hash,
},
)
.threads(num_cpus::get())
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
.cors_max_age(86400)
.request_middleware(request_middleware)
.start_http(&rpc_addr);
if let Err(e) = server {
warn!("JSON RPC service unavailable error: {:?}. \nAlso, check that port {} is not already in use by another application", e, rpc_addr.port());
warn!(
"JSON RPC service unavailable error: {:?}. \n\
Also, check that port {} is not already in use by another application",
e,
rpc_addr.port()
);
return;
}
@ -248,7 +345,11 @@ impl JsonRpcService {
#[cfg(test)]
mod tests {
use super::*;
use crate::{contact_info::ContactInfo, rpc::tests::create_validator_exit};
use crate::{
contact_info::ContactInfo,
crds_value::{CrdsData, CrdsValue, SnapshotHash},
rpc::tests::create_validator_exit,
};
use solana_ledger::{
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
@ -268,30 +369,31 @@ mod tests {
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let bank = Bank::new(&genesis_config);
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::default(),
)));
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let rpc_addr = SocketAddr::new(
ip_addr,
solana_net_utils::find_available_port_in_range(ip_addr, (10000, 65535)).unwrap(),
);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let mut rpc_service = JsonRpcService::new(
rpc_addr,
JsonRpcConfig::default(),
None,
bank_forks,
block_commitment_cache,
Arc::new(blockstore),
blockstore,
cluster_info,
Hash::default(),
&PathBuf::from("farf"),
StorageState::default(),
validator_exit,
None,
);
let thread = rpc_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
@ -312,7 +414,9 @@ mod tests {
#[test]
fn test_is_get_path() {
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None);
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
let rrm_with_snapshot_config = RpcRequestMiddleware::new(
PathBuf::from("/"),
Some(SnapshotConfig {
@ -320,6 +424,8 @@ mod tests {
snapshot_package_output_path: PathBuf::from("/"),
snapshot_path: PathBuf::from("/"),
}),
cluster_info,
None,
);
assert!(rrm.is_get_path("/genesis.tar.bz2"));
@ -341,4 +447,84 @@ mod tests {
assert!(!rrm.is_get_path(".."));
assert!(!rrm.is_get_path("🎣"));
}
#[test]
fn test_health_check_with_no_trusted_validators() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
assert_eq!(rm.health_check(), "ok");
}
#[test]
fn test_health_check_with_trusted_validators() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];
let rm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
cluster_info.clone(),
Some(trusted_validators.clone().into_iter().collect()),
);
// No account hashes for this node or any trusted validators == "behind"
assert_eq!(rm.health_check(), "behind");
// No account hashes for any trusted validators == "behind"
cluster_info.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]);
assert_eq!(rm.health_check(), "behind");
// This node is ahead of the trusted validators == "ok"
cluster_info
.gossip
.write()
.unwrap()
.crds
.insert(
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
trusted_validators[0].clone(),
vec![
(1, Hash::default()),
(1001, Hash::default()),
(2, Hash::default()),
],
))),
1,
)
.unwrap();
assert_eq!(rm.health_check(), "ok");
// Node is slightly behind the trusted validators == "ok"
cluster_info
.gossip
.write()
.unwrap()
.crds
.insert(
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
trusted_validators[1].clone(),
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE - 1, Hash::default())],
))),
1,
)
.unwrap();
assert_eq!(rm.health_check(), "ok");
// Node is far behind the trusted validators == "behind"
cluster_info
.gossip
.write()
.unwrap()
.crds
.insert(
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
trusted_validators[2].clone(),
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE, Hash::default())],
))),
1,
)
.unwrap();
assert_eq!(rm.health_check(), "behind");
}
}

View File

@ -1,5 +1,6 @@
//! The `pubsub` module implements a threaded subscription service on client RPC request
use crate::commitment::BlockCommitmentCache;
use core::hash::Hash;
use jsonrpc_core::futures::Future;
use jsonrpc_pubsub::{
@ -7,18 +8,23 @@ use jsonrpc_pubsub::{
SubscriptionId,
};
use serde::Serialize;
use solana_client::rpc_response::{Response, RpcAccount, RpcKeyedAccount, RpcResponseContext};
use solana_ledger::bank_forks::BankForks;
use solana_client::rpc_response::{
Response, RpcAccount, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult,
};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_runtime::bank::Bank;
use solana_sdk::{
account::Account, clock::Slot, pubkey::Pubkey, signature::Signature, transaction,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError, SendError, Sender};
use std::sync::{
atomic::{AtomicBool, Ordering},
mpsc::{Receiver, RecvTimeoutError, SendError, Sender},
};
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use std::{
cmp::min,
collections::{HashMap, HashSet},
iter,
sync::{Arc, Mutex, RwLock},
@ -62,7 +68,7 @@ type RpcProgramSubscriptions = RwLock<
type RpcSignatureSubscriptions = RwLock<
HashMap<
Signature,
HashMap<SubscriptionId, (Sink<Response<transaction::Result<()>>>, Confirmations)>,
HashMap<SubscriptionId, (Sink<Response<RpcSignatureResult>>, Confirmations)>,
>,
>;
type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>;
@ -80,11 +86,7 @@ fn add_subscription<K, S>(
{
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let confirmations = confirmations.unwrap_or(0);
let confirmations = if confirmations > MAX_LOCKOUT_HISTORY {
MAX_LOCKOUT_HISTORY
} else {
confirmations
};
let confirmations = min(confirmations, MAX_LOCKOUT_HISTORY + 1);
if let Some(current_hashmap) = subscriptions.get_mut(&hashmap_key) {
current_hashmap.insert(sub_id, (sink, confirmations));
return;
@ -120,8 +122,8 @@ where
fn check_confirmations_and_notify<K, S, B, F, X>(
subscriptions: &HashMap<K, HashMap<SubscriptionId, (Sink<Response<S>>, Confirmations)>>,
hashmap_key: &K,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
bank_method: B,
filter_results: F,
notifier: &RpcNotifier,
@ -133,6 +135,10 @@ where
F: Fn(X, u64) -> Box<dyn Iterator<Item = S>>,
X: Clone + Serialize,
{
let mut confirmation_slots: HashMap<usize, Slot> = HashMap::new();
let r_block_commitment_cache = block_commitment_cache.read().unwrap();
let current_slot = r_block_commitment_cache.slot();
let root = r_block_commitment_cache.root();
let current_ancestors = bank_forks
.read()
.unwrap()
@ -140,27 +146,29 @@ where
.unwrap()
.ancestors
.clone();
for (slot, _) in current_ancestors.iter() {
if let Some(confirmations) = r_block_commitment_cache.get_confirmation_count(*slot) {
confirmation_slots.entry(confirmations).or_insert(*slot);
}
}
drop(r_block_commitment_cache);
let mut notified_set: HashSet<SubscriptionId> = HashSet::new();
if let Some(hashmap) = subscriptions.get(hashmap_key) {
for (sub_id, (sink, confirmations)) in hashmap.iter() {
let desired_slot: Vec<u64> = current_ancestors
.iter()
.filter(|(_, &v)| v == *confirmations)
.map(|(k, _)| k)
.cloned()
.collect();
let root: Vec<u64> = current_ancestors
.iter()
.filter(|(_, &v)| v == 32)
.map(|(k, _)| k)
.cloned()
.collect();
let root = if root.len() == 1 { root[0] } else { 0 };
if desired_slot.len() == 1 {
let slot = desired_slot[0];
let desired_bank = bank_forks.read().unwrap().get(slot).unwrap().clone();
let results = bank_method(&desired_bank, hashmap_key);
let desired_slot = if *confirmations == 0 {
Some(&current_slot)
} else if *confirmations == MAX_LOCKOUT_HISTORY + 1 {
Some(&root)
} else {
confirmation_slots.get(confirmations)
};
if let Some(&slot) = desired_slot {
let results = {
let bank_forks = bank_forks.read().unwrap();
let desired_bank = bank_forks.get(slot).unwrap();
bank_method(&desired_bank, hashmap_key)
};
for result in filter_results(results, root) {
notifier.notify(
Response {
@ -201,11 +209,15 @@ fn filter_account_result(
Box::new(iter::empty())
}
fn filter_signature_result<S>(result: Option<S>, _root: Slot) -> Box<dyn Iterator<Item = S>>
where
S: 'static + Clone + Serialize,
{
Box::new(result.into_iter())
fn filter_signature_result(
result: Option<transaction::Result<()>>,
_root: Slot,
) -> Box<dyn Iterator<Item = RpcSignatureResult>> {
Box::new(
result
.into_iter()
.map(|result| RpcSignatureResult { err: result.err() }),
)
}
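The slot selection introduced in check_confirmations_and_notify above reduces to: zero confirmations means the current slot, MAX_LOCKOUT_HISTORY + 1 means the cluster root, and anything in between is looked up in the per-slot confirmation map. A simplified std-only sketch (the helper name is invented and MAX_LOCKOUT_HISTORY is hard-coded here; it comes from solana_vote_program in the real code):

use std::collections::HashMap;

const MAX_LOCKOUT_HISTORY: usize = 31;

// Pick which bank's state to report for a subscription, given how many
// confirmations it asked for. None means no bank has reached that level yet.
fn desired_slot(
    confirmations: usize,
    current_slot: u64,
    root: u64,
    confirmation_slots: &HashMap<usize, u64>,
) -> Option<u64> {
    if confirmations == 0 {
        Some(current_slot)
    } else if confirmations == MAX_LOCKOUT_HISTORY + 1 {
        Some(root)
    } else {
        confirmation_slots.get(&confirmations).copied()
    }
}

fn main() {
    let confirmation_slots: HashMap<usize, u64> = vec![(1, 90), (10, 75)].into_iter().collect();
    assert_eq!(desired_slot(0, 100, 50, &confirmation_slots), Some(100));
    assert_eq!(desired_slot(10, 100, 50, &confirmation_slots), Some(75));
    assert_eq!(desired_slot(32, 100, 50, &confirmation_slots), Some(50));
    assert_eq!(desired_slot(5, 100, 50, &confirmation_slots), None);
}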
fn filter_program_results(
@ -234,12 +246,6 @@ pub struct RpcSubscriptions {
exit: Arc<AtomicBool>,
}
impl Default for RpcSubscriptions {
fn default() -> Self {
Self::new(&Arc::new(AtomicBool::new(false)))
}
}
impl Drop for RpcSubscriptions {
fn drop(&mut self) {
self.shutdown().unwrap_or_else(|err| {
@ -249,7 +255,10 @@ impl Drop for RpcSubscriptions {
}
impl RpcSubscriptions {
pub fn new(exit: &Arc<AtomicBool>) -> Self {
pub fn new(
exit: &Arc<AtomicBool>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) -> Self {
let (notification_sender, notification_receiver): (
Sender<NotificationEntry>,
Receiver<NotificationEntry>,
@ -288,6 +297,7 @@ impl RpcSubscriptions {
signature_subscriptions_clone,
slot_subscriptions_clone,
root_subscriptions_clone,
block_commitment_cache,
);
})
.unwrap();
@ -305,10 +315,19 @@ impl RpcSubscriptions {
}
}
pub fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
Self::new(
&Arc::new(AtomicBool::new(false)),
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore,
))),
)
}
fn check_account(
pubkey: &Pubkey,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
account_subscriptions: Arc<RpcAccountSubscriptions>,
notifier: &RpcNotifier,
) {
@ -316,8 +335,8 @@ impl RpcSubscriptions {
check_confirmations_and_notify(
&subscriptions,
pubkey,
current_slot,
bank_forks,
block_commitment_cache,
Bank::get_account_modified_since_parent,
filter_account_result,
notifier,
@ -326,8 +345,8 @@ impl RpcSubscriptions {
fn check_program(
program_id: &Pubkey,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
program_subscriptions: Arc<RpcProgramSubscriptions>,
notifier: &RpcNotifier,
) {
@ -335,8 +354,8 @@ impl RpcSubscriptions {
check_confirmations_and_notify(
&subscriptions,
program_id,
current_slot,
bank_forks,
block_commitment_cache,
Bank::get_program_accounts_modified_since_parent,
filter_program_results,
notifier,
@ -345,8 +364,8 @@ impl RpcSubscriptions {
fn check_signature(
signature: &Signature,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
notifier: &RpcNotifier,
) {
@ -354,8 +373,8 @@ impl RpcSubscriptions {
let notified_ids = check_confirmations_and_notify(
&subscriptions,
signature,
current_slot,
bank_forks,
block_commitment_cache,
Bank::get_signature_status_processed_since_parent,
filter_signature_result,
notifier,
@ -417,7 +436,7 @@ impl RpcSubscriptions {
signature: Signature,
confirmations: Option<Confirmations>,
sub_id: SubscriptionId,
subscriber: Subscriber<Response<transaction::Result<()>>>,
subscriber: Subscriber<Response<RpcSignatureResult>>,
) {
let mut subscriptions = self.signature_subscriptions.write().unwrap();
add_subscription(
@ -499,6 +518,7 @@ impl RpcSubscriptions {
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
slot_subscriptions: Arc<RpcSlotSubscriptions>,
root_subscriptions: Arc<RpcRootSubscriptions>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) {
loop {
if exit.load(Ordering::Relaxed) {
@ -518,7 +538,7 @@ impl RpcSubscriptions {
notifier.notify(root, sink);
}
}
NotificationEntry::Bank((current_slot, bank_forks)) => {
NotificationEntry::Bank((_current_slot, bank_forks)) => {
let pubkeys: Vec<_> = {
let subs = account_subscriptions.read().unwrap();
subs.keys().cloned().collect()
@ -526,8 +546,8 @@ impl RpcSubscriptions {
for pubkey in &pubkeys {
Self::check_account(
pubkey,
current_slot,
&bank_forks,
&block_commitment_cache,
account_subscriptions.clone(),
&notifier,
);
@ -540,8 +560,8 @@ impl RpcSubscriptions {
for program_id in &programs {
Self::check_program(
program_id,
current_slot,
&bank_forks,
&block_commitment_cache,
program_subscriptions.clone(),
&notifier,
);
@ -554,8 +574,8 @@ impl RpcSubscriptions {
for signature in &signatures {
Self::check_signature(
signature,
current_slot,
&bank_forks,
&block_commitment_cache,
signature_subscriptions.clone(),
&notifier,
);
@ -596,10 +616,16 @@ impl RpcSubscriptions {
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use crate::commitment::BlockCommitment;
use jsonrpc_core::futures::{self, stream::Stream};
use jsonrpc_pubsub::typed::Subscriber;
use serial_test_derive::serial;
use solana_budget_program;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_sdk::{
signature::{Keypair, Signer},
system_transaction,
@ -633,12 +659,15 @@ pub(crate) mod tests {
}
#[test]
#[serial]
fn test_check_account_subscribe() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(100);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
@ -663,7 +692,12 @@ pub(crate) mod tests {
Subscriber::new_test("accountNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
);
subscriptions.add_account_subscription(alice.pubkey(), None, sub_id.clone(), subscriber);
assert!(subscriptions
@ -702,12 +736,15 @@ pub(crate) mod tests {
}
#[test]
#[serial]
fn test_check_program_subscribe() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(100);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
@ -732,7 +769,12 @@ pub(crate) mod tests {
Subscriber::new_test("programNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
);
subscriptions.add_program_subscription(
solana_budget_program::id(),
None,
@ -779,12 +821,15 @@ pub(crate) mod tests {
}
#[test]
#[serial]
fn test_check_signature_subscribe() {
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(100);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let mut bank_forks = BankForks::new(0, bank);
@ -812,27 +857,42 @@ pub(crate) mod tests {
.unwrap()
.process_transaction(&processed_tx)
.unwrap();
let bank1 = bank_forks[1].clone();
let bank_forks = Arc::new(RwLock::new(bank_forks));
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 10);
let cache1 = BlockCommitment::default();
let (past_bank_sub, _id_receiver, past_bank_recv) =
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
block_commitment.entry(1).or_insert(cache1.clone());
let block_commitment_cache =
BlockCommitmentCache::new(block_commitment, 0, 10, bank1, blockstore, 0);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions =
RpcSubscriptions::new(&exit, Arc::new(RwLock::new(block_commitment_cache)));
let (past_bank_sub1, _id_receiver, past_bank_recv1) =
Subscriber::new_test("signatureNotification");
let (past_bank_sub2, _id_receiver, past_bank_recv2) =
Subscriber::new_test("signatureNotification");
let (processed_sub, _id_receiver, processed_recv) =
Subscriber::new_test("signatureNotification");
subscriptions.add_signature_subscription(
past_bank_tx.signatures[0],
Some(0),
SubscriptionId::Number(1 as u64),
Subscriber::new_test("signatureNotification").0,
past_bank_sub1,
);
subscriptions.add_signature_subscription(
past_bank_tx.signatures[0],
Some(1),
SubscriptionId::Number(2 as u64),
past_bank_sub,
past_bank_sub2,
);
subscriptions.add_signature_subscription(
processed_tx.signatures[0],
@ -855,43 +915,48 @@ pub(crate) mod tests {
}
subscriptions.notify_subscribers(1, &bank_forks);
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
let expected_res = RpcSignatureResult { err: None };
let expected = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": 0 },
"value": expected_res,
},
"subscription": 2,
}
});
let (response, _) = robust_poll_or_panic(past_bank_recv);
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
struct Notification {
slot: Slot,
id: u64,
}
let expected = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": 1 },
"value": expected_res,
},
"subscription": 3,
}
});
let expected_notification = |exp: Notification| -> String {
let json = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": exp.slot },
"value": &expected_res,
},
"subscription": exp.id,
}
});
serde_json::to_string(&json).unwrap()
};
// Expect to receive a notification from bank 1 because this subscription is
// looking for 0 confirmations and so checks the current bank
let expected = expected_notification(Notification { slot: 1, id: 1 });
let (response, _) = robust_poll_or_panic(past_bank_recv1);
assert_eq!(expected, response);
// Expect to receive a notification from bank 0 because this subscription is
// looking for 1 confirmation and so checks the past bank
let expected = expected_notification(Notification { slot: 0, id: 2 });
let (response, _) = robust_poll_or_panic(past_bank_recv2);
assert_eq!(expected, response);
let expected = expected_notification(Notification { slot: 1, id: 3 });
let (response, _) = robust_poll_or_panic(processed_recv);
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
let sig_subs = subscriptions.signature_subscriptions.read().unwrap();
assert_eq!(expected, response);
// Subscription should be automatically removed after notification
let sig_subs = subscriptions.signature_subscriptions.read().unwrap();
assert!(!sig_subs.contains_key(&processed_tx.signatures[0]));
// Only one notification is expected for signature processed in previous bank
assert_eq!(sig_subs.get(&past_bank_tx.signatures[0]).unwrap().len(), 1);
assert!(!sig_subs.contains_key(&past_bank_tx.signatures[0]));
// Unprocessed signature subscription should not be removed
assert_eq!(
@ -901,12 +966,20 @@ pub(crate) mod tests {
}
#[test]
#[serial]
fn test_check_slot_subscribe() {
let (subscriber, _id_receiver, transport_receiver) =
Subscriber::new_test("slotNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
);
subscriptions.add_slot_subscription(sub_id.clone(), subscriber);
assert!(subscriptions
@ -939,12 +1012,20 @@ pub(crate) mod tests {
}
#[test]
#[serial]
fn test_check_root_subscribe() {
let (subscriber, _id_receiver, mut transport_receiver) =
Subscriber::new_test("rootNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
);
subscriptions.add_root_subscription(sub_id.clone(), subscriber);
assert!(subscriptions
@ -976,6 +1057,7 @@ pub(crate) mod tests {
}
#[test]
#[serial]
fn test_add_and_remove_subscription() {
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, (Sink<()>, Confirmations)>> =
HashMap::new();


@ -2,11 +2,13 @@ use crate::{
cluster_info::{ClusterInfo, ClusterInfoError},
cluster_slots::ClusterSlots,
contact_info::ContactInfo,
repair_service::RepairStats,
result::{Error, Result},
weighted_shuffle::weighted_best,
};
use bincode::serialize;
use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure;
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug};
use solana_perf::packet::{limited_deserialize, Packet, Packets, PacketsRecycler};
@ -46,6 +48,17 @@ impl RepairType {
}
}
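/// Counters accumulated while serving repair requests; reported to metrics and
/// reset periodically (roughly every two seconds) by `report_reset_stats` below.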
#[derive(Default)]
pub struct ServeRepairStats {
pub total_packets: usize,
pub dropped_packets: usize,
pub processed: usize,
pub self_repair: usize,
pub window_index: usize,
pub highest_window_index: usize,
pub orphan: usize,
}
/// Window protocol messages
#[derive(Serialize, Deserialize, Debug)]
enum RepairProtocol {
@ -59,7 +72,7 @@ pub struct ServeRepair {
/// set the keypair that will be used to sign repair responses
keypair: Arc<Keypair>,
my_info: ContactInfo,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
}
type RepairCache = HashMap<Slot, (Vec<ContactInfo>, Vec<(u64, usize)>)>;
@ -67,16 +80,13 @@ type RepairCache = HashMap<Slot, (Vec<ContactInfo>, Vec<(u64, usize)>)>;
impl ServeRepair {
/// Without a valid keypair gossip will not function. Only useful for tests.
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
Self::new(Arc::new(RwLock::new(
ClusterInfo::new_with_invalid_keypair(contact_info),
Self::new(Arc::new(ClusterInfo::new_with_invalid_keypair(
contact_info,
)))
}
pub fn new(cluster_info: Arc<RwLock<ClusterInfo>>) -> Self {
let (keypair, my_info) = {
let r_cluster_info = cluster_info.read().unwrap();
(r_cluster_info.keypair.clone(), r_cluster_info.my_data())
};
pub fn new(cluster_info: Arc<ClusterInfo>) -> Self {
let (keypair, my_info) = (cluster_info.keypair.clone(), cluster_info.my_contact_info());
Self {
keypair,
my_info,
@ -106,6 +116,7 @@ impl ServeRepair {
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
request: RepairProtocol,
stats: &mut ServeRepairStats,
) -> Option<Packets> {
let now = Instant::now();
@ -113,18 +124,14 @@ impl ServeRepair {
let my_id = me.read().unwrap().keypair.pubkey();
let from = Self::get_repair_sender(&request);
if from.id == my_id {
warn!(
"{}: Ignored received repair request from ME {}",
my_id, from.id,
);
inc_new_counter_debug!("serve_repair-handle-repair--eq", 1);
stats.self_repair += 1;
return None;
}
let (res, label) = {
match &request {
RepairProtocol::WindowIndex(from, slot, shred_index) => {
inc_new_counter_debug!("serve_repair-request-window-index", 1);
stats.window_index += 1;
(
Self::run_window_request(
recycler,
@ -140,7 +147,7 @@ impl ServeRepair {
}
RepairProtocol::HighestWindowIndex(_, slot, highest_index) => {
inc_new_counter_debug!("serve_repair-request-highest-window-index", 1);
stats.highest_window_index += 1;
(
Self::run_highest_window_request(
recycler,
@ -153,7 +160,7 @@ impl ServeRepair {
)
}
RepairProtocol::Orphan(_, slot) => {
inc_new_counter_debug!("serve_repair-request-orphan", 1);
stats.orphan += 1;
(
Self::run_orphan(
recycler,
@ -187,15 +194,71 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
stats: &mut ServeRepairStats,
max_packets: &mut usize,
) -> Result<()> {
//TODO cache connections
let timeout = Duration::new(1, 0);
let reqs = requests_receiver.recv_timeout(timeout)?;
let mut reqs_v = vec![requests_receiver.recv_timeout(timeout)?];
let mut total_packets = reqs_v[0].packets.len();
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
let mut dropped_packets = 0;
while let Ok(more) = requests_receiver.try_recv() {
total_packets += more.packets.len();
if total_packets < *max_packets {
reqs_v.push(more);
} else {
// Over the packet budget: drop the rest in the channel to guard against DoS
dropped_packets += more.packets.len();
}
}
stats.dropped_packets += dropped_packets;
stats.total_packets += total_packets;
let mut time = Measure::start("repair::handle_packets");
for reqs in reqs_v {
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats);
}
time.stop();
if total_packets >= *max_packets {
if time.as_ms() > 1000 {
*max_packets = (*max_packets * 9) / 10;
} else {
*max_packets = (*max_packets * 10) / 9;
}
}
Ok(())
}
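// Worked example of the adaptive limit above (illustrative numbers only):
// starting from max_packets = 1024, a batch that takes more than 1000 ms to
// handle shrinks the limit to 1024 * 9 / 10 = 921, while a fast batch grows it
// back to 921 * 10 / 9 = 1023. The adjustment is only applied when the
// incoming traffic actually reached the limit (total_packets >= *max_packets),
// so an idle listener keeps its current budget.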
fn report_reset_stats(me: &Arc<RwLock<Self>>, stats: &mut ServeRepairStats) {
if stats.self_repair > 0 {
let my_id = me.read().unwrap().keypair.pubkey();
warn!(
"{}: ignored {} repair requests addressed to ourself",
my_id, stats.self_repair,
);
inc_new_counter_debug!("serve_repair-handle-repair--eq", stats.self_repair);
}
inc_new_counter_info!("serve_repair-total_packets", stats.total_packets);
inc_new_counter_info!("serve_repair-dropped_packets", stats.dropped_packets);
debug!(
"repair_listener: total_packets: {} passed: {}",
stats.total_packets, stats.processed
);
inc_new_counter_debug!("serve_repair-request-window-index", stats.window_index);
inc_new_counter_debug!(
"serve_repair-request-highest-window-index",
stats.highest_window_index
);
inc_new_counter_debug!("serve_repair-request-orphan", stats.orphan);
*stats = ServeRepairStats::default();
}
pub fn listen(
me: Arc<RwLock<Self>>,
blockstore: Option<Arc<Blockstore>>,
@ -207,22 +270,33 @@ impl ServeRepair {
let recycler = PacketsRecycler::default();
Builder::new()
.name("solana-repair-listen".to_string())
.spawn(move || loop {
let result = Self::run_listen(
&me,
&recycler,
blockstore.as_ref(),
&requests_receiver,
&response_sender,
);
match result {
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
Err(err) => info!("repair listener error: {:?}", err),
};
if exit.load(Ordering::Relaxed) {
return;
.spawn(move || {
let mut last_print = Instant::now();
let mut stats = ServeRepairStats::default();
let mut max_packets = 1024;
loop {
let result = Self::run_listen(
&me,
&recycler,
blockstore.as_ref(),
&requests_receiver,
&response_sender,
&mut stats,
&mut max_packets,
);
match result {
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
Err(err) => info!("repair listener error: {:?}", err),
};
if exit.load(Ordering::Relaxed) {
return;
}
if last_print.elapsed().as_secs() > 2 {
Self::report_reset_stats(&me, &mut stats);
last_print = Instant::now();
}
thread_mem_usage::datapoint("solana-repair-listen");
}
thread_mem_usage::datapoint("solana-repair-listen");
})
.unwrap()
}
@ -233,6 +307,7 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
packets: Packets,
response_sender: &PacketSender,
stats: &mut ServeRepairStats,
) {
// Iterate over the packets and handle each deserialized repair request
let allocated = thread_mem_usage::Allocatedp::default();
@ -242,7 +317,9 @@ impl ServeRepair {
limited_deserialize(&packet.data[..packet.meta.size])
.into_iter()
.for_each(|request| {
let rsp = Self::handle_repair(me, recycler, &from_addr, blockstore, request);
stats.processed += 1;
let rsp =
Self::handle_repair(me, recycler, &from_addr, blockstore, request, stats);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
}
@ -277,15 +354,12 @@ impl ServeRepair {
cluster_slots: &ClusterSlots,
repair_request: &RepairType,
cache: &mut RepairCache,
repair_stats: &mut RepairStats,
) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication and has the desired slot, as indicated
// by a valid tvu port location
if cache.get(&repair_request.slot()).is_none() {
let repair_peers: Vec<_> = self
.cluster_info
.read()
.unwrap()
.repair_peers(repair_request.slot());
let repair_peers: Vec<_> = self.cluster_info.repair_peers(repair_request.slot());
if repair_peers.is_empty() {
return Err(ClusterInfoError::NoPeers.into());
}
@ -295,30 +369,26 @@ impl ServeRepair {
let (repair_peers, weights) = cache.get(&repair_request.slot()).unwrap();
let n = weighted_best(&weights, Pubkey::new_rand().to_bytes());
let addr = repair_peers[n].serve_repair; // send the request to the peer's serve_repair port
let out = self.map_repair_request(repair_request)?;
let out = self.map_repair_request(repair_request, repair_stats)?;
Ok((addr, out))
}
pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
pub fn map_repair_request(
&self,
repair_request: &RepairType,
repair_stats: &mut RepairStats,
) -> Result<Vec<u8>> {
match repair_request {
RepairType::Shred(slot, shred_index) => {
datapoint_debug!(
"serve_repair-repair",
("repair-slot", *slot, i64),
("repair-ix", *shred_index, i64)
);
repair_stats.shred.update(*slot);
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
}
RepairType::HighestShred(slot, shred_index) => {
datapoint_info!(
"serve_repair-repair_highest",
("repair-highest-slot", *slot, i64),
("repair-highest-ix", *shred_index, i64)
);
repair_stats.highest_shred.update(*slot);
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
}
RepairType::Orphan(slot) => {
datapoint_info!("serve_repair-repair_orphan", ("repair-orphan", *slot, i64));
repair_stats.orphan.update(*slot);
Ok(self.orphan_bytes(*slot)?)
}
}
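// Not part of this diff: a caller-side sketch of the new signature, assuming a
// ServeRepair instance named `serve_repair` is in scope (as in the tests
// below). Instead of emitting a datapoint per request, callers now pass a
// RepairStats accumulator that records the requested slots.
//
//   let mut repair_stats = RepairStats::default();
//   let request_bytes =
//       serve_repair.map_repair_request(&RepairType::Shred(0, 0), &mut repair_stats)?;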
@ -577,12 +647,13 @@ mod tests {
fn window_index_request() {
let cluster_slots = ClusterSlots::default();
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(me)));
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me));
let serve_repair = ServeRepair::new(cluster_info.clone());
let rv = serve_repair.repair_request(
&cluster_slots,
&RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
);
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
@ -602,12 +673,13 @@ mod tests {
wallclock: 0,
shred_version: 0,
};
cluster_info.write().unwrap().insert_info(nxt.clone());
cluster_info.insert_info(nxt.clone());
let rv = serve_repair
.repair_request(
&cluster_slots,
&RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
)
.unwrap();
assert_eq!(nxt.serve_repair, serve_repair_addr);
@ -629,7 +701,7 @@ mod tests {
wallclock: 0,
shred_version: 0,
};
cluster_info.write().unwrap().insert_info(nxt);
cluster_info.insert_info(nxt);
let mut one = false;
let mut two = false;
while !one || !two {
@ -639,6 +711,7 @@ mod tests {
&cluster_slots,
&RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
)
.unwrap();
if rv.0 == serve_repair_addr {


@ -5,7 +5,7 @@ use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::RecvTimeoutError,
Arc, RwLock,
Arc,
},
thread::{self, Builder, JoinHandle},
time::Duration,
@ -20,7 +20,7 @@ impl SnapshotPackagerService {
snapshot_package_receiver: SnapshotPackageReceiver,
starting_snapshot_hash: Option<(Slot, Hash)>,
exit: &Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &Arc<ClusterInfo>,
) -> Self {
let exit = exit.clone();
let cluster_info = cluster_info.clone();
@ -32,10 +32,7 @@ impl SnapshotPackagerService {
if let Some(starting_snapshot_hash) = starting_snapshot_hash {
hashes.push(starting_snapshot_hash);
}
cluster_info
.write()
.unwrap()
.push_snapshot_hashes(hashes.clone());
cluster_info.push_snapshot_hashes(hashes.clone());
loop {
if exit.load(Ordering::Relaxed) {
break;
@ -58,10 +55,7 @@ impl SnapshotPackagerService {
while hashes.len() > MAX_SNAPSHOT_HASHES {
hashes.remove(0);
}
cluster_info
.write()
.unwrap()
.push_snapshot_hashes(hashes.clone());
cluster_info.push_snapshot_hashes(hashes.clone());
}
}
Err(RecvTimeoutError::Disconnected) => break,


@ -4,6 +4,7 @@
use crate::{
cluster_info::ClusterInfo,
commitment::BlockCommitmentCache,
contact_info::ContactInfo,
result::{Error, Result},
};
@ -11,9 +12,7 @@ use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use solana_chacha_cuda::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_runtime::{
bank::Bank, status_cache::SignatureConfirmationStatus, storage_utils::archiver_accounts,
};
use solana_runtime::{bank::Bank, storage_utils::archiver_accounts};
use solana_sdk::{
account::Account,
account_utils::StateMut,
@ -30,6 +29,7 @@ use solana_storage_program::{
storage_instruction,
storage_instruction::proof_validation,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
cmp,
collections::HashMap,
@ -184,7 +184,8 @@ impl StorageStage {
storage_keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>,
bank_forks: &Arc<RwLock<BankForks>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &Arc<ClusterInfo>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) -> Self {
let (instruction_sender, instruction_receiver) = channel();
@ -256,6 +257,7 @@ impl StorageStage {
&keypair,
&storage_keypair,
&transactions_socket,
&block_commitment_cache,
)
.unwrap_or_else(|err| {
info!("failed to send storage transaction: {:?}", err)
@ -284,11 +286,12 @@ impl StorageStage {
fn send_transaction(
bank_forks: &Arc<RwLock<BankForks>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &ClusterInfo,
instruction: Instruction,
keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
transactions_socket: &UdpSocket,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
) -> io::Result<()> {
let working_bank = bank_forks.read().unwrap().working_bank();
let blockhash = working_bank.confirmed_last_blockhash().0;
@ -320,11 +323,16 @@ impl StorageStage {
for _ in 0..5 {
transactions_socket.send_to(
&bincode::serialize(&transaction).unwrap(),
cluster_info.read().unwrap().my_data().tpu,
cluster_info.my_contact_info().tpu,
)?;
sleep(Duration::from_millis(100));
if Self::poll_for_signature_confirmation(bank_forks, &transaction.signatures[0], 0)
.is_ok()
if Self::poll_for_signature_confirmation(
bank_forks,
block_commitment_cache,
&transaction.signatures[0],
0,
)
.is_ok()
{
break;
};
@ -334,23 +342,24 @@ impl StorageStage {
fn poll_for_signature_confirmation(
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
signature: &Signature,
min_confirmed_blocks: usize,
) -> Result<()> {
let mut now = Instant::now();
let mut confirmed_blocks = 0;
loop {
let response = bank_forks
.read()
.unwrap()
.working_bank()
.get_signature_confirmation_status(signature);
if let Some(SignatureConfirmationStatus {
confirmations,
status,
..
}) = response
{
let working_bank = bank_forks.read().unwrap().working_bank();
let response = working_bank.get_signature_status_slot(signature);
if let Some((slot, status)) = response {
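// A rooted slot is treated as having the maximum number of confirmations
// (MAX_LOCKOUT_HISTORY + 1); otherwise the cluster-wide confirmation count for
// the slot comes from the shared BlockCommitmentCache, defaulting to 0 if the
// slot is not in the cache yet.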
let confirmations = if working_bank.src.roots().contains(&slot) {
MAX_LOCKOUT_HISTORY + 1
} else {
let r_block_commitment_cache = block_commitment_cache.read().unwrap();
r_block_commitment_cache
.get_confirmation_count(slot)
.unwrap_or(0)
};
if status.is_ok() {
if confirmed_blocks != confirmations {
now = Instant::now();
@ -643,24 +652,33 @@ impl StorageStage {
}
}
pub fn test_cluster_info(id: &Pubkey) -> Arc<RwLock<ClusterInfo>> {
pub fn test_cluster_info(id: &Pubkey) -> Arc<ClusterInfo> {
let contact_info = ContactInfo::new_localhost(id, 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
Arc::new(RwLock::new(cluster_info))
Arc::new(cluster_info)
}
#[cfg(test)]
mod tests {
use super::*;
use rayon::prelude::*;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hasher;
use solana_sdk::signature::{Keypair, Signer};
use std::cmp::{max, min};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use solana_sdk::{
hash::Hasher,
signature::{Keypair, Signer},
};
use std::{
cmp::{max, min},
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::channel,
Arc, RwLock,
},
};
#[test]
fn test_storage_stage_none_ledger() {
@ -675,6 +693,11 @@ mod tests {
&[bank.clone()],
vec![0],
)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::default_with_blockstore(blockstore),
));
let (_slot_sender, slot_receiver) = channel();
let storage_state = StorageState::new(
&bank.last_blockhash(),
@ -690,6 +713,7 @@ mod tests {
&exit.clone(),
&bank_forks,
&cluster_info,
block_commitment_cache,
);
exit.store(true, Ordering::Relaxed);
storage_stage.join().unwrap();


@ -36,7 +36,7 @@ pub struct Tpu {
impl Tpu {
#[allow(clippy::too_many_arguments)]
pub fn new(
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &Arc<ClusterInfo>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
entry_receiver: Receiver<WorkingBankEntry>,
retransmit_slots_receiver: RetransmitSlotsReceiver,


@ -70,9 +70,14 @@ impl TransactionStatusService {
}
.expect("FeeCalculator must exist");
let fee = fee_calculator.calculate_fee(transaction.message());
let (writable_keys, readonly_keys) =
transaction.message.get_account_keys_by_lock_type();
blockstore
.write_transaction_status(
(slot, transaction.signatures[0]),
slot,
transaction.signatures[0],
writable_keys,
readonly_keys,
&TransactionStatusMeta {
status,
fee,


@ -2,7 +2,7 @@
//! validation pipeline in software.
use crate::{
accounts_cleanup_service::AccountsCleanupService,
accounts_background_service::AccountsBackgroundService,
accounts_hash_verifier::AccountsHashVerifier,
broadcast_stage::RetransmitSlotsSender,
cluster_info::ClusterInfo,
@ -49,7 +49,7 @@ pub struct Tvu {
retransmit_stage: RetransmitStage,
replay_stage: ReplayStage,
ledger_cleanup_service: Option<LedgerCleanupService>,
accounts_cleanup_service: AccountsCleanupService,
accounts_background_service: AccountsBackgroundService,
storage_stage: StorageStage,
accounts_hash_verifier: AccountsHashVerifier,
}
@ -81,10 +81,10 @@ impl Tvu {
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new(
vote_account: &Pubkey,
voting_keypair: Option<Arc<Keypair>>,
authorized_voter_keypairs: Vec<Arc<Keypair>>,
storage_keypair: &Arc<Keypair>,
bank_forks: &Arc<RwLock<BankForks>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
cluster_info: &Arc<ClusterInfo>,
sockets: Sockets,
blockstore: Arc<Blockstore>,
storage_state: &StorageState,
@ -103,11 +103,7 @@ impl Tvu {
retransmit_slots_sender: RetransmitSlotsSender,
tvu_config: TvuConfig,
) -> Self {
let keypair: Arc<Keypair> = cluster_info
.read()
.expect("Unable to read from cluster_info during Tvu creation")
.keypair
.clone();
let keypair: Arc<Keypair> = cluster_info.keypair.clone();
let Sockets {
repair: repair_socket,
@ -160,7 +156,7 @@ impl Tvu {
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
cfg,
tvu_config.shred_version,
cluster_slots,
cluster_slots.clone(),
);
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();
@ -170,7 +166,7 @@ impl Tvu {
accounts_hash_receiver,
snapshot_package_sender,
exit,
cluster_info,
&cluster_info,
tvu_config.trusted_validators.clone(),
tvu_config.halt_on_trusted_validators_accounts_hash_mismatch,
tvu_config.accounts_hash_fault_injection_slots,
@ -179,13 +175,13 @@ impl Tvu {
let replay_stage_config = ReplayStageConfig {
my_pubkey: keypair.pubkey(),
vote_account: *vote_account,
voting_keypair,
authorized_voter_keypairs,
exit: exit.clone(),
subscriptions: subscriptions.clone(),
leader_schedule_cache: leader_schedule_cache.clone(),
latest_root_senders: vec![ledger_cleanup_slot_sender],
accounts_hash_sender: Some(accounts_hash_sender),
block_commitment_cache,
block_commitment_cache: block_commitment_cache.clone(),
transaction_status_sender,
rewards_recorder_sender,
};
@ -198,6 +194,7 @@ impl Tvu {
ledger_signal_receiver,
poh_recorder.clone(),
vote_tracker,
cluster_slots,
retransmit_slots_sender,
);
@ -210,7 +207,7 @@ impl Tvu {
)
});
let accounts_cleanup_service = AccountsCleanupService::new(bank_forks.clone(), &exit);
let accounts_background_service = AccountsBackgroundService::new(bank_forks.clone(), &exit);
let storage_stage = StorageStage::new(
storage_state,
@ -221,6 +218,7 @@ impl Tvu {
&exit,
&bank_forks,
&cluster_info,
block_commitment_cache,
);
Tvu {
@ -229,7 +227,7 @@ impl Tvu {
retransmit_stage,
replay_stage,
ledger_cleanup_service,
accounts_cleanup_service,
accounts_background_service,
storage_stage,
accounts_hash_verifier,
}
@ -243,7 +241,7 @@ impl Tvu {
if self.ledger_cleanup_service.is_some() {
self.ledger_cleanup_service.unwrap().join()?;
}
self.accounts_cleanup_service.join()?;
self.accounts_background_service.join()?;
self.replay_stage.join()?;
self.accounts_hash_verifier.join()?;
Ok(())
@ -255,12 +253,14 @@ pub mod tests {
use super::*;
use crate::banking_stage::create_test_recorder;
use crate::cluster_info::{ClusterInfo, Node};
use serial_test_derive::serial;
use solana_ledger::create_new_tmp_ledger;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_runtime::bank::Bank;
use std::sync::atomic::Ordering;
#[test]
#[serial]
fn test_tvu_exit() {
solana_logger::setup();
let leader = Node::new_localhost();
@ -273,9 +273,9 @@ pub mod tests {
let bank_forks = BankForks::new(0, Bank::new(&genesis_config));
//start cluster_info1
let mut cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
let cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
cluster_info1.insert_info(leader.info.clone());
let cref1 = Arc::new(RwLock::new(cluster_info1));
let cref1 = Arc::new(cluster_info1);
let (blockstore_path, _) = create_new_tmp_ledger!(&genesis_config);
let (blockstore, l_receiver, completed_slots_receiver) =
@ -285,14 +285,16 @@ pub mod tests {
let bank = bank_forks.working_bank();
let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blockstore, None);
let voting_keypair = Keypair::new();
let vote_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new());
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let (retransmit_slots_sender, _retransmit_slots_receiver) = unbounded();
let tvu = Tvu::new(
&voting_keypair.pubkey(),
Some(Arc::new(voting_keypair)),
&vote_keypair.pubkey(),
vec![Arc::new(vote_keypair)],
&storage_keypair,
&Arc::new(RwLock::new(bank_forks)),
&cref1,
@ -307,7 +309,7 @@ pub mod tests {
blockstore,
&StorageState::default(),
l_receiver,
&Arc::new(RpcSubscriptions::new(&exit)),
&Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone())),
&poh_recorder,
&leader_schedule_cache,
&exit,

View File

@ -151,7 +151,7 @@ impl Validator {
keypair: &Arc<Keypair>,
ledger_path: &Path,
vote_account: &Pubkey,
authorized_voter: &Arc<Keypair>,
mut authorized_voter_keypairs: Vec<Arc<Keypair>>,
storage_keypair: &Arc<Keypair>,
entrypoint_info_option: Option<&ContactInfo>,
poh_verify: bool,
@ -162,7 +162,15 @@ impl Validator {
warn!("identity: {}", id);
warn!("vote account: {}", vote_account);
warn!("authorized voter: {}", authorized_voter.pubkey());
if config.voting_disabled {
warn!("voting disabled");
authorized_voter_keypairs.clear();
} else {
for authorized_voter_keypair in &authorized_voter_keypairs {
warn!("authorized voter: {}", authorized_voter_keypair.pubkey());
}
}
report_target_features();
info!("entrypoint: {:?}", entrypoint_info_option);
@ -197,7 +205,6 @@ impl Validator {
}
let bank_forks = Arc::new(RwLock::new(bank_forks));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let mut validator_exit = ValidatorExit::default();
let exit_ = exit.clone();
@ -221,10 +228,7 @@ impl Validator {
}
}
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(
node.info.clone(),
keypair.clone(),
)));
let cluster_info = Arc::new(ClusterInfo::new(node.info.clone(), keypair.clone()));
let storage_state = StorageState::new(
&bank.last_blockhash(),
@ -233,8 +237,11 @@ impl Validator {
);
let blockstore = Arc::new(blockstore);
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone()));
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
@ -257,6 +264,7 @@ impl Validator {
ledger_path,
storage_state.clone(),
validator_exit.clone(),
config.trusted_validators.clone(),
),
PubSubService::new(
&subscriptions,
@ -355,10 +363,7 @@ impl Validator {
// Insert the entrypoint info, should only be None if this node
// is the bootstrap validator
if let Some(entrypoint_info) = entrypoint_info_option {
cluster_info
.write()
.unwrap()
.set_entrypoint(entrypoint_info.clone());
cluster_info.set_entrypoint(entrypoint_info.clone());
}
let (snapshot_packager_service, snapshot_package_sender) =
@ -381,16 +386,12 @@ impl Validator {
"New shred signal for the TVU should be the same as the clear bank signal."
);
let vote_tracker = Arc::new({ VoteTracker::new(bank_forks.read().unwrap().root_bank()) });
let vote_tracker = Arc::new(VoteTracker::new(bank_forks.read().unwrap().root_bank()));
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
let tvu = Tvu::new(
vote_account,
if config.voting_disabled {
None
} else {
Some(authorized_voter.clone())
},
authorized_voter_keypairs,
storage_keypair,
&bank_forks,
&cluster_info,
@ -630,11 +631,7 @@ fn new_banks_from_blockstore(
)
}
fn wait_for_supermajority(
config: &ValidatorConfig,
bank: &Arc<Bank>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
) {
fn wait_for_supermajority(config: &ValidatorConfig, bank: &Bank, cluster_info: &ClusterInfo) {
if config.wait_for_supermajority != Some(bank.slot()) {
return;
}
@ -643,10 +640,9 @@ fn wait_for_supermajority(
"Waiting for 80% of activated stake at slot {} to be in gossip...",
bank.slot()
);
loop {
let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info);
for i in 1.. {
let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info, i % 10 == 0);
info!("{}% of activated stake in gossip", gossip_stake_percent,);
if gossip_stake_percent >= 80 {
break;
}
@ -666,6 +662,7 @@ pub struct TestValidator {
pub struct TestValidatorOptions {
pub fees: u64,
pub bootstrap_validator_lamports: u64,
pub mint_lamports: u64,
}
impl Default for TestValidatorOptions {
@ -674,6 +671,7 @@ impl Default for TestValidatorOptions {
TestValidatorOptions {
fees: 0,
bootstrap_validator_lamports: BOOTSTRAP_VALIDATOR_LAMPORTS,
mint_lamports: 1_000_000,
}
}
}
@ -692,6 +690,7 @@ impl TestValidator {
let TestValidatorOptions {
fees,
bootstrap_validator_lamports,
mint_lamports,
} = options;
let node_keypair = Arc::new(Keypair::new());
let node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
@ -702,7 +701,7 @@ impl TestValidator {
mint_keypair,
voting_keypair,
} = create_genesis_config_with_leader_ex(
1_000_000,
mint_lamports,
&contact_info.id,
42,
bootstrap_validator_lamports,
@ -710,6 +709,9 @@ impl TestValidator {
genesis_config
.native_instruction_processors
.push(solana_budget_program!());
genesis_config
.native_instruction_processors
.push(solana_bpf_loader_program!());
genesis_config.rent.lamports_per_byte_year = 1;
genesis_config.rent.exemption_threshold = 1.0;
@ -728,7 +730,7 @@ impl TestValidator {
&node_keypair,
&ledger_path,
&leader_voting_keypair.pubkey(),
&leader_voting_keypair,
vec![leader_voting_keypair.clone()],
&storage_keypair,
None,
true,
@ -774,33 +776,86 @@ fn report_target_features() {
}
// Get the activated stake percentage (based on the provided bank) that is visible in gossip
fn get_stake_percent_in_gossip(bank: &Arc<Bank>, cluster_info: &Arc<RwLock<ClusterInfo>>) -> u64 {
let mut gossip_stake = 0;
fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: bool) -> u64 {
let mut online_stake = 0;
let mut wrong_shred_stake = 0;
let mut wrong_shred_nodes = vec![];
let mut offline_stake = 0;
let mut offline_nodes = vec![];
let mut total_activated_stake = 0;
let tvu_peers = cluster_info.read().unwrap().tvu_peers();
let me = cluster_info.read().unwrap().my_data();
let all_tvu_peers = cluster_info.all_tvu_peers();
let my_shred_version = cluster_info.my_shred_version();
let my_id = cluster_info.id();
for (activated_stake, vote_account) in bank.vote_accounts().values() {
let vote_state =
solana_vote_program::vote_state::VoteState::from(&vote_account).unwrap_or_default();
total_activated_stake += activated_stake;
if tvu_peers
if *activated_stake == 0 {
continue;
}
if let Some(peer) = all_tvu_peers
.iter()
.filter(|peer| peer.shred_version == me.shred_version)
.any(|peer| peer.id == vote_state.node_pubkey)
.find(|peer| peer.id == vote_state.node_pubkey)
{
trace!(
"observed {} in gossip, (activated_stake={})",
vote_state.node_pubkey,
activated_stake
);
gossip_stake += activated_stake;
} else if vote_state.node_pubkey == cluster_info.read().unwrap().id() {
gossip_stake += activated_stake;
if peer.shred_version == my_shred_version {
trace!(
"observed {} in gossip, (activated_stake={})",
vote_state.node_pubkey,
activated_stake
);
online_stake += activated_stake;
} else {
wrong_shred_stake += activated_stake;
wrong_shred_nodes.push((*activated_stake, vote_state.node_pubkey));
}
} else if vote_state.node_pubkey == my_id {
online_stake += activated_stake; // This node is online
} else {
offline_stake += activated_stake;
offline_nodes.push((*activated_stake, vote_state.node_pubkey));
}
}
gossip_stake * 100 / total_activated_stake
if log {
info!(
"{}% of active stake visible in gossip",
online_stake * 100 / total_activated_stake
);
if !wrong_shred_nodes.is_empty() {
info!(
"{}% of active stake has the wrong shred version in gossip",
wrong_shred_stake * 100 / total_activated_stake,
);
for (stake, identity) in wrong_shred_nodes {
info!(
" {}% - {}",
stake * 100 / total_activated_stake,
identity
);
}
}
if !offline_nodes.is_empty() {
info!(
"{}% of active stake is not visible in gossip",
offline_stake * 100 / total_activated_stake
);
for (stake, identity) in offline_nodes {
info!(
" {}% - {}",
stake * 100 / total_activated_stake,
identity
);
}
}
}
online_stake * 100 / total_activated_stake
}
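// Note that the returned percentage uses integer division, so e.g. 79.9% of
// the activated stake visible in gossip reports as 79 and does not satisfy the
// `>= 80` gate in wait_for_supermajority above.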
#[cfg(test)]
@ -836,7 +891,7 @@ mod tests {
&Arc::new(validator_keypair),
&validator_ledger_path,
&voting_keypair.pubkey(),
&voting_keypair,
vec![voting_keypair.clone()],
&storage_keypair,
Some(&leader_node.info),
true,
@ -861,7 +916,7 @@ mod tests {
.genesis_config;
let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
ledger_paths.push(validator_ledger_path.clone());
let voting_keypair = Arc::new(Keypair::new());
let vote_account_keypair = Arc::new(Keypair::new());
let storage_keypair = Arc::new(Keypair::new());
let config = ValidatorConfig {
rpc_ports: Some((
@ -874,8 +929,8 @@ mod tests {
validator_node,
&Arc::new(validator_keypair),
&validator_ledger_path,
&voting_keypair.pubkey(),
&voting_keypair,
&vote_account_keypair.pubkey(),
vec![vote_account_keypair.clone()],
&storage_keypair,
Some(&leader_node.info),
true,


@ -249,7 +249,7 @@ impl WindowService {
#[allow(clippy::too_many_arguments)]
pub fn new<F>(
blockstore: Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>,
cluster_info: Arc<ClusterInfo>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
retransmit: PacketSender,
repair_socket: Arc<UdpSocket>,
@ -294,7 +294,7 @@ impl WindowService {
);
let t_window = Self::start_recv_window_thread(
cluster_info.read().unwrap().id(),
cluster_info.id(),
exit,
&blockstore,
insert_sender,
@ -514,7 +514,7 @@ mod test {
net::UdpSocket,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::channel,
sync::{Arc, RwLock},
sync::Arc,
thread::sleep,
time::Duration,
};
@ -542,10 +542,7 @@ mod test {
.insert_shreds(shreds, None, false)
.expect("Expect successful processing of shred");
assert_eq!(
blockstore.get_slot_entries(0, 0, None).unwrap(),
original_entries
);
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), original_entries);
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
@ -633,9 +630,9 @@ mod test {
let blockstore = Arc::new(blockstore);
let (retransmit_sender, _retransmit_receiver) = channel();
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::new_localhost(&Pubkey::default(), 0),
)));
));
let cluster_slots = Arc::new(ClusterSlots::default());
let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap());
let window = WindowService::new(
