Compare commits


387 Commits

Author SHA1 Message Date
Michael Vines
a93915f1bd Wait for one slot to be produced 2020-05-26 15:06:34 -07:00
Michael Vines
ee05266a09 Adjust v1.0 CRDS version to be compatible with v1.1 2020-05-26 15:06:34 -07:00
mergify[bot]
8cde8a54ac LedgerCleanupService no longer causes an OOM and actually purges (bp #10199) (#10220)
* LedgerCleanupService no longer causes an OOM and actually purges (#10199)

* cleanup_ledger() now services new_root_receiver while purging
* purge_slots() now fully deletes before compacting
* Add ledger pruning grafana graph

(cherry picked from commit 156387aba4)

* fixup

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-25 00:00:59 -07:00
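
The LedgerCleanupService change above pairs two ideas: keep draining the new-root channel while a purge is running, and finish deleting before compacting. A minimal standalone sketch of that pattern follows; the names new_root_receiver and purge_slots are borrowed for illustration only and this is not the actual crate code.

    use std::sync::mpsc::{channel, Receiver, TryRecvError};

    type Slot = u64;

    // Drain every pending root notification, keeping only the newest one, so the
    // channel cannot grow without bound while a long purge is in progress.
    fn latest_root(new_root_receiver: &Receiver<Slot>, mut current: Slot) -> Slot {
        loop {
            match new_root_receiver.try_recv() {
                Ok(root) => current = current.max(root),
                Err(TryRecvError::Empty) | Err(TryRecvError::Disconnected) => return current,
            }
        }
    }

    // Purge in two phases: delete the whole slot range first, then compact once,
    // rather than interleaving deletes and compactions.
    fn purge_slots(delete: impl Fn(Slot), compact: impl Fn(Slot, Slot), from: Slot, to: Slot) {
        for slot in from..=to {
            delete(slot);
        }
        compact(from, to);
    }

    fn main() {
        let (sender, receiver) = channel();
        sender.send(5).unwrap();
        sender.send(9).unwrap();
        let root = latest_root(&receiver, 0);
        println!("latest root seen while purging: {}", root);
        purge_slots(
            |s| println!("delete slot {}", s),
            |a, b| println!("compact slots {}..={}", a, b),
            0,
            root,
        );
    }
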
mergify[bot]
65b5bddf4e v1.0: Test ledger-tool commands in run-sanity.sh (bp #10211) (#10213)
automerge
2020-05-24 09:00:21 -07:00
sakridge
88199e77ab Enable disk metrics (#10009) 2020-05-22 22:55:39 -07:00
Michael Vines
b2b10f4989 Update another non-circulating account 2020-05-22 15:10:56 -07:00
mergify[bot]
41d3fbcb60 Add another non-circulating account (#10186) (#10189)
automerge

(cherry picked from commit e2b5cd6d47)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-22 14:35:36 -07:00
Tyera Eulberg
33322d9501 Add circ/non-circ filter to getLargestAccounts (#10188) 2020-05-22 15:14:18 -06:00
mergify[bot]
ee1f218e76 Fixup deserialize_bs58_transaction, and make a few error types more targeted (#10171) (#10176)
automerge

(cherry picked from commit 12a3b1ba6a)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-05-21 17:50:54 -07:00
mergify[bot]
b3b32befd7 REST API now returns supply in SOL rather than lamports (#10170) (#10173)
automerge

(cherry picked from commit 18be7a7966)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-21 16:53:58 -07:00
Michael Vines
95675f8f42 Bump version to 1.0.24 2020-05-21 13:29:16 -07:00
mergify[bot]
825c0e2b6e Revert "Add AVX2 runtime checks (#10033)" (#10167) (#10168)
This reverts commit cf8eb7700b.

(cherry picked from commit 486168b796)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-21 13:19:42 -07:00
mergify[bot]
4334fef955 Add v0 REST APIs for circulating and total supply (#10102) (#10159)
automerge
2020-05-20 22:14:59 -07:00
mergify[bot]
2333468350 Rename getCirculatingSupply to getSupply in JSON API doc (#10121) (#10122)
automerge
2020-05-19 15:41:30 -07:00
mergify[bot]
86968cb311 Add SimulateTransaction RPC endpoint (bp #10106) (#10115)
automerge
2020-05-19 13:55:07 -07:00
carllin
3ce9a16e7f v1.0: Add nonce to shreds repairs, add shred data size to header (#10110)
* Add nonce to shreds/repairs

* Add data shred size to header

* Align with future epoch

Co-authored-by: Carl <carl@solana.com>
2020-05-19 12:34:26 -07:00
mergify[bot]
3746c0c6ac Update accounts whitelist (#10100) (#10103)
automerge
2020-05-18 14:54:50 -07:00
sakridge
2b71bf37f9 Make repair metrics less chatty (#9094) (#10080) 2020-05-16 10:46:50 -07:00
Michael Vines
3b526cc2de Increase the number of JSON RPC service threads (#10075)
automerge
2020-05-15 15:00:41 -07:00
mergify[bot]
198f87ffea Abort if the open fd limit cannot be increased (bp #10064) (#10073)
automerge
2020-05-15 14:47:41 -07:00
mergify[bot]
859d4db87e validator: Forge a confirmed root before halting for RPC inspection (bp #10061) (#10066)
* Forge a confirmed root before halting for RPC inspection (#10061)

(cherry picked from commit 1da1667920)

# Conflicts:
#	core/src/commitment.rs

* Update commitment.rs

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-15 14:36:54 -07:00
mergify[bot]
1163144914 solana-gossip spy can now specify a shred version (#10040) (#10041)
automerge
2020-05-13 21:45:16 -07:00
mergify[bot]
49f212247a Add AVX2 runtime checks (#10033) (#10034)
automerge
2020-05-13 13:34:27 -07:00
mergify[bot]
e26840cb09 Introduce type alias Ancestors (bp #9699) (#10019)
automerge
2020-05-13 04:10:18 -07:00
mergify[bot]
14bbcef722 v1.0: Advertise node version in gossip (bp #9986) (#9995)
automerge
2020-05-12 19:38:52 -07:00
mergify[bot]
5326f3ec73 Check slot cleaned up for RPC blockstore/slot queries (#9982) (#9988)
automerge
2020-05-11 16:29:03 -07:00
Michael Vines
fca4554d3f Bump version to v1.0.23 2020-05-10 21:49:52 -07:00
mergify[bot]
15de250c2c Rpc: Add getCirculatingSupply endpoint, redux (#9953) (#9954)
automerge
2020-05-09 14:20:58 -07:00
Ryo Onodera
a7b0fcc21e v1.0: Maintain sysvar balances for consistent market cap. (#9937)
* Maintain sysvar balances for consistent market cap.

* Back-port fun and gating adjustments

* Add comment

* Adjust test
2020-05-08 09:19:03 -07:00
mergify[bot]
4999fa6263 Support ad-hoc genesis args in run.sh (#9697) (#9939)
automerge
2020-05-08 08:16:36 -07:00
Ryo Onodera
1f54be66c9 v1.0: Include account.owner into account hash (#9920)
automerge
2020-05-07 13:42:58 -07:00
Michael Vines
2e1c3a8338 Correct method name 2020-05-06 11:28:52 -07:00
Michael Vines
fe934eb7a0 Reduce spammy 'ReceiveUpdates took:' log 2020-05-06 08:48:27 -07:00
mergify[bot]
62a0c2f348 Gossip no longer pushes/pulls from nodes with a different shred version (bp #9868) (#9893)
automerge
2020-05-05 23:47:03 -07:00
mergify[bot]
cfeef3a9eb Display transaction fee in SOL (#9892) (#9897)
automerge

(cherry picked from commit e078ba1dde)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-05 22:44:01 -07:00
mergify[bot]
52009788ee Rpc: Filter blockstore data by cluster-confirmed root (#9873) (#9880)
automerge
2020-05-04 22:37:00 -07:00
mergify[bot]
2acf4d874d Rpc: add getLargestAccounts endpoint (#9869) (#9876)
automerge
2020-05-04 19:12:18 -07:00
mergify[bot]
f951d7d33f Avoid panic caused by converting non-positive / non-normal floating points values to duration (#9867) (#9871)
automerge
2020-05-04 14:40:58 -07:00
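
The panic described above comes from feeding unsuitable floating-point values into a seconds-to-Duration conversion. A standalone sketch of a defensive conversion (illustrative only, not the actual patch):

    use std::time::Duration;

    // Convert a floating-point number of seconds into a Duration, returning None
    // instead of panicking for NaN, infinite, negative, zero, or out-of-range input.
    fn checked_duration_from_secs(secs: f64) -> Option<Duration> {
        if !secs.is_normal() || secs <= 0.0 || secs >= u64::MAX as f64 {
            return None;
        }
        Some(Duration::from_secs_f64(secs))
    }

    fn main() {
        assert_eq!(checked_duration_from_secs(1.5), Some(Duration::from_millis(1500)));
        assert_eq!(checked_duration_from_secs(0.0), None);
        assert_eq!(checked_duration_from_secs(f64::NAN), None);
        assert_eq!(checked_duration_from_secs(-0.1), None);
        println!("all conversions handled without panicking");
    }
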
mergify[bot]
20fe24f348 cli: Add clap.rs default for --commitment (bp #9859) (#9860)
automerge
2020-05-02 17:58:31 -07:00
Michael Vines
2e5cc41945 Bump version to 1.0.22 2020-05-02 11:31:15 -07:00
sakridge
bc76b20e6d Fuzzer test and fixes (#9853) (#9857) 2020-05-02 10:00:51 -07:00
Michael Vines
3f6befe012 Watchtower can now emit a notification on all non-vote transactions (#9848) 2020-05-01 17:48:18 -07:00
sakridge
ee201cfb84 Put empty accounts in the accounts list on load (#9842)
Otherwise, indexing into the accounts array does not match account_keys.
Also enforce that program accounts are not at index 0, and that there is
at least one read-write signing fee-payer account.
2020-05-01 17:24:06 -07:00
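
The commit above keeps the loaded accounts vector index-for-index parallel with account_keys by inserting empty placeholder accounts rather than skipping missing keys. A simplified sketch of that invariant, using toy types rather than the real Accounts loader:

    use std::collections::HashMap;

    type Pubkey = [u8; 32];

    #[derive(Clone, Default, Debug)]
    struct Account {
        lamports: u64,
        data: Vec<u8>,
    }

    // Load one account per key, in key order. Missing keys yield a default
    // (empty) account so that accounts[i] always corresponds to account_keys[i].
    fn load_accounts(store: &HashMap<Pubkey, Account>, account_keys: &[Pubkey]) -> Vec<Account> {
        account_keys
            .iter()
            .map(|key| store.get(key).cloned().unwrap_or_default())
            .collect()
    }

    fn main() {
        let mut store = HashMap::new();
        let payer: Pubkey = [1; 32];
        let missing: Pubkey = [2; 32];
        store.insert(payer, Account { lamports: 1_000, data: vec![] });

        let accounts = load_accounts(&store, &[payer, missing]);
        assert_eq!(accounts.len(), 2);
        assert_eq!(accounts[1].lamports, 0); // placeholder keeps indices aligned
        println!("loaded {} accounts, indices aligned with keys", accounts.len());
    }
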
Michael Vines
66ec12e869 Passing -v/--verbose to solana confirm now displays the full transaction (#9530) (#9843)
automerge

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2020-05-01 13:28:57 -07:00
Michael Vines
addfc99ff4 Add delay to keep RPC traffic down on error 2020-05-01 10:40:08 -07:00
Michael Vines
91f0faa72d v1.0: incinerator backport (#9837) 2020-05-01 09:02:43 -07:00
mergify[bot]
2deebe4d73 v1.1 backport custom error rename (bp #9826) (#9838)
automerge
2020-05-01 01:58:15 -07:00
mergify[bot]
5bc57ea004 Nits for sanitize trait (bp #9741) (#9809)
* thiserror, docs, remove general Failure case (#9741)

automerge

(cherry picked from commit a0514eb2ae)

# Conflicts:
#	core/src/crds_value.rs
#	core/src/epoch_slots.rs
#	sdk/src/sanitize.rs

* rebase

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-04-30 14:35:18 -07:00
mergify[bot]
c63bd05458 Add commitment Root variant, and add fleshed out --commitment arg to Cli (#9806) (#9812)
automerge
2020-04-30 12:49:36 -07:00
mergify[bot]
b4933f4c74 Upgrade to Rust 1.43.0 (bp #9754) (#9807)
* Upgrade to Rust 1.43.0 (#9754)

(cherry picked from commit 230df0ec0c)

# Conflicts:
#	core/src/validator.rs
#	runtime/src/accounts_db.rs

* Update validator.rs

* Update accounts_db.rs

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-04-30 08:50:03 -07:00
mergify[bot]
e7748c603b Rpc Client: Prevent error out on get_num_blocks_since_signature_confirmation (#9792) (#9799)
automerge
2020-04-29 16:50:07 -07:00
mergify[bot]
6a5d782d6c Bump Rust-BPF version to be interoperable with latest Rust (#9783) (#9801)
automerge
2020-04-29 16:32:06 -07:00
mergify[bot]
89fad8f566 Rpc: remove unwraps (#9793) (#9796)
automerge
2020-04-29 14:44:41 -07:00
mergify[bot]
01cac89867 Fix BPF tool caching (#9781) (#9794)
automerge
2020-04-29 13:13:00 -07:00
Michael Vines
dfb4729b02 Don't divide by zero 2020-04-29 11:03:13 -07:00
Michael Vines
6ab5f823b3 Bump version to 1.0.21 2020-04-29 08:57:57 -07:00
mergify[bot]
bf1ceab6ed catchup now estimates the time remaining (#9782) (#9784)
automerge
2020-04-29 00:31:43 -07:00
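
The catchup estimate above amounts to dividing the remaining slot gap by the rate at which the gap is closing. A rough, illustrative sketch (not the cli implementation):

    use std::time::Duration;

    // Estimate time remaining to catch up, given the slot gap at two samples
    // taken sample_interval apart. Returns None if the gap is not shrinking.
    fn estimate_remaining(prev_gap: u64, curr_gap: u64, sample_interval: Duration) -> Option<Duration> {
        if curr_gap == 0 {
            return Some(Duration::ZERO);
        }
        if curr_gap >= prev_gap {
            return None; // not catching up, so no finite estimate exists
        }
        let closed_per_sec = (prev_gap - curr_gap) as f64 / sample_interval.as_secs_f64();
        Some(Duration::from_secs_f64(curr_gap as f64 / closed_per_sec))
    }

    fn main() {
        // Gap shrank from 1000 to 900 slots over 10 seconds => 10 slots/s => ~90s left.
        let eta = estimate_remaining(1000, 900, Duration::from_secs(10));
        println!("estimated time remaining: {:?}", eta);
    }
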
mergify[bot]
dbaff495c8 v1.1: backport commitment max changes (#9775) (#9778)
automerge
2020-04-28 16:42:08 -07:00
Michael Vines
f65caa66bf Don't --use-move 2020-04-28 12:47:08 -07:00
Michael Vines
2f455e18ef Reorder steps by relative priority for when there aren't enough agents 2020-04-28 12:46:19 -07:00
Michael Vines
7b155f384d Disable move more 2020-04-28 12:38:51 -07:00
mergify[bot]
fd405239d9 Report duration of last alarm in the All Clear message (#9766) (#9770)
automerge

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-04-28 12:38:34 -07:00
sakridge
8698156e27 Update dalek (v1.0 bp) (#9765)
* Disable Move/Libra components

* Update dalek version

Co-authored-by: Trent Nelson <trent@solana.com>
2020-04-28 12:02:09 -07:00
mergify[bot]
2f0f218ad9 Use Blockstore lowest_slot to start root iterator (#9738) (#9767)
automerge
2020-04-28 11:11:10 -07:00
mergify[bot]
3cc75b4bab Set HOME correctly (#9757) (#9761)
automerge
2020-04-28 02:57:49 -07:00
mergify[bot]
0a0f8470d7 Clean up use to keep rust 1.43.0 from complaining (bp #9740) (#9748)
* Clean up `use` to keep rust 1.43.0 from complaining (#9740)

(cherry picked from commit c11abf88b7)
2020-04-27 23:21:32 -07:00
mergify[bot]
e46026f1fb Input values are not sanitized after they are deserialized, making it far too easy for Leo to earn SOL (bp #9706) (#9735)
automerge
2020-04-27 19:58:40 -07:00
mergify[bot]
fef5089d7e Fix broken doc link to anatomy of transaction (#9728) (#9729)
automerge
2020-04-27 01:27:26 -07:00
Michael Vines
a844bd70da Update metrics dashboard 2020-04-26 09:54:19 -07:00
Michael Vines
cbc01bd1b9 Cargo.lock 2020-04-25 23:08:21 -07:00
Michael Vines
e096cc1101 Use vec! 2020-04-25 23:08:09 -07:00
sakridge
0dc559d3cf Filter program ids v1.0 (#9723)
* Filter program ids to store

* add test
2020-04-25 22:52:39 -07:00
mergify[bot]
34f537adad Add support for log rotation, sending SIGUSR1 will cause the log file to be re-opened (#9713) (#9715)
(cherry picked from commit 50f1ec0374)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-04-24 15:57:47 -07:00
Michael Vines
9558628537 Update to rocksdb 0.14 and set max wal size (#9668) (#9688) (#9708)
Co-authored-by: sakridge <sakridge@gmail.com>
2020-04-24 10:43:01 -07:00
Dan Albert
940bf7081a Update solana-user-authorized_keys.sh 2020-04-23 16:32:23 -06:00
mergify[bot]
38c31c3b4e Exit cleanly on panic! so the process doesn't limp along in a half-dead state (#9690) (#9692)
automerge
2020-04-23 14:07:12 -07:00
mergify[bot]
253272d757 Remove stray 'v' (#9679) (#9680)
automerge
2020-04-23 00:51:25 -07:00
Michael Vines
9b1bd8065f Bump version to 1.0.20 2020-04-22 22:17:34 -07:00
Dan Albert
b2a5cf57ad Remove validator-info publish from net scripts 2020-04-22 18:06:46 -06:00
mergify[bot]
33c51ee75d Add getLowestNonpurgedBlock rpc; use blockstore api in getConfirmedBlocks (#9656) (#9663)
automerge
2020-04-22 15:17:51 -07:00
mergify[bot]
d8aa107fae Extend snapshot interval in multinode demo (#9657) (#9660)
automerge
2020-04-22 13:45:01 -07:00
mergify[bot]
82e02d0734 Relax setting withdraw authority during lockup (#9644) (#9645)
automerge
2020-04-21 22:35:27 -07:00
mergify[bot]
dae59bb3e1 Flag test_tvu_exit as serial to hopefully reduce CI flakiness (bp #9509) (#9636)
automerge
2020-04-21 17:16:05 -07:00
mergify[bot]
e0e7fc8e52 Wait for supermajority of cluster to have rooted a transaction to consider it finalized (#9618) (#9626)
automerge
2020-04-21 01:09:26 -07:00
mergify[bot]
9abc84c583 Move streamer test to integration test (#9050) (#9624)
automerge
2020-04-21 00:50:46 -07:00
Stephen Akridge
db6540772c Check distance for timestamp 2020-04-20 11:36:17 -07:00
mergify[bot]
840ebfe625 Error for invalid shred. (#9588) (#9596)
automerge
2020-04-19 22:42:48 -07:00
Michael Vines
953282dbf4 Budget for gossip traffic (#9550) (#9583)
automerge
2020-04-19 09:43:44 -07:00
mergify[bot]
788d1199ac Fix local-cluster test - archiver should wait for itself + 1 validator (#9577) (#9584)
automerge
2020-04-19 01:36:14 -07:00
Michael Vines
f6a8f718a8 Make rpc_subscriptions.rs tests serial (#9556)
automerge

(cherry picked from commit b58338b066)
2020-04-17 11:40:31 -07:00
sakridge
f225bf6f01 Make rpc tests serial (#9537)
(cherry picked from commit e655cba5bd)
2020-04-16 22:17:41 -07:00
Michael Vines
b5f03a380b Only build x86_64-unknown-linux-gnu on docs.rs 2020-04-16 19:07:43 -07:00
Michael Vines
140c75f613 Don't upload tarballs to buildkite to speed up build 2020-04-16 13:55:15 -07:00
Stephen Akridge
6a59f67fdc Write wallet key to explicit file
(cherry picked from commit 93669ab1fc)
2020-04-16 13:41:14 -07:00
Michael Vines
5943747001 Bump version to 1.0.19 2020-04-16 12:19:48 -07:00
mergify[bot]
f26f18d29d Don't unwrap on session new (#9531)
automerge
2020-04-16 10:05:41 -07:00
mergify[bot]
9b58d72f52 Rpc: Speed up getBlockTime (#9510) (#9513)
automerge
2020-04-16 00:19:50 -07:00
Michael Vines
0c885d6a04 Default to RUST_BACKTRACE=1 for more informative validator logs
(cherry picked from commit 4ac15e68cf)
2020-04-15 22:46:24 -07:00
Michael Vines
70b51c0a5a Pacify shellcheck 2020-04-15 17:53:23 -07:00
Michael Vines
560660ae11 Always run shellcheck 2020-04-15 17:53:03 -07:00
Michael Vines
5436855b67 Update build-cli-usage.sh 2020-04-15 17:49:15 -07:00
Michael Vines
7351e5ed2c Use $rust_stable
(cherry picked from commit d567799d43)

# Conflicts:
#	docs/build-cli-usage.sh
2020-04-15 17:49:15 -07:00
mergify[bot]
7ee993fadf RPC: Add health check URI (bp #9499) (#9504)
automerge
2020-04-15 12:31:08 -07:00
sakridge
9bf459e82f Fix race in multi_bind_in_range (#9493)
(cherry picked from commit ee72714c08)
2020-04-14 17:59:15 -07:00
sakridge
545090ff17 limit test jobs to 16 to prevent OOM (#9500)
(cherry picked from commit 2b2b2cac1f)
2020-04-14 17:52:05 -07:00
Michael Vines
bd8074507f Bump version to v1.0.18 2020-04-14 09:58:33 -07:00
Ryo Onodera
cfc7b22c4c Use type alias 2020-04-13 21:12:44 -07:00
Ryo Onodera
5f1c637508 Conditionally change max_age 2020-04-13 21:12:44 -07:00
Ryo Onodera
d888e0a6d7 Use same max_age regardless of leader/not-leader 2020-04-13 21:12:44 -07:00
Michael Vines
10e808e1bd Fail coverage faster in CI 2020-04-13 21:09:26 -07:00
Michael Vines
d9b03ca38b Assume json_rpc_url can be upgrade to a websocket if no port is supplied
(cherry picked from commit bcfadd6085)
2020-04-13 20:32:26 -07:00
Michael Vines
608d75b348 Unfold coverage test failures
(cherry picked from commit d4ea1ec6ad)
2020-04-13 18:08:25 -07:00
Michael Vines
cee3cee4ef Reorder CI jobs to allow for more concurrent PRs
(cherry picked from commit ce027da236)
2020-04-13 13:00:39 -07:00
Dan Albert
09e51323f0 Update buildkite-tests.yml
(cherry picked from commit 92a5a51632)
2020-04-13 11:01:00 -07:00
Michael Vines
da6f702129 Bump version to 1.0.17 2020-04-11 19:32:42 -07:00
mergify[bot]
02a83e7c6e Allow lower shred count (#9410) (#9451)
automerge
2020-04-11 14:49:32 -07:00
sakridge
83263e1737 Calculate account refs fix (#9448) 2020-04-11 12:56:20 -07:00
mergify[bot]
1f7ac22b60 Don't subject authorizing a new stake authority to lockup (#9434) (#9441)
automerge
2020-04-10 17:25:15 -07:00
Michael Vines
747debae56 Cache downloads to speed up CI
(cherry picked from commit b4e00275b2)
2020-04-10 12:25:49 -07:00
mergify[bot]
00b4186469 Improve coverage.sh usability when used locally (#9054) (#9424)
automerge
2020-04-10 05:59:35 -07:00
mergify[bot]
b087dabf4f Rpc: Add getConfirmedSignaturesForAddress (#9407) (#9417)
automerge
2020-04-09 21:20:28 -07:00
mergify[bot]
e00eb0a069 Remove Trust Wallet Beta install instructions (#9396) (#9397)
automerge
2020-04-09 08:52:04 -07:00
mergify[bot]
d4e49ffd06 Rpc: Add getConfirmedTransaction (#9381) (#9392)
automerge
2020-04-09 01:00:34 -07:00
Michael Vines
0f34a190ea Bump version to 1.0.16 2020-04-09 00:05:16 -07:00
mergify[bot]
df2fb8f5b3 Add --no-wait arg to transfer (#9388) (#9390)
automerge
2020-04-08 23:49:40 -07:00
mergify[bot]
80d2a6046b Moar vm.max_map_count (#9389)
automerge
2020-04-08 23:18:30 -07:00
mergify[bot]
24273b826f Add blockstore address-to-signature index (#9367) (#9378)
automerge
2020-04-08 13:55:53 -07:00
mergify[bot]
68d2616e35 stake-monitor: Add 1 SOL grace, to allow for a compliant system account to fund a reasonable number of transactions. (bp #9359) (#9363)
automerge
2020-04-08 11:56:01 -07:00
mergify[bot]
f506d39339 Improve ledger-tool/accounts for easier debugging (#9370) (#9371)
automerge
2020-04-08 11:31:41 -07:00
Michael Vines
bc58c9ec7e Cache solana-perf.tgz to speed up CI (#9360)
automerge

(cherry picked from commit dc91698b3a)
2020-04-07 13:32:13 -07:00
Michael Vines
b57a52cd85 Bump version to 1.0.15 2020-04-07 09:36:47 -07:00
Michael Vines
8631be42ac Add support for monitoring system account balances (#9345)
automerge

(cherry picked from commit 03978ac5a5)
2020-04-06 22:57:47 -07:00
Tyera Eulberg
09367369ef Reinstate commitment param to support old clients (#9324) (#9329)
automerge
2020-04-06 12:21:19 -07:00
mergify[bot]
23b8c95cc4 Update getSignatureStatuses to return historical statuses (#9314) (#9321)
automerge
2020-04-06 04:24:19 -07:00
Tyera Eulberg
e61392b057 Rework TransactionStatus index in blockstore (#9281) (#9304) (#9312)
automerge

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2020-04-05 12:52:15 -06:00
mergify[bot]
7deba20395 Deprecate confirmTransaction, getSignatureStatus, and getSignatureConfirmation (bp #9298) (#9308)
automerge
2020-04-05 02:41:10 -07:00
Michael Vines
274c097f84 RPC: add err field to TransactionStatus, alongside the now deprecated status field (#9296) (#9307)
automerge
2020-04-05 00:21:15 -07:00
Michael Vines
1c7cea1af4 Add stake-monitor 2020-04-03 20:37:28 -07:00
Michael Vines
4406496d2f Add log before opening database
(cherry picked from commit b557b3170e)
2020-04-03 15:10:57 -07:00
sakridge
a15fa4840c Fix repair dos (#9056) 2020-04-03 13:20:39 -07:00
Michael Vines
c9030660d6 Bump version to 1.0.14 2020-04-02 22:42:28 -07:00
Dan Albert
fdeda769d0 Backport wallet doc changes to v1.0 (#9268)
* Add ledger live screenshots and reduce duplicate instructions (#9258)

automerge

* Add instructions for Trust Wallet Beta for Android (#9261)

automerge
2020-04-02 15:37:26 -06:00
mergify[bot]
53edd26578 Add instructions for Trust Wallet Beta for Android (#9261) (#9264)
automerge
2020-04-02 12:25:32 -07:00
Dan Albert
2433cdd6d6 Set checks timeout to 20 minutes 2020-04-02 13:11:03 -06:00
Dan Albert
77b34c278e Do not trigger tests if only docs were modified (#9240) (#9257) 2020-04-02 10:44:51 -06:00
mergify[bot]
90e993fd9a Add epoch subcommand (#9249) (#9254)
automerge
2020-04-01 22:29:18 -07:00
Tyera Eulberg
251054d8c9 Backport confirmations fix. (#9252)
automerge
2020-04-01 19:42:04 -07:00
Tyera Eulberg
d4bb7cec69 Undo breaking rpc removal of getSignatureConfirmation (#9247) 2020-04-01 18:04:11 -06:00
mergify[bot]
a3f6b04345 Add fee-payer option to docs (bp #9230) (#9236)
automerge
2020-04-01 15:11:47 -07:00
Justin Starry
ba4a5053dd Undo getSignatureStatus breaking change, add getSignatureStatuses (#9231)
automerge
2020-04-01 12:21:56 -07:00
mergify[bot]
6b47a259c3 Add a support page for wallet docs (#9229) (#9234)
automerge
2020-04-01 11:22:50 -07:00
mergify[bot]
c9ec13cf1f Tune udp buffers and vmmap immediately (#9194) (#9216)
automerge
2020-04-01 00:52:50 -07:00
mergify[bot]
906a6ab837 Fix error with account hash list getting too big for gossip (#9197) (#9214)
automerge
2020-03-31 23:27:18 -07:00
mergify[bot]
d0e478a9f8 Fix panic (#9195) (#9208)
automerge
2020-03-31 22:03:46 -07:00
mergify[bot]
b560b64d33 Remove unnecessary security exception and add a new one (bp #9200) (#9205)
* Remove unnecessary exception and add a new one (#9200)

(cherry picked from commit 62e12e3af5)

# Conflicts:
#	ci/test-checks.sh

* Update test-checks.sh

Co-authored-by: sakridge <sakridge@gmail.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-31 19:50:04 -07:00
mergify[bot]
057af41716 Fix links (#9184) (#9187)
automerge
2020-03-31 09:57:23 -07:00
Michael Vines
a44b8abd48 Bump version to v1.0.13 2020-03-30 23:05:41 -07:00
mergify[bot]
8778ecaed5 Ledger cleanup fixes (#9131) (#9175)
automerge
2020-03-30 20:42:17 -07:00
Michael Vines
a02542ada3 Bump version to v1.0.12 2020-03-30 08:45:50 -07:00
Michael Vines
ea17c6883f Remove chatty 'setting snapshot root:' info log 2020-03-30 08:44:24 -07:00
Michael Vines
706306645b solana account now displays the account's rent epoch 2020-03-30 08:44:24 -07:00
sakridge
da9e930788 Calculate ref counts earlier to prevent bad clean (#9153) 2020-03-29 14:42:57 -07:00
mergify[bot]
8b8e066bbe Add RPC subscription api for rooted slots (#9118) (#9126)
automerge
2020-03-27 13:09:32 -07:00
mergify[bot]
3473350b62 fix links (#9125) (#9128)
automerge
2020-03-27 10:28:23 -07:00
mergify[bot]
59d7eb5216 Fix links in docs (#9119) (#9127)
automerge
2020-03-27 09:50:28 -07:00
mergify[bot]
55ba934137 Document transaction field in getConfirmedBlock responses (#9121) (#9124)
automerge
2020-03-27 09:45:30 -07:00
mergify[bot]
4c3dcb7f7e Exclude all executable accounts from rent collection (#9116) (#9120)
automerge
2020-03-27 09:27:07 -07:00
mergify[bot]
3a879db8af Fix broken gitbook links (#9107) (#9108)
automerge
2020-03-26 20:33:20 -07:00
mergify[bot]
d2107270ea Consolidate signature-status rpcs (bp #9069) (#9105)
automerge
2020-03-26 20:30:46 -07:00
mergify[bot]
007afe22d0 Add docs for app wallets (#9098) (#9103)
automerge
2020-03-26 18:26:23 -07:00
mergify[bot]
93a1d10e15 Revert setting the default toolchain (#9093) (#9097)
automerge
2020-03-26 15:59:26 -07:00
mergify[bot]
d57a7c8f21 Restructure wallet docs to prep for app wallet content (#9088) (#9095)
automerge
2020-03-26 14:39:32 -07:00
mergify[bot]
6db39829c8 Install xargo using CI dictated cargo version if available (#9068) (#9092)
automerge
2020-03-26 14:02:09 -07:00
mergify[bot]
ec76826493 Unflake rpc subscriptions test by reducing sub count (#9078) (#9082)
automerge
2020-03-25 22:56:52 -07:00
carllin
d4ddb6265b Convert Banks (#9033)
* Store and compute needed state in EpochStakes struct
Co-authored-by: Carl <carl@solana.com>
2020-03-25 20:43:48 -07:00
Michael Vines
7a8233d7ca Bump version to 1.0.11 2020-03-25 19:17:06 -07:00
mergify[bot]
95bc051129 Cargo update bumpalo (#9067) (#9077)
automerge
2020-03-25 18:51:38 -07:00
mergify[bot]
02bcf4f8e2 ledger-tool can now decode stake instructions (bp #9045) (#9076)
automerge
2020-03-25 18:02:31 -07:00
sakridge
4b0d4e9834 Remove accounts unwrap (#9063)
automerge
2020-03-25 10:59:52 -07:00
mergify[bot]
bf4cdc091a Fix xargo to version 0.3.19 to avoid unstable feature (#9065) (#9066)
automerge

(cherry picked from commit c558db2a48)

Co-authored-by: Justin Starry <justin@solana.com>
2020-03-25 08:48:22 -07:00
mergify[bot]
5f2cf2b44d Increase vmap count in sys-tuner (#8940) (#9064)
(cherry picked from commit a70008cc5c)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-03-25 08:06:36 -07:00
mergify[bot]
03aae5eb5f Ignore RUSTSEC-2020-0006 for the moment (#9057) (#9059)
automerge
2020-03-24 21:06:15 -07:00
mergify[bot]
5f1ce81fbc ledger tool now outputs transaction status information if available (#9024) (#9026)
automerge
2020-03-24 12:23:25 -07:00
mergify[bot]
fc582aa57c Refactor how pubsub subscriptions are added (#9042) (#9049)
automerge
2020-03-24 11:05:33 -07:00
mergify[bot]
13676e9614 Fix timeout for subscriptions test (#9043) (#9044)
automerge
2020-03-24 02:57:25 -07:00
Michael Vines
7636a0521f solana-install-init: --pubkey is no longer required on platforms without a default update manifest 2020-03-23 22:40:53 -07:00
Michael Vines
ad06354a18 Remove , 2020-03-23 22:11:56 -07:00
Michael Vines
953cb93e44 Bump version to 1.0.10 2020-03-23 21:58:45 -07:00
Dan Albert
38b2957a8e Remove unused default update manifest pubkeys (#9041)
automerge
2020-03-23 20:57:43 -07:00
Tyera Eulberg
9eb39df93f Backport: add slot to signature notification & respect confirmations param (#9036)
automerge
2020-03-23 18:32:05 -07:00
mergify[bot]
f34ce94347 Remove Ledger-specific analysis of hardware wallets (#9028) (#9030)
automerge
2020-03-23 14:48:58 -07:00
mergify[bot]
5c6411fc06 Fix link in gitbook (#9027) (#9029)
automerge
2020-03-23 14:48:41 -07:00
Michael Vines
3ab428693a Manual v1.0 backports (#9025)
automerge
2020-03-23 13:55:03 -07:00
Michael Vines
7ffaf2ad29 Manual v1.0 backports (#9015)
automerge
2020-03-22 22:44:55 -07:00
sakridge
a5e4a1f2d8 Drop storage lock (#8997) 2020-03-21 18:55:59 -07:00
mergify[bot]
6ae99848f4 Revert "Move Install Solana doc into the Command-line Guide (#8982)" (#8992) (#8993)
This reverts commit 5fa36bbab3.

(cherry picked from commit 1aab959d4e)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-03-21 14:52:53 -06:00
mergify[bot]
5342b3a53f Shred fetch comment and debug message tweak (#8980) (#8990)
automerge

(cherry picked from commit 909321928c)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-03-21 09:25:08 -07:00
sakridge
50a3c98d21 Shred filter (#8975) (#8987)
Thread bank_forks into shred fetch and filter by slot.
2020-03-20 11:33:19 -07:00
Michael Vines
ca2b7a2c5b Cargo.lock 2020-03-20 11:05:45 -07:00
mergify[bot]
e900623817 Update value names in docs (#8983) (#8985)
automerge
2020-03-20 09:36:55 -07:00
mergify[bot]
a46e953ebb Move Install Solana doc into the Command-line Guide (#8982) (#8984)
automerge
2020-03-20 09:24:56 -07:00
mergify[bot]
3bbc51a6ae Improve CLI usage messages (#8972) (#8977)
automerge
2020-03-19 21:52:37 -07:00
Michael Vines
7ae2464cf3 Fix windows build by removing sys-info (#8860) (#8973)
automerge
2020-03-19 18:14:01 -07:00
Michael Vines
58a36ce484 Bump version to 1.0.9 2020-03-19 17:10:35 -07:00
Dan Albert
61bd9e6a28 Fix windows binary build on v1.0 (#8968)
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-19 15:58:34 -07:00
Tyera Eulberg
495ab631d6 Some Cli polish (#8966) (#8970)
automerge
2020-03-19 14:11:52 -07:00
Tyera Eulberg
93906847f9 Cli: polish transaction progress bar (#8963) (#8967)
automerge
2020-03-19 12:54:50 -07:00
Dan Albert
70f4c4a974 Build windows binaries on v1.0 (#8965) 2020-03-19 11:06:17 -07:00
mergify[bot]
ad6078da2d CLI: Fix create-nonce-account with seed (#8929) (#8961)
automerge
2020-03-19 10:32:35 -07:00
Michael Vines
2a617f2d07 v1.0: Backport of #8939 (#8957)
automerge
2020-03-19 02:08:36 -07:00
mergify[bot]
6af3c6ecbc CLI: Add multi-session signing support (#8927) (#8953)
* SDK: Add `NullSigner` implementation

* SDK: Split `Transaction::verify()` to gain access to results

* CLI: Minor refactor of --sign_only result parsing

* CLI: Enable partial signing

Signers specified by pubkey but without a matching --signer arg
fall back to a `NullSigner` when --sign-only is in effect.
This allows their pubkey to be used for TX construction as usual,
but leaves their `sign_message()` a NOP. As such, with --sign-only
in effect, signing and verification must be done separately, with
the latter's per-signature results considered individually.

* CLI: Surface/report missing/bad signers to user

* CLI: Suppress --sign-only JSON output

* nits

* Docs for multi-session offline signing

(cherry picked from commit 98228c392e)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-03-18 23:39:44 -07:00
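
The NullSigner mechanism described above can be illustrated with a stripped-down signer abstraction. The trait below is a stand-in, not the solana-sdk Signer trait verbatim: a NullSigner reports its pubkey so the transaction can be built, but signing is a no-op until another session supplies the real signature.

    // Illustrative types; the real SDK trait has more methods and error handling.
    type Pubkey = [u8; 32];
    type Signature = [u8; 64];

    trait MessageSigner {
        fn pubkey(&self) -> Pubkey;
        fn sign_message(&self, message: &[u8]) -> Signature;
    }

    // A signer that knows only its pubkey. Used when --sign-only is in effect and
    // the matching private key lives in a different signing session.
    struct NullSigner {
        pubkey: Pubkey,
    }

    impl MessageSigner for NullSigner {
        fn pubkey(&self) -> Pubkey {
            self.pubkey
        }
        // Signing is a no-op: the placeholder signature is only meaningful once the
        // partially signed transaction is combined with the other session's output.
        fn sign_message(&self, _message: &[u8]) -> Signature {
            [0u8; 64]
        }
    }

    fn main() {
        let absent = NullSigner { pubkey: [7; 32] };
        let sig = absent.sign_message(b"transfer 1 SOL");
        println!(
            "pubkey byte 0: {}, signature is all zeros: {}",
            absent.pubkey()[0],
            sig.iter().all(|b| *b == 0)
        );
    }
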
mergify[bot]
a5938e5a21 Bump bv from 0.11.0 to 0.11.1 (bp #8952) (#8955)
automerge
2020-03-18 22:33:05 -07:00
mergify[bot]
8eaa8d8788 Add docs on wallets and generating keys (bp #8905) (#8946)
automerge
2020-03-18 19:15:34 -07:00
mergify[bot]
51940eec9b Delete broken link (#8950) (#8951)
automerge
2020-03-18 18:20:36 -07:00
mergify[bot]
d1e16b2bc4 Remove product string from device keypair URL (#8942) (#8945)
automerge
2020-03-18 14:11:41 -07:00
mergify[bot]
97051c87b4 Document account/signer requirements for vote instructions (#8941)
automerge
2020-03-18 12:20:02 -07:00
mergify[bot]
2521913654 Add counter for accounts hash verification. (#8928) (#8934)
automerge
2020-03-18 09:57:38 -07:00
mergify[bot]
803c87b4bd Add docs for --trusted-validator options (#8911) (#8933)
automerge
2020-03-18 08:37:15 -07:00
mergify[bot]
8ff9ee3a06 Cli: add spinner progress bar when waiting for transaction confirmation (#8916) (#8920)
automerge
2020-03-17 20:06:01 -07:00
mergify[bot]
25bf2c6062 Extend local-cluster CI timeout (#8921) (#8923)
automerge
2020-03-17 19:48:57 -07:00
mergify[bot]
fc80b77fc4 Increase buffer on low SOL fault to over a week (#8903) (#8904)
automerge
2020-03-17 10:32:28 -07:00
mergify[bot]
5a7707362c Sort device paths for select (#8896) (#8897)
automerge
2020-03-16 19:43:11 -07:00
mergify[bot]
0102ee3fa9 Hoist USB URL docs (#8894) (#8895)
automerge
2020-03-16 16:31:35 -07:00
mergify[bot]
d29cb19a73 Cli: enable flexible signer paths for pubkey args (#8892) (#8893)
automerge
2020-03-16 16:16:58 -07:00
sakridge
1cc66f0cd7 Add Accounts hash consistency halting (#8772) (#8889)
* Accounts hash consistency halting

* Add option to inject account hash faults for testing.

Enable option in local cluster test to see that node halts.
2020-03-16 14:29:44 -07:00
mergify[bot]
e2cfc513eb Add genesis token counter test to system test (#8824) (#8891)
automerge
2020-03-16 13:32:58 -07:00
mergify[bot]
8115a962e9 Cleanup CLI types (#8888) (#8890)
automerge
2020-03-16 12:28:22 -07:00
mergify[bot]
0c804e2ef2 Use types for CLI value names (#8878) (#8886)
automerge
2020-03-16 09:23:15 -07:00
mergify[bot]
cbe36a7a63 Lower error level of InvalidTickCount (bp #8880) (#8885)
automerge
2020-03-16 08:53:33 -07:00
mergify[bot]
174825fecf Fix faucet command in run.sh (#8883) (#8884)
automerge
2020-03-16 05:38:31 -07:00
Michael Vines
c7e80bebdc Bump version to 1.0.8 2020-03-15 21:50:06 -07:00
Michael Vines
57abc370fa Quietly re-introduce legacy --voting-keypair/--identity-keypair args for v1.0.6 compatibility
(cherry picked from commit 49706172f3)
2020-03-15 20:21:23 -07:00
mergify[bot]
42ab421a87 Rename leader to validator, drop _keypair/-keypair suffix (#8876) (#8877)
automerge
2020-03-15 14:14:32 -07:00
mergify[bot]
9ab27291f5 Validators now run a full gossip node while looking for a snapshot (#8872)
automerge
2020-03-15 10:29:13 -07:00
mergify[bot]
27e4e9cb8d Cli: Add resolve-signer subcommand (#8859) (#8870)
automerge
2020-03-14 22:14:24 -07:00
mergify[bot]
b0cf65dfc8 Refactor system tests dir structure (#8865) (#8869)
automerge
2020-03-14 19:38:19 -07:00
Dan Albert
bfc97c682c Apply s/faucet-keypair/faucet renaming to net scripts (#8868)
automerge
2020-03-14 18:14:49 -07:00
mergify[bot]
231c9b25d1 Rework validator vote account defaults to half voting fees (#8858)
automerge
2020-03-13 21:47:32 -07:00
Michael Vines
f536d805ed Rework cluster metrics dashboard to support the modern clusters
(cherry picked from commit 5f5824d78d)

# Conflicts:
#	system-test/automation_utils.sh
2020-03-13 20:18:06 -07:00
Michael Vines
cb6e7426a4 Drop :8899 port from http://devnet.solana.com references
(cherry picked from commit 9e0a26628b)
2020-03-13 20:14:02 -07:00
mergify[bot]
71f391ae04 Enable any signer in various cli subcommands (#8844) (#8856)
automerge
2020-03-13 18:11:05 -07:00
mergify[bot]
12bf34f059 Remove holding Poh lock (#8838) (#8850)
automerge
2020-03-13 16:56:22 -07:00
Grimes
a3a14d6b5b Docs: Use correct flag in keypair verification instructions (#8677)
automerge

(cherry picked from commit 542691c4e4)
2020-03-13 16:00:33 -07:00
Michael Vines
5e22db017a Surface the missing pubkey
(cherry picked from commit ce88602ced)
2020-03-13 16:00:01 -07:00
mergify[bot]
2cd09957b4 Cli: add subcommand to withdraw from vote account (#8550) (#8840)
automerge
2020-03-13 15:31:47 -07:00
mergify[bot]
d1ec6c0b8b Upgrade to Rust 1.42 (#8836) (#8839)
automerge
2020-03-13 14:29:18 -07:00
Michael Vines
7722723400 Bump version to 1.0.7 2020-03-13 08:10:09 -07:00
Trent Nelson
4a42cfc42a Cli error cleanup 1.0 (#8834)
* Don't use move semantics if not needed (#8793)

* SDK: Deboilerplate `TransportError` with thiserror

* Enable interchange between `TransportError` and `ClientError`

* SDK: Retval consistency between `Client` and `AsyncClient` traits

* Client: Introduce/use `Result` type

* Client: Remove unused `RpcResponseIn` type

* Client: Rename `RpcResponse` to more appropriate `RpcResult`

* Client: Death to `io::Result` return types

* Client: Struct-ify `ClientError`

* Client: Add optional `command` parameter to `ClientError`

* RpcClient: Stop abusing `io::Error` (low-fruit)

* ClientError: Use `thiserror`'s `Display` impl

* Extend `RpcError`'s utility

* RpcClient: Stop abusing `io::Error` (the rest)

* CLI: Shim `main()` so we can `Display` format errors

* claputils: format input validator errors with `Display`

They are intended to be displayed to users

* SDK: `thiserror` for hash and sig parse errors

* Keygen: Shim main to format errors with `Display`

* SDK: `thiserror` for `InstructionError`

* CLI: `thiserror` for `CliError`

* CLI: Format user messages with `Display`

* Client: Tweak `Display` for `ClientError`

* RpcClient: Improve messaging when TX cannot be confirmed

* fu death io res retval

* CLI/Keygen - fix shell return value on error

* Tweak `InstructionError` `Display` messages as per review

* Cleanup hackjob return code fix

* Embrace that which you hate most

* Too much...

Co-authored-by: Jack May <jack@solana.com>
2020-03-13 07:42:25 -06:00
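
Much of the list above boils down to replacing hand-rolled io::Error plumbing with thiserror-derived error types whose Display text is meant for users. A small sketch in that spirit (the variant names are hypothetical, and the example assumes the thiserror crate is available):

    use thiserror::Error;

    // Hypothetical error type in the spirit of the cleanup above: each variant
    // carries a user-facing Display message, and #[from] gives free conversions.
    #[derive(Debug, Error)]
    enum ClientError {
        #[error("I/O error: {0}")]
        Io(#[from] std::io::Error),
        #[error("RPC request failed: {0}")]
        Rpc(String),
        #[error("transaction could not be confirmed after {retries} retries")]
        Unconfirmed { retries: usize },
    }

    fn send() -> Result<(), ClientError> {
        Err(ClientError::Unconfirmed { retries: 5 })
    }

    fn main() {
        // Shimming main (printing with Display, then exiting) keeps messages readable.
        if let Err(err) = send() {
            eprintln!("error: {}", err); // uses the derived Display impl
            std::process::exit(1);
        }
    }
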
mergify[bot]
976d744b0d Enable conservative out-of-bound snapshot cleaning (#8811) (#8832)
automerge
2020-03-12 23:40:17 -07:00
mergify[bot]
62de02c8d4 Avoid early clean and bad snapshot by ref-counting (#8724) (#8831)
automerge
2020-03-12 23:09:45 -07:00
mergify[bot]
2741a5ce3b Move history out of intro (bp #8825) (#8826)
automerge
2020-03-12 18:21:56 -07:00
mergify[bot]
0c818acd90 Move intro out of README (#8735) (#8827)
automerge

(cherry picked from commit c0fd017906)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-03-12 18:49:51 -06:00
Michael Vines
4d04915302 Update source markdown in CI 2020-03-12 14:56:00 -07:00
Michael Vines
7576411c59 Don't tell users to install unreleased software versions 2020-03-12 14:55:46 -07:00
Michael Vines
ab2bed6e8f Add all of docs/src 2020-03-12 14:45:48 -07:00
mergify[bot]
f66a5ba579 Update keys (#8821) (#8822)
automerge
2020-03-12 14:15:32 -07:00
Dan Albert
9253d786f8 Revert over-zealous versioning text (#8819)
automerge
2020-03-12 09:49:26 -07:00
mergify[bot]
d1be1ee49e Fix malformed doc link (#8817) (#8818)
automerge
2020-03-12 09:46:26 -07:00
mergify[bot]
65833eacd8 Update keys (#8814) (#8816)
automerge
2020-03-12 09:39:31 -07:00
Dan Albert
921994e588 Restrict which nodes can run stable and coverage (#8807)
automerge
2020-03-11 19:18:10 -07:00
mergify[bot]
91dfd962e5 Update keys (#8800) (#8806)
automerge
2020-03-11 19:04:46 -07:00
Trent Nelson
18067dcb55 CLI: Plumb nonce-stored fees (#8750)
automerge

(cherry picked from commit 0422af2aae)
2020-03-11 13:09:40 -07:00
Greg Fitzgerald
7501e1b0f0 Update keys (#8791)
(cherry picked from commit a0d0d4c0e9)
2020-03-11 13:08:48 -07:00
mergify[bot]
1bd2c1de20 Notify when validator balance goes below 1 SOL (#8792)
automerge
2020-03-11 11:47:08 -07:00
mergify[bot]
a9ef20d43f Update keys (#8783) (#8784)
automerge
2020-03-10 21:39:34 -07:00
mergify[bot]
00a9d66fed Improve install messaging (#8477) (#8785)
automerge
2020-03-10 20:30:39 -07:00
mergify[bot]
7508ffe703 Add checkmark (#8781) (#8782)
automerge
2020-03-10 18:32:53 -07:00
Michael Vines
6c2368bfc9 Permit fee-payer/split-stake accounts to be the same when using --seed
(cherry picked from commit 775ce3a03f)
2020-03-10 16:16:26 -07:00
Michael Vines
6bcb797260 Revert to a computed websocket_url value when json_rpc_url is changed
(cherry picked from commit f655372b08)
2020-03-10 16:12:23 -07:00
mergify[bot]
329945e56f Print approved msg after Ledger interaction (#8771) (#8775)
automerge
2020-03-10 15:06:43 -07:00
mergify[bot]
1f80346d97 watchtower: Add --monitor-active-stake flag (bp #8765) (#8769)
automerge
2020-03-10 13:27:19 -07:00
mergify[bot]
38e661d7ba CLI Nonce account access dereplicode (#8743) (#8767)
automerge
2020-03-10 13:15:18 -07:00
mergify[bot]
592c4efd17 Configure the cluster right after installing it (#8761) (#8762)
automerge
2020-03-10 10:14:55 -07:00
mergify[bot]
1629745bd3 Move docs to imperative mood (bp #8643) (#8763)
automerge
2020-03-10 09:50:59 -07:00
mergify[bot]
02e078c22e Fix Gitbook's markdown rendering (#8759) (#8760)
automerge
2020-03-10 08:09:51 -07:00
Michael Vines
ea9e7c710a Bump version to 1.0.6 2020-03-09 22:38:57 -07:00
Michael Vines
ee5aa0c1a2 Support monitoring multiple validators 2020-03-09 21:07:32 -07:00
HM
55dee2901e watchtower: flag to suppress duplicate notifications (#8549)
* watchtower: send error message as notification

* watchtower: send all clear notification when ok again

* watchtower: add twilio sms notifications

* watchtower: flag to suppress duplicate notifications

* remove trailing space character

* changes as per suggestion on PR

* all changes together

* cargo fmt
2020-03-09 21:07:32 -07:00
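
The watchtower behavior listed above (notify on a new failure, suppress duplicates, send a single all-clear once healthy again) reduces to remembering the last notification. A minimal sketch with hypothetical names:

    // Tracks the last failure message so that repeated identical alerts are
    // suppressed and a single "all clear" is sent once the failure resolves.
    #[derive(Default)]
    struct Notifier {
        last_failure: Option<String>,
    }

    impl Notifier {
        fn report(&mut self, failure: Option<String>) {
            match (failure, self.last_failure.take()) {
                // New or changed failure: notify and remember it.
                (Some(msg), prev) if prev.as_deref() != Some(msg.as_str()) => {
                    println!("notify: {}", msg);
                    self.last_failure = Some(msg);
                }
                // Same failure as before: stay quiet, keep remembering it.
                (Some(msg), _) => self.last_failure = Some(msg),
                // Failure cleared: send one all-clear message.
                (None, Some(_)) => println!("notify: all clear"),
                // Still healthy: nothing to do.
                (None, None) => {}
            }
        }
    }

    fn main() {
        let mut notifier = Notifier::default();
        notifier.report(Some("validator delinquent".to_string())); // notify
        notifier.report(Some("validator delinquent".to_string())); // suppressed duplicate
        notifier.report(None); // all clear
        notifier.report(None); // quiet
    }
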
Michael Vines
2b0824d18b watchtower now uses cli-config/
(cherry picked from commit 74e7da214a)
2020-03-09 20:40:18 -07:00
Michael Vines
b0709ea0ac Move cli-config default out of cli/ into cli-config/
(cherry picked from commit 756ba07b16)
2020-03-09 20:40:18 -07:00
Michael Vines
81b5499f7a Rename 'url' to 'json_rpc_url'
(cherry picked from commit 5c236fd06c)
2020-03-09 20:40:18 -07:00
Michael Vines
c96ce99705 Wait for 80% of the active stake instead of 75% 2020-03-09 20:31:44 -07:00
mergify[bot]
8dcd2d11e1 Docs: Fix missing CLI usage.md (#8745) (#8749)
automerge
2020-03-09 20:05:24 -07:00
mergify[bot]
777aae9059 Remove --derivation-path option (#8741) (#8747)
automerge
2020-03-09 19:17:14 -07:00
mergify[bot]
4dd1340236 Limit waiting-message to single- or last-chunk apdus (#8730) (#8733)
automerge
2020-03-09 15:44:17 -07:00
mergify[bot]
889b06e1d4 Allow passing of program_id to programs (bp #8639) (#8670)
automerge
2020-03-09 12:36:34 -07:00
mergify[bot]
f511296ee8 Fix account tests (#8615) (#8729)
automerge
2020-03-09 11:19:14 -07:00
mergify[bot]
c19eb717b4 Update rust-bpf to include matching cargo (#8598) (#8727)
automerge
2020-03-09 10:42:19 -07:00
mergify[bot]
dd54369e1b Cli: Fix create-with-seed (#8706) (#8723)
automerge
2020-03-09 00:16:29 -07:00
Michael Vines
bb563b4835 Permit --no-untrusted-rpc without any --trusted-validators 2020-03-08 22:34:53 -07:00
mergify[bot]
d061fadede Add purge function to ledger-tool (#8719) (#8720)
(cherry picked from commit de34187db0)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-03-08 21:38:38 -07:00
mergify[bot]
577cd2bd3a Remove unnecessary snapshot hash verification (#8711) (#8714)
(cherry picked from commit f992ee3140)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-07 14:41:45 -07:00
mergify[bot]
9d1c8657e2 Groom ledger-tool bounds output (#8710) (#8715)
(cherry picked from commit acb23e8ef0)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-07 14:15:50 -07:00
mergify[bot]
67216ac7f5 Split staker infos (#8683)
automerge
2020-03-06 23:44:06 -08:00
mergify[bot]
e63b24c39f Remove ask-seed-phrase arg from validator, archiver (#8697) (#8713)
automerge
2020-03-06 23:34:20 -08:00
Greg Fitzgerald
d963f7afb4 Set withdrawer keys (#8707)
automerge
2020-03-06 21:21:00 -08:00
mergify[bot]
a07bf4870a Fix Ledger docs (#8705) (#8708)
automerge
2020-03-06 21:03:15 -08:00
mergify[bot]
8422d4b3fb Disable setLogFilter RPC API by default (#8693) (#8699)
automerge
2020-03-06 18:16:04 -08:00
mergify[bot]
ff4731cce2 RPC: Add getFeeCalculatorForBlockhash method call (#8687) (#8698)
automerge
2020-03-06 17:25:33 -08:00
mergify[bot]
659aaafff6 Ledger: return specific error if ledger-app-solana is not running (#8684) (#8695)
automerge
2020-03-06 16:28:53 -08:00
mergify[bot]
175651c497 Add shred version support to net/ (#8689) (#8694)
automerge
2020-03-06 15:41:16 -08:00
mergify[bot]
085e773f27 Properly escape current version (#8686) (#8688)
(cherry picked from commit a78a339407)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-06 14:37:41 -07:00
Michael Vines
5d3140c040 Cargo.lock 2020-03-06 14:25:54 -07:00
Michael Vines
d8d7238920 Bump version to 1.0.5 2020-03-06 14:14:53 -07:00
mergify[bot]
418c3cd4cf Delete Archiver installation docs (#8665) (#8669)
automerge
2020-03-06 12:49:52 -08:00
Michael Vines
9f532cb50f Publish initial snapshot hash in gossip on validator startup (#8678) 2020-03-05 23:01:35 -07:00
Michael Vines
c35f4927cd Bump version to 1.0.4 2020-03-05 16:10:24 -07:00
Michael Vines
2d0f4b5c8c Bump version to 1.0.3 2020-03-05 13:05:12 -07:00
Grimes
fe59ee61e6 Add find_incomplete_slots (#8654) (#8671)
automerge
2020-03-05 11:52:20 -08:00
Michael Vines
bda2acb06d SDK: Allow RecentBlockhashes to hold the entire BlockhashQueue (#8632) (#8672)
automerge

(cherry picked from commit 9d667db634)

Co-authored-by: Grimes <39311140+solana-grimes@users.noreply.github.com>
2020-03-05 12:09:13 -07:00
Grimes
ac3fe1da02 Fix docs build (#8663) (#8667)
automerge
2020-03-05 10:25:43 -08:00
Michael Vines
57813041d2 Remove solana-archiver from release artifacts 2020-03-05 11:02:18 -07:00
Grimes
7e446da82c Choose a cluster before checking balances (#8666) (#8668)
automerge
2020-03-05 10:00:53 -08:00
Grimes
d94f5c94a3 Nonce fees 1.0 (#8664)
automerge
2020-03-05 09:41:45 -08:00
Michael Vines
4d9aee4794 Always and fully normalize stored 0-lamport accts. (#8657) (#8661)
(cherry picked from commit f146c92e88)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-05 09:49:26 -07:00
Grimes
f32c152bce genesis: Add support for multiple bootstrap validators (#8656) (#8658)
automerge
2020-03-05 00:53:40 -08:00
Grimes
5caf9110e0 Add --bind-address and --rpc-bind-address validator arguments (#8628) (#8655)
automerge
2020-03-04 22:39:38 -08:00
Grimes
d1c80143ea Add NextSlotsIterator (#8652) (#8653)
automerge
2020-03-04 21:59:04 -08:00
Grimes
bb132df121 Add orphan iterator (#8636) (#8651)
automerge
2020-03-04 20:02:46 -08:00
Grimes
b3af1c7e57 Connect partition flag to validators (#8622) (#8646)
automerge
2020-03-04 18:16:54 -08:00
Grimes
14cba53338 Install Solana before using it (#8638) (#8644)
automerge
2020-03-04 16:08:25 -08:00
Grimes
dc8abbe9e3 solana catchup now detects when you try to catch up to yourself (#8635) (#8641)
automerge
2020-03-04 16:00:20 -08:00
carllin
dd06001ed8 compute_bank_stats needs to return newly computed ForkStats (#8608) (#8634)
* Fix broken confirmation, add test

(cherry picked from commit f23dc11a86)

Co-authored-by: carllin <wumu727@gmail.com>
2020-03-04 14:28:17 -08:00
Grimes
298b7de2e2 catchup now supports an optional RPC URL argument for validators with private RPC (#8629) (#8633)
automerge
2020-03-04 12:48:18 -08:00
Grimes
74cbc6953f Expose executable and rent_epoch in AccountInfo (#8619) (#8631)
automerge
2020-03-04 12:09:14 -08:00
mergify[bot]
27e5203078 Fix sendTransaction doc (#8625) (#8626)
automerge
2020-03-04 08:53:30 -08:00
mergify[bot]
73787e162c Keep GenesisConfig binary compatible with v0.23 (#8617) (#8620)
automerge
2020-03-04 02:02:28 -08:00
mergify[bot]
74ae40be41 Check transaction signatures in entry verify (#8596) (#8614)
automerge
2020-03-03 22:10:35 -08:00
mergify[bot]
8dd58e9cea Remove accounts hack and correctly restore accounts store counts (#8569) (#8613)
automerge
2020-03-03 22:05:50 -08:00
mergify[bot]
7d86179c60 Split signature throughput tracking out of FeeCalculator (#8447) (#8610)
automerge
2020-03-03 20:36:52 -08:00
mergify[bot]
8115cf1360 Docs: Update CLI offline cmds (#8548) (#8558)
automerge
2020-03-03 20:26:28 -08:00
Michael Vines
3e5d45053d Cargo.lock 2020-03-03 21:14:00 -07:00
mergify[bot]
061319f35a Add commitment flag to vote-account and validators commands (#8597) (#8606)
automerge
2020-03-03 17:47:14 -08:00
mergify[bot]
dc75837500 Use fs::rename which is much faster than move_items (#8579) (#8595)
automerge
2020-03-03 11:23:11 -08:00
mergify[bot]
dfe26f5275 Add Ledger wallet installation instructions (#8581) (#8592)
automerge
2020-03-03 08:29:12 -08:00
mergify[bot]
f4385f7ad2 Do periodic inbound cleaning for rooted slots (#8436) (#8583)
automerge
2020-03-02 23:04:57 -08:00
Michael Vines
8df4cf2895 Don't advertise the snapshot that the node was loaded from
snapshot_packager_service will remove this snapshot hash from gossip
when it starts
2020-03-02 19:16:06 -07:00
mergify[bot]
dad62e132e Remove granularity from genesis (bp #8514) (#8518)
automerge
2020-03-02 15:42:46 -08:00
Michael Vines
0d4131ae68 Bump version to 1.0.2 2020-03-02 15:38:26 -07:00
mergify[bot]
a2539e1892 Allow stake lockup fields to be updated independently (#8568) (#8574)
automerge
2020-03-02 14:19:18 -08:00
Michael Vines
210659e6c3 Only gossip packaged snapshots
(cherry picked from commit 42c5c59800)
2020-03-02 14:21:04 -07:00
Michael Vines
15a0fb1fa9 --wait-for-supermajority now requires a SLOT
(cherry picked from commit 13551885c2)
2020-03-02 13:40:52 -07:00
Michael Vines
4db31f5b48 Add --no-untrusted-rpc flag
(cherry picked from commit d677e83ed4)
2020-03-02 11:53:15 -07:00
mergify[bot]
b38a535c63 Hack to skip cleanup_dead_slots upon snapshot load (#8562)
automerge
2020-03-02 10:40:41 -08:00
Michael Vines
218b02aaf8 Demote gossip responder error log messages to info! 2020-03-01 10:42:32 -07:00
mergify[bot]
d6e7cbd4e8 feat: implement websocket_url as a get/set-able global parameter w/ value computation (#8553)
automerge
2020-03-01 01:09:09 -08:00
mergify[bot]
f2fda14333 Reduce max snapshot hashes to stay under MTU (bp #8541) (#8544)
automerge
2020-02-29 09:17:09 -08:00
Michael Vines
1c576d4a68 Upgrade to Rust 1.41.1
(cherry picked from commit 7d27be2a73)
2020-02-29 09:27:11 -07:00
mergify[bot]
f6232e1b3c Fix skipping own leader slots (#8533) (#8540)
automerge
2020-02-29 01:27:46 -08:00
mergify[bot]
ad71fa3f12 rpc: GET for /snapshot.tar.bz2 now redirects to the latest snapshot (bp #8536) (#8538)
automerge
2020-02-28 23:45:15 -08:00
mergify[bot]
9c326c7c71 Ensure the validator's identity pubkey is not provided as a --trusted-validator (#8525) (#8527)
automerge
2020-02-27 22:07:07 -08:00
mergify[bot]
ac545cadaf Add versioning (#8348) (#8524)
automerge
2020-02-27 20:12:49 -08:00
mergify[bot]
082d8fff36 Use legit solana message in verify (#8513) (#8523)
automerge
2020-02-27 19:36:37 -08:00
mergify[bot]
2c3632a042 Determine vote_state ahead of time (#8303) (#8521)
automerge
2020-02-27 18:32:27 -08:00
mergify[bot]
7b23e79922 Add snapshot hash of full accounts state (#8295) (#8515)
* Add snapshot hash of full accounts state

* Use normal hashing for the accounts delta state

* Add merkle

(cherry picked from commit 947a339714)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-02-27 17:36:28 -08:00
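
The snapshot-hash commit above combines per-account hashes into a single hash of the full accounts state using a merkle-style reduction. The toy sketch below shows only the shape of that reduction; it uses the standard library's non-cryptographic hasher so it stays self-contained, unlike the real implementation.

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // NOTE: DefaultHasher is NOT cryptographic; it stands in for a real hash
    // function only so this sketch runs with the standard library alone.
    fn hash_pair(left: u64, right: u64) -> u64 {
        let mut hasher = DefaultHasher::new();
        (left, right).hash(&mut hasher);
        hasher.finish()
    }

    // Reduce the per-account hashes pairwise until a single root remains.
    // An odd element at the end of a level is paired with itself.
    fn merkle_root(mut level: Vec<u64>) -> u64 {
        if level.is_empty() {
            return 0;
        }
        while level.len() > 1 {
            level = level
                .chunks(2)
                .map(|pair| hash_pair(pair[0], *pair.last().unwrap()))
                .collect();
        }
        level[0]
    }

    fn main() {
        // Pretend these are hashes of individual accounts in the full state.
        let account_hashes = vec![11, 22, 33, 44, 55];
        println!("accounts state root: {:x}", merkle_root(account_hashes));
    }
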
mergify[bot]
9afd14a0c6 Import Tour de SOL docs (#8516) (#8519)
automerge
2020-02-27 17:15:36 -08:00
Michael Vines
f42aa34ab6 Reorder InstructionError to remain compatible with v0.23
(cherry picked from commit 7dac8e2dde)
2020-02-27 18:07:00 -07:00
mergify[bot]
43af91ff4b Ledger messaging cleanup (#8506) (#8508)
automerge
2020-02-27 12:38:09 -08:00
mergify[bot]
a920a0f946 Fix cluster economics figures and spelling in docs (#8502) (#8505)
automerge
2020-02-27 02:49:10 -08:00
mergify[bot]
4a8a6d0b3f Remove loop (#8496)
automerge
2020-02-27 00:38:27 -08:00
mergify[bot]
a64b8a2705 Rename snapshot.tar.bz2 to snapshot-<slot>-<hash>.tar.bz2 (bp #8482) (#8501)
automerge
2020-02-26 23:31:44 -08:00
Michael Vines
0198f9e8af Peg snapshot version to 1.0.0 2020-02-26 22:07:50 -07:00
Michael Vines
1582a3a927 Cargo.lock 2020-02-26 21:17:40 -07:00
mergify[bot]
e713f0e5fb Update voting simulation (#8489) 2020-02-26 20:00:20 -08:00
mergify[bot]
77031000c4 Choose more appropriate options for pubsub websocket server (#8354) (#8492)
automerge
2020-02-26 18:23:50 -08:00
Michael Vines
635a962fba Reference the v1.0.0 installer 2020-02-26 19:20:58 -07:00
mergify[bot]
c59ec2dcff Add flag to confirm key on device (#8478) (#8490)
automerge
2020-02-26 17:31:52 -08:00
mergify[bot]
abc6c5e264 Limit leader schedule search space (#8468) (#8486)
automerge
2020-02-26 16:11:14 -08:00
mergify[bot]
87cfac12dd Validate the genesis config downloaded over RPC before accepting it (bp #8474) (#8481)
automerge
2020-02-26 15:12:06 -08:00
Tyera Eulberg
60b43e34b6 Ledger hardware wallet docs (#8472) (#8479)
automerge
2020-02-26 14:42:08 -08:00
mergify[bot]
9ab6222f03 Move docs from book/ to docs/ (#8469) (#8471)
automerge
2020-02-26 08:01:44 -08:00
mergify[bot]
2298dd5c07 Use runtime executor to send pubsub notifications (#8353) (#8465)
automerge
2020-02-25 21:45:18 -08:00
mergify[bot]
822d166115 live-slots now displays the rate at which the root slot is advancing (#8464)
automerge
2020-02-25 21:18:10 -08:00
mergify[bot]
8f71580615 Allow withdrawer to change the authorized stake key (#8456) (#8462)
automerge
2020-02-25 19:30:59 -08:00
mergify[bot]
cc3352ff06 Ledger key path rework (#8453) (#8457)
automerge
2020-02-25 18:00:53 -08:00
mergify[bot]
8f5928b7c7 Promote dangerous cond. from just warning to panic (#8439) (#8449)
automerge
2020-02-25 15:31:06 -08:00
Michael Vines
888e9617ff 🐌🐌 Publish crates for even longer longer 2020-02-25 09:23:20 -07:00
mergify[bot]
4695c4cf7d Add --no-check-vote-account argument (#8430) (#8435)
automerge
2020-02-25 00:23:50 -08:00
mergify[bot]
bbfc56ff7f Make solana root key accessible on Ledger (#8421) (#8431)
automerge
2020-02-24 22:28:28 -08:00
Michael Vines
4103d99018 Bump version to 1.0.1 2020-02-24 23:24:27 -07:00
mergify[bot]
c375ce1fcd CLI: collect and deduplicate signers (#8398) (#8423)
automerge
2020-02-24 17:29:34 -08:00
Jack May
df813b31c5 Fix SDK deps 2020-02-24 17:30:46 -07:00
Michael Vines
7db92e2951 Drop print- prefix from slot/accounts command
(cherry picked from commit 89baa94002)
2020-02-24 17:28:36 -07:00
Michael Vines
6585518f78 Add genesis subcommand
(cherry picked from commit 1ef3478709)
2020-02-24 17:28:36 -07:00
Michael Vines
6398e256e4 Move shred_version module to sdk/
(cherry picked from commit 73063544bd)
2020-02-24 17:28:36 -07:00
mergify[bot]
b64ebb45e7 validator: snapshot fetching cleanup (bp #8413) (#8417)
automerge
2020-02-24 15:22:34 -08:00
mergify[bot]
307ac66d9c Reinstate create-stale-account w/ seed test (#8401) (#8402)
automerge
2020-02-22 10:56:39 -08:00
425 changed files with 17043 additions and 24719 deletions

.appveyor.yml (new file, 42 lines)

@@ -0,0 +1,42 @@
version: '{build}'
branches:
  only:
    - master
    - /^v[0-9.]+\.[0-9.]+/
cache:
  - '%USERPROFILE%\.cargo'
  - '%APPVEYOR_BUILD_FOLDER%\target'
clone_folder: d:\projects\solana
build_script:
  - bash ci/publish-tarball.sh
notifications:
  - provider: Slack
    incoming_webhook:
      secure: GJsBey+F5apAtUm86MHVJ68Uqa6WN1SImcuIc4TsTZrDhA8K1QWUNw9FFQPybUWDyOcS5dly3kubnUqlGt9ux6Ad2efsfRIQYWv0tOVXKeY=
    channel: ci-status
    on_build_success: false
    on_build_failure: true
    on_build_status_changed: true
deploy:
  - provider: S3
    access_key_id:
      secure: fTbJl6JpFebR40J7cOWZ2mXBa3kIvEiXgzxAj6L3N7A=
    secret_access_key:
      secure: vItsBXb2rEFLvkWtVn/Rcxu5a5+2EwC+b7GsA0waJy9hXh6XuBAD0lnHd9re3g/4
    bucket: release.solana.com
    region: us-west-1
    set_public: true
  - provider: GitHub
    auth_token:
      secure: 81fEmPZ0cV1wLtNuUrcmtgxKF6ROQF1+/ft5m+fHX21z6PoeCbaNo8cTyLioWBj7
    draft: false
    prerelease: false
    on:
      appveyor_repo_tag: true


@@ -3,16 +3,3 @@ root: ./docs/src
structure:
  readme: introduction.md
  summary: SUMMARY.md
redirects:
  wallet: ./wallet-guide/README.md
  wallet/app-wallets: ./wallet-guide/apps.md
  wallet/app-wallets/trust-wallet: ./wallet-guide/trust-wallet.md
  wallet/app-wallets/ledger-live: ./wallet-guide/ledger-live.md
  wallet/cli-wallets: ./wallet-guide/cli.md
  wallet/cli-wallets/paper-wallet: ./paper-wallet/README.md
  wallet/cli-wallets/paper-wallet/paper-wallet-usage: ./paper-wallet/paper-wallet-usage.md
  wallet/cli-wallets/remote-wallet: ./hardware-wallets/README.md
  wallet/cli-wallets/remote-wallet/ledger: ./hardware-wallets/ledger.md
  wallet/cli-wallets/file-system-wallet: ./file-system-wallet/README.md
  wallet/support: ./wallet-guide/support.md

.gitignore (1 line changed)

@@ -1,6 +1,5 @@
/docs/html/
/docs/src/tests.ok
/docs/src/cli/usage.md
/docs/src/.gitbook/assets/*.svg
/farf/
/solana-release/


@@ -19,13 +19,20 @@ pull_request_rules:
      label:
        add:
          - automerge
  - name: v0.23 backport
    conditions:
      - base=master
      - label=v0.23
    actions:
      backport:
        branches:
          - v0.23
  - name: v1.0 backport
    conditions:
      - base=master
      - label=v1.0
    actions:
      backport:
        ignore_conflicts: true
        branches:
          - v1.0
  - name: v1.1 backport
@@ -34,7 +41,6 @@ pull_request_rules:
      - label=v1.1
    actions:
      backport:
        ignore_conflicts: true
        branches:
          - v1.1
  - name: v1.2 backport
@@ -43,6 +49,5 @@ pull_request_rules:
      - label=v1.2
    actions:
      backport:
        ignore_conflicts: true
        branches:
          - v1.2


@@ -45,7 +45,7 @@ $ git pull --rebase upstream master
If there are no functional changes, PRs can be very large and that's no
problem. If, however, your changes are making meaningful changes or additions,
then about 1,000 lines of changes is about the most you should ask a Solana
then about 1000 lines of changes is about the most you should ask a Solana
maintainer to review.
### Should I send small PRs as I develop large, new components?

Cargo.lock (generated, 2149 lines changed)

File diff suppressed because it is too large.


@@ -3,7 +3,6 @@ members = [
"bench-exchange",
"bench-streamer",
"bench-tps",
"accounts-bench",
"banking-bench",
"chacha",
"chacha-cuda",
@@ -11,8 +10,6 @@ members = [
"cli-config",
"client",
"core",
"dos",
"download-utils",
"faucet",
"perf",
"validator",
@@ -27,7 +24,6 @@ members = [
"logger",
"log-analyzer",
"merkle-tree",
"streamer",
"measure",
"metrics",
"net-shaper",
@@ -52,7 +48,6 @@ members = [
"sdk",
"sdk-c",
"scripts",
"stake-accounts",
"stake-monitor",
"sys-tuner",
"transaction-status",


@@ -9,7 +9,46 @@ Blockchain Rebuilt for Scale
Solana&trade; is a new blockchain architecture built from the ground up for scale. The architecture supports
up to 710 thousand transactions per second on a gigabit network.
Read all about it at [Solana: Blockchain Rebuilt for Scale](https://docs.solana.com/v/master).
Documentation
===
Before you jump into the code, review the documentation [Solana: Blockchain Rebuilt for Scale](https://docs.solana.com).
(The _latest_ development version of the docs is [available here](https://docs.solana.com/v/master).)
Release Binaries
===
Official release binaries are available at [Github Releases](https://github.com/solana-labs/solana/releases).
Additionally we provide pre-release binaries for the latest code on the edge and
beta channels. Note that these pre-release binaries may be less stable than an
official release.
### Edge channel
#### Linux (x86_64-unknown-linux-gnu)
* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
#### mac OS (x86_64-apple-darwin)
* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-apple-darwin.tar.bz2)
* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
#### Windows (x86_64-pc-windows-msvc)
* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-pc-windows-msvc.tar.bz2)
* [solana-install-init.exe](http://release.solana.com/edge/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
#### All platforms
* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/edge/solana-metrics.tar.bz2)
### Beta channel
#### Linux (x86_64-unknown-linux-gnu)
* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
#### mac OS (x86_64-apple-darwin)
* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-apple-darwin.tar.bz2)
* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
#### Windows (x86_64-pc-windows-msvc)
* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-pc-windows-msvc.tar.bz2)
* [solana-install-init.exe](http://release.solana.com/beta/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
#### All platforms
* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/beta/solana-metrics.tar.bz2)
Developing
===


@@ -1,22 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.1.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.1.13" }
solana-runtime = { path = "../runtime", version = "1.1.13" }
solana-measure = { path = "../measure", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
rand = "0.7.0"
clap = "2.33.0"
crossbeam-channel = "0.4"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,105 +0,0 @@
use clap::{value_t, App, Arg};
use rayon::prelude::*;
use solana_measure::measure::Measure;
use solana_runtime::{
accounts::{create_test_accounts, update_accounts, Accounts},
accounts_index::Ancestors,
};
use solana_sdk::pubkey::Pubkey;
use std::fs;
use std::path::PathBuf;
fn main() {
solana_logger::setup();
let matches = App::new("crate")
.about("about")
.version("version")
.arg(
Arg::with_name("num_slots")
.long("num_slots")
.takes_value(true)
.value_name("SLOTS")
.help("Number of slots to store to."),
)
.arg(
Arg::with_name("num_accounts")
.long("num_accounts")
.takes_value(true)
.value_name("NUM_ACCOUNTS")
.help("Total number of accounts"),
)
.arg(
Arg::with_name("iterations")
.long("iterations")
.takes_value(true)
.value_name("ITERATIONS")
.help("Number of bench iterations"),
)
.arg(
Arg::with_name("clean")
.long("clean")
.takes_value(false)
.help("Run clean"),
)
.get_matches();
let num_slots = value_t!(matches, "num_slots", usize).unwrap_or(4);
let num_accounts = value_t!(matches, "num_accounts", usize).unwrap_or(10_000);
let iterations = value_t!(matches, "iterations", usize).unwrap_or(20);
let clean = matches.is_present("clean");
println!("clean: {:?}", clean);
let path = PathBuf::from("farf/accounts-bench");
if fs::remove_dir_all(path.clone()).is_err() {
println!("Warning: Couldn't remove {:?}", path);
}
let accounts = Accounts::new(vec![path]);
println!("Creating {} accounts", num_accounts);
let mut create_time = Measure::start("create accounts");
let pubkeys: Vec<_> = (0..num_slots)
.into_par_iter()
.map(|slot| {
let mut pubkeys: Vec<Pubkey> = vec![];
create_test_accounts(
&accounts,
&mut pubkeys,
num_accounts / num_slots,
slot as u64,
);
pubkeys
})
.collect();
let pubkeys: Vec<_> = pubkeys.into_iter().flatten().collect();
create_time.stop();
println!(
"created {} accounts in {} slots {}",
(num_accounts / num_slots) * num_slots,
num_slots,
create_time
);
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..num_slots {
ancestors.insert(i as u64, i - 1);
accounts.add_root(i as u64);
}
for x in 0..iterations {
if clean {
let mut time = Measure::start("clean");
accounts.accounts_db.clean_accounts();
time.stop();
println!("{}", time);
for slot in 0..num_slots {
update_accounts(&accounts, &pubkeys, ((x + 1) * num_slots + slot) as u64);
accounts.add_root((x * num_slots + slot) as u64);
}
} else {
let mut pubkeys: Vec<Pubkey> = vec![];
let mut time = Measure::start("hash");
let hash = accounts.accounts_db.update_accounts_hash(0, &ancestors);
time.stop();
println!("hash: {} {}", hash, time);
create_test_accounts(&accounts, &mut pubkeys, 1, 0);
}
}
}
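
The bench above fans account creation out across slots with rayon and then flattens the per-slot results. Below is a minimal, self-contained sketch of that same fan-out/flatten pattern; rayon is the only assumed dependency, and the "accounts" here are just labels rather than real runtime accounts:

```rust
use rayon::prelude::*;

fn main() {
    let num_slots: usize = 4;
    let accounts_per_slot: usize = 3;

    // One worker per slot, mirroring the bench's `into_par_iter().map(..)` above.
    let per_slot: Vec<Vec<String>> = (0..num_slots)
        .into_par_iter()
        .map(|slot| {
            (0..accounts_per_slot)
                .map(|i| format!("slot{}-account{}", slot, i))
                .collect()
        })
        .collect();

    // Flatten the per-slot vectors into one list, as the bench does with its pubkeys.
    let all: Vec<String> = per_slot.into_iter().flatten().collect();
    assert_eq!(all.len(), num_slots * accounts_per_slot);
    println!("created {} accounts in {} slots", all.len(), num_slots);
}
```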


@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "1.1.13"
version = "1.0.24"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,31 +10,30 @@ edition = "2018"
[dependencies]
bincode = "1.2.1"
crossbeam-channel = "0.4"
crossbeam-channel = "0.3"
ed25519-dalek = "=1.0.0-pre.3"
log = "0.4.8"
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-client = { path = "../client", version = "1.1.13" }
solana-storage-program = { path = "../programs/storage", version = "1.1.13" }
solana-client = { path = "../client", version = "1.0.24" }
solana-storage-program = { path = "../programs/storage", version = "1.0.24" }
thiserror = "1.0"
serde = "1.0.105"
serde_json = "1.0.48"
serde = "1.0.104"
serde_json = "1.0.46"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
solana-chacha = { path = "../chacha", version = "1.1.13" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.13" }
solana-ledger = { path = "../ledger", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-perf = { path = "../perf", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
solana-core = { path = "../core", version = "1.1.13" }
solana-streamer = { path = "../streamer", version = "1.1.13" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.13" }
solana-metrics = { path = "../metrics", version = "1.1.13" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-chacha = { path = "../chacha", version = "1.0.24" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
[dev-dependencies]
hex = "0.4.2"
hex = "0.4.0"
[lib]
name = "solana_archiver_lib"


@@ -10,14 +10,15 @@ use solana_client::{
};
use solana_core::{
cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
cluster_slots::ClusterSlots,
contact_info::ContactInfo,
gossip_service::GossipService,
packet::{limited_deserialize, PACKET_DATA_SIZE},
repair_service::{self, RepairService, RepairSlotRange, RepairStats, RepairStrategy},
serve_repair::ServeRepair,
shred_fetch_stage::ShredFetchStage,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
storage_stage::NUM_STORAGE_SAMPLES,
streamer::{receiver, responder, PacketReceiver},
window_service::WindowService,
};
use solana_ledger::{
@@ -25,7 +26,6 @@ use solana_ledger::{
};
use solana_net_utils::bind_in_range;
use solana_perf::packet::Packets;
use solana_perf::packet::{limited_deserialize, PACKET_DATA_SIZE};
use solana_perf::recycler::Recycler;
use solana_sdk::packet::Packet;
use solana_sdk::{
@@ -44,7 +44,6 @@ use solana_storage_program::{
storage_contract::StorageContract,
storage_instruction::{self, StorageAccountType},
};
use solana_streamer::streamer::{receiver, responder, PacketReceiver};
use std::{
io::{self, ErrorKind},
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
@@ -52,7 +51,7 @@ use std::{
result,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::{channel, Receiver, Sender},
sync::Arc,
sync::{Arc, RwLock},
thread::{sleep, spawn, JoinHandle},
time::Duration,
};
@@ -184,10 +183,10 @@ impl Archiver {
info!("Archiver: id: {}", keypair.pubkey());
info!("Creating cluster info....");
let cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone());
let mut cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone());
cluster_info.set_entrypoint(cluster_entrypoint.clone());
let cluster_info = Arc::new(cluster_info);
let cluster_slots = Arc::new(ClusterSlots::default());
let cluster_info = Arc::new(RwLock::new(cluster_info));
// Note for now, this ledger will not contain any of the existing entries
// in the ledger located at ledger_path, and will only append on newly received
// entries after being passed to window_service
@@ -262,7 +261,6 @@ impl Archiver {
repair_socket,
shred_fetch_receiver,
slot_sender,
cluster_slots,
) {
Ok(window_service) => window_service,
Err(e) => {
@@ -307,7 +305,7 @@ impl Archiver {
fn run(
meta: &mut ArchiverMeta,
blockstore: &Arc<Blockstore>,
cluster_info: Arc<ClusterInfo>,
cluster_info: Arc<RwLock<ClusterInfo>>,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>,
@@ -364,12 +362,12 @@ impl Archiver {
}
fn redeem_rewards(
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
client_commitment: CommitmentConfig,
) {
let nodes = cluster_info.tvu_peers();
let nodes = cluster_info.read().unwrap().tvu_peers();
let client = solana_core::gossip_service::get_client(&nodes);
if let Ok(Some(account)) =
@@ -401,10 +399,9 @@ impl Archiver {
}
// Find a segment to replicate and download it.
#[allow(clippy::too_many_arguments)]
fn setup(
meta: &mut ArchiverMeta,
cluster_info: Arc<ClusterInfo>,
cluster_info: Arc<RwLock<ClusterInfo>>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
node_info: &ContactInfo,
@@ -412,7 +409,6 @@ impl Archiver {
repair_socket: Arc<UdpSocket>,
shred_fetch_receiver: PacketReceiver,
slot_sender: Sender<u64>,
cluster_slots: Arc<ClusterSlots>,
) -> Result<WindowService> {
let slots_per_segment =
match Self::get_segment_config(&cluster_info, meta.client_commitment) {
@@ -470,7 +466,6 @@ impl Archiver {
RepairStrategy::RepairRange(repair_slot_range),
&Arc::new(LeaderScheduleCache::default()),
|_, _, _, _| true,
cluster_slots,
);
info!("waiting for ledger download");
Self::wait_for_segment_download(
@@ -490,7 +485,7 @@ impl Archiver {
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
node_info: &ContactInfo,
cluster_info: Arc<ClusterInfo>,
cluster_info: Arc<RwLock<ClusterInfo>>,
) {
info!(
"window created, waiting for ledger download starting at slot {:?}",
@@ -518,8 +513,11 @@ impl Archiver {
contact_info.tvu = "0.0.0.0:0".parse().unwrap();
contact_info.wallclock = timestamp();
// copy over the adopted shred_version from the entrypoint
contact_info.shred_version = cluster_info.my_shred_version();
cluster_info.update_contact_info(|current| *current = contact_info);
contact_info.shred_version = cluster_info.read().unwrap().my_data().shred_version;
{
let mut cluster_info_w = cluster_info.write().unwrap();
cluster_info_w.insert_self(contact_info);
}
}
fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc<Blockstore>) -> Result<()> {
@@ -622,12 +620,12 @@ impl Archiver {
fn submit_mining_proof(
meta: &ArchiverMeta,
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
) {
// No point if we've got no storage account...
let nodes = cluster_info.tvu_peers();
let nodes = cluster_info.read().unwrap().tvu_peers();
let client = solana_core::gossip_service::get_client(&nodes);
let storage_balance = client
.poll_get_balance_with_commitment(&storage_keypair.pubkey(), meta.client_commitment);
@@ -685,10 +683,13 @@ impl Archiver {
}
fn get_segment_config(
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
client_commitment: CommitmentConfig,
) -> Result<u64> {
let rpc_peers = cluster_info.all_rpc_peers();
let rpc_peers = {
let cluster_info = cluster_info.read().unwrap();
cluster_info.all_rpc_peers()
};
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
let rpc_client = {
@@ -696,11 +697,16 @@ impl Archiver {
RpcClient::new_socket(rpc_peers[node_index].rpc)
};
Ok(rpc_client
.send::<u64>(
RpcRequest::GetSlotsPerSegment,
.send(
&RpcRequest::GetSlotsPerSegment,
serde_json::json!([client_commitment]),
0,
)
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
ArchiverError::ClientError(err)
})?
.as_u64()
.unwrap())
} else {
Err(ArchiverError::NoRpcPeers)
@@ -709,7 +715,7 @@ impl Archiver {
/// Waits until the first segment is ready, and returns the current segment
fn poll_for_segment(
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
slots_per_segment: u64,
previous_blockhash: &Hash,
exit: &Arc<AtomicBool>,
@@ -729,28 +735,38 @@ impl Archiver {
/// Poll for a different blockhash and associated max_slot than `previous_blockhash`
fn poll_for_blockhash_and_slot(
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
slots_per_segment: u64,
previous_blockhash: &Hash,
exit: &Arc<AtomicBool>,
) -> Result<(Hash, u64)> {
info!("waiting for the next turn...");
loop {
let rpc_peers = cluster_info.all_rpc_peers();
let rpc_peers = {
let cluster_info = cluster_info.read().unwrap();
cluster_info.all_rpc_peers()
};
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
let rpc_client = {
let node_index = thread_rng().gen_range(0, rpc_peers.len());
RpcClient::new_socket(rpc_peers[node_index].rpc)
};
let response = rpc_client
.send(
&RpcRequest::GetStorageTurn,
serde_json::value::Value::Null,
0,
)
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
ArchiverError::ClientError(err)
})?;
let RpcStorageTurn {
blockhash: storage_blockhash,
slot: turn_slot,
} = rpc_client.send(
RpcRequest::GetStorageTurn,
serde_json::value::Value::Null,
0,
)?;
} = serde_json::from_value::<RpcStorageTurn>(response)
.map_err(ArchiverError::JsonError)?;
let turn_blockhash = storage_blockhash.parse().map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
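
The hunks above move the archiver back to the v1.0 `Arc<RwLock<ClusterInfo>>` API, so every call such as `tvu_peers()` or `all_rpc_peers()` now takes the read lock first and scopes the guard. A minimal std-only sketch of that locking shape, with a placeholder type standing in for `ClusterInfo`:

```rust
use std::sync::{Arc, RwLock};

// Placeholder for ClusterInfo; only the locking pattern matters here.
struct ClusterInfo {
    peers: Vec<String>,
}

impl ClusterInfo {
    fn tvu_peers(&self) -> Vec<String> {
        self.peers.clone()
    }
}

fn main() {
    let cluster_info = Arc::new(RwLock::new(ClusterInfo {
        peers: vec!["127.0.0.1:8001".to_string()],
    }));

    // v1.0 style: take the read lock, call the method, and drop the guard
    // promptly (the diff wraps this in a block for exactly that reason).
    let nodes = {
        let cluster_info = cluster_info.read().unwrap();
        cluster_info.tvu_peers()
    };
    assert_eq!(nodes.len(), 1);

    // Mutations (e.g. insert_self in the diff) go through the write lock.
    cluster_info
        .write()
        .unwrap()
        .peers
        .push("127.0.0.1:8002".to_string());
}
```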


@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "1.1.13"
version = "1.0.24"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,15 +11,15 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.7.0"
solana-chacha = { path = "../chacha", version = "1.1.13" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.13" }
solana-ledger = { path = "../ledger", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-perf = { path = "../perf", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
solana-chacha = { path = "../chacha", version = "1.0.24" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[dev-dependencies]
hex = "0.4.2"
hex = "0.4.0"
[lib]
name = "solana_archiver_utils"


@@ -2,21 +2,21 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.1.13"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.10.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
solana-core = { path = "../core", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-metrics = { path = "../metrics", version = "1.1.13" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.13" }
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
console = "0.9.2"
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[package.metadata.docs.rs]


@@ -2,24 +2,22 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.1.13"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.13" }
solana-streamer = { path = "../streamer", version = "1.1.13" }
solana-perf = { path = "../perf", version = "1.1.13" }
solana-ledger = { path = "../ledger", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-runtime = { path = "../runtime", version = "1.1.13" }
solana-measure = { path = "../measure", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
rayon = "1.2.0"
solana-core = { path = "../core", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-measure = { path = "../measure", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
rand = "0.7.0"
crossbeam-channel = "0.4"
crossbeam-channel = "0.3"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,36 +2,29 @@ use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_core::{
banking_stage::{create_test_recorder, BankingStage},
cluster_info::ClusterInfo,
cluster_info::Node,
poh_recorder::PohRecorder,
poh_recorder::WorkingBankEntry,
};
use solana_ledger::{
bank_forks::BankForks,
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_core::banking_stage::{create_test_recorder, BankingStage};
use solana_core::cluster_info::ClusterInfo;
use solana_core::cluster_info::Node;
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_core::packet::to_packets_chunked;
use solana_core::poh_recorder::PohRecorder;
use solana_core::poh_recorder::WorkingBankEntry;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_measure::measure::Measure;
use solana_perf::packet::to_packets_chunked;
use solana_runtime::bank::Bank;
use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::Keypair,
signature::Signature,
system_transaction,
timing::{duration_as_us, timestamp},
transaction::Transaction,
};
use std::{
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
thread::sleep,
time::{Duration, Instant},
};
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signature;
use solana_sdk::system_transaction;
use solana_sdk::timing::{duration_as_us, timestamp};
use solana_sdk::transaction::Transaction;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex, RwLock};
use std::thread::sleep;
use std::time::{Duration, Instant};
fn check_txs(
receiver: &Arc<Receiver<WorkingBankEntry>>,
@@ -152,7 +145,7 @@ fn main() {
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.1.13"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,28 +10,28 @@ publish = false
[dependencies]
clap = "2.32.0"
itertools = "0.9.0"
itertools = "0.8.2"
log = "0.4.8"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.7.0"
rayon = "1.3.0"
serde_json = "1.0.48"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
solana-core = { path = "../core", version = "1.1.13" }
solana-genesis = { path = "../genesis", version = "1.1.13" }
solana-client = { path = "../client", version = "1.1.13" }
solana-faucet = { path = "../faucet", version = "1.1.13" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-metrics = { path = "../metrics", version = "1.1.13" }
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
solana-runtime = { path = "../runtime", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-genesis = { path = "../genesis", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-faucet = { path = "../faucet", version = "1.0.24" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.1.13" }
solana-local-cluster = { path = "../local-cluster", version = "1.0.24" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -86,7 +86,7 @@ fn test_exchange_bank_client() {
solana_logger::setup();
let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
let mut bank = Bank::new(&genesis_config);
bank.add_static_program("exchange_program", id(), process_instruction);
bank.add_instruction_processor(id(), process_instruction);
let clients = vec![BankClient::new(bank)];
let mut config = Config::default();
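
The test above swaps v1.1's `bank.add_static_program("exchange_program", id(), process_instruction)` for v1.0's `bank.add_instruction_processor(id(), process_instruction)`; both register an entrypoint under a program id. A stand-in sketch of that registration pattern follows (not the real `Bank` API, just a keyed table of entrypoints):

```rust
use std::collections::HashMap;

// Stand-ins for the real solana-sdk / solana-runtime types.
type ProgramId = [u8; 32];
type ProcessInstruction = fn(&[u8]) -> Result<(), String>;

#[derive(Default)]
struct Bank {
    processors: HashMap<ProgramId, ProcessInstruction>,
}

impl Bank {
    // Same shape as v1.0's add_instruction_processor: key the entrypoint by id.
    fn add_instruction_processor(&mut self, id: ProgramId, process: ProcessInstruction) {
        self.processors.insert(id, process);
    }

    fn process(&self, id: &ProgramId, data: &[u8]) -> Result<(), String> {
        self.processors
            .get(id)
            .ok_or_else(|| "unknown program".to_string())
            .and_then(|process| process(data))
    }
}

fn exchange_process_instruction(_data: &[u8]) -> Result<(), String> {
    Ok(())
}

fn main() {
    let mut bank = Bank::default();
    let id = [7u8; 32];
    bank.add_instruction_processor(id, exchange_process_instruction);
    assert!(bank.process(&id, &[]).is_ok());
}
```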


@@ -2,17 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.1.13"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
solana-streamer = { path = "../streamer", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
use clap::{crate_description, crate_name, App, Arg};
use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_streamer::streamer::{receiver, PacketReceiver};
use solana_core::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_core::streamer::{receiver, PacketReceiver};
use std::cmp::max;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.1.13"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,27 +11,27 @@ homepage = "https://solana.com/"
bincode = "1.2.1"
clap = "2.33.0"
log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.48"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
solana-core = { path = "../core", version = "1.1.13" }
solana-genesis = { path = "../genesis", version = "1.1.13" }
solana-client = { path = "../client", version = "1.1.13" }
solana-faucet = { path = "../faucet", version = "1.1.13" }
#solana-librapay = { path = "../programs/librapay", version = "1.1.8", optional = true }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-metrics = { path = "../metrics", version = "1.1.13" }
solana-measure = { path = "../measure", version = "1.1.13" }
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
solana-runtime = { path = "../runtime", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
#solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.8", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-core = { path = "../core", version = "1.0.24" }
solana-genesis = { path = "../genesis", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-faucet = { path = "../faucet", version = "1.0.24" }
#solana-librapay = { path = "../programs/librapay", version = "1.0.20", optional = true }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
solana-measure = { path = "../measure", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
#solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.20", optional = true }
[dev-dependencies]
serial_test = "0.4.0"
serial_test = "0.3.2"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.1.13" }
solana-local-cluster = { path = "../local-cluster", version = "1.0.24" }
#[features]
#move = ["solana-librapay", "solana-move-loader-program"]


@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "1.1.13"
version = "1.0.24"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,12 +10,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.13" }
solana-chacha = { path = "../chacha", version = "1.1.13" }
solana-ledger = { path = "../ledger", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-perf = { path = "../perf", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.24" }
solana-chacha = { path = "../chacha", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[dev-dependencies]
hex-literal = "0.2.1"


@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "1.1.13"
version = "1.0.24"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"


@@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "1.1.13"
version = "1.0.24"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2018"
log = "0.4.8"
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.13" }
solana-ledger = { path = "../ledger", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-perf = { path = "../perf", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.24" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
[dev-dependencies]
hex-literal = "0.2.1"


@@ -32,7 +32,6 @@ RUN set -x \
&& cargo install cargo-audit \
&& cargo install svgbob_cli \
&& cargo install mdbook \
&& cargo install mdbook-linkcheck \
&& rustc --version \
&& cargo --version \
&& curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \


@@ -2,8 +2,10 @@
set -e
cd "$(dirname "$0")/.."
# shellcheck source=multinode-demo/common.sh
source multinode-demo/common.sh
rm -f config/run/init-completed
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
timeout 15 ./run.sh &
pid=$!
@@ -17,6 +19,14 @@ while [[ ! -f config/run/init-completed ]]; do
fi
done
while [[ $($solana_cli slot --commitment recent) -eq 0 ]]; do
sleep 1
done
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
wait $pid
$solana_ledger_tool create-snapshot --ledger config/ledger 1 config/snapshot-ledger
cp config/ledger/genesis.tar.bz2 config/snapshot-ledger
tar -C config/snapshot-ledger -xvf config/snapshot-ledger/genesis.tar.bz2
$solana_ledger_tool verify --ledger config/snapshot-ledger
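
The sanity script shuts the test validator down over JSON RPC by POSTing `validatorExit` to `http://localhost:8899`. For reference, a small Rust sketch of the same call, assuming serde_json and reqwest with the "blocking" and "json" features are available (as they are elsewhere in this changeset):

```rust
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Same payload the script sends with curl.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "validatorExit",
    });

    // Blocking POST to the local RPC port used by run.sh.
    let response = reqwest::blocking::Client::new()
        .post("http://localhost:8899")
        .json(&request)
        .send()?;
    println!("validatorExit returned HTTP {}", response.status());
    Ok(())
}
```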


@@ -67,9 +67,8 @@ _ cargo +$rust_nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
_ cargo +$rust_nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verbose} --features=bpf_c \
-- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE"
# Run banking/accounts bench. Doesn't require nightly, but use since it is already built.
# Run banking bench. Doesn't require nightly, but use since it is already built.
_ cargo +$rust_nightly run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
_ cargo +$rust_nightly run --release --manifest-path accounts-bench/Cargo.toml ${V:+--verbose} -- --num_accounts 10000 --num_slots 4 | tee -a "$BENCH_FILE"
# `solana-upload-perf` disabled as it can take over 30 minutes to complete for some
# reason


@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.1.13"
version = "1.0.24"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"


@@ -8,15 +8,11 @@ pub const COMMITMENT_ARG: ArgConstant<'static> = ArgConstant {
};
pub fn commitment_arg<'a, 'b>() -> Arg<'a, 'b> {
commitment_arg_with_default("recent")
}
pub fn commitment_arg_with_default<'a, 'b>(default_value: &'static str) -> Arg<'a, 'b> {
Arg::with_name(COMMITMENT_ARG.name)
.long(COMMITMENT_ARG.long)
.takes_value(true)
.possible_values(&["recent", "root", "max"])
.default_value(default_value)
.default_value("recent")
.value_name("COMMITMENT_LEVEL")
.help(COMMITMENT_ARG.help)
}
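
The hunk above removes `commitment_arg_with_default` and pins the `--commitment` default back to "recent". A self-contained sketch of the same clap 2.x pattern, using a hypothetical stand-alone binary rather than the real `COMMITMENT_ARG` constants:

```rust
use clap::{App, Arg};

fn main() {
    let matches = App::new("commitment-demo")
        .arg(
            Arg::with_name("commitment")
                .long("commitment")
                .takes_value(true)
                .possible_values(&["recent", "root", "max"])
                .default_value("recent")
                .value_name("COMMITMENT_LEVEL")
                .help("Return information at the selected commitment level"),
        )
        .get_matches();

    // clap supplies the default when the flag is omitted, so unwrap is safe here.
    let commitment = matches.value_of("commitment").unwrap();
    println!("commitment level: {}", commitment);
}
```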


@@ -63,21 +63,6 @@ pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option<Keypair> {
}
}
pub fn keypairs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Keypair>> {
matches.values_of(name).map(|values| {
values
.filter_map(|value| {
if value == ASK_KEYWORD {
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
keypair_from_seed_phrase(name, skip_validation, true).ok()
} else {
read_keypair_file(value).ok()
}
})
.collect()
})
}
// Return a pubkey for an argument that can itself be parsed into a pubkey,
// or is a filename that can be read as a keypair
pub fn pubkey_of(matches: &ArgMatches<'_>, name: &str) -> Option<Pubkey> {
@@ -117,7 +102,7 @@ pub fn pubkeys_sigs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<(Pubk
pub fn signer_of(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<(Option<Box<dyn Signer>>, Option<Pubkey>), Box<dyn std::error::Error>> {
if let Some(location) = matches.value_of(name) {
let signer = signer_from_path(matches, location, name, wallet_manager)?;
@@ -131,7 +116,7 @@ pub fn signer_of(
pub fn pubkey_of_signer(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Option<Pubkey>, Box<dyn std::error::Error>> {
if let Some(location) = matches.value_of(name) {
Ok(Some(pubkey_from_path(
@@ -148,7 +133,7 @@ pub fn pubkey_of_signer(
pub fn pubkeys_of_multiple_signers(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Option<Vec<Pubkey>>, Box<dyn std::error::Error>> {
if let Some(pubkey_matches) = matches.values_of(name) {
let mut pubkeys: Vec<Pubkey> = vec![];
@@ -164,7 +149,7 @@ pub fn pubkeys_of_multiple_signers(
pub fn resolve_signer(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn std::error::Error>> {
Ok(resolve_signer_from_path(
matches,


@@ -8,7 +8,7 @@ use clap::ArgMatches;
use rpassword::prompt_password_stderr;
use solana_remote_wallet::{
remote_keypair::generate_remote_keypair,
remote_wallet::{maybe_wallet_manager, RemoteWalletError, RemoteWalletManager},
remote_wallet::{RemoteWalletError, RemoteWalletManager},
};
use solana_sdk::{
pubkey::Pubkey,
@@ -64,7 +64,7 @@ pub fn signer_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Box<dyn Signer>, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Ask => {
@@ -88,9 +88,6 @@ pub fn signer_from_path(
Ok(Box::new(read_keypair(&mut stdin)?))
}
KeypairUrl::Usb(path) => {
if wallet_manager.is_none() {
*wallet_manager = maybe_wallet_manager()?;
}
if let Some(wallet_manager) = wallet_manager {
Ok(Box::new(generate_remote_keypair(
path,
@@ -125,7 +122,7 @@ pub fn pubkey_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Pubkey, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Pubkey(pubkey) => Ok(pubkey),
@@ -137,7 +134,7 @@ pub fn resolve_signer_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Ask => {
@@ -161,9 +158,6 @@ pub fn resolve_signer_from_path(
read_keypair(&mut stdin).map(|_| None)
}
KeypairUrl::Usb(path) => {
if wallet_manager.is_none() {
*wallet_manager = maybe_wallet_manager()?;
}
if let Some(wallet_manager) = wallet_manager {
let path = generate_remote_keypair(
path,
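
The signature change running through these hunks, `&mut Option<Arc<RemoteWalletManager>>` back to `Option<&Arc<RemoteWalletManager>>`, is what removes the lazy `maybe_wallet_manager()` initialization: a mutable `Option` lets the callee create and cache the wallet manager on first use, while `Option<&T>` only lets it borrow one the caller already built. A std-only sketch of the two shapes, with a placeholder type standing in for `RemoteWalletManager`:

```rust
use std::sync::Arc;

// Placeholder; the real RemoteWalletManager talks to hardware wallets.
struct WalletManager;

fn connect() -> Option<Arc<WalletManager>> {
    // Stand-in for maybe_wallet_manager(): pretend a device was found.
    Some(Arc::new(WalletManager))
}

// v1.1 shape: the callee may lazily populate the caller's Option.
fn use_wallet_lazy(wallet_manager: &mut Option<Arc<WalletManager>>) -> bool {
    if wallet_manager.is_none() {
        *wallet_manager = connect();
    }
    wallet_manager.is_some()
}

// v1.0 shape (what this backport restores): the caller must create it up front.
fn use_wallet(wallet_manager: Option<&Arc<WalletManager>>) -> bool {
    wallet_manager.is_some()
}

fn main() {
    let mut maybe_manager = None;
    assert!(use_wallet_lazy(&mut maybe_manager));
    assert!(use_wallet(maybe_manager.as_ref()));
}
```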


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.13"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,7 +11,7 @@ homepage = "https://solana.com/"
[dependencies]
dirs = "2.0.2"
lazy_static = "1.4.0"
serde = "1.0.105"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
url = "2.1.1"


@@ -26,7 +26,7 @@ impl Default for Config {
keypair_path.extend(&[".config", "solana", "id.json"]);
keypair_path.to_str().unwrap().to_string()
};
let json_rpc_url = "https://api.mainnet-beta.solana.com".to_string();
let json_rpc_url = "http://127.0.0.1:8899".to_string();
// Empty websocket_url string indicates the client should
// `Config::compute_websocket_url(&json_rpc_url)`
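
The default RPC URL moves back to the local validator, and the comment notes that an empty `websocket_url` makes the client derive one via `Config::compute_websocket_url(&json_rpc_url)`. That helper is not shown in this diff, so the following is an illustration only, under the assumption that the derivation flips the scheme to ws/wss and bumps an explicit port by one:

```rust
// Hypothetical sketch; the real Config::compute_websocket_url is not in this
// diff, so treat the exact rules here as an assumption.
fn compute_websocket_url(json_rpc_url: &str) -> String {
    let ws = json_rpc_url
        .replacen("https://", "wss://", 1)
        .replacen("http://", "ws://", 1);
    if let Some((host, port)) = ws.rsplit_once(':') {
        // If the URL carries an explicit numeric port, bump it for the websocket.
        if !port.is_empty() && port.chars().all(|c| c.is_ascii_digit()) {
            if let Ok(port) = port.parse::<u32>() {
                return format!("{}:{}", host, port + 1);
            }
        }
    }
    ws
}

fn main() {
    assert_eq!(
        compute_websocket_url("http://127.0.0.1:8899"),
        "ws://127.0.0.1:8900"
    );
    println!("{}", compute_websocket_url("http://127.0.0.1:8899"));
}
```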


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.13"
version = "1.0.24"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,44 +11,44 @@ homepage = "https://solana.com/"
[dependencies]
bincode = "1.2.1"
bs58 = "0.3.0"
chrono = { version = "0.4.11", features = ["serde"] }
chrono = { version = "0.4.10", features = ["serde"] }
clap = "2.33.0"
criterion-stats = "0.3.0"
ctrlc = { version = "3.1.4", features = ["termination"] }
console = "0.10.0"
console = "0.9.2"
dirs = "2.0.2"
log = "0.4.8"
Inflector = "0.11.4"
indicatif = "0.14.0"
humantime = "2.0.0"
num-traits = "0.2"
pretty-hex = "0.1.1"
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde = "1.0.105"
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls"] }
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-budget-program = { path = "../programs/budget", version = "1.1.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
solana-cli-config = { path = "../cli-config", version = "1.1.13" }
solana-client = { path = "../client", version = "1.1.13" }
solana-config-program = { path = "../programs/config", version = "1.1.13" }
solana-faucet = { path = "../faucet", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.13" }
solana-runtime = { path = "../runtime", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
solana-stake-program = { path = "../programs/stake", version = "1.1.13" }
solana-storage-program = { path = "../programs/storage", version = "1.1.13" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.13" }
solana-vote-program = { path = "../programs/vote", version = "1.1.13" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.13" }
thiserror = "1.0.13"
serde_json = "1.0.46"
solana-budget-program = { path = "../programs/budget", version = "1.0.24" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-cli-config = { path = "../cli-config", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-config-program = { path = "../programs/config", version = "1.0.24" }
solana-faucet = { path = "../faucet", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-stake-program = { path = "../programs/stake", version = "1.0.24" }
solana-storage-program = { path = "../programs/storage", version = "1.0.24" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.24" }
titlecase = "1.1.0"
thiserror = "1.0.11"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.1.13" }
solana-budget-program = { path = "../programs/budget", version = "1.1.13" }
solana-core = { path = "../core", version = "1.0.24" }
solana-budget-program = { path = "../programs/budget", version = "1.0.24" }
tempfile = "3.1.0"
[[bin]]

File diff suppressed because it is too large.

@@ -1,982 +0,0 @@
use crate::{cli::build_balance_message, display::writeln_name_value};
use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};
use console::{style, Emoji};
use inflector::cases::titlecase::to_title_case;
use serde::Serialize;
use serde_json::{Map, Value};
use solana_client::rpc_response::{
RpcAccountBalance, RpcEpochInfo, RpcKeyedAccount, RpcSupply, RpcVoteAccountInfo,
};
use solana_sdk::{
clock::{self, Epoch, Slot, UnixTimestamp},
native_token::lamports_to_sol,
stake_history::StakeHistoryEntry,
};
use solana_stake_program::stake_state::{Authorized, Lockup};
use solana_vote_program::{
authorized_voters::AuthorizedVoters,
vote_state::{BlockTimestamp, Lockout},
};
use std::{collections::BTreeMap, fmt, time::Duration};
static WARNING: Emoji = Emoji("⚠️", "!");
#[derive(PartialEq)]
pub enum OutputFormat {
Display,
Json,
JsonCompact,
}
impl OutputFormat {
pub fn formatted_string<T>(&self, item: &T) -> String
where
T: Serialize + fmt::Display,
{
match self {
OutputFormat::Display => format!("{}", item),
OutputFormat::Json => serde_json::to_string_pretty(item).unwrap(),
OutputFormat::JsonCompact => serde_json::to_value(item).unwrap().to_string(),
}
}
}
#[derive(Serialize, Deserialize)]
pub struct CliAccount {
#[serde(flatten)]
pub keyed_account: RpcKeyedAccount,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Public Key:", &self.keyed_account.pubkey)?;
writeln_name_value(
f,
"Balance:",
&build_balance_message(
self.keyed_account.account.lamports,
self.use_lamports_unit,
true,
),
)?;
writeln_name_value(f, "Owner:", &self.keyed_account.account.owner)?;
writeln_name_value(
f,
"Executable:",
&self.keyed_account.account.executable.to_string(),
)?;
writeln_name_value(
f,
"Rent Epoch:",
&self.keyed_account.account.rent_epoch.to_string(),
)?;
Ok(())
}
}
#[derive(Default, Serialize, Deserialize)]
pub struct CliBlockProduction {
pub epoch: Epoch,
pub start_slot: Slot,
pub end_slot: Slot,
pub total_slots: usize,
pub total_blocks_produced: usize,
pub total_slots_skipped: usize,
pub leaders: Vec<CliBlockProductionEntry>,
pub individual_slot_status: Vec<CliSlotStatus>,
#[serde(skip_serializing)]
pub verbose: bool,
}
impl fmt::Display for CliBlockProduction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln!(
f,
"{}",
style(format!(
" {:<44} {:>15} {:>15} {:>15} {:>23}",
"Identity Pubkey",
"Leader Slots",
"Blocks Produced",
"Skipped Slots",
"Skipped Slot Percentage",
))
.bold()
)?;
for leader in &self.leaders {
writeln!(
f,
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
leader.identity_pubkey,
leader.leader_slots,
leader.blocks_produced,
leader.skipped_slots,
leader.skipped_slots as f64 / leader.leader_slots as f64 * 100.
)?;
}
writeln!(f)?;
writeln!(
f,
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
format!("Epoch {} total:", self.epoch),
self.total_slots,
self.total_blocks_produced,
self.total_slots_skipped,
self.total_slots_skipped as f64 / self.total_slots as f64 * 100.
)?;
writeln!(
f,
" (using data from {} slots: {} to {})",
self.total_slots, self.start_slot, self.end_slot
)?;
if self.verbose {
writeln!(f)?;
writeln!(f)?;
writeln!(
f,
"{}",
style(format!(" {:<15} {:<44}", "Slot", "Identity Pubkey")).bold(),
)?;
for status in &self.individual_slot_status {
if status.skipped {
writeln!(
f,
"{}",
style(format!(
" {:<15} {:<44} SKIPPED",
status.slot, status.leader
))
.red()
)?;
} else {
writeln!(
f,
"{}",
style(format!(" {:<15} {:<44}", status.slot, status.leader))
)?;
}
}
}
Ok(())
}
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliBlockProductionEntry {
pub identity_pubkey: String,
pub leader_slots: u64,
pub blocks_produced: u64,
pub skipped_slots: u64,
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliSlotStatus {
pub slot: Slot,
pub leader: String,
pub skipped: bool,
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliEpochInfo {
#[serde(flatten)]
pub epoch_info: RpcEpochInfo,
}
impl From<RpcEpochInfo> for CliEpochInfo {
fn from(epoch_info: RpcEpochInfo) -> Self {
Self { epoch_info }
}
}
impl fmt::Display for CliEpochInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Slot:", &self.epoch_info.absolute_slot.to_string())?;
writeln_name_value(f, "Epoch:", &self.epoch_info.epoch.to_string())?;
let start_slot = self.epoch_info.absolute_slot - self.epoch_info.slot_index;
let end_slot = start_slot + self.epoch_info.slots_in_epoch;
writeln_name_value(
f,
"Epoch Slot Range:",
&format!("[{}..{})", start_slot, end_slot),
)?;
writeln_name_value(
f,
"Epoch Completed Percent:",
&format!(
"{:>3.3}%",
self.epoch_info.slot_index as f64 / self.epoch_info.slots_in_epoch as f64 * 100_f64
),
)?;
let remaining_slots_in_epoch = self.epoch_info.slots_in_epoch - self.epoch_info.slot_index;
writeln_name_value(
f,
"Epoch Completed Slots:",
&format!(
"{}/{} ({} remaining)",
self.epoch_info.slot_index,
self.epoch_info.slots_in_epoch,
remaining_slots_in_epoch
),
)?;
writeln_name_value(
f,
"Epoch Completed Time:",
&format!(
"{}/{} ({} remaining)",
slot_to_human_time(self.epoch_info.slot_index),
slot_to_human_time(self.epoch_info.slots_in_epoch),
slot_to_human_time(remaining_slots_in_epoch)
),
)
}
}
fn slot_to_human_time(slot: Slot) -> String {
humantime::format_duration(Duration::from_secs(
slot * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
))
.to_string()
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliValidators {
pub total_active_stake: u64,
pub total_current_stake: u64,
pub total_deliquent_stake: u64,
pub current_validators: Vec<CliValidator>,
pub delinquent_validators: Vec<CliValidator>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliValidators {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn write_vote_account(
f: &mut fmt::Formatter,
validator: &CliValidator,
total_active_stake: u64,
use_lamports_unit: bool,
delinquent: bool,
) -> fmt::Result {
fn non_zero_or_dash(v: u64) -> String {
if v == 0 {
"-".into()
} else {
format!("{}", v)
}
}
writeln!(
f,
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
if delinquent {
WARNING.to_string()
} else {
" ".to_string()
},
validator.identity_pubkey,
validator.vote_account_pubkey,
validator.commission,
non_zero_or_dash(validator.last_vote),
non_zero_or_dash(validator.root_slot),
validator.credits,
if validator.activated_stake > 0 {
format!(
"{} ({:.2}%)",
build_balance_message(validator.activated_stake, use_lamports_unit, true),
100. * validator.activated_stake as f64 / total_active_stake as f64
)
} else {
"-".into()
},
)
}
writeln_name_value(
f,
"Active Stake:",
&build_balance_message(self.total_active_stake, self.use_lamports_unit, true),
)?;
if self.total_deliquent_stake > 0 {
writeln_name_value(
f,
"Current Stake:",
&format!(
"{} ({:0.2}%)",
&build_balance_message(self.total_current_stake, self.use_lamports_unit, true),
100. * self.total_current_stake as f64 / self.total_active_stake as f64
),
)?;
writeln_name_value(
f,
"Delinquent Stake:",
&format!(
"{} ({:0.2}%)",
&build_balance_message(
self.total_deliquent_stake,
self.use_lamports_unit,
true
),
100. * self.total_deliquent_stake as f64 / self.total_active_stake as f64
),
)?;
}
writeln!(f)?;
writeln!(
f,
"{}",
style(format!(
" {:<44} {:<44} {} {} {} {:>7} {}",
"Identity Pubkey",
"Vote Account Pubkey",
"Commission",
"Last Vote",
"Root Block",
"Credits",
"Active Stake",
))
.bold()
)?;
for validator in &self.current_validators {
write_vote_account(
f,
validator,
self.total_active_stake,
self.use_lamports_unit,
false,
)?;
}
for validator in &self.delinquent_validators {
write_vote_account(
f,
validator,
self.total_active_stake,
self.use_lamports_unit,
true,
)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliValidator {
pub identity_pubkey: String,
pub vote_account_pubkey: String,
pub commission: u8,
pub last_vote: u64,
pub root_slot: u64,
pub credits: u64,
pub activated_stake: u64,
}
impl CliValidator {
pub fn new(vote_account: &RpcVoteAccountInfo, current_epoch: Epoch) -> Self {
Self {
identity_pubkey: vote_account.node_pubkey.to_string(),
vote_account_pubkey: vote_account.vote_pubkey.to_string(),
commission: vote_account.commission,
last_vote: vote_account.last_vote,
root_slot: vote_account.root_slot,
credits: vote_account
.epoch_credits
.iter()
.find_map(|(epoch, credits, _)| {
if *epoch == current_epoch {
Some(*credits)
} else {
None
}
})
.unwrap_or(0),
activated_stake: vote_account.activated_stake,
}
}
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliNonceAccount {
pub balance: u64,
pub minimum_balance_for_rent_exemption: u64,
pub nonce: Option<String>,
pub lamports_per_signature: Option<u64>,
pub authority: Option<String>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliNonceAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Balance: {}",
build_balance_message(self.balance, self.use_lamports_unit, true)
)?;
writeln!(
f,
"Minimum Balance Required: {}",
build_balance_message(
self.minimum_balance_for_rent_exemption,
self.use_lamports_unit,
true
)
)?;
let nonce = self.nonce.as_deref().unwrap_or("uninitialized");
writeln!(f, "Nonce: {}", nonce)?;
if let Some(fees) = self.lamports_per_signature {
writeln!(f, "Fee: {} lamports per signature", fees)?;
} else {
writeln!(f, "Fees: uninitialized")?;
}
let authority = self.authority.as_deref().unwrap_or("uninitialized");
writeln!(f, "Authority: {}", authority)
}
}
#[derive(Serialize, Deserialize)]
pub struct CliStakeVec(Vec<CliKeyedStakeState>);
impl CliStakeVec {
pub fn new(list: Vec<CliKeyedStakeState>) -> Self {
Self(list)
}
}
impl fmt::Display for CliStakeVec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for state in &self.0 {
writeln!(f)?;
write!(f, "{}", state)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliKeyedStakeState {
pub stake_pubkey: String,
#[serde(flatten)]
pub stake_state: CliStakeState,
}
impl fmt::Display for CliKeyedStakeState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Stake Pubkey: {}", self.stake_pubkey)?;
write!(f, "{}", self.stake_state)
}
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliStakeState {
pub stake_type: CliStakeType,
pub total_stake: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub delegated_stake: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub delegated_vote_account_address: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub activation_epoch: Option<Epoch>,
#[serde(skip_serializing_if = "Option::is_none")]
pub deactivation_epoch: Option<Epoch>,
#[serde(flatten, skip_serializing_if = "Option::is_none")]
pub authorized: Option<CliAuthorized>,
#[serde(flatten, skip_serializing_if = "Option::is_none")]
pub lockup: Option<CliLockup>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliStakeState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn show_authorized(f: &mut fmt::Formatter, authorized: &CliAuthorized) -> fmt::Result {
writeln!(f, "Stake Authority: {}", authorized.staker)?;
writeln!(f, "Withdraw Authority: {}", authorized.withdrawer)?;
Ok(())
}
fn show_lockup(f: &mut fmt::Formatter, lockup: &CliLockup) -> fmt::Result {
writeln!(
f,
"Lockup Timestamp: {} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(
NaiveDateTime::from_timestamp(lockup.unix_timestamp, 0),
Utc
)
.to_rfc3339_opts(SecondsFormat::Secs, true),
lockup.unix_timestamp
)?;
writeln!(f, "Lockup Epoch: {}", lockup.epoch)?;
writeln!(f, "Lockup Custodian: {}", lockup.custodian)?;
Ok(())
}
match self.stake_type {
CliStakeType::RewardsPool => writeln!(f, "Stake account is a rewards pool")?,
CliStakeType::Uninitialized => writeln!(f, "Stake account is uninitialized")?,
CliStakeType::Initialized => {
writeln!(
f,
"Total Stake: {}",
build_balance_message(self.total_stake, self.use_lamports_unit, true)
)?;
writeln!(f, "Stake account is undelegated")?;
show_authorized(f, self.authorized.as_ref().unwrap())?;
show_lockup(f, self.lockup.as_ref().unwrap())?;
}
CliStakeType::Stake => {
writeln!(
f,
"Total Stake: {}",
build_balance_message(self.total_stake, self.use_lamports_unit, true)
)?;
writeln!(
f,
"Delegated Stake: {}",
build_balance_message(
self.delegated_stake.unwrap(),
self.use_lamports_unit,
true
)
)?;
if let Some(delegated_vote_account_address) = &self.delegated_vote_account_address {
writeln!(
f,
"Delegated Vote Account Address: {}",
delegated_vote_account_address
)?;
}
writeln!(
f,
"Stake activates starting from epoch: {}",
self.activation_epoch.unwrap()
)?;
if let Some(deactivation_epoch) = self.deactivation_epoch {
writeln!(
f,
"Stake deactivates starting from epoch: {}",
deactivation_epoch
)?;
}
show_authorized(f, self.authorized.as_ref().unwrap())?;
show_lockup(f, self.lockup.as_ref().unwrap())?;
}
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub enum CliStakeType {
Stake,
RewardsPool,
Uninitialized,
Initialized,
}
impl Default for CliStakeType {
fn default() -> Self {
Self::Uninitialized
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliStakeHistory {
pub entries: Vec<CliStakeHistoryEntry>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliStakeHistory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln!(
f,
"{}",
style(format!(
" {:<5} {:>20} {:>20} {:>20}",
"Epoch", "Effective Stake", "Activating Stake", "Deactivating Stake",
))
.bold()
)?;
for entry in &self.entries {
writeln!(
f,
" {:>5} {:>20} {:>20} {:>20} {}",
entry.epoch,
build_balance_message(entry.effective_stake, self.use_lamports_unit, false),
build_balance_message(entry.activating_stake, self.use_lamports_unit, false),
build_balance_message(entry.deactivating_stake, self.use_lamports_unit, false),
if self.use_lamports_unit {
"lamports"
} else {
"SOL"
}
)?;
}
Ok(())
}
}
impl From<&(Epoch, StakeHistoryEntry)> for CliStakeHistoryEntry {
fn from((epoch, entry): &(Epoch, StakeHistoryEntry)) -> Self {
Self {
epoch: *epoch,
effective_stake: entry.effective,
activating_stake: entry.activating,
deactivating_stake: entry.deactivating,
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliStakeHistoryEntry {
pub epoch: Epoch,
pub effective_stake: u64,
pub activating_stake: u64,
pub deactivating_stake: u64,
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliAuthorized {
pub staker: String,
pub withdrawer: String,
}
impl From<&Authorized> for CliAuthorized {
fn from(authorized: &Authorized) -> Self {
Self {
staker: authorized.staker.to_string(),
withdrawer: authorized.withdrawer.to_string(),
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliLockup {
pub unix_timestamp: UnixTimestamp,
pub epoch: Epoch,
pub custodian: String,
}
impl From<&Lockup> for CliLockup {
fn from(lockup: &Lockup) -> Self {
Self {
unix_timestamp: lockup.unix_timestamp,
epoch: lockup.epoch,
custodian: lockup.custodian.to_string(),
}
}
}
#[derive(Serialize, Deserialize)]
pub struct CliValidatorInfoVec(Vec<CliValidatorInfo>);
impl CliValidatorInfoVec {
pub fn new(list: Vec<CliValidatorInfo>) -> Self {
Self(list)
}
}
impl fmt::Display for CliValidatorInfoVec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.0.is_empty() {
writeln!(f, "No validator info accounts found")?;
}
for validator_info in &self.0 {
writeln!(f)?;
write!(f, "{}", validator_info)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliValidatorInfo {
pub identity_pubkey: String,
pub info_pubkey: String,
pub info: Map<String, Value>,
}
impl fmt::Display for CliValidatorInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Validator Identity Pubkey:", &self.identity_pubkey)?;
writeln_name_value(f, " Info Pubkey:", &self.info_pubkey)?;
for (key, value) in self.info.iter() {
writeln_name_value(
f,
&format!(" {}:", to_title_case(key)),
&value.as_str().unwrap_or("?"),
)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliVoteAccount {
pub account_balance: u64,
pub validator_identity: String,
#[serde(flatten)]
pub authorized_voters: CliAuthorizedVoters,
pub authorized_withdrawer: String,
pub credits: u64,
pub commission: u8,
pub root_slot: Option<Slot>,
pub recent_timestamp: BlockTimestamp,
pub votes: Vec<CliLockout>,
pub epoch_voting_history: Vec<CliEpochVotingHistory>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
}
impl fmt::Display for CliVoteAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Account Balance: {}",
build_balance_message(self.account_balance, self.use_lamports_unit, true)
)?;
writeln!(f, "Validator Identity: {}", self.validator_identity)?;
writeln!(f, "Authorized Voters: {}", self.authorized_voters)?;
writeln!(f, "Authorized Withdrawer: {}", self.authorized_withdrawer)?;
writeln!(f, "Credits: {}", self.credits)?;
writeln!(f, "Commission: {}%", self.commission)?;
writeln!(
f,
"Root Slot: {}",
match self.root_slot {
Some(slot) => slot.to_string(),
None => "~".to_string(),
}
)?;
writeln!(f, "Recent Timestamp: {:?}", self.recent_timestamp)?;
if !self.votes.is_empty() {
writeln!(f, "Recent Votes:")?;
for vote in &self.votes {
writeln!(
f,
"- slot: {}\n confirmation count: {}",
vote.slot, vote.confirmation_count
)?;
}
writeln!(f, "Epoch Voting History:")?;
for epoch_info in &self.epoch_voting_history {
writeln!(
f,
"- epoch: {}\n slots in epoch: {}\n credits earned: {}",
epoch_info.epoch, epoch_info.slots_in_epoch, epoch_info.credits_earned,
)?;
}
}
Ok(())
}
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliAuthorizedVoters {
authorized_voters: BTreeMap<Epoch, String>,
}
impl fmt::Display for CliAuthorizedVoters {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.authorized_voters)
}
}
impl From<&AuthorizedVoters> for CliAuthorizedVoters {
fn from(authorized_voters: &AuthorizedVoters) -> Self {
let mut voter_map: BTreeMap<Epoch, String> = BTreeMap::new();
for (epoch, voter) in authorized_voters.iter() {
voter_map.insert(*epoch, voter.to_string());
}
Self {
authorized_voters: voter_map,
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliEpochVotingHistory {
pub epoch: Epoch,
pub slots_in_epoch: u64,
pub credits_earned: u64,
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliLockout {
pub slot: Slot,
pub confirmation_count: u32,
}
impl From<&Lockout> for CliLockout {
fn from(lockout: &Lockout) -> Self {
Self {
slot: lockout.slot,
confirmation_count: lockout.confirmation_count,
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliBlockTime {
pub slot: Slot,
pub timestamp: UnixTimestamp,
}
impl fmt::Display for CliBlockTime {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Block:", &self.slot.to_string())?;
writeln_name_value(
f,
"Date:",
&format!(
"{} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(self.timestamp, 0), Utc)
.to_rfc3339_opts(SecondsFormat::Secs, true),
self.timestamp
),
)
}
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct CliSignOnlyData {
pub blockhash: String,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub signers: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub absent: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub bad_sig: Vec<String>,
}
impl fmt::Display for CliSignOnlyData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Blockhash:", &self.blockhash)?;
if !self.signers.is_empty() {
writeln!(f, "{}", style("Signers (Pubkey=Signature):").bold())?;
for signer in self.signers.iter() {
writeln!(f, " {}", signer)?;
}
}
if !self.absent.is_empty() {
writeln!(f, "{}", style("Absent Signers (Pubkey):").bold())?;
for pubkey in self.absent.iter() {
writeln!(f, " {}", pubkey)?;
}
}
if !self.bad_sig.is_empty() {
writeln!(f, "{}", style("Bad Signatures (Pubkey):").bold())?;
for pubkey in self.bad_sig.iter() {
writeln!(f, " {}", pubkey)?;
}
}
Ok(())
}
}
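
CliSignOnlyData lists each present signer as a single "Pubkey=Signature" string; the offline helpers later in this diff (parse_sign_only_reply_string) split those entries back apart on the '=' separator. A rough, self-contained sketch of that split, with placeholder values rather than real keys:

fn split_signer_entry(entry: &str) -> Option<(&str, &str)> {
    // Split only on the first '=' and return the pubkey/signature halves.
    let mut parts = entry.splitn(2, '=');
    Some((parts.next()?, parts.next()?))
}

fn main() {
    let entry = "ExamplePubkey111=ExampleSignature111"; // placeholder values
    if let Some((pubkey, signature)) = split_signer_entry(entry) {
        println!("pubkey: {}", pubkey);
        println!("signature: {}", signature);
    }
}
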
#[derive(Serialize, Deserialize)]
pub struct CliSignature {
pub signature: String,
}
impl fmt::Display for CliSignature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Signature:", &self.signature)?;
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct CliAccountBalances {
pub accounts: Vec<RpcAccountBalance>,
}
impl fmt::Display for CliAccountBalances {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"{}",
style(format!("{:<44} {}", "Address", "Balance",)).bold()
)?;
for account in &self.accounts {
writeln!(
f,
"{:<44} {}",
account.address,
&format!("{} SOL", lamports_to_sol(account.lamports))
)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct CliSupply {
pub total: u64,
pub circulating: u64,
pub non_circulating: u64,
pub non_circulating_accounts: Vec<String>,
#[serde(skip_serializing)]
pub print_accounts: bool,
}
impl From<RpcSupply> for CliSupply {
fn from(rpc_supply: RpcSupply) -> Self {
Self {
total: rpc_supply.total,
circulating: rpc_supply.circulating,
non_circulating: rpc_supply.non_circulating,
non_circulating_accounts: rpc_supply.non_circulating_accounts,
print_accounts: false,
}
}
}
impl fmt::Display for CliSupply {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Total:", &format!("{} SOL", lamports_to_sol(self.total)))?;
writeln_name_value(
f,
"Circulating:",
&format!("{} SOL", lamports_to_sol(self.circulating)),
)?;
writeln_name_value(
f,
"Non-Circulating:",
&format!("{} SOL", lamports_to_sol(self.non_circulating)),
)?;
if self.print_accounts {
writeln!(f)?;
writeln_name_value(f, "Non-Circulating Accounts:", " ")?;
for account in &self.non_circulating_accounts {
writeln!(f, " {}", account)?;
}
}
Ok(())
}
}
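
A recurring design in these cli_output types: each pairs a derived serde::Serialize (machine-readable JSON output) with a hand-written fmt::Display (human-readable output), and #[serde(skip_serializing)] keeps display-only flags such as print_accounts and use_lamports_unit out of the JSON. A minimal stand-alone sketch of that pattern, with illustrative names and values not taken from this diff:

use serde::Serialize;
use std::fmt;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct SupplySketch {
    total: u64,
    non_circulating: u64,
    #[serde(skip_serializing)]
    print_accounts: bool, // display-only flag, never serialized
}

impl fmt::Display for SupplySketch {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "Total: {} lamports", self.total)?;
        writeln!(f, "Non-Circulating: {} lamports", self.non_circulating)
    }
}

fn main() {
    let supply = SupplySketch { total: 500, non_circulating: 100, print_accounts: false };
    print!("{}", supply);                                    // human-readable path
    println!("{}", serde_json::to_string(&supply).unwrap()); // JSON path
}
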

View File

@@ -1,9 +1,11 @@
use crate::{
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
cli_output::*,
cli::{
build_balance_message, check_account_for_fee, CliCommand, CliCommandInfo, CliConfig,
CliError, ProcessResult,
},
display::println_name_value,
};
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use solana_clap_utils::{
@@ -15,22 +17,19 @@ use solana_clap_utils::{
use solana_client::{
pubsub_client::{PubsubClient, SlotInfoMessage},
rpc_client::RpcClient,
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
rpc_response::RpcVoteAccountInfo,
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
account_utils::StateMut,
clock::{self, Clock, Slot},
clock::{self, Slot},
commitment_config::CommitmentConfig,
epoch_schedule::Epoch,
hash::Hash,
message::Message,
native_token::lamports_to_sol,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction,
sysvar::{self, Sysvar},
transaction::Transaction,
};
use std::{
@@ -46,6 +45,7 @@ use std::{
static CHECK_MARK: Emoji = Emoji("✅ ", "");
static CROSS_MARK: Emoji = Emoji("❌ ", "");
static WARNING: Emoji = Emoji("⚠️", "!");
pub trait ClusterQuerySubCommands {
fn cluster_query_subcommands(self) -> Self;
@@ -72,18 +72,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.takes_value(true)
.validator(is_url)
.help("JSON RPC URL for validator, which is useful for validators with a private RPC service")
)
.arg(
Arg::with_name("follow")
.long("follow")
.takes_value(false)
.help("Continue reporting progress even after the validator has caught up"),
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("cluster-date")
.about("Get current cluster date, computed from genesis creation time and network time")
),
)
.subcommand(
SubCommand::with_name("cluster-version")
@@ -98,6 +87,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.index(1)
.takes_value(true)
.value_name("SLOT")
.required(true)
.help("Slot number of the block to query")
)
)
@@ -122,38 +112,6 @@ impl ClusterQuerySubCommands for App<'_, '_> {
SubCommand::with_name("epoch").about("Get current epoch")
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("largest-accounts").about("Get addresses of largest cluster accounts")
.arg(
Arg::with_name("circulating")
.long("circulating")
.takes_value(false)
.help("Filter address list to only circulating accounts")
)
.arg(
Arg::with_name("non_circulating")
.long("non-circulating")
.takes_value(false)
.conflicts_with("circulating")
.help("Filter address list to only non-circulating accounts")
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("supply").about("Get information about the cluster supply of SOL")
.arg(
Arg::with_name("print_accounts")
.long("print-accounts")
.takes_value(false)
.help("Print list of non-circulating account addresses")
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("total-supply").about("Get total number of SOL")
.setting(AppSettings::Hidden)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("transaction-count").about("Get current transaction count")
.alias("get-transaction-count")
@@ -256,57 +214,19 @@ impl ClusterQuerySubCommands for App<'_, '_> {
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("transaction-history")
.about("Show historical transactions affecting the given address, \
ordered by the slot in which they were confirmed, \
from lowest to highest slot")
.arg(
Arg::with_name("address")
.index(1)
.value_name("ADDRESS")
.required(true)
.validator(is_valid_pubkey)
.help("Account address"),
)
.arg(
Arg::with_name("end_slot")
.takes_value(false)
.value_name("SLOT")
.index(2)
.validator(is_slot)
.help(
"Slot to start from [default: latest slot at maximum commitment]"
),
)
.arg(
Arg::with_name("limit")
.long("limit")
.takes_value(true)
.value_name("NUMBER OF SLOTS")
.validator(is_slot)
.help(
"Limit the search to this many slots"
),
),
)
}
}
pub fn parse_catchup(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let node_pubkey = pubkey_of_signer(matches, "node_pubkey", wallet_manager)?.unwrap();
let node_json_rpc_url = value_t!(matches, "node_json_rpc_url", String).ok();
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
let follow = matches.is_present("follow");
Ok(CliCommandInfo {
command: CliCommand::Catchup {
node_pubkey,
node_json_rpc_url,
commitment_config,
follow,
},
signers: vec![],
})
@@ -315,7 +235,7 @@ pub fn parse_catchup(
pub fn parse_cluster_ping(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let lamports = value_t_or_exit!(matches, "lamports", u64);
let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64));
@@ -344,7 +264,7 @@ pub fn parse_cluster_ping(
}
pub fn parse_get_block_time(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let slot = value_of(matches, "slot");
let slot = value_t_or_exit!(matches, "slot", u64);
Ok(CliCommandInfo {
command: CliCommand::GetBlockTime { slot },
signers: vec![],
@@ -375,44 +295,6 @@ pub fn parse_get_epoch(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliEr
})
}
pub fn parse_largest_accounts(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
let filter = if matches.is_present("circulating") {
Some(RpcLargestAccountsFilter::Circulating)
} else if matches.is_present("non_circulating") {
Some(RpcLargestAccountsFilter::NonCirculating)
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::LargestAccounts {
commitment_config,
filter,
},
signers: vec![],
})
}
pub fn parse_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
let print_accounts = matches.is_present("print_accounts");
Ok(CliCommandInfo {
command: CliCommand::Supply {
commitment_config,
print_accounts,
},
signers: vec![],
})
}
pub fn parse_total_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
command: CliCommand::TotalSupply { commitment_config },
signers: vec![],
})
}
pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
@@ -423,7 +305,7 @@ pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliComman
pub fn parse_show_stakes(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let use_lamports_unit = matches.is_present("lamports");
let vote_account_pubkeys =
@@ -451,25 +333,6 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
})
}
pub fn parse_transaction_history(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
let end_slot = value_t!(matches, "end_slot", Slot).ok();
let slot_limit = value_t!(matches, "limit", u64)
.unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE);
Ok(CliCommandInfo {
command: CliCommand::TransactionHistory {
address,
end_slot,
slot_limit,
},
signers: vec![],
})
}
/// Creates a new progress bar for processing that will take an unknown amount of time
fn new_spinner_progress_bar() -> ProgressBar {
let progress_bar = ProgressBar::new(42);
@@ -483,37 +346,20 @@ pub fn process_catchup(
rpc_client: &RpcClient,
node_pubkey: &Pubkey,
node_json_rpc_url: &Option<String>,
commitment_config: CommitmentConfig,
follow: bool,
) -> ProcessResult {
let sleep_interval = 5;
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Connecting...");
let cluster_nodes = rpc_client.get_cluster_nodes()?;
let node_client = if let Some(node_json_rpc_url) = node_json_rpc_url {
RpcClient::new(node_json_rpc_url.to_string())
} else {
let rpc_addr = loop {
let cluster_nodes = rpc_client.get_cluster_nodes()?;
if let Some(contact_info) = cluster_nodes
RpcClient::new_socket(
cluster_nodes
.iter()
.find(|contact_info| contact_info.pubkey == node_pubkey.to_string())
{
if let Some(rpc_addr) = contact_info.rpc {
break rpc_addr;
}
progress_bar.set_message(&format!("RPC service not found for {}", node_pubkey));
} else {
progress_bar.set_message(&format!(
"Contact information not found for {}",
node_pubkey
));
}
sleep(Duration::from_secs(sleep_interval as u64));
};
RpcClient::new_socket(rpc_addr)
.ok_or_else(|| format!("Contact information not found for {}", node_pubkey))?
.rpc
.ok_or_else(|| format!("RPC service not found for {}", node_pubkey))?,
)
};
let reported_node_pubkey = node_client.get_identity()?;
@@ -529,12 +375,16 @@ pub fn process_catchup(
return Err("Both RPC URLs reference the same node, unable to monitor for catchup. Try a different --url".into());
}
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Connecting...");
let mut previous_rpc_slot = std::u64::MAX;
let mut previous_slot_distance = 0;
let sleep_interval = 5;
loop {
let rpc_slot = rpc_client.get_slot_with_commitment(commitment_config)?;
let node_slot = node_client.get_slot_with_commitment(commitment_config)?;
if !follow && node_slot > std::cmp::min(previous_rpc_slot, rpc_slot) {
let rpc_slot = rpc_client.get_slot_with_commitment(CommitmentConfig::recent())?;
let node_slot = node_client.get_slot_with_commitment(CommitmentConfig::recent())?;
if node_slot > std::cmp::min(previous_rpc_slot, rpc_slot) {
progress_bar.finish_and_clear();
return Ok(format!(
"{} has caught up (us:{} them:{})",
@@ -560,7 +410,7 @@ pub fn process_catchup(
slot_distance,
node_slot,
rpc_slot,
if slot_distance == 0 || previous_rpc_slot == std::u64::MAX {
if previous_rpc_slot == std::u64::MAX {
"".to_string()
} else {
format!(
@@ -582,23 +432,6 @@ pub fn process_catchup(
}
}
pub fn process_cluster_date(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult {
let result = rpc_client
.get_account_with_commitment(&sysvar::clock::id(), CommitmentConfig::default())?;
if let Some(clock_account) = result.value {
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?;
let block_time = CliBlockTime {
slot: result.context.slot,
timestamp: clock.unix_timestamp,
};
Ok(config.output_format.formatted_string(&block_time))
} else {
Err(format!("AccountNotFound: pubkey={}", sysvar::clock::id()).into())
}
}
pub fn process_cluster_version(rpc_client: &RpcClient) -> ProcessResult {
let remote_version = rpc_client.get_version()?;
Ok(remote_version.solana_core)
@@ -648,30 +481,57 @@ pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
Ok("".to_string())
}
pub fn process_get_block_time(
rpc_client: &RpcClient,
config: &CliConfig,
slot: Option<Slot>,
) -> ProcessResult {
let slot = if let Some(slot) = slot {
slot
} else {
rpc_client.get_slot()?
};
pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResult {
let timestamp = rpc_client.get_block_time(slot)?;
let block_time = CliBlockTime { slot, timestamp };
Ok(config.output_format.formatted_string(&block_time))
Ok(timestamp.to_string())
}
fn slot_to_human_time(slot: Slot) -> String {
humantime::format_duration(Duration::from_secs(
slot * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
))
.to_string()
}
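// Worked example: slot_to_human_time converts a slot count to wall-clock time via the
// default tick constants. Assuming the defaults of 64 ticks per slot and 160 ticks per
// second (400 ms slots), 432_000 slots works out to exactly two days. A tiny sketch of
// that arithmetic (illustrative, not part of this diff):
fn main() {
    let secs = 432_000u64 * 64 / 160;
    assert_eq!(secs, 172_800); // exactly two days
    println!("{}", humantime::format_duration(std::time::Duration::from_secs(secs)));
}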
pub fn process_get_epoch_info(
rpc_client: &RpcClient,
config: &CliConfig,
commitment_config: CommitmentConfig,
) -> ProcessResult {
let epoch_info: CliEpochInfo = rpc_client
.get_epoch_info_with_commitment(commitment_config.clone())?
.into();
Ok(config.output_format.formatted_string(&epoch_info))
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
println!();
println_name_value("Slot:", &epoch_info.absolute_slot.to_string());
println_name_value("Epoch:", &epoch_info.epoch.to_string());
let start_slot = epoch_info.absolute_slot - epoch_info.slot_index;
let end_slot = start_slot + epoch_info.slots_in_epoch;
println_name_value(
"Epoch Slot Range:",
&format!("[{}..{})", start_slot, end_slot),
);
println_name_value(
"Epoch Completed Percent:",
&format!(
"{:>3.3}%",
epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100_f64
),
);
let remaining_slots_in_epoch = epoch_info.slots_in_epoch - epoch_info.slot_index;
println_name_value(
"Epoch Completed Slots:",
&format!(
"{}/{} ({} remaining)",
epoch_info.slot_index, epoch_info.slots_in_epoch, remaining_slots_in_epoch
),
);
println_name_value(
"Epoch Completed Time:",
&format!(
"{}/{} ({} remaining)",
slot_to_human_time(epoch_info.slot_index),
slot_to_human_time(epoch_info.slots_in_epoch),
slot_to_human_time(remaining_slots_in_epoch)
),
);
Ok("".to_string())
}
pub fn process_get_genesis_hash(rpc_client: &RpcClient) -> ProcessResult {
@@ -712,7 +572,7 @@ pub fn process_show_block_production(
slot_limit: Option<u64>,
) -> ProcessResult {
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::root())?;
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::max())?;
let epoch = epoch.unwrap_or(epoch_info.epoch);
if epoch > epoch_info.epoch {
@@ -763,15 +623,15 @@ pub fn process_show_block_production(
let start_slot_index = (start_slot - first_slot_in_epoch) as usize;
let end_slot_index = (end_slot - first_slot_in_epoch) as usize;
let total_slots = end_slot_index - start_slot_index + 1;
let total_blocks_produced = confirmed_blocks.len();
assert!(total_blocks_produced <= total_slots);
let total_slots_skipped = total_slots - total_blocks_produced;
let total_blocks = confirmed_blocks.len();
assert!(total_blocks <= total_slots);
let total_slots_skipped = total_slots - total_blocks;
let mut leader_slot_count = HashMap::new();
let mut leader_skipped_slots = HashMap::new();
progress_bar.set_message(&format!("Fetching leader schedule for epoch {}...", epoch));
let leader_schedule = rpc_client
.get_leader_schedule_with_commitment(Some(start_slot), CommitmentConfig::root())?;
.get_leader_schedule_with_commitment(Some(start_slot), CommitmentConfig::max())?;
if leader_schedule.is_none() {
return Err(format!("Unable to fetch leader schedule for slot {}", start_slot).into());
}
@@ -789,7 +649,7 @@ pub fn process_show_block_production(
progress_bar.set_message(&format!(
"Processing {} slots containing {} blocks and {} empty slots...",
total_slots, total_blocks_produced, total_slots_skipped
total_slots, total_blocks, total_slots_skipped
));
let mut confirmed_blocks_index = 0;
@@ -808,88 +668,72 @@ pub fn process_show_block_production(
continue;
}
if slot_of_next_confirmed_block == slot {
individual_slot_status.push(CliSlotStatus {
slot,
leader: (*leader).to_string(),
skipped: false,
});
individual_slot_status
.push(style(format!(" {:<15} {:<44}", slot, leader)).to_string());
break;
}
}
*skipped_slots += 1;
individual_slot_status.push(CliSlotStatus {
slot,
leader: (*leader).to_string(),
skipped: true,
});
individual_slot_status.push(
style(format!(" {:<15} {:<44} SKIPPED", slot, leader))
.red()
.to_string(),
);
break;
}
}
progress_bar.finish_and_clear();
println!(
"\n{}",
style(format!(
" {:<44} {:>15} {:>15} {:>15} {:>23}",
"Identity Pubkey",
"Leader Slots",
"Blocks Produced",
"Skipped Slots",
"Skipped Slot Percentage",
))
.bold()
);
let mut leaders: Vec<CliBlockProductionEntry> = leader_slot_count
.iter()
.map(|(leader, leader_slots)| {
let skipped_slots = leader_skipped_slots.get(leader).unwrap();
let blocks_produced = leader_slots - skipped_slots;
CliBlockProductionEntry {
identity_pubkey: (**leader).to_string(),
leader_slots: *leader_slots,
blocks_produced,
skipped_slots: *skipped_slots,
}
})
.collect();
leaders.sort_by(|a, b| a.identity_pubkey.partial_cmp(&b.identity_pubkey).unwrap());
let block_production = CliBlockProduction {
epoch,
start_slot,
end_slot,
let mut table = vec![];
for (leader, leader_slots) in leader_slot_count.iter() {
let skipped_slots = leader_skipped_slots.get(leader).unwrap();
let blocks_produced = leader_slots - skipped_slots;
table.push(format!(
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
leader,
leader_slots,
blocks_produced,
skipped_slots,
*skipped_slots as f64 / *leader_slots as f64 * 100.
));
}
table.sort();
println!(
"{}\n\n {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
table.join("\n"),
format!("Epoch {} total:", epoch),
total_slots,
total_blocks_produced,
total_blocks,
total_slots_skipped,
leaders,
individual_slot_status,
verbose: config.verbose,
};
Ok(config.output_format.formatted_string(&block_production))
}
total_slots_skipped as f64 / total_slots as f64 * 100.
);
println!(
" (using data from {} slots: {} to {})",
total_slots, start_slot, end_slot
);
pub fn process_largest_accounts(
rpc_client: &RpcClient,
config: &CliConfig,
commitment_config: CommitmentConfig,
filter: Option<RpcLargestAccountsFilter>,
) -> ProcessResult {
let accounts = rpc_client
.get_largest_accounts_with_config(RpcLargestAccountsConfig {
commitment: Some(commitment_config),
filter,
})?
.value;
let largest_accounts = CliAccountBalances { accounts };
Ok(config.output_format.formatted_string(&largest_accounts))
}
pub fn process_supply(
rpc_client: &RpcClient,
config: &CliConfig,
commitment_config: CommitmentConfig,
print_accounts: bool,
) -> ProcessResult {
let supply_response = rpc_client.supply_with_commitment(commitment_config.clone())?;
let mut supply: CliSupply = supply_response.value.into();
supply.print_accounts = print_accounts;
Ok(config.output_format.formatted_string(&supply))
}
pub fn process_total_supply(
rpc_client: &RpcClient,
commitment_config: CommitmentConfig,
) -> ProcessResult {
let total_supply = rpc_client.total_supply_with_commitment(commitment_config.clone())?;
Ok(format!("{} SOL", lamports_to_sol(total_supply)))
if config.verbose {
println!(
"\n\n{}\n{}",
style(format!(" {:<15} {:<44}", "Slot", "Identity Pubkey")).bold(),
individual_slot_status.join("\n")
);
}
Ok("".to_string())
}
pub fn process_get_transaction_count(
@@ -1162,11 +1006,10 @@ pub fn process_show_gossip(rpc_client: &RpcClient) -> ProcessResult {
pub fn process_show_stakes(
rpc_client: &RpcClient,
config: &CliConfig,
use_lamports_unit: bool,
vote_account_pubkeys: Option<&[Pubkey]>,
) -> ProcessResult {
use crate::stake::build_stake_state;
use crate::stake::print_stake_state;
use solana_stake_program::stake_state::StakeState;
let progress_bar = new_spinner_progress_bar();
@@ -1174,20 +1017,13 @@ pub fn process_show_stakes(
let all_stake_accounts = rpc_client.get_program_accounts(&solana_stake_program::id())?;
progress_bar.finish_and_clear();
let mut stake_accounts: Vec<CliKeyedStakeState> = vec![];
for (stake_pubkey, stake_account) in all_stake_accounts {
if let Ok(stake_state) = stake_account.state() {
match stake_state {
StakeState::Initialized(_) => {
if vote_account_pubkeys.is_none() {
stake_accounts.push(CliKeyedStakeState {
stake_pubkey: stake_pubkey.to_string(),
stake_state: build_stake_state(
stake_account.lamports,
&stake_state,
use_lamports_unit,
),
});
println!("\nstake pubkey: {}", stake_pubkey);
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
}
}
StakeState::Stake(_, stake) => {
@@ -1196,28 +1032,19 @@ pub fn process_show_stakes(
.unwrap()
.contains(&stake.delegation.voter_pubkey)
{
stake_accounts.push(CliKeyedStakeState {
stake_pubkey: stake_pubkey.to_string(),
stake_state: build_stake_state(
stake_account.lamports,
&stake_state,
use_lamports_unit,
),
});
println!("\nstake pubkey: {}", stake_pubkey);
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
}
}
_ => {}
}
}
}
Ok(config
.output_format
.formatted_string(&CliStakeVec::new(stake_accounts)))
Ok("".to_string())
}
pub fn process_show_validators(
rpc_client: &RpcClient,
config: &CliConfig,
use_lamports_unit: bool,
commitment_config: CommitmentConfig,
) -> ProcessResult {
@@ -1227,63 +1054,123 @@ pub fn process_show_validators(
.current
.iter()
.chain(vote_accounts.delinquent.iter())
.fold(0, |acc, vote_account| acc + vote_account.activated_stake);
.fold(0, |acc, vote_account| acc + vote_account.activated_stake)
as f64;
let total_deliquent_stake = vote_accounts
.delinquent
.iter()
.fold(0, |acc, vote_account| acc + vote_account.activated_stake);
.fold(0, |acc, vote_account| acc + vote_account.activated_stake)
as f64;
let total_current_stake = total_active_stake - total_deliquent_stake;
let mut current = vote_accounts.current;
current.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
let current_validators: Vec<CliValidator> = current
.iter()
.map(|vote_account| CliValidator::new(vote_account, epoch_info.epoch))
.collect();
let mut delinquent = vote_accounts.delinquent;
delinquent.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
let delinquent_validators: Vec<CliValidator> = delinquent
.iter()
.map(|vote_account| CliValidator::new(vote_account, epoch_info.epoch))
.collect();
let cli_validators = CliValidators {
total_active_stake,
total_current_stake,
total_deliquent_stake,
current_validators,
delinquent_validators,
use_lamports_unit,
};
Ok(config.output_format.formatted_string(&cli_validators))
}
pub fn process_transaction_history(
rpc_client: &RpcClient,
address: &Pubkey,
end_slot: Option<Slot>, // None == use latest slot
slot_limit: u64,
) -> ProcessResult {
let end_slot = {
if let Some(end_slot) = end_slot {
end_slot
} else {
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
}
};
let start_slot = end_slot.saturating_sub(slot_limit);
println_name_value(
"Active Stake:",
&build_balance_message(total_active_stake as u64, use_lamports_unit, true),
);
if total_deliquent_stake > 0. {
println_name_value(
"Current Stake:",
&format!(
"{} ({:0.2}%)",
&build_balance_message(total_current_stake as u64, use_lamports_unit, true),
100. * total_current_stake / total_active_stake
),
);
println_name_value(
"Delinquent Stake:",
&format!(
"{} ({:0.2}%)",
&build_balance_message(total_deliquent_stake as u64, use_lamports_unit, true),
100. * total_deliquent_stake / total_active_stake
),
);
}
println!();
println!(
"Transactions affecting {} within slots [{},{}]",
address, start_slot, end_slot
"{}",
style(format!(
" {:<44} {:<44} {} {} {} {:>7} {}",
"Identity Pubkey",
"Vote Account Pubkey",
"Commission",
"Last Vote",
"Root Block",
"Credits",
"Active Stake",
))
.bold()
);
let signatures =
rpc_client.get_confirmed_signatures_for_address(address, start_slot, end_slot)?;
for signature in &signatures {
println!("{}", signature);
fn print_vote_account(
vote_account: RpcVoteAccountInfo,
current_epoch: Epoch,
total_active_stake: f64,
use_lamports_unit: bool,
delinquent: bool,
) {
fn non_zero_or_dash(v: u64) -> String {
if v == 0 {
"-".into()
} else {
format!("{}", v)
}
}
println!(
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
if delinquent {
WARNING.to_string()
} else {
" ".to_string()
},
vote_account.node_pubkey,
vote_account.vote_pubkey,
vote_account.commission,
non_zero_or_dash(vote_account.last_vote),
non_zero_or_dash(vote_account.root_slot),
vote_account
.epoch_credits
.iter()
.find_map(|(epoch, credits, _)| if *epoch == current_epoch {
Some(*credits)
} else {
None
})
.unwrap_or(0),
if vote_account.activated_stake > 0 {
format!(
"{} ({:.2}%)",
build_balance_message(vote_account.activated_stake, use_lamports_unit, true),
100. * vote_account.activated_stake as f64 / total_active_stake
)
} else {
"-".into()
},
);
}
Ok(format!("{} transactions found", signatures.len(),))
for vote_account in vote_accounts.current.into_iter() {
print_vote_account(
vote_account,
epoch_info.epoch,
total_active_stake,
use_lamports_unit,
false,
);
}
for vote_account in vote_accounts.delinquent.into_iter() {
print_vote_account(
vote_account,
epoch_info.epoch,
total_active_stake,
use_lamports_unit,
true,
);
}
Ok("".to_string())
}
#[cfg(test)]
@@ -1305,22 +1192,11 @@ mod tests {
let (default_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap();
let test_cluster_version = test_commands
.clone()
.get_matches_from(vec!["test", "cluster-date"]);
assert_eq!(
parse_command(&test_cluster_version, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::ClusterDate,
signers: vec![],
}
);
let test_cluster_version = test_commands
.clone()
.get_matches_from(vec!["test", "cluster-version"]);
assert_eq!(
parse_command(&test_cluster_version, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_cluster_version, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::ClusterVersion,
signers: vec![],
@@ -1329,7 +1205,7 @@ mod tests {
let test_fees = test_commands.clone().get_matches_from(vec!["test", "fees"]);
assert_eq!(
parse_command(&test_fees, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_fees, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::Fees,
signers: vec![],
@@ -1342,9 +1218,9 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "block-time", &slot.to_string()]);
assert_eq!(
parse_command(&test_get_block_time, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_get_block_time, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::GetBlockTime { slot: Some(slot) },
command: CliCommand::GetBlockTime { slot },
signers: vec![],
}
);
@@ -1353,7 +1229,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "epoch-info"]);
assert_eq!(
parse_command(&test_get_epoch_info, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_get_epoch_info, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::GetEpochInfo {
commitment_config: CommitmentConfig::recent(),
@@ -1366,7 +1242,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "genesis-hash"]);
assert_eq!(
parse_command(&test_get_genesis_hash, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_get_genesis_hash, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::GetGenesisHash,
signers: vec![],
@@ -1375,7 +1251,7 @@ mod tests {
let test_get_slot = test_commands.clone().get_matches_from(vec!["test", "slot"]);
assert_eq!(
parse_command(&test_get_slot, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_get_slot, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::GetSlot {
commitment_config: CommitmentConfig::recent(),
@@ -1388,7 +1264,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "epoch"]);
assert_eq!(
parse_command(&test_get_epoch, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_get_epoch, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::GetEpoch {
commitment_config: CommitmentConfig::recent(),
@@ -1397,24 +1273,11 @@ mod tests {
}
);
let test_total_supply = test_commands
.clone()
.get_matches_from(vec!["test", "total-supply"]);
assert_eq!(
parse_command(&test_total_supply, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::TotalSupply {
commitment_config: CommitmentConfig::recent(),
},
signers: vec![],
}
);
let test_transaction_count = test_commands
.clone()
.get_matches_from(vec!["test", "transaction-count"]);
assert_eq!(
parse_command(&test_transaction_count, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_transaction_count, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::GetTransactionCount {
commitment_config: CommitmentConfig::recent(),
@@ -1436,7 +1299,7 @@ mod tests {
"max",
]);
assert_eq!(
parse_command(&test_ping, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_ping, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::Ping {
lamports: 1,

View File

@@ -5,7 +5,7 @@ use solana_sdk::{
transaction::Transaction,
};
use solana_transaction_status::RpcTransactionStatusMeta;
use std::{fmt, io};
use std::io;
// Pretty print a "name value"
pub fn println_name_value(name: &str, value: &str) {
@@ -17,15 +17,6 @@ pub fn println_name_value(name: &str, value: &str) {
println!("{} {}", style(name).bold(), styled_value);
}
pub fn writeln_name_value(f: &mut fmt::Formatter, name: &str, value: &str) -> fmt::Result {
let styled_value = if value == "" {
style("(not set)").italic()
} else {
style(value)
};
writeln!(f, "{} {}", style(name).bold(), styled_value)
}
pub fn println_name_value_or(name: &str, value: &str, setting_type: SettingType) {
let description = match setting_type {
SettingType::Explicit => "",

View File

@@ -1,8 +1,4 @@
#[macro_use]
extern crate serde_derive;
pub mod cli;
pub mod cli_output;
pub mod cluster_query;
pub mod display;
pub mod nonce;

View File

@@ -2,15 +2,15 @@ use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches
use console::style;
use solana_clap_utils::{
input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, DisplayError,
input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, offline::SIGN_ONLY_ARG,
DisplayError,
};
use solana_cli::{
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliSigners},
cli_output::OutputFormat,
display::{println_name_value, println_name_value_or},
};
use solana_cli_config::{Config, CONFIG_FILE};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_remote_wallet::remote_wallet::{maybe_wallet_manager, RemoteWalletManager};
use std::{error, sync::Arc};
fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error>> {
@@ -102,7 +102,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
pub fn parse_args<'a>(
matches: &ArgMatches<'_>,
mut wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<Arc<RemoteWalletManager>>,
) -> Result<(CliConfig<'a>, CliSigners), Box<dyn error::Error>> {
let config = if let Some(config_file) = matches.value_of("config_file") {
Config::load(config_file).unwrap_or_default()
@@ -125,16 +125,7 @@ pub fn parse_args<'a>(
);
let CliCommandInfo { command, signers } =
parse_command(&matches, &default_signer_path, &mut wallet_manager)?;
let output_format = matches
.value_of("output_format")
.map(|value| match value {
"json" => OutputFormat::Json,
"json-compact" => OutputFormat::JsonCompact,
_ => unreachable!(),
})
.unwrap_or(OutputFormat::Display);
parse_command(&matches, &default_signer_path, wallet_manager.as_ref())?;
Ok((
CliConfig {
@@ -145,7 +136,6 @@ pub fn parse_args<'a>(
keypair_path: default_signer_path,
rpc_client: None,
verbose: matches.is_present("verbose"),
output_format,
},
signers,
))
@@ -207,14 +197,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.global(true)
.help("Show additional information"),
)
.arg(
Arg::with_name("output_format")
.long("output")
.global(true)
.takes_value(true)
.possible_values(&["json", "json-compact"])
.help("Return information in specified output format. Supports: json, json-compact"),
)
.arg(
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
.long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
@@ -256,12 +238,18 @@ fn main() -> Result<(), Box<dyn error::Error>> {
fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
if parse_settings(&matches)? {
let mut wallet_manager = None;
let wallet_manager = maybe_wallet_manager()?;
let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?;
let (mut config, signers) = parse_args(&matches, wallet_manager)?;
config.signers = signers.iter().map(|s| s.as_ref()).collect();
let result = process_command(&config)?;
println!("{}", result);
let (_, submatches) = matches.subcommand();
let sign_only = submatches
.map(|m| m.is_present(SIGN_ONLY_ARG.name))
.unwrap_or(false);
if !sign_only {
println!("{}", result);
}
};
Ok(())
}

View File

@@ -1,10 +1,7 @@
use crate::{
cli::{
check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult, SignerIndex,
},
cli_output::CliNonceAccount,
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
SignerIndex,
};
use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{
@@ -24,8 +21,9 @@ use solana_sdk::{
},
pubkey::Pubkey,
system_instruction::{
advance_nonce_account, authorize_nonce_account, create_nonce_account,
create_nonce_account_with_seed, withdraw_nonce_account, NonceError, SystemError,
advance_nonce_account, authorize_nonce_account, create_address_with_seed,
create_nonce_account, create_nonce_account_with_seed, withdraw_nonce_account, NonceError,
SystemError,
},
system_program,
transaction::Transaction,
@@ -279,7 +277,7 @@ pub fn data_from_state(state: &State) -> Result<&Data, CliNonceError> {
pub fn parse_authorize_nonce_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let new_authority = pubkey_of_signer(matches, "new_authority", wallet_manager)?.unwrap();
@@ -307,7 +305,7 @@ pub fn parse_authorize_nonce_account(
pub fn parse_nonce_create_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let (nonce_account, nonce_account_pubkey) =
signer_of(matches, "nonce_account_keypair", wallet_manager)?;
@@ -336,7 +334,7 @@ pub fn parse_nonce_create_account(
pub fn parse_get_nonce(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account_pubkey =
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
@@ -350,7 +348,7 @@ pub fn parse_get_nonce(
pub fn parse_new_nonce(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let (nonce_authority, nonce_authority_pubkey) =
@@ -375,7 +373,7 @@ pub fn parse_new_nonce(
pub fn parse_show_nonce_account(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account_pubkey =
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
@@ -393,7 +391,7 @@ pub fn parse_show_nonce_account(
pub fn parse_withdraw_from_nonce_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let destination_account_pubkey =
@@ -462,8 +460,8 @@ pub fn process_authorize_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<NonceError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<NonceError>(result)
}
pub fn process_create_nonce_account(
@@ -476,7 +474,7 @@ pub fn process_create_nonce_account(
) -> ProcessResult {
let nonce_account_pubkey = config.signers[nonce_account].pubkey();
let nonce_account_address = if let Some(seed) = seed.clone() {
Pubkey::create_with_seed(&nonce_account_pubkey, &seed, &system_program::id())?
create_address_with_seed(&nonce_account_pubkey, &seed, &system_program::id())?
} else {
nonce_account_pubkey
};
@@ -539,8 +537,8 @@ pub fn process_create_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
}
pub fn process_get_nonce(rpc_client: &RpcClient, nonce_account_pubkey: &Pubkey) -> ProcessResult {
@@ -580,32 +578,46 @@ pub fn process_new_nonce(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
let result = rpc_client
.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0], nonce_authority]);
log_instruction_custom_error::<SystemError>(result)
}
pub fn process_show_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account_pubkey: &Pubkey,
use_lamports_unit: bool,
) -> ProcessResult {
let nonce_account = get_account(rpc_client, nonce_account_pubkey)?;
let print_account = |data: Option<&nonce::state::Data>| {
let mut nonce_account = CliNonceAccount {
balance: nonce_account.lamports,
minimum_balance_for_rent_exemption: rpc_client
.get_minimum_balance_for_rent_exemption(State::size())?,
use_lamports_unit,
..CliNonceAccount::default()
};
if let Some(ref data) = data {
nonce_account.nonce = Some(data.blockhash.to_string());
nonce_account.lamports_per_signature = Some(data.fee_calculator.lamports_per_signature);
nonce_account.authority = Some(data.authority.to_string());
println!(
"Balance: {}",
build_balance_message(nonce_account.lamports, use_lamports_unit, true)
);
println!(
"Minimum Balance Required: {}",
build_balance_message(
rpc_client.get_minimum_balance_for_rent_exemption(State::size())?,
use_lamports_unit,
true
)
);
match data {
Some(ref data) => {
println!("Nonce: {}", data.blockhash);
println!(
"Fee: {} lamports per signature",
data.fee_calculator.lamports_per_signature
);
println!("Authority: {}", data.authority);
}
None => {
println!("Nonce: uninitialized");
println!("Fees: uninitialized");
println!("Authority: uninitialized");
}
}
Ok(config.output_format.formatted_string(&nonce_account))
Ok("".to_string())
};
match state_from_account(&nonce_account)? {
State::Uninitialized => print_account(None),
@@ -639,8 +651,8 @@ pub fn process_withdraw_from_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<NonceError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<NonceError>(result)
}
#[cfg(test)]
@@ -686,12 +698,7 @@ mod tests {
&Pubkey::default().to_string(),
]);
assert_eq!(
parse_command(
&test_authorize_nonce_account,
&default_keypair_file,
&mut None
)
.unwrap(),
parse_command(&test_authorize_nonce_account, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
nonce_account: nonce_account_pubkey,
@@ -712,12 +719,7 @@ mod tests {
&authority_keypair_file,
]);
assert_eq!(
parse_command(
&test_authorize_nonce_account,
&default_keypair_file,
&mut None
)
.unwrap(),
parse_command(&test_authorize_nonce_account, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
@@ -739,7 +741,7 @@ mod tests {
"50",
]);
assert_eq!(
parse_command(&test_create_nonce_account, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_create_nonce_account, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: 1,
@@ -764,7 +766,7 @@ mod tests {
&authority_keypair_file,
]);
assert_eq!(
parse_command(&test_create_nonce_account, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_create_nonce_account, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: 1,
@@ -786,7 +788,7 @@ mod tests {
&nonce_account_string,
]);
assert_eq!(
parse_command(&test_get_nonce, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_get_nonce, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::GetNonce(nonce_account_keypair.pubkey()),
signers: vec![],
@@ -800,7 +802,7 @@ mod tests {
.get_matches_from(vec!["test", "new-nonce", &keypair_file]);
let nonce_account = read_keypair_file(&keypair_file).unwrap();
assert_eq!(
parse_command(&test_new_nonce, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_new_nonce, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::NewNonce {
nonce_account: nonce_account.pubkey(),
@@ -820,7 +822,7 @@ mod tests {
]);
let nonce_account = read_keypair_file(&keypair_file).unwrap();
assert_eq!(
parse_command(&test_new_nonce, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_new_nonce, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::NewNonce {
nonce_account: nonce_account.pubkey(),
@@ -840,7 +842,7 @@ mod tests {
&nonce_account_string,
]);
assert_eq!(
parse_command(&test_show_nonce_account, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_show_nonce_account, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::ShowNonceAccount {
nonce_account_pubkey: nonce_account_keypair.pubkey(),
@@ -862,7 +864,7 @@ mod tests {
parse_command(
&test_withdraw_from_nonce_account,
&default_keypair_file,
&mut None
None
)
.unwrap(),
CliCommandInfo {
@@ -890,7 +892,7 @@ mod tests {
parse_command(
&test_withdraw_from_nonce_account,
&default_keypair_file,
&mut None
None
)
.unwrap(),
CliCommandInfo {

View File

@@ -79,47 +79,32 @@ pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let mut present_signers: Vec<(Pubkey, Signature)> = Vec::new();
let signer_strings = object.get("signers");
if let Some(sig_strings) = signer_strings {
present_signers = sig_strings
.as_array()
.unwrap()
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
}
let mut absent_signers: Vec<Pubkey> = Vec::new();
let signer_strings = object.get("absent");
if let Some(sig_strings) = signer_strings {
absent_signers = sig_strings
.as_array()
.unwrap()
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
}
let mut bad_signers: Vec<Pubkey> = Vec::new();
let signer_strings = object.get("badSig");
if let Some(sig_strings) = signer_strings {
bad_signers = sig_strings
.as_array()
.unwrap()
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
}
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let present_signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
let signer_strings = object.get("absent").unwrap().as_array().unwrap();
let absent_signers = signer_strings
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
let signer_strings = object.get("badSig").unwrap().as_array().unwrap();
let bad_signers = signer_strings
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
SignOnly {
blockhash,
present_signers,

View File

@@ -1,14 +1,15 @@
use crate::{
cli::{
check_account_for_fee, check_unique_pubkeys, fee_payer_arg, generate_unique_signers,
log_instruction_custom_error, nonce_authority_arg, return_signers, CliCommand,
CliCommandInfo, CliConfig, CliError, ProcessResult, SignerIndex, FEE_PAYER_ARG,
build_balance_message, check_account_for_fee, check_unique_pubkeys, fee_payer_arg,
generate_unique_signers, log_instruction_custom_error, nonce_authority_arg, return_signers,
CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, SignerIndex, FEE_PAYER_ARG,
},
cli_output::{CliStakeHistory, CliStakeHistoryEntry, CliStakeState, CliStakeType},
nonce::{check_nonce_account, nonce_arg, NONCE_ARG, NONCE_AUTHORITY_ARG},
offline::{blockhash_query::BlockhashQuery, *},
};
use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};
use clap::{App, Arg, ArgGroup, ArgMatches, SubCommand};
use console::style;
use solana_clap_utils::{input_parsers::*, input_validators::*, offline::*, ArgConstant};
use solana_client::rpc_client::RpcClient;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
@@ -16,7 +17,7 @@ use solana_sdk::{
account_utils::StateMut,
message::Message,
pubkey::Pubkey,
system_instruction::SystemError,
system_instruction::{create_address_with_seed, SystemError},
sysvar::{
stake_history::{self, StakeHistory},
Sysvar,
@@ -85,7 +86,7 @@ impl StakeSubCommands for App<'_, '_> {
.takes_value(true)
.validator(is_amount)
.required(true)
.help("The amount to send to the stake account, in SOL")
.help("The amount to send to the vote account, in SOL")
)
.arg(
Arg::with_name("custodian")
@@ -313,14 +314,6 @@ impl StakeSubCommands for App<'_, '_> {
.arg(nonce_arg())
.arg(nonce_authority_arg())
.arg(fee_payer_arg())
.arg(
Arg::with_name("custodian")
.long("custodian")
.takes_value(true)
.value_name("KEYPAIR")
.validator(is_valid_signer)
.help("Authority to override account lockup")
)
)
.subcommand(
SubCommand::with_name("stake-set-lockup")
@@ -410,7 +403,7 @@ impl StakeSubCommands for App<'_, '_> {
pub fn parse_stake_create_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let seed = matches.value_of("seed").map(|s| s.to_string());
let epoch = value_of(matches, "lockup_epoch").unwrap_or(0);
@@ -462,7 +455,7 @@ pub fn parse_stake_create_account(
pub fn parse_stake_delegate_stake(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@@ -504,7 +497,7 @@ pub fn parse_stake_delegate_stake(
pub fn parse_stake_authorize(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@@ -587,7 +580,7 @@ pub fn parse_stake_authorize(
pub fn parse_split_stake(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@@ -632,7 +625,7 @@ pub fn parse_split_stake(
pub fn parse_stake_deactivate_stake(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@@ -669,7 +662,7 @@ pub fn parse_stake_deactivate_stake(
pub fn parse_stake_withdraw_stake(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@@ -684,15 +677,11 @@ pub fn parse_stake_withdraw_stake(
let (nonce_authority, nonce_authority_pubkey) =
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?;
let (custodian, custodian_pubkey) = signer_of(matches, "custodian", wallet_manager)?;
let mut bulk_signers = vec![withdraw_authority, fee_payer];
if nonce_account.is_some() {
bulk_signers.push(nonce_authority);
}
if custodian.is_some() {
bulk_signers.push(custodian);
}
let signer_info =
generate_unique_signers(bulk_signers, matches, default_signer_path, wallet_manager)?;
@@ -707,7 +696,6 @@ pub fn parse_stake_withdraw_stake(
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(),
custodian: custodian_pubkey.and_then(|_| signer_info.index_of(custodian_pubkey)),
},
signers: signer_info.signers,
})
@@ -716,7 +704,7 @@ pub fn parse_stake_withdraw_stake(
pub fn parse_stake_set_lockup(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@@ -761,7 +749,7 @@ pub fn parse_stake_set_lockup(
pub fn parse_show_stake_account(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey =
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
@@ -802,7 +790,7 @@ pub fn process_create_stake_account(
) -> ProcessResult {
let stake_account = config.signers[stake_account];
let stake_account_address = if let Some(seed) = seed {
Pubkey::create_with_seed(&stake_account.pubkey(), &seed, &solana_stake_program::id())?
create_address_with_seed(&stake_account.pubkey(), &seed, &solana_stake_program::id())?
} else {
stake_account.pubkey()
};
@@ -881,7 +869,7 @@ pub fn process_create_stake_account(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config)
return_signers(&tx)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -894,8 +882,8 @@ pub fn process_create_stake_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
}
}
@@ -946,7 +934,7 @@ pub fn process_stake_authorize(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config)
return_signers(&tx)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -959,8 +947,8 @@ pub fn process_stake_authorize(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
}
}
@@ -1000,7 +988,7 @@ pub fn process_deactivate_stake_account(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config)
return_signers(&tx)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1013,8 +1001,8 @@ pub fn process_deactivate_stake_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
}
}
@@ -1026,7 +1014,6 @@ pub fn process_withdraw_stake(
destination_account_pubkey: &Pubkey,
lamports: u64,
withdraw_authority: SignerIndex,
custodian: Option<SignerIndex>,
sign_only: bool,
blockhash_query: &BlockhashQuery,
nonce_account: Option<&Pubkey>,
@@ -1036,14 +1023,12 @@ pub fn process_withdraw_stake(
let (recent_blockhash, fee_calculator) =
blockhash_query.get_blockhash_and_fee_calculator(rpc_client)?;
let withdraw_authority = config.signers[withdraw_authority];
let custodian = custodian.map(|index| config.signers[index]);
let ixs = vec![stake_instruction::withdraw(
stake_account_pubkey,
&withdraw_authority.pubkey(),
destination_account_pubkey,
lamports,
custodian.map(|signer| signer.pubkey()).as_ref(),
)];
let fee_payer = config.signers[fee_payer];
@@ -1063,7 +1048,7 @@ pub fn process_withdraw_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config)
return_signers(&tx)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1076,8 +1061,8 @@ pub fn process_withdraw_stake(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
}
}
@@ -1123,7 +1108,7 @@ pub fn process_split_stake(
let stake_authority = config.signers[stake_authority];
let split_stake_account_address = if let Some(seed) = split_stake_account_seed {
Pubkey::create_with_seed(
create_address_with_seed(
&split_stake_account.pubkey(),
&seed,
&solana_stake_program::id(),
@@ -1197,7 +1182,7 @@ pub fn process_split_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config)
return_signers(&tx)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1210,8 +1195,8 @@ pub fn process_split_stake(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
}
}
@@ -1254,7 +1239,7 @@ pub fn process_stake_set_lockup(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config)
return_signers(&tx)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1267,66 +1252,84 @@ pub fn process_stake_set_lockup(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
}
}
pub fn build_stake_state(
stake_lamports: u64,
stake_state: &StakeState,
use_lamports_unit: bool,
) -> CliStakeState {
pub fn print_stake_state(stake_lamports: u64, stake_state: &StakeState, use_lamports_unit: bool) {
fn show_authorized(authorized: &Authorized) {
println!("Stake Authority: {}", authorized.staker);
println!("Withdraw Authority: {}", authorized.withdrawer);
}
fn show_lockup(lockup: &Lockup) {
println!(
"Lockup Timestamp: {} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(lockup.unix_timestamp, 0), Utc)
.to_rfc3339_opts(SecondsFormat::Secs, true),
lockup.unix_timestamp
);
println!("Lockup Epoch: {}", lockup.epoch);
println!("Lockup Custodian: {}", lockup.custodian);
}
match stake_state {
StakeState::Stake(
Meta {
authorized, lockup, ..
},
stake,
) => CliStakeState {
stake_type: CliStakeType::Stake,
total_stake: stake_lamports,
delegated_stake: Some(stake.delegation.stake),
delegated_vote_account_address: if stake.delegation.voter_pubkey != Pubkey::default() {
Some(stake.delegation.voter_pubkey.to_string())
} else {
None
},
activation_epoch: Some(if stake.delegation.activation_epoch < std::u64::MAX {
stake.delegation.activation_epoch
} else {
0
}),
deactivation_epoch: if stake.delegation.deactivation_epoch < std::u64::MAX {
Some(stake.delegation.deactivation_epoch)
} else {
None
},
authorized: Some(authorized.into()),
lockup: Some(lockup.into()),
use_lamports_unit,
},
StakeState::RewardsPool => CliStakeState {
stake_type: CliStakeType::RewardsPool,
..CliStakeState::default()
},
StakeState::Uninitialized => CliStakeState::default(),
) => {
println!(
"Total Stake: {}",
build_balance_message(stake_lamports, use_lamports_unit, true)
);
println!("Credits Observed: {}", stake.credits_observed);
println!(
"Delegated Stake: {}",
build_balance_message(stake.delegation.stake, use_lamports_unit, true)
);
if stake.delegation.voter_pubkey != Pubkey::default() {
println!(
"Delegated Vote Account Address: {}",
stake.delegation.voter_pubkey
);
}
println!(
"Stake activates starting from epoch: {}",
if stake.delegation.activation_epoch < std::u64::MAX {
stake.delegation.activation_epoch
} else {
0
}
);
if stake.delegation.deactivation_epoch < std::u64::MAX {
println!(
"Stake deactivates starting from epoch: {}",
stake.delegation.deactivation_epoch
);
}
show_authorized(&authorized);
show_lockup(&lockup);
}
StakeState::RewardsPool => println!("Stake account is a rewards pool"),
StakeState::Uninitialized => println!("Stake account is uninitialized"),
StakeState::Initialized(Meta {
authorized, lockup, ..
}) => CliStakeState {
stake_type: CliStakeType::Initialized,
total_stake: stake_lamports,
authorized: Some(authorized.into()),
lockup: Some(lockup.into()),
use_lamports_unit,
..CliStakeState::default()
},
}) => {
println!(
"Total Stake: {}",
build_balance_message(stake_lamports, use_lamports_unit, true)
);
println!("Stake account is undelegated");
show_authorized(&authorized);
show_lockup(&lockup);
}
}
}
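// Illustrative sketch, not part of this diff: stake state is rendered straight
// to stdout here rather than packed into a CliStakeState for an output
// formatter, so callers invoke it purely for its side effect and return an
// empty string, as process_show_stake_account does just below:
//
//     print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
//     Ok("".to_string())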
pub fn process_show_stake_account(
rpc_client: &RpcClient,
config: &CliConfig,
_config: &CliConfig,
stake_account_pubkey: &Pubkey,
use_lamports_unit: bool,
) -> ProcessResult {
@@ -1340,8 +1343,8 @@ pub fn process_show_stake_account(
}
match stake_account.state() {
Ok(stake_state) => {
let state = build_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
Ok(config.output_format.formatted_string(&state))
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
Ok("".to_string())
}
Err(err) => Err(CliError::RpcRequestError(format!(
"Account data could not be deserialized to stake state: {}",
@@ -1353,7 +1356,7 @@ pub fn process_show_stake_account(
pub fn process_show_stake_history(
rpc_client: &RpcClient,
config: &CliConfig,
_config: &CliConfig,
use_lamports_unit: bool,
) -> ProcessResult {
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
@@ -1361,15 +1364,27 @@ pub fn process_show_stake_history(
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
})?;
let mut entries: Vec<CliStakeHistoryEntry> = vec![];
for entry in stake_history.deref() {
entries.push(entry.into());
println!();
println!(
"{}",
style(format!(
" {:<5} {:>20} {:>20} {:>20}",
"Epoch", "Effective Stake", "Activating Stake", "Deactivating Stake",
))
.bold()
);
for (epoch, entry) in stake_history.deref() {
println!(
" {:>5} {:>20} {:>20} {:>20} {}",
epoch,
build_balance_message(entry.effective, use_lamports_unit, false),
build_balance_message(entry.activating, use_lamports_unit, false),
build_balance_message(entry.deactivating, use_lamports_unit, false),
if use_lamports_unit { "lamports" } else { "SOL" }
);
}
let stake_history_output = CliStakeHistory {
entries,
use_lamports_unit,
};
Ok(config.output_format.formatted_string(&stake_history_output))
Ok("".to_string())
}
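// Illustrative sketch, not part of this diff: the stake-history table is
// aligned with plain format-width specifiers ({:<5} left-aligns the epoch
// header in a 5-character column, {:>20} right-aligns each stake column in 20
// characters); a minimal standalone equivalent with placeholder values:
//
//     println!("  {:<5}  {:>20}", "Epoch", "Effective Stake");
//     println!("  {:>5}  {:>20}", 231, "1,000,000.000");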
#[allow(clippy::too_many_arguments)]
@@ -1462,7 +1477,7 @@ pub fn process_delegate_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx, &config)
return_signers(&tx)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1475,8 +1490,8 @@ pub fn process_delegate_stake(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
}
}
@@ -1510,9 +1525,6 @@ mod tests {
let (stake_authority_keypair_file, mut tmp_file) = make_tmp_file();
let stake_authority_keypair = Keypair::new();
write_keypair(&stake_authority_keypair, tmp_file.as_file_mut()).unwrap();
let (custodian_keypair_file, mut tmp_file) = make_tmp_file();
let custodian_keypair = Keypair::new();
write_keypair(&custodian_keypair, tmp_file.as_file_mut()).unwrap();
// stake-authorize subcommand
let stake_account_string = stake_account_pubkey.to_string();
@@ -1530,7 +1542,7 @@ mod tests {
&new_withdraw_string,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1564,7 +1576,7 @@ mod tests {
&withdraw_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1602,7 +1614,7 @@ mod tests {
&withdraw_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1632,7 +1644,7 @@ mod tests {
&new_stake_string,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1656,7 +1668,7 @@ mod tests {
&stake_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1686,7 +1698,7 @@ mod tests {
&withdraw_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1713,7 +1725,7 @@ mod tests {
&new_withdraw_string,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1741,7 +1753,7 @@ mod tests {
&withdraw_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1779,7 +1791,7 @@ mod tests {
"--sign-only",
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1812,7 +1824,7 @@ mod tests {
&pubkey.to_string(),
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1858,7 +1870,7 @@ mod tests {
&pubkey2.to_string(),
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1890,7 +1902,7 @@ mod tests {
&blockhash_string,
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1927,7 +1939,7 @@ mod tests {
&nonce_keypair_file,
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1963,7 +1975,7 @@ mod tests {
&fee_payer_keypair_file,
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -1997,7 +2009,7 @@ mod tests {
&signer,
]);
assert_eq!(
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
@@ -2038,7 +2050,7 @@ mod tests {
"43",
]);
assert_eq!(
parse_command(&test_create_stake_account, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_create_stake_account, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateStakeAccount {
stake_account: 1,
@@ -2079,12 +2091,7 @@ mod tests {
]);
assert_eq!(
parse_command(
&test_create_stake_account2,
&default_keypair_file,
&mut None
)
.unwrap(),
parse_command(&test_create_stake_account2, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateStakeAccount {
stake_account: 1,
@@ -2137,12 +2144,7 @@ mod tests {
]);
assert_eq!(
parse_command(
&test_create_stake_account2,
&default_keypair_file,
&mut None
)
.unwrap(),
parse_command(&test_create_stake_account2, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateStakeAccount {
stake_account: 1,
@@ -2178,7 +2180,7 @@ mod tests {
&vote_account_string,
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@@ -2207,7 +2209,7 @@ mod tests {
&stake_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@@ -2238,7 +2240,7 @@ mod tests {
&vote_account_string,
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@@ -2267,7 +2269,7 @@ mod tests {
&blockhash_string,
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@@ -2297,7 +2299,7 @@ mod tests {
"--sign-only",
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@@ -2331,7 +2333,7 @@ mod tests {
&key1.to_string(),
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@@ -2377,7 +2379,7 @@ mod tests {
&key2.to_string(),
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@@ -2414,7 +2416,7 @@ mod tests {
&fee_payer_keypair_file,
]);
assert_eq!(
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
@@ -2444,14 +2446,13 @@ mod tests {
]);
assert_eq!(
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_withdraw_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawStake {
stake_account_pubkey,
destination_account_pubkey: stake_account_pubkey,
lamports: 42_000_000_000,
withdraw_authority: 0,
custodian: None,
sign_only: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@@ -2474,14 +2475,13 @@ mod tests {
]);
assert_eq!(
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_withdraw_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawStake {
stake_account_pubkey,
destination_account_pubkey: stake_account_pubkey,
lamports: 42_000_000_000,
withdraw_authority: 1,
custodian: None,
sign_only: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
@@ -2497,39 +2497,6 @@ mod tests {
}
);
// Test WithdrawStake Subcommand w/ custodian
let test_withdraw_stake = test_commands.clone().get_matches_from(vec![
"test",
"withdraw-stake",
&stake_account_string,
&stake_account_string,
"42",
"--custodian",
&custodian_keypair_file,
]);
assert_eq!(
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawStake {
stake_account_pubkey,
destination_account_pubkey: stake_account_pubkey,
lamports: 42_000_000_000,
withdraw_authority: 0,
custodian: Some(1),
sign_only: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
read_keypair_file(&custodian_keypair_file).unwrap().into()
],
}
);
// Test WithdrawStake Subcommand w/ authority and offline nonce
let test_withdraw_stake = test_commands.clone().get_matches_from(vec![
"test",
@@ -2552,14 +2519,13 @@ mod tests {
]);
assert_eq!(
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_withdraw_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawStake {
stake_account_pubkey,
destination_account_pubkey: stake_account_pubkey,
lamports: 42_000_000_000,
withdraw_authority: 0,
custodian: None,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account),
@@ -2585,7 +2551,7 @@ mod tests {
&stake_account_string,
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@@ -2609,7 +2575,7 @@ mod tests {
&stake_authority_keypair_file,
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@@ -2640,7 +2606,7 @@ mod tests {
&blockhash_string,
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@@ -2667,7 +2633,7 @@ mod tests {
"--sign-only",
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@@ -2698,7 +2664,7 @@ mod tests {
&key1.to_string(),
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@@ -2741,7 +2707,7 @@ mod tests {
&key2.to_string(),
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@@ -2772,7 +2738,7 @@ mod tests {
&fee_payer_keypair_file,
]);
assert_eq!(
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@@ -2806,7 +2772,7 @@ mod tests {
"50",
]);
assert_eq!(
parse_command(&test_split_stake_account, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_split_stake_account, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::SplitStake {
stake_account_pubkey: stake_account_keypair.pubkey(),
@@ -2867,7 +2833,7 @@ mod tests {
&stake_signer,
]);
assert_eq!(
parse_command(&test_split_stake_account, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_split_stake_account, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::SplitStake {
stake_account_pubkey: stake_account_keypair.pubkey(),


@@ -102,7 +102,7 @@ impl StorageSubCommands for App<'_, '_> {
pub fn parse_storage_create_archiver_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let account_owner =
pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap();
@@ -130,7 +130,7 @@ pub fn parse_storage_create_archiver_account(
pub fn parse_storage_create_validator_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let account_owner =
pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap();
@@ -158,7 +158,7 @@ pub fn parse_storage_create_validator_account(
pub fn parse_storage_claim_reward(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let node_account_pubkey =
pubkey_of_signer(matches, "node_account_pubkey", wallet_manager)?.unwrap();
@@ -180,7 +180,7 @@ pub fn parse_storage_claim_reward(
pub fn parse_storage_get_account_command(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let storage_account_pubkey =
pubkey_of_signer(matches, "storage_account_pubkey", wallet_manager)?.unwrap();
@@ -242,8 +242,8 @@ pub fn process_create_storage_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
}
pub fn process_claim_storage_reward(
@@ -266,7 +266,7 @@ pub fn process_claim_storage_reward(
&fee_calculator,
&tx.message,
)?;
let signature = rpc_client.send_and_confirm_transaction_with_spinner(&tx)?;
let signature = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
Ok(signature.to_string())
}
@@ -330,7 +330,7 @@ mod tests {
parse_command(
&test_create_archiver_storage_account,
&default_keypair_file,
&mut None
None
)
.unwrap(),
CliCommandInfo {
@@ -362,7 +362,7 @@ mod tests {
parse_command(
&test_create_validator_storage_account,
&default_keypair_file,
&mut None
None
)
.unwrap(),
CliCommandInfo {
@@ -385,7 +385,7 @@ mod tests {
&storage_account_string,
]);
assert_eq!(
parse_command(&test_claim_storage_reward, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_claim_storage_reward, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::ClaimStorageReward {
node_account_pubkey: pubkey,


@@ -1,6 +1,6 @@
use crate::{
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
cli_output::{CliValidatorInfo, CliValidatorInfoVec},
display::println_name_value,
};
use bincode::deserialize;
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
@@ -25,6 +25,7 @@ use solana_sdk::{
transaction::Transaction,
};
use std::{error, sync::Arc};
use titlecase::titlecase;
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
@@ -228,7 +229,7 @@ impl ValidatorInfoSubCommands for App<'_, '_> {
pub fn parse_validator_info_command(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let info_pubkey = pubkey_of(matches, "info_pubkey");
// Prepare validator info
@@ -367,18 +368,14 @@ pub fn process_set_validator_info(
&fee_calculator,
&tx.message,
)?;
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&tx)?;
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
println!("Success! Validator info published at: {:?}", info_pubkey);
println!("{}", signature_str);
Ok("".to_string())
}
pub fn process_get_validator_info(
rpc_client: &RpcClient,
config: &CliConfig,
pubkey: Option<Pubkey>,
) -> ProcessResult {
pub fn process_get_validator_info(rpc_client: &RpcClient, pubkey: Option<Pubkey>) -> ProcessResult {
let validator_info: Vec<(Pubkey, Account)> = if let Some(validator_info_pubkey) = pubkey {
vec![(
validator_info_pubkey,
@@ -397,22 +394,24 @@ pub fn process_get_validator_info(
.collect()
};
let mut validator_info_list: Vec<CliValidatorInfo> = vec![];
if validator_info.is_empty() {
println!("No validator info accounts found");
}
for (validator_info_pubkey, validator_info_account) in validator_info.iter() {
let (validator_pubkey, validator_info) =
parse_validator_info(&validator_info_pubkey, &validator_info_account)?;
validator_info_list.push(CliValidatorInfo {
identity_pubkey: validator_pubkey.to_string(),
info_pubkey: validator_info_pubkey.to_string(),
info: validator_info,
});
println!();
println_name_value("Validator Identity Pubkey:", &validator_pubkey.to_string());
println_name_value(" Info Pubkey:", &validator_info_pubkey.to_string());
for (key, value) in validator_info.iter() {
println_name_value(
&format!(" {}:", titlecase(key)),
&value.as_str().unwrap_or("?"),
);
}
}
Ok(config
.output_format
.formatted_string(&CliValidatorInfoVec::new(validator_info_list)))
Ok("".to_string())
}
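// Illustrative sketch, not part of this diff: each key of the published info
// map is pretty-printed through the titlecase crate before display, e.g.
// (assuming a typical validator-info key such as "website"):
//
//     use titlecase::titlecase;
//     assert_eq!(titlecase("website"), "Website");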
#[cfg(test)]


@@ -1,10 +1,7 @@
use crate::{
cli::{
check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult, SignerIndex,
},
cli_output::{CliEpochVotingHistory, CliLockout, CliVoteAccount},
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, CliSignerInfo,
ProcessResult, SignerIndex,
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{
@@ -15,8 +12,12 @@ use solana_clap_utils::{
use solana_client::rpc_client::RpcClient;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
account::Account, commitment_config::CommitmentConfig, message::Message, pubkey::Pubkey,
system_instruction::SystemError, transaction::Transaction,
account::Account,
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
system_instruction::{create_address_with_seed, SystemError},
transaction::Transaction,
};
use solana_vote_program::{
vote_instruction::{self, withdraw, VoteError},
@@ -95,17 +96,9 @@ impl VoteSubCommands for App<'_, '_> {
.validator(is_valid_pubkey)
.help("Vote account in which to set the authorized voter"),
)
.arg(
Arg::with_name("authorized")
.index(2)
.value_name("AUTHORIZED_KEYPAIR")
.required(true)
.validator(is_valid_signer)
.help("Current authorized vote signer"),
)
.arg(
Arg::with_name("new_authorized_pubkey")
.index(3)
.index(2)
.value_name("AUTHORIZED_PUBKEY")
.takes_value(true)
.required(true)
@@ -125,17 +118,9 @@ impl VoteSubCommands for App<'_, '_> {
.validator(is_valid_pubkey)
.help("Vote account in which to set the authorized withdrawer"),
)
.arg(
Arg::with_name("authorized")
.index(2)
.value_name("AUTHORIZED_KEYPAIR")
.required(true)
.validator(is_valid_signer)
.help("Current authorized withdrawer"),
)
.arg(
Arg::with_name("new_authorized_pubkey")
.index(3)
.index(2)
.value_name("AUTHORIZED_PUBKEY")
.takes_value(true)
.required(true)
@@ -240,7 +225,7 @@ impl VoteSubCommands for App<'_, '_> {
pub fn parse_create_vote_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
let seed = matches.value_of("seed").map(|s| s.to_string());
@@ -273,18 +258,17 @@ pub fn parse_create_vote_account(
pub fn parse_vote_authorize(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
vote_authorize: VoteAuthorize,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let new_authorized_pubkey =
pubkey_of_signer(matches, "new_authorized_pubkey", wallet_manager)?.unwrap();
let (authorized, _) = signer_of(matches, "authorized", wallet_manager)?;
let payer_provided = None;
let signer_info = generate_unique_signers(
vec![payer_provided, authorized],
let authorized_voter_provided = None;
let CliSignerInfo { signers } = generate_unique_signers(
vec![authorized_voter_provided],
matches,
default_signer_path,
wallet_manager,
@@ -296,14 +280,14 @@ pub fn parse_vote_authorize(
new_authorized_pubkey,
vote_authorize,
},
signers: signer_info.signers,
signers,
})
}
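// Illustrative sketch, not part of this diff: the separate AUTHORIZED_KEYPAIR
// positional argument is dropped on this branch, so the current authority must
// be the default signer and the subcommand takes only the vote account and the
// new authorized pubkey, as the updated test below constructs:
//
//     test_commands.clone().get_matches_from(vec![
//         "test",
//         "vote-authorize-voter",
//         &vote_account_pubkey.to_string(),
//         &new_authorized_pubkey.to_string(),
//     ]);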
pub fn parse_vote_update_validator(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
@@ -330,7 +314,7 @@ pub fn parse_vote_update_validator(
pub fn parse_vote_get_account_command(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
@@ -349,7 +333,7 @@ pub fn parse_vote_get_account_command(
pub fn parse_withdraw_from_vote_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
@@ -390,7 +374,7 @@ pub fn process_create_vote_account(
let vote_account = config.signers[1];
let vote_account_pubkey = vote_account.pubkey();
let vote_account_address = if let Some(seed) = seed {
Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
create_address_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
} else {
vote_account_pubkey
};
@@ -457,8 +441,8 @@ pub fn process_create_vote_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
}
pub fn process_vote_authorize(
@@ -468,24 +452,16 @@ pub fn process_vote_authorize(
new_authorized_pubkey: &Pubkey,
vote_authorize: VoteAuthorize,
) -> ProcessResult {
// If the `authorized_account` is also the fee payer, `config.signers` will only have one
// keypair in it
let authorized = if config.signers.len() == 2 {
config.signers[1]
} else {
config.signers[0]
};
check_unique_pubkeys(
(&authorized.pubkey(), "authorized_account".to_string()),
(vote_account_pubkey, "vote_account_pubkey".to_string()),
(new_authorized_pubkey, "new_authorized_pubkey".to_string()),
)?;
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let ixs = vec![vote_instruction::authorize(
vote_account_pubkey, // vote account to update
&authorized.pubkey(), // current authorized
new_authorized_pubkey, // new vote signer/withdrawer
vote_authorize, // vote or withdraw
vote_account_pubkey, // vote account to update
&config.signers[0].pubkey(), // current authorized voter
new_authorized_pubkey, // new vote signer/withdrawer
vote_authorize, // vote or withdraw
)];
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
@@ -497,8 +473,9 @@ pub fn process_vote_authorize(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<VoteError>(result, &config)
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
log_instruction_custom_error::<VoteError>(result)
}
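// Illustrative sketch, not part of this diff: with the extra authorized signer
// gone, config.signers[0] serves as both fee payer and current authority, so
// the authorize instruction and the send reference that single signer:
//
//     let ix = vote_instruction::authorize(
//         vote_account_pubkey,
//         &config.signers[0].pubkey(),
//         new_authorized_pubkey,
//         VoteAuthorize::Voter,
//     );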
pub fn process_vote_update_validator(
@@ -515,7 +492,7 @@ pub fn process_vote_update_validator(
(&new_identity_pubkey, "new_identity_account".to_string()),
)?;
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let ixs = vec![vote_instruction::update_validator_identity(
let ixs = vec![vote_instruction::update_node(
vote_account_pubkey,
&authorized_withdrawer.pubkey(),
&new_identity_pubkey,
@@ -530,8 +507,8 @@ pub fn process_vote_update_validator(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<VoteError>(result, &config)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<VoteError>(result)
}
fn get_vote_account(
@@ -564,7 +541,7 @@ fn get_vote_account(
pub fn process_show_vote_account(
rpc_client: &RpcClient,
config: &CliConfig,
_config: &CliConfig,
vote_account_pubkey: &Pubkey,
use_lamports_unit: bool,
commitment_config: CommitmentConfig,
@@ -574,38 +551,46 @@ pub fn process_show_vote_account(
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let mut votes: Vec<CliLockout> = vec![];
let mut epoch_voting_history: Vec<CliEpochVotingHistory> = vec![];
if !vote_state.votes.is_empty() {
for vote in &vote_state.votes {
votes.push(vote.into());
println!(
"Account Balance: {}",
build_balance_message(vote_account.lamports, use_lamports_unit, true)
);
println!("Validator Identity: {}", vote_state.node_pubkey);
println!("Authorized Voter: {:?}", vote_state.authorized_voters());
println!(
"Authorized Withdrawer: {}",
vote_state.authorized_withdrawer
);
println!("Credits: {}", vote_state.credits());
println!("Commission: {}%", vote_state.commission);
println!(
"Root Slot: {}",
match vote_state.root_slot {
Some(slot) => slot.to_string(),
None => "~".to_string(),
}
);
println!("Recent Timestamp: {:?}", vote_state.last_timestamp);
if !vote_state.votes.is_empty() {
println!("recent votes:");
for vote in &vote_state.votes {
println!(
"- slot: {}\n confirmation count: {}",
vote.slot, vote.confirmation_count
);
}
println!("Epoch Voting History:");
for (epoch, credits, prev_credits) in vote_state.epoch_credits() {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
epoch_voting_history.push(CliEpochVotingHistory {
epoch: *epoch,
slots_in_epoch,
credits_earned,
});
println!(
"- epoch: {}\n slots in epoch: {}\n credits earned: {}",
epoch, slots_in_epoch, credits_earned,
);
}
}
let vote_account_data = CliVoteAccount {
account_balance: vote_account.lamports,
validator_identity: vote_state.node_pubkey.to_string(),
authorized_voters: vote_state.authorized_voters().into(),
authorized_withdrawer: vote_state.authorized_withdrawer.to_string(),
credits: vote_state.credits(),
commission: vote_state.commission,
root_slot: vote_state.root_slot,
recent_timestamp: vote_state.last_timestamp.clone(),
votes,
epoch_voting_history,
use_lamports_unit,
};
Ok(config.output_format.formatted_string(&vote_account_data))
Ok("".to_string())
}
pub fn process_withdraw_from_vote_account(
@@ -635,8 +620,9 @@ pub fn process_withdraw_from_vote_account(
&fee_calculator,
&transaction.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction);
log_instruction_custom_error::<VoteError>(result, &config)
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut transaction, &config.signers);
log_instruction_custom_error::<VoteError>(result)
}
#[cfg(test)]
@@ -669,11 +655,10 @@ mod tests {
"test",
"vote-authorize-voter",
&pubkey_string,
&default_keypair_file,
&pubkey2_string,
]);
assert_eq!(
parse_command(&test_authorize_voter, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_authorize_voter, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::VoteAuthorize {
vote_account_pubkey: pubkey,
@@ -684,32 +669,6 @@ mod tests {
}
);
let authorized_keypair = Keypair::new();
let (authorized_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&authorized_keypair, tmp_file.as_file_mut()).unwrap();
let test_authorize_voter = test_commands.clone().get_matches_from(vec![
"test",
"vote-authorize-voter",
&pubkey_string,
&authorized_keypair_file,
&pubkey2_string,
]);
assert_eq!(
parse_command(&test_authorize_voter, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::VoteAuthorize {
vote_account_pubkey: pubkey,
new_authorized_pubkey: pubkey2,
vote_authorize: VoteAuthorize::Voter
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
read_keypair_file(&authorized_keypair_file).unwrap().into(),
],
}
);
let (keypair_file, mut tmp_file) = make_tmp_file();
let keypair = Keypair::new();
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
@@ -726,7 +685,7 @@ mod tests {
"10",
]);
assert_eq!(
parse_command(&test_create_vote_account, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_create_vote_account, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
@@ -754,7 +713,7 @@ mod tests {
&identity_keypair_file,
]);
assert_eq!(
parse_command(&test_create_vote_account2, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_create_vote_account2, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
@@ -786,7 +745,7 @@ mod tests {
&authed.to_string(),
]);
assert_eq!(
parse_command(&test_create_vote_account3, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_create_vote_account3, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
@@ -816,7 +775,7 @@ mod tests {
&authed.to_string(),
]);
assert_eq!(
parse_command(&test_create_vote_account4, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_create_vote_account4, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
@@ -841,7 +800,7 @@ mod tests {
&keypair_file,
]);
assert_eq!(
parse_command(&test_update_validator, &default_keypair_file, &mut None).unwrap(),
parse_command(&test_update_validator, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::VoteUpdateValidator {
vote_account_pubkey: pubkey,
@@ -867,7 +826,7 @@ mod tests {
parse_command(
&test_withdraw_from_vote_account,
&default_keypair_file,
&mut None
None
)
.unwrap(),
CliCommandInfo {
@@ -898,7 +857,7 @@ mod tests {
parse_command(
&test_withdraw_from_vote_account,
&default_keypair_file,
&mut None
None
)
.unwrap(),
CliCommandInfo {


@@ -1,6 +1,5 @@
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -14,6 +13,7 @@ use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{keypair_from_seed, Keypair, Signer},
system_instruction::create_address_with_seed,
system_program,
};
use std::{fs::remove_dir_all, sync::mpsc::channel, thread::sleep, time::Duration};
@@ -120,7 +120,6 @@ fn full_battery_tests(
&faucet_addr,
&config_payer.signers[0].pubkey(),
2000,
&config_payer,
)
.unwrap();
check_balance(2000, &rpc_client, &config_payer.signers[0].pubkey());
@@ -131,7 +130,7 @@ fn full_battery_tests(
config_nonce.signers = vec![&nonce_keypair];
let nonce_account = if let Some(seed) = seed.as_ref() {
Pubkey::create_with_seed(
create_address_with_seed(
&config_nonce.signers[0].pubkey(),
seed,
&system_program::id(),
@@ -267,7 +266,6 @@ fn test_create_account_with_seed() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
@@ -277,7 +275,6 @@ fn test_create_account_with_seed() {
let offline_nonce_authority_signer = keypair_from_seed(&[1u8; 32]).unwrap();
let online_nonce_creator_signer = keypair_from_seed(&[2u8; 32]).unwrap();
let to_address = Pubkey::new(&[3u8; 32]);
let config = CliConfig::default();
// Setup accounts
let rpc_client = RpcClient::new_socket(leader_data.rpc);
@@ -286,7 +283,6 @@ fn test_create_account_with_seed() {
&faucet_addr,
&offline_nonce_authority_signer.pubkey(),
42,
&config,
)
.unwrap();
request_and_confirm_airdrop(
@@ -294,7 +290,6 @@ fn test_create_account_with_seed() {
&faucet_addr,
&online_nonce_creator_signer.pubkey(),
4242,
&config,
)
.unwrap();
check_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
@@ -306,7 +301,7 @@ fn test_create_account_with_seed() {
let authority_pubkey = offline_nonce_authority_signer.pubkey();
let seed = authority_pubkey.to_string()[0..32].to_string();
let nonce_address =
Pubkey::create_with_seed(&creator_pubkey, &seed, &system_program::id()).unwrap();
create_address_with_seed(&creator_pubkey, &seed, &system_program::id()).unwrap();
check_balance(0, &rpc_client, &nonce_address);
let mut creator_config = CliConfig::default();
@@ -349,7 +344,6 @@ fn test_create_account_with_seed() {
nonce_authority: 0,
fee_payer: 0,
};
authority_config.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&authority_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
let authority_presigner = sign_only.presigner_of(&authority_pubkey).unwrap();


@@ -2,7 +2,6 @@ use chrono::prelude::*;
use serde_json::Value;
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig, PayCommand},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -70,7 +69,6 @@ fn test_cli_timestamp_tx() {
&faucet_addr,
&config_payer.signers[0].pubkey(),
50,
&config_witness,
)
.unwrap();
check_balance(50, &rpc_client, &config_payer.signers[0].pubkey());
@@ -80,7 +78,6 @@ fn test_cli_timestamp_tx() {
&faucet_addr,
&config_witness.signers[0].pubkey(),
1,
&config_witness,
)
.unwrap();
@@ -157,7 +154,6 @@ fn test_cli_witness_tx() {
&faucet_addr,
&config_payer.signers[0].pubkey(),
50,
&config_witness,
)
.unwrap();
request_and_confirm_airdrop(
@@ -165,7 +161,6 @@ fn test_cli_witness_tx() {
&faucet_addr,
&config_witness.signers[0].pubkey(),
1,
&config_witness,
)
.unwrap();
@@ -239,7 +234,6 @@ fn test_cli_cancel_tx() {
&faucet_addr,
&config_payer.signers[0].pubkey(),
50,
&config_witness,
)
.unwrap();
@@ -313,7 +307,6 @@ fn test_offline_pay_tx() {
&faucet_addr,
&config_offline.signers[0].pubkey(),
50,
&config_offline,
)
.unwrap();
@@ -322,7 +315,6 @@ fn test_offline_pay_tx() {
&faucet_addr,
&config_online.signers[0].pubkey(),
50,
&config_offline,
)
.unwrap();
check_balance(50, &rpc_client, &config_offline.signers[0].pubkey());
@@ -336,7 +328,6 @@ fn test_offline_pay_tx() {
sign_only: true,
..PayCommand::default()
});
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
check_balance(50, &rpc_client, &config_offline.signers[0].pubkey());
@@ -397,7 +388,6 @@ fn test_nonced_pay_tx() {
&faucet_addr,
&config.signers[0].pubkey(),
50 + minimum_nonce_balance,
&config,
)
.unwrap();
check_balance(


@@ -1,6 +1,5 @@
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -15,6 +14,7 @@ use solana_sdk::{
nonce::State as NonceState,
pubkey::Pubkey,
signature::{keypair_from_seed, Keypair, Signer},
system_instruction::create_address_with_seed,
};
use solana_stake_program::{
stake_instruction::LockupArgs,
@@ -60,7 +60,6 @@ fn test_stake_delegation_force() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -157,12 +156,11 @@ fn test_seed_stake_delegation_and_deactivation() {
&faucet_addr,
&config_validator.signers[0].pubkey(),
100_000,
&config_validator,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
let stake_address = Pubkey::create_with_seed(
let stake_address = create_address_with_seed(
&config_validator.signers[0].pubkey(),
"hi there",
&solana_stake_program::id(),
@@ -248,7 +246,6 @@ fn test_stake_delegation_and_deactivation() {
&faucet_addr,
&config_validator.signers[0].pubkey(),
100_000,
&config_validator,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
@@ -345,7 +342,6 @@ fn test_offline_stake_delegation_and_deactivation() {
&faucet_addr,
&config_validator.signers[0].pubkey(),
100_000,
&config_offline,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
@@ -355,7 +351,6 @@ fn test_offline_stake_delegation_and_deactivation() {
&faucet_addr,
&config_offline.signers[0].pubkey(),
100_000,
&config_validator,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey());
@@ -391,7 +386,6 @@ fn test_offline_stake_delegation_and_deactivation() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
@@ -477,7 +471,6 @@ fn test_nonced_stake_delegation_and_deactivation() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -587,7 +580,6 @@ fn test_stake_authorize() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -605,7 +597,6 @@ fn test_stake_authorize() {
&faucet_addr,
&config_offline.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -713,7 +704,6 @@ fn test_stake_authorize() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sign_reply = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_reply);
assert!(sign_only.has_all_signers());
@@ -822,7 +812,6 @@ fn test_stake_authorize_with_fee_payer() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: SIG_FEE,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
@@ -852,16 +841,13 @@ fn test_stake_authorize_with_fee_payer() {
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &default_pubkey, 100_000, &config)
.unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &default_pubkey, 100_000).unwrap();
check_balance(100_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &payer_pubkey, 100_000, &config)
.unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &payer_pubkey, 100_000).unwrap();
check_balance(100_000, &rpc_client, &payer_pubkey);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
@@ -915,7 +901,6 @@ fn test_stake_authorize_with_fee_payer() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sign_reply = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_reply);
assert!(sign_only.has_all_signers());
@@ -954,7 +939,6 @@ fn test_stake_split() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
@@ -981,13 +965,11 @@ fn test_stake_split() {
&faucet_addr,
&config.signers[0].pubkey(),
500_000,
&config,
)
.unwrap();
check_balance(500_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
@@ -1055,7 +1037,6 @@ fn test_stake_split() {
lamports: 2 * minimum_stake_balance,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
@@ -1105,7 +1086,6 @@ fn test_stake_set_lockup() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
@@ -1132,13 +1112,11 @@ fn test_stake_set_lockup() {
&faucet_addr,
&config.signers[0].pubkey(),
500_000,
&config,
)
.unwrap();
check_balance(500_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
@@ -1312,7 +1290,6 @@ fn test_stake_set_lockup() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
@@ -1385,13 +1362,11 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
&faucet_addr,
&config.signers[0].pubkey(),
200_000,
&config,
)
.unwrap();
check_balance(200_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create nonce account
@@ -1433,7 +1408,6 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
fee_payer: 0,
from: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
@@ -1475,7 +1449,6 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
destination_account_pubkey: recipient_pubkey,
lamports: 42,
withdraw_authority: 0,
custodian: None,
sign_only: true,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_pubkey),
@@ -1491,7 +1464,6 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
destination_account_pubkey: recipient_pubkey,
lamports: 42,
withdraw_authority: 0,
custodian: None,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_pubkey),
@@ -1551,7 +1523,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
};
process_command(&config).unwrap();
let seed_address =
Pubkey::create_with_seed(&stake_pubkey, seed, &solana_stake_program::id()).unwrap();
create_address_with_seed(&stake_pubkey, seed, &solana_stake_program::id()).unwrap();
check_balance(50_000, &rpc_client, &seed_address);
server.close().unwrap();


@@ -1,6 +1,5 @@
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -41,7 +40,6 @@ fn test_transfer() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
@@ -60,8 +58,7 @@ fn test_transfer() {
let sender_pubkey = config.signers[0].pubkey();
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &sender_pubkey, 50_000, &config)
.unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &sender_pubkey, 50_000).unwrap();
check_balance(50_000, &rpc_client, &sender_pubkey);
check_balance(0, &rpc_client, &recipient_pubkey);
@@ -89,7 +86,7 @@ fn test_transfer() {
process_command(&offline).unwrap_err();
let offline_pubkey = offline.signers[0].pubkey();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 50, &config).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 50).unwrap();
check_balance(50, &rpc_client, &offline_pubkey);
// Offline transfer
@@ -105,7 +102,6 @@ fn test_transfer() {
nonce_authority: 0,
fee_payer: 0,
};
offline.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(sign_only.has_all_signers());
@@ -239,7 +235,6 @@ fn test_transfer_multisession_signing() {
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
let (sender, receiver) = channel();
@@ -250,24 +245,16 @@ fn test_transfer_multisession_signing() {
let offline_from_signer = keypair_from_seed(&[2u8; 32]).unwrap();
let offline_fee_payer_signer = keypair_from_seed(&[3u8; 32]).unwrap();
let from_null_signer = NullSigner::new(&offline_from_signer.pubkey());
let config = CliConfig::default();
// Setup accounts
let rpc_client = RpcClient::new_socket(leader_data.rpc);
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&offline_from_signer.pubkey(),
43,
&config,
)
.unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_from_signer.pubkey(), 43)
.unwrap();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&offline_fee_payer_signer.pubkey(),
3,
&config,
)
.unwrap();
check_balance(43, &rpc_client, &offline_from_signer.pubkey());
@@ -294,7 +281,6 @@ fn test_transfer_multisession_signing() {
nonce_authority: 0,
fee_payer: 0,
};
fee_payer_config.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&fee_payer_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(!sign_only.has_all_signers());
@@ -320,7 +306,6 @@ fn test_transfer_multisession_signing() {
nonce_authority: 0,
fee_payer: 0,
};
from_config.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&from_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(sign_only.has_all_signers());


@@ -48,7 +48,6 @@ fn test_vote_authorize_and_withdraw() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();


@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.1.13"
version = "1.0.24"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,15 +14,15 @@ bs58 = "0.3.0"
indicatif = "0.14.0"
jsonrpc-core = "14.0.5"
log = "0.4.8"
rayon = "1.3.0"
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde = "1.0.105"
rayon = "1.2.0"
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls"] }
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-transaction-status = { path = "../transaction-status", version = "1.1.13" }
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
solana-vote-program = { path = "../programs/vote", version = "1.1.13" }
serde_json = "1.0.46"
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -31,7 +31,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.0.5"
jsonrpc-http-server = "14.0.6"
solana-logger = { path = "../logger", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.0.24" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -50,29 +50,29 @@ impl Into<TransportError> for ClientErrorKind {
#[derive(Error, Debug)]
#[error("{kind}")]
pub struct ClientError {
request: Option<rpc_request::RpcRequest>,
command: Option<&'static str>,
#[source]
#[error(transparent)]
kind: ClientErrorKind,
}
impl ClientError {
pub fn new_with_request(kind: ClientErrorKind, request: rpc_request::RpcRequest) -> Self {
pub fn new_with_command(kind: ClientErrorKind, command: &'static str) -> Self {
Self {
request: Some(request),
command: Some(command),
kind,
}
}
pub fn into_with_request(self, request: rpc_request::RpcRequest) -> Self {
pub fn into_with_command(self, command: &'static str) -> Self {
Self {
request: Some(request),
command: Some(command),
..self
}
}
pub fn request(&self) -> Option<&rpc_request::RpcRequest> {
self.request.as_ref()
pub fn command(&self) -> Option<&'static str> {
self.command
}
pub fn kind(&self) -> &ClientErrorKind {
@@ -83,7 +83,7 @@ impl ClientError {
impl From<ClientErrorKind> for ClientError {
fn from(kind: ClientErrorKind) -> Self {
Self {
request: None,
command: None,
kind,
}
}
@@ -92,7 +92,7 @@ impl From<ClientErrorKind> for ClientError {
impl From<TransportError> for ClientError {
fn from(err: TransportError) -> Self {
Self {
request: None,
command: None,
kind: err.into(),
}
}
@@ -107,7 +107,7 @@ impl Into<TransportError> for ClientError {
impl From<std::io::Error> for ClientError {
fn from(err: std::io::Error) -> Self {
Self {
request: None,
command: None,
kind: err.into(),
}
}
@@ -116,7 +116,7 @@ impl From<std::io::Error> for ClientError {
impl From<reqwest::Error> for ClientError {
fn from(err: reqwest::Error) -> Self {
Self {
request: None,
command: None,
kind: err.into(),
}
}
@@ -125,7 +125,7 @@ impl From<reqwest::Error> for ClientError {
impl From<rpc_request::RpcError> for ClientError {
fn from(err: rpc_request::RpcError) -> Self {
Self {
request: None,
command: None,
kind: err.into(),
}
}
@@ -134,7 +134,7 @@ impl From<rpc_request::RpcError> for ClientError {
impl From<serde_json::error::Error> for ClientError {
fn from(err: serde_json::error::Error) -> Self {
Self {
request: None,
command: None,
kind: err.into(),
}
}
@@ -143,7 +143,7 @@ impl From<serde_json::error::Error> for ClientError {
impl From<SignerError> for ClientError {
fn from(err: SignerError) -> Self {
Self {
request: None,
command: None,
kind: err.into(),
}
}
@@ -152,7 +152,7 @@ impl From<SignerError> for ClientError {
impl From<TransactionError> for ClientError {
fn from(err: TransactionError) -> Self {
Self {
request: None,
command: None,
kind: err.into(),
}
}


@@ -3,7 +3,7 @@ use crate::{client_error::Result, rpc_request::RpcRequest};
pub(crate) trait GenericRpcClientRequest {
fn send(
&self,
request: RpcRequest,
request: &RpcRequest,
params: serde_json::Value,
retries: usize,
) -> Result<serde_json::Value>;


@@ -8,8 +8,7 @@ use serde_json::{Number, Value};
use solana_sdk::{
fee_calculator::{FeeCalculator, FeeRateGovernor},
instruction::InstructionError,
signature::Signature,
transaction::{self, Transaction, TransactionError},
transaction::{self, TransactionError},
};
use solana_transaction_status::TransactionStatus;
use std::{collections::HashMap, sync::RwLock};
@@ -40,11 +39,11 @@ impl MockRpcClientRequest {
impl GenericRpcClientRequest for MockRpcClientRequest {
fn send(
&self,
request: RpcRequest,
params: serde_json::Value,
request: &RpcRequest,
_params: serde_json::Value,
_retries: usize,
) -> Result<serde_json::Value> {
if let Some(value) = self.mocks.write().unwrap().remove(&request) {
if let Some(value) = self.mocks.write().unwrap().remove(request) {
return Ok(value);
}
if self.url == "fails" {
@@ -106,17 +105,7 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
}
RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)),
RpcRequest::GetSlot => Value::Number(Number::from(0)),
RpcRequest::SendTransaction => {
let signature = if self.url == "malicious" {
Signature::new(&[8; 64]).to_string()
} else {
let tx_str = params.as_array().unwrap()[0].as_str().unwrap().to_string();
let data = bs58::decode(tx_str).into_vec().unwrap();
let tx: Transaction = bincode::deserialize(&data).unwrap();
tx.signatures[0].to_string()
};
Value::String(signature)
}
RpcRequest::SendTransaction => Value::String(SIGNATURE.to_string()),
RpcRequest::GetMinimumBalanceForRentExemption => Value::Number(Number::from(1234)),
_ => Value::Null,
};


@@ -3,9 +3,12 @@ use crate::{
generic_rpc_client_request::GenericRpcClientRequest,
mock_rpc_client_request::{MockRpcClientRequest, Mocks},
rpc_client_request::RpcClientRequest,
rpc_config::RpcLargestAccountsConfig,
rpc_request::{RpcError, RpcRequest},
rpc_response::*,
rpc_response::{
Response, RpcAccount, RpcBlockhashFeeCalculator, RpcContactInfo, RpcEpochInfo,
RpcFeeCalculator, RpcFeeRateGovernor, RpcIdentity, RpcKeyedAccount, RpcLeaderSchedule,
RpcResult, RpcVersionInfo, RpcVoteAccountStatus,
},
};
use bincode::serialize;
use indicatif::{ProgressBar, ProgressStyle};
@@ -25,7 +28,7 @@ use solana_sdk::{
pubkey::Pubkey,
signature::Signature,
signers::Signers,
transaction::{self, Transaction},
transaction::{self, Transaction, TransactionError},
};
use solana_transaction_status::{
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
@@ -97,28 +100,35 @@ impl RpcClient {
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
let response =
self.client
.send(&RpcRequest::SendTransaction, json!([serialized_encoded]), 5)?;
let signature_base58_str: String =
self.send(RpcRequest::SendTransaction, json!([serialized_encoded]), 5)?;
let signature = signature_base58_str
.parse::<Signature>()
.map_err(|err| Into::<ClientError>::into(RpcError::ParseError(err.to_string())))?;
// A mismatching RPC response signature indicates an issue with the RPC node, and
// should not be passed along to confirmation methods. The transaction may or may
// not have been submitted to the cluster, so callers should verify the success of
// the correct transaction signature independently.
if signature != transaction.signatures[0] {
Err(RpcError::RpcRequestError(format!(
"RPC node returned mismatched signature {:?}, expected {:?}",
signature, transaction.signatures[0]
))
.into())
} else {
Ok(transaction.signatures[0])
match response.as_str() {
None => {
Err(RpcError::ForUser("Received result of an unexpected type".to_string()).into())
}
Some(signature_base58_str) => signature_base58_str
.parse::<Signature>()
.map_err(|err| RpcError::ParseError(err.to_string()).into()),
}
}
pub fn simulate_transaction(
&self,
transaction: &Transaction,
sig_verify: bool,
) -> RpcResult<TransactionStatus> {
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
let response = self.send(
&RpcRequest::SimulateTransaction,
json!([serialized_encoded, { "sigVerify": sig_verify }]),
0,
)?;
Ok(serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "SimulateTransaction"))?)
}
pub fn get_signature_status(
&self,
signature: &Signature,
@@ -131,7 +141,11 @@ impl RpcClient {
signatures: &[Signature],
) -> RpcResult<Vec<Option<TransactionStatus>>> {
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
self.send(RpcRequest::GetSignatureStatuses, json!([signatures]), 5)
let signature_status =
self.client
.send(&RpcRequest::GetSignatureStatuses, json!([signatures]), 5)?;
Ok(serde_json::from_value(signature_status)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?)
}
pub fn get_signature_status_with_commitment(
@@ -139,11 +153,14 @@ impl RpcClient {
signature: &Signature,
commitment_config: CommitmentConfig,
) -> ClientResult<Option<transaction::Result<()>>> {
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
let signature_status = self.client.send(
&RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
5,
)?;
let result: Response<Vec<Option<TransactionStatus>>> =
serde_json::from_value(signature_status)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
Ok(result.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment_config))
@@ -156,13 +173,16 @@ impl RpcClient {
commitment_config: CommitmentConfig,
search_transaction_history: bool,
) -> ClientResult<Option<transaction::Result<()>>> {
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
let signature_status = self.client.send(
&RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()], {
"searchTransactionHistory": search_transaction_history
}]),
5,
)?;
let result: Response<Vec<Option<TransactionStatus>>> =
serde_json::from_value(signature_status)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
Ok(result.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment_config))
@@ -177,32 +197,13 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<Slot> {
self.send(RpcRequest::GetSlot, json!([commitment_config]), 0)
}
let response = self
.client
.send(&RpcRequest::GetSlot, json!([commitment_config]), 0)
.map_err(|err| err.into_with_command("GetSlot"))?;
pub fn supply_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> RpcResult<RpcSupply> {
self.send(RpcRequest::GetSupply, json!([commitment_config]), 0)
}
pub fn total_supply(&self) -> ClientResult<u64> {
self.total_supply_with_commitment(CommitmentConfig::default())
}
pub fn total_supply_with_commitment(
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
self.send(RpcRequest::GetTotalSupply, json!([commitment_config]), 0)
}
pub fn get_largest_accounts_with_config(
&self,
config: RpcLargestAccountsConfig,
) -> RpcResult<Vec<RpcAccountBalance>> {
self.send(RpcRequest::GetLargestAccounts, json!([config]), 0)
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSlot"))
}
pub fn get_vote_accounts(&self) -> ClientResult<RpcVoteAccountStatus> {
@@ -213,11 +214,23 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<RpcVoteAccountStatus> {
self.send(RpcRequest::GetVoteAccounts, json!([commitment_config]), 0)
let response = self
.client
.send(&RpcRequest::GetVoteAccounts, json!([commitment_config]), 0)
.map_err(|err| err.into_with_command("GetVoteAccounts"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetVoteAccounts"))
}
pub fn get_cluster_nodes(&self) -> ClientResult<Vec<RpcContactInfo>> {
self.send(RpcRequest::GetClusterNodes, Value::Null, 0)
let response = self
.client
.send(&RpcRequest::GetClusterNodes, Value::Null, 0)
.map_err(|err| err.into_with_command("GetClusterNodes"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetClusterNodes"))
}
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<ConfirmedBlock> {
@@ -229,7 +242,13 @@ impl RpcClient {
slot: Slot,
encoding: TransactionEncoding,
) -> ClientResult<ConfirmedBlock> {
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]), 0)
let response = self
.client
.send(&RpcRequest::GetConfirmedBlock, json!([slot, encoding]), 0)
.map_err(|err| err.into_with_command("GetConfirmedBlock"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedBlock"))
}
pub fn get_confirmed_blocks(
@@ -237,11 +256,17 @@ impl RpcClient {
start_slot: Slot,
end_slot: Option<Slot>,
) -> ClientResult<Vec<Slot>> {
self.send(
RpcRequest::GetConfirmedBlocks,
json!([start_slot, end_slot]),
0,
)
let response = self
.client
.send(
&RpcRequest::GetConfirmedBlocks,
json!([start_slot, end_slot]),
0,
)
.map_err(|err| err.into_with_command("GetConfirmedBlocks"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedBlocks"))
}
pub fn get_confirmed_signatures_for_address(
@@ -250,21 +275,18 @@ impl RpcClient {
start_slot: Slot,
end_slot: Slot,
) -> ClientResult<Vec<Signature>> {
let signatures_base58_str: Vec<String> = self.send(
RpcRequest::GetConfirmedSignaturesForAddress,
json!([address.to_string(), start_slot, end_slot]),
0,
)?;
let response = self
.client
.send(
&RpcRequest::GetConfirmedSignaturesForAddress,
json!([address, start_slot, end_slot]),
0,
)
.map_err(|err| err.into_with_command("GetConfirmedSignaturesForAddress"))?;
let mut signatures = vec![];
for signature_base58_str in signatures_base58_str {
signatures.push(
signature_base58_str.parse::<Signature>().map_err(|err| {
Into::<ClientError>::into(RpcError::ParseError(err.to_string()))
})?,
);
}
Ok(signatures)
serde_json::from_value(response).map_err(|err| {
ClientError::new_with_command(err.into(), "GetConfirmedSignaturesForAddress")
})
}
pub fn get_confirmed_transaction(
@@ -272,16 +294,23 @@ impl RpcClient {
signature: &Signature,
encoding: TransactionEncoding,
) -> ClientResult<ConfirmedTransaction> {
self.send(
RpcRequest::GetConfirmedTransaction,
json!([signature.to_string(), encoding]),
0,
)
let response = self
.client
.send(
&RpcRequest::GetConfirmedTransaction,
json!([signature.to_string(), encoding]),
0,
)
.map_err(|err| err.into_with_command("GetConfirmedTransaction"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedTransaction"))
}
pub fn get_block_time(&self, slot: Slot) -> ClientResult<UnixTimestamp> {
let request = RpcRequest::GetBlockTime;
let response = self.client.send(request, json!([slot]), 0);
let response = self
.client
.send(&RpcRequest::GetBlockTime, json!([slot]), 0);
response
.map(|result_json| {
@@ -289,11 +318,11 @@ impl RpcClient {
return Err(RpcError::ForUser(format!("Block Not Found: slot={}", slot)).into());
}
let result = serde_json::from_value(result_json)
.map_err(|err| ClientError::new_with_request(err.into(), request))?;
.map_err(|err| ClientError::new_with_command(err.into(), "GetBlockTime"))?;
trace!("Response block timestamp {:?} {:?}", slot, result);
Ok(result)
})
.map_err(|err| err.into_with_request(request))?
.map_err(|err| err.into_with_command("GetBlockTime"))?
}
pub fn get_epoch_info(&self) -> ClientResult<RpcEpochInfo> {
@@ -304,7 +333,13 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<RpcEpochInfo> {
self.send(RpcRequest::GetEpochInfo, json!([commitment_config]), 0)
let response = self
.client
.send(&RpcRequest::GetEpochInfo, json!([commitment_config]), 0)
.map_err(|err| err.into_with_command("GetEpochInfo"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetEpochInfo"))
}
pub fn get_leader_schedule(
@@ -319,43 +354,81 @@ impl RpcClient {
slot: Option<Slot>,
commitment_config: CommitmentConfig,
) -> ClientResult<Option<RpcLeaderSchedule>> {
self.send(
RpcRequest::GetLeaderSchedule,
json!([slot, commitment_config]),
0,
)
let response = self
.client
.send(
&RpcRequest::GetLeaderSchedule,
json!([slot, commitment_config]),
0,
)
.map_err(|err| err.into_with_command("GetLeaderSchedule"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetLeaderSchedule"))
}
pub fn get_epoch_schedule(&self) -> ClientResult<EpochSchedule> {
self.send(RpcRequest::GetEpochSchedule, Value::Null, 0)
let response = self
.client
.send(&RpcRequest::GetEpochSchedule, Value::Null, 0)
.map_err(|err| err.into_with_command("GetEpochSchedule"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetEpochSchedule"))
}
pub fn get_identity(&self) -> ClientResult<Pubkey> {
let rpc_identity: RpcIdentity = self.send(RpcRequest::GetIdentity, Value::Null, 0)?;
let response = self
.client
.send(&RpcRequest::GetIdentity, Value::Null, 0)
.map_err(|err| err.into_with_command("GetIdentity"))?;
rpc_identity.identity.parse::<Pubkey>().map_err(|_| {
ClientError::new_with_request(
RpcError::ParseError("Pubkey".to_string()).into(),
RpcRequest::GetIdentity,
)
})
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetIdentity"))
.and_then(|rpc_identity: RpcIdentity| {
rpc_identity.identity.parse::<Pubkey>().map_err(|_| {
ClientError::new_with_command(
RpcError::ParseError("Pubkey".to_string()).into(),
"GetIdentity",
)
})
})
}
pub fn get_inflation(&self) -> ClientResult<Inflation> {
self.send(RpcRequest::GetInflation, Value::Null, 0)
let response = self
.client
.send(&RpcRequest::GetInflation, Value::Null, 0)
.map_err(|err| err.into_with_command("GetInflation"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetInflation"))
}
pub fn get_version(&self) -> ClientResult<RpcVersionInfo> {
self.send(RpcRequest::GetVersion, Value::Null, 0)
let response = self
.client
.send(&RpcRequest::GetVersion, Value::Null, 0)
.map_err(|err| err.into_with_command("GetVersion"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetVersion"))
}
pub fn minimum_ledger_slot(&self) -> ClientResult<Slot> {
self.send(RpcRequest::MinimumLedgerSlot, Value::Null, 0)
let response = self
.client
.send(&RpcRequest::MinimumLedgerSlot, Value::Null, 0)
.map_err(|err| err.into_with_command("MinimumLedgerSlot"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "MinimumLedgerSlot"))
}
pub fn send_and_confirm_transaction(
pub fn send_and_confirm_transaction<T: Signers>(
&self,
transaction: &Transaction,
transaction: &mut Transaction,
signer_keys: &T,
) -> ClientResult<Signature> {
let mut send_retries = 20;
loop {
@@ -379,6 +452,11 @@ impl RpcClient {
send_retries = if let Some(result) = status.clone() {
match result {
Ok(_) => return Ok(signature),
Err(TransactionError::AccountInUse) => {
// Fetch a new blockhash and re-sign the transaction before sending it again
self.resign_transaction(transaction, signer_keys)?;
send_retries - 1
}
Err(_) => 0,
}
} else {
@@ -389,9 +467,7 @@ impl RpcClient {
return Err(err.unwrap_err().into());
} else {
return Err(
RpcError::ForUser("unable to confirm transaction. \
This can happen in situations such as transaction expiration \
and insufficient fee-payer funds".to_string()).into(),
RpcError::ForUser("unable to confirm transaction. This can happen in situations such as transaction expiration and insufficient fee-payer funds".to_string()).into(),
);
}
}
@@ -482,15 +558,18 @@ impl RpcClient {
pubkey: &Pubkey,
retries: usize,
) -> Result<Option<u64>, Box<dyn error::Error>> {
let request = RpcRequest::GetBalance;
let balance_json = self
.client
.send(request, json!([pubkey.to_string()]), retries)
.map_err(|err| err.into_with_request(request))?;
.send(
&RpcRequest::GetBalance,
json!([pubkey.to_string()]),
retries,
)
.map_err(|err| err.into_with_command("RetryGetBalance"))?;
Ok(Some(
serde_json::from_value::<Response<u64>>(balance_json)
.map_err(|err| ClientError::new_with_request(err.into(), request))?
.map_err(|err| ClientError::new_with_command(err.into(), "RetryGetBalance"))?
.value,
))
}
@@ -507,7 +586,7 @@ impl RpcClient {
commitment_config: CommitmentConfig,
) -> RpcResult<Option<Account>> {
let response = self.client.send(
RpcRequest::GetAccountInfo,
&RpcRequest::GetAccountInfo,
json!([pubkey.to_string(), commitment_config]),
0,
);
@@ -543,14 +622,18 @@ impl RpcClient {
}
pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> ClientResult<u64> {
let request = RpcRequest::GetMinimumBalanceForRentExemption;
let minimum_balance_json = self
.client
.send(request, json!([data_len]), 0)
.map_err(|err| err.into_with_request(request))?;
.send(
&RpcRequest::GetMinimumBalanceForRentExemption,
json!([data_len]),
0,
)
.map_err(|err| err.into_with_command("GetMinimumBalanceForRentExemption"))?;
let minimum_balance: u64 = serde_json::from_value(minimum_balance_json)
.map_err(|err| ClientError::new_with_request(err.into(), request))?;
let minimum_balance: u64 = serde_json::from_value(minimum_balance_json).map_err(|err| {
ClientError::new_with_command(err.into(), "GetMinimumBalanceForRentExemption")
})?;
trace!(
"Response minimum balance {:?} {:?}",
data_len,
@@ -571,25 +654,39 @@ impl RpcClient {
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<u64> {
self.send(
RpcRequest::GetBalance,
json!([pubkey.to_string(), commitment_config]),
0,
)
let balance_json = self
.client
.send(
&RpcRequest::GetBalance,
json!([pubkey.to_string(), commitment_config]),
0,
)
.map_err(|err| err.into_with_command("GetBalance"))?;
serde_json::from_value::<Response<u64>>(balance_json)
.map_err(|err| ClientError::new_with_command(err.into(), "GetBalance"))
}
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> ClientResult<Vec<(Pubkey, Account)>> {
let accounts: Vec<RpcKeyedAccount> = self.send(
RpcRequest::GetProgramAccounts,
json!([pubkey.to_string()]),
0,
)?;
let response = self
.client
.send(
&RpcRequest::GetProgramAccounts,
json!([pubkey.to_string()]),
0,
)
.map_err(|err| err.into_with_command("GetProgramAccounts"))?;
let accounts: Vec<RpcKeyedAccount> =
serde_json::from_value::<Vec<RpcKeyedAccount>>(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetProgramAccounts"))?;
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
let pubkey = pubkey.parse().map_err(|_| {
ClientError::new_with_request(
ClientError::new_with_command(
RpcError::ParseError("Pubkey".to_string()).into(),
RpcRequest::GetProgramAccounts,
"GetProgramAccounts",
)
})?;
pubkey_accounts.push((pubkey, account.decode().unwrap()));
@@ -606,11 +703,17 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> ClientResult<u64> {
self.send(
RpcRequest::GetTransactionCount,
json!([commitment_config]),
0,
)
let response = self
.client
.send(
&RpcRequest::GetTransactionCount,
json!([commitment_config]),
0,
)
.map_err(|err| err.into_with_command("GetTransactionCount"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetTransactionCount"))
}
pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
@@ -623,6 +726,15 @@ impl RpcClient {
&self,
commitment_config: CommitmentConfig,
) -> RpcResult<(Hash, FeeCalculator)> {
let response = self
.client
.send(
&RpcRequest::GetRecentBlockhash,
json!([commitment_config]),
0,
)
.map_err(|err| err.into_with_command("GetRecentBlockhash"))?;
let Response {
context,
value:
@@ -630,16 +742,12 @@ impl RpcClient {
blockhash,
fee_calculator,
},
} = self.send::<Response<RpcBlockhashFeeCalculator>>(
RpcRequest::GetRecentBlockhash,
json!([commitment_config]),
0,
)?;
} = serde_json::from_value::<Response<RpcBlockhashFeeCalculator>>(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetRecentBlockhash"))?;
let blockhash = blockhash.parse().map_err(|_| {
ClientError::new_with_request(
ClientError::new_with_command(
RpcError::ParseError("Hash".to_string()).into(),
RpcRequest::GetRecentBlockhash,
"GetRecentBlockhash",
)
})?;
Ok(Response {
@@ -652,25 +760,31 @@ impl RpcClient {
&self,
blockhash: &Hash,
) -> ClientResult<Option<FeeCalculator>> {
let Response { value, .. } = self.send::<Response<Option<RpcFeeCalculator>>>(
RpcRequest::GetFeeCalculatorForBlockhash,
json!([blockhash.to_string()]),
0,
)?;
let response = self
.client
.send(
&RpcRequest::GetFeeCalculatorForBlockhash,
json!([blockhash.to_string()]),
0,
)
.map_err(|err| err.into_with_command("GetFeeCalculatorForBlockhash"))?;
let Response { value, .. } = serde_json::from_value::<Response<Option<RpcFeeCalculator>>>(
response,
)
.map_err(|e| ClientError::new_with_command(e.into(), "GetFeeCalculatorForBlockhash"))?;
Ok(value.map(|rf| rf.fee_calculator))
}
pub fn get_fee_rate_governor(&self) -> RpcResult<FeeRateGovernor> {
let response = self
.client
.send(&RpcRequest::GetFeeRateGovernor, Value::Null, 0)
.map_err(|err| err.into_with_command("GetFeeRateGovernor"))?;
let Response {
context,
value: RpcFeeRateGovernor { fee_rate_governor },
} = self.send::<Response<RpcFeeRateGovernor>>(
RpcRequest::GetFeeRateGovernor,
Value::Null,
0,
)?;
} = serde_json::from_value::<Response<RpcFeeRateGovernor>>(response)
.map_err(|e| ClientError::new_with_command(e.into(), "GetFeeRateGovernor"))?;
Ok(Response {
context,
value: fee_rate_governor,
@@ -704,11 +818,18 @@ impl RpcClient {
}
pub fn get_genesis_hash(&self) -> ClientResult<Hash> {
let hash_str: String = self.send(RpcRequest::GetGenesisHash, Value::Null, 0)?;
let hash = hash_str.parse().map_err(|_| {
ClientError::new_with_request(
let response = self
.client
.send(&RpcRequest::GetGenesisHash, Value::Null, 0)
.map_err(|err| err.into_with_command("GetGenesisHash"))?;
let hash = serde_json::from_value::<String>(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetGenesisHash"))?;
let hash = hash.parse().map_err(|_| {
ClientError::new_with_command(
RpcError::ParseError("Hash".to_string()).into(),
RpcRequest::GetGenesisHash,
"GetGenesisHash",
)
})?;
Ok(hash)
@@ -764,7 +885,7 @@ impl RpcClient {
return balance_result.ok();
}
trace!(
"wait_for_balance_with_commitment [{}] {:?} {:?}",
"retry_get_balance[{}] {:?} {:?}",
run,
balance_result,
expected_balance
@@ -897,18 +1018,23 @@ impl RpcClient {
&self,
signature: &Signature,
) -> ClientResult<usize> {
let result: Response<Vec<Option<TransactionStatus>>> = self.send(
RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
5,
)?;
let response = self
.client
.send(
&RpcRequest::GetSignatureStatuses,
json!([[signature.to_string()]]),
1,
)
.map_err(|err| err.into_with_command("GetSignatureStatuses"))?;
let result: Response<Vec<Option<TransactionStatus>>> = serde_json::from_value(response)
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
let confirmations = result.value[0]
.clone()
.ok_or_else(|| {
ClientError::new_with_request(
ClientError::new_with_command(
ClientErrorKind::Custom("signature not found".to_string()),
RpcRequest::GetSignatureStatuses,
"GetSignatureStatuses",
)
})?
.confirmations
@@ -916,22 +1042,22 @@ impl RpcClient {
Ok(confirmations)
}
pub fn send_and_confirm_transaction_with_spinner(
pub fn send_and_confirm_transaction_with_spinner<T: Signers>(
&self,
transaction: &Transaction,
transaction: &mut Transaction,
signer_keys: &T,
) -> ClientResult<Signature> {
let mut confirmations = 0;
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message(&format!(
"[{}/{}] Waiting for confirmations",
confirmations,
MAX_LOCKOUT_HISTORY + 1,
));
let mut send_retries = 20;
let signature = loop {
progress_bar.set_message(&format!(
"[{}/{}] Finalizing transaction {}",
confirmations,
MAX_LOCKOUT_HISTORY + 1,
transaction.signatures[0],
));
let mut status_retries = 15;
let (signature, status) = loop {
let signature = self.send_transaction(transaction)?;
@@ -955,6 +1081,11 @@ impl RpcClient {
send_retries = if let Some(result) = status.clone() {
match result {
Ok(_) => 0,
Err(TransactionError::AccountInUse) => {
// Fetch a new blockhash and re-sign the transaction before sending it again
self.resign_transaction(transaction, signer_keys)?;
send_retries - 1
}
// If transaction errors, return right away; no point in counting confirmations
Err(_) => 0,
}
@@ -972,13 +1103,9 @@ impl RpcClient {
}
}
} else {
return Err(RpcError::ForUser(
"unable to confirm transaction. \
This can happen in situations such as transaction \
expiration and insufficient fee-payer funds"
.to_string(),
)
.into());
return Err(
RpcError::ForUser("unable to confirm transaction. This can happen in situations such as transaction expiration and insufficient fee-payer funds".to_string()).into(),
);
}
}
};
@@ -992,10 +1119,9 @@ impl RpcClient {
return Ok(signature);
}
progress_bar.set_message(&format!(
"[{}/{}] Finalizing transaction {}",
"[{}/{}] Waiting for confirmations",
confirmations + 1,
MAX_LOCKOUT_HISTORY + 1,
signature,
));
sleep(Duration::from_millis(500));
confirmations = self
@@ -1003,29 +1129,24 @@ impl RpcClient {
.unwrap_or(confirmations);
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
return Err(
RpcError::ForUser("transaction not finalized. \
This can happen when a transaction lands in an abandoned fork. \
Please retry.".to_string()).into(),
RpcError::ForUser("transaction not finalized. This can happen when a transaction lands in an abandoned fork. Please retry.".to_string()).into(),
);
}
}
}
pub fn validator_exit(&self) -> ClientResult<bool> {
self.send(RpcRequest::ValidatorExit, Value::Null, 0)
}
pub fn send<T>(&self, request: RpcRequest, params: Value, retries: usize) -> ClientResult<T>
where
T: serde::de::DeserializeOwned,
{
assert!(params.is_array() || params.is_null());
let response = self
.client
.send(request, params, retries)
.map_err(|err| err.into_with_request(request))?;
.send(&RpcRequest::ValidatorExit, Value::Null, 0)
.map_err(|err| err.into_with_command("ValidatorExit"))?;
serde_json::from_value(response)
.map_err(|err| ClientError::new_with_request(err.into(), request))
.map_err(|err| ClientError::new_with_command(err.into(), "ValidatorExit"))
}
pub fn send(&self, request: &RpcRequest, params: Value, retries: usize) -> ClientResult<Value> {
assert!(params.is_array() || params.is_null());
self.client.send(request, params, retries)
}
}
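Illustrative sketch (not part of the diff): on this branch RpcClient::send returns raw JSON, so callers deserialize the value themselves. A minimal example of that pattern, assuming a local RPC node at http://127.0.0.1:8899:
use serde_json::Value;
use solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());
    // send() hands back a serde_json::Value; deserialize it into the expected type.
    let response: Value = rpc_client.send(&RpcRequest::GetTransactionCount, Value::Null, 0)?;
    let transaction_count: u64 = serde_json::from_value(response)?;
    println!("transaction count: {}", transaction_count);
    Ok(())
}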
@@ -1048,7 +1169,10 @@ pub fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
#[cfg(test)]
mod tests {
use super::*;
use crate::{client_error::ClientErrorKind, mock_rpc_client_request::PUBKEY};
use crate::{
client_error::ClientErrorKind,
mock_rpc_client_request::{PUBKEY, SIGNATURE},
};
use assert_matches::assert_matches;
use jsonrpc_core::{Error, IoHandler, Params};
use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder};
@@ -1095,23 +1219,21 @@ mod tests {
let rpc_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(rpc_addr);
let balance: u64 = rpc_client
.send(
RpcRequest::GetBalance,
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]),
0,
)
.unwrap();
assert_eq!(balance, 50);
let balance = rpc_client.send(
&RpcRequest::GetBalance,
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]),
0,
);
assert_eq!(balance.unwrap().as_u64().unwrap(), 50);
let blockhash: String = rpc_client
.send(RpcRequest::GetRecentBlockhash, Value::Null, 0)
.unwrap();
assert_eq!(blockhash, "deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx");
let blockhash = rpc_client.send(&RpcRequest::GetRecentBlockhash, Value::Null, 0);
assert_eq!(
blockhash.unwrap().as_str().unwrap(),
"deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"
);
// Send erroneous parameter
let blockhash: ClientResult<String> =
rpc_client.send(RpcRequest::GetRecentBlockhash, json!(["parameter"]), 0);
let blockhash = rpc_client.send(&RpcRequest::GetRecentBlockhash, json!(["parameter"]), 0);
assert_eq!(blockhash.is_err(), true);
}
@@ -1145,14 +1267,12 @@ mod tests {
let rpc_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(rpc_addr);
let balance: u64 = rpc_client
.send(
RpcRequest::GetBalance,
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhw"]),
10,
)
.unwrap();
assert_eq!(balance, 5);
let balance = rpc_client.send(
&RpcRequest::GetBalance,
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhw"]),
10,
);
assert_eq!(balance.unwrap().as_u64().unwrap(), 5);
}
#[test]
@@ -1165,17 +1285,12 @@ mod tests {
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let signature = rpc_client.send_transaction(&tx);
assert_eq!(signature.unwrap(), tx.signatures[0]);
assert_eq!(signature.unwrap(), SIGNATURE.parse().unwrap());
let rpc_client = RpcClient::new_mock("fails".to_string());
let signature = rpc_client.send_transaction(&tx);
assert!(signature.is_err());
// Test bad signature returned from rpc node
let rpc_client = RpcClient::new_mock("malicious".to_string());
let signature = rpc_client.send_transaction(&tx);
assert!(signature.is_err());
}
#[test]
fn test_get_recent_blockhash() {
@@ -1215,16 +1330,17 @@ mod tests {
let key = Keypair::new();
let to = Pubkey::new_rand();
let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let result = rpc_client.send_and_confirm_transaction(&tx);
let mut tx = system_transaction::transfer(&key, &to, 50, blockhash);
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&key]);
result.unwrap();
let rpc_client = RpcClient::new_mock("account_in_use".to_string());
let result = rpc_client.send_and_confirm_transaction(&tx);
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&key]);
assert!(result.is_err());
let rpc_client = RpcClient::new_mock("instruction_error".to_string());
let result = rpc_client.send_and_confirm_transaction(&tx);
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&key]);
assert_matches!(
result.unwrap_err().kind(),
ClientErrorKind::TransactionError(TransactionError::InstructionError(
@@ -1234,7 +1350,7 @@ mod tests {
);
let rpc_client = RpcClient::new_mock("sig_not_found".to_string());
let result = rpc_client.send_and_confirm_transaction(&tx);
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&key]);
if let ClientErrorKind::Io(err) = result.unwrap_err().kind() {
assert_eq!(err.kind(), io::ErrorKind::Other);
}
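Illustrative sketch (not part of the diff): the restored send_and_confirm_transaction signature takes a mutable transaction plus its signers so the client can re-sign after an AccountInUse retry. Assuming a local node and a payer keypair that holds enough lamports:
use solana_client::rpc_client::RpcClient;
use solana_sdk::{
    signature::{Keypair, Signer},
    system_transaction,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let payer = Keypair::new(); // assumed to be funded for the transfer and fee
    let recipient = Keypair::new().pubkey();
    let (blockhash, _fee_calculator) = rpc_client.get_recent_blockhash()?;
    let mut tx = system_transaction::transfer(&payer, &recipient, 1, blockhash);
    // Passing the signers lets the client fetch a fresh blockhash and re-sign on AccountInUse.
    let signature = rpc_client.send_and_confirm_transaction(&mut tx, &[&payer])?;
    println!("confirmed {}", signature);
    Ok(())
}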


@@ -31,7 +31,7 @@ impl RpcClientRequest {
impl GenericRpcClientRequest for RpcClientRequest {
fn send(
&self,
request: RpcRequest,
request: &RpcRequest,
params: serde_json::Value,
mut retries: usize,
) -> Result<serde_json::Value> {


@@ -9,6 +9,12 @@ pub struct RpcSignatureStatusConfig {
pub commitment: Option<CommitmentConfig>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionConfig {
pub sig_verify: bool,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcLargestAccountsFilter {


@@ -1,8 +1,7 @@
use serde_json::{json, Value};
use std::fmt;
use thiserror::Error;
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum RpcRequest {
DeregisterNode,
ValidatorExit,
@@ -32,21 +31,21 @@ pub enum RpcRequest {
GetStorageTurnRate,
GetSlotsPerSegment,
GetStoragePubkeysForSlot,
GetSupply,
GetTotalSupply,
GetTransactionCount,
GetVersion,
GetVoteAccounts,
RegisterNode,
RequestAirdrop,
SendTransaction,
SimulateTransaction,
SignVote,
GetMinimumBalanceForRentExemption,
MinimumLedgerSlot,
}
impl fmt::Display for RpcRequest {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
impl RpcRequest {
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
let jsonrpc = "2.0";
let method = match self {
RpcRequest::DeregisterNode => "deregisterNode",
RpcRequest::ValidatorExit => "validatorExit",
@@ -76,34 +75,21 @@ impl fmt::Display for RpcRequest {
RpcRequest::GetStorageTurnRate => "getStorageTurnRate",
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
RpcRequest::GetSupply => "getSupply",
RpcRequest::GetTotalSupply => "getTotalSupply",
RpcRequest::GetTransactionCount => "getTransactionCount",
RpcRequest::GetVersion => "getVersion",
RpcRequest::GetVoteAccounts => "getVoteAccounts",
RpcRequest::RegisterNode => "registerNode",
RpcRequest::RequestAirdrop => "requestAirdrop",
RpcRequest::SendTransaction => "sendTransaction",
RpcRequest::SimulateTransaction => "simulateTransaction",
RpcRequest::SignVote => "signVote",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
};
write!(f, "{}", method)
}
}
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
impl RpcRequest {
pub(crate) fn build_request_json(self, id: u64, params: Value) -> Value {
let jsonrpc = "2.0";
json!({
"jsonrpc": jsonrpc,
"id": id,
"method": format!("{}", self),
"method": method,
"params": params,
})
}


@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.13"
version = "1.0.24"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -17,67 +17,67 @@ codecov = { repository = "solana-labs/solana", branch = "master", service = "git
bincode = "1.2.1"
bv = { version = "0.11.1", features = ["serde"] }
bs58 = "0.3.0"
byteorder = "1.3.4"
chrono = { version = "0.4.11", features = ["serde"] }
byteorder = "1.3.2"
chrono = { version = "0.4.10", features = ["serde"] }
compression = "0.1.5"
core_affinity = "0.5.10"
crossbeam-channel = "0.4"
crossbeam-channel = "0.3"
fs_extra = "1.1.0"
flate2 = "1.0"
indexmap = "1.3"
itertools = "0.9.0"
itertools = "0.8.2"
jsonrpc-core = "14.0.5"
jsonrpc-core-client = { version = "14.0.5", features = ["ws"] }
jsonrpc-derive = "14.0.5"
jsonrpc-http-server = "14.0.6"
jsonrpc-pubsub = "14.0.6"
jsonrpc-ws-server = "14.0.6"
libc = "0.2.66"
log = "0.4.8"
nix = "0.17.0"
num_cpus = "1.0.0"
num-traits = "0.2"
rand = "0.7.0"
rand_chacha = "0.2.2"
rayon = "1.3.0"
regex = "1.3.6"
serde = "1.0.105"
rayon = "1.2.0"
regex = "1.3.4"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.13" }
solana-budget-program = { path = "../programs/budget", version = "1.1.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.13" }
solana-client = { path = "../client", version = "1.1.13" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.13" }
solana-faucet = { path = "../faucet", version = "1.1.13" }
serde_json = "1.0.46"
solana-budget-program = { path = "../programs/budget", version = "1.0.24" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.24" }
solana-client = { path = "../client", version = "1.0.24" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.24" }
solana-faucet = { path = "../faucet", version = "1.0.24" }
ed25519-dalek = "=1.0.0-pre.3"
solana-ledger = { path = "../ledger", version = "1.1.13" }
solana-logger = { path = "../logger", version = "1.1.13" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.13" }
solana-metrics = { path = "../metrics", version = "1.1.13" }
solana-measure = { path = "../measure", version = "1.1.13" }
solana-net-utils = { path = "../net-utils", version = "1.1.13" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.13" }
solana-perf = { path = "../perf", version = "1.1.13" }
solana-runtime = { path = "../runtime", version = "1.1.13" }
solana-sdk = { path = "../sdk", version = "1.1.13" }
solana-stake-program = { path = "../programs/stake", version = "1.1.13" }
solana-storage-program = { path = "../programs/storage", version = "1.1.13" }
solana-streamer = { path = "../streamer", version = "1.1.13" }
solana-version = { path = "../version", version = "1.1.13" }
solana-vote-program = { path = "../programs/vote", version = "1.1.13" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.13" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.13" }
solana-ledger = { path = "../ledger", version = "1.0.24" }
solana-logger = { path = "../logger", version = "1.0.24" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.24" }
solana-metrics = { path = "../metrics", version = "1.0.24" }
solana-measure = { path = "../measure", version = "1.0.24" }
solana-net-utils = { path = "../net-utils", version = "1.0.24" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.0.24" }
solana-perf = { path = "../perf", version = "1.0.24" }
solana-runtime = { path = "../runtime", version = "1.0.24" }
solana-sdk = { path = "../sdk", version = "1.0.24" }
solana-stake-program = { path = "../programs/stake", version = "1.0.24" }
solana-storage-program = { path = "../programs/storage", version = "1.0.24" }
solana-version = { path = "../version", version = "1.0.24" }
solana-vote-program = { path = "../programs/vote", version = "1.0.24" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.24" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.0.24" }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.13" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.24" }
trees = "0.2.1"
[dev-dependencies]
matches = "0.1.6"
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serial_test = "0.4.0"
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls"] }
serial_test = "0.3.2"
serial_test_derive = "0.4.0"
systemstat = "0.1.5"


@@ -9,12 +9,12 @@ use rayon::prelude::*;
use solana_core::banking_stage::{create_test_recorder, BankingStage};
use solana_core::cluster_info::ClusterInfo;
use solana_core::cluster_info::Node;
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_core::packet::to_packets_chunked;
use solana_core::poh_recorder::WorkingBankEntry;
use solana_ledger::blockstore_processor::process_entries;
use solana_ledger::entry::{next_hash, Entry};
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_perf::packet::to_packets_chunked;
use solana_perf::test_tx::test_tx;
use solana_runtime::bank::Bank;
use solana_sdk::genesis_config::GenesisConfig;
@@ -190,7 +190,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,


@@ -3,18 +3,14 @@
extern crate test;
use rand::{thread_rng, Rng};
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
use solana_ledger::shred::{Shred, NONCE_SHRED_PAYLOAD_SIZE};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::sync::RwLock;
use std::{
collections::HashMap,
net::UdpSocket,
sync::{atomic::AtomicU64, Arc},
};
use std::collections::HashMap;
use std::net::UdpSocket;
use std::sync::Arc;
use test::Bencher;
#[bench]
@@ -26,7 +22,8 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
const NUM_SHREDS: usize = 32;
let shreds = vec![Shred::new_empty_data_shred(NONCE_SHRED_PAYLOAD_SIZE); NUM_SHREDS];
let shreds = vec![vec![0; NONCE_SHRED_PAYLOAD_SIZE]; NUM_SHREDS];
let seeds = vec![[0u8; 32]; NUM_SHREDS];
let mut stakes = HashMap::new();
const NUM_PEERS: usize = 200;
for _ in 0..NUM_PEERS {
@@ -36,20 +33,10 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);
}
let stakes = Arc::new(stakes);
let cluster_info = Arc::new(cluster_info);
let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes.clone()));
let shreds = Arc::new(shreds);
let last_datapoint = Arc::new(AtomicU64::new(0));
bencher.iter(move || {
let shreds = shreds.clone();
broadcast_shreds(
&socket,
&shreds,
&peers_and_stakes,
&peers,
&last_datapoint,
&mut 0,
)
.unwrap();
cluster_info
.broadcast_shreds(&socket, shreds, &seeds, Some(stakes.clone()))
.unwrap();
});
}


@@ -6,12 +6,12 @@ extern crate test;
use log::*;
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_core::packet::to_packets_chunked;
use solana_core::retransmit_stage::retransmitter;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_measure::measure::Measure;
use solana_perf::packet::to_packets_chunked;
use solana_perf::test_tx::test_tx;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
@@ -45,7 +45,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
peer_sockets.push(socket);
}
let peer_sockets = Arc::new(peer_sockets);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
let bank0 = Bank::new(&genesis_config);


@@ -9,7 +9,7 @@ use solana_ledger::shred::{
};
use solana_perf::test_tx;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::signature::Keypair;
use std::sync::Arc;
use test::Bencher;


@@ -6,9 +6,9 @@ extern crate test;
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use solana_core::packet::to_packets_chunked;
use solana_core::sigverify::TransactionSigVerifier;
use solana_core::sigverify_stage::SigVerifyStage;
use solana_perf::packet::to_packets_chunked;
use solana_perf::test_tx::test_tx;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, Signer};


@@ -1,45 +0,0 @@
// Service to clean up dead slots in accounts_db
//
// This can be expensive since we have to walk the append vecs being cleaned up.
use solana_ledger::bank_forks::BankForks;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
};
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Duration;
pub struct AccountsBackgroundService {
t_background: JoinHandle<()>,
}
const INTERVAL_MS: u64 = 100;
impl AccountsBackgroundService {
pub fn new(bank_forks: Arc<RwLock<BankForks>>, exit: &Arc<AtomicBool>) -> Self {
info!("AccountsBackgroundService active");
let exit = exit.clone();
let t_background = Builder::new()
.name("solana-accounts-background".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
let bank = bank_forks.read().unwrap().working_bank();
bank.process_dead_slots();
// Currently, given INTERVAL_MS, we process 1 slot/100 ms
bank.process_stale_slot();
sleep(Duration::from_millis(INTERVAL_MS));
})
.unwrap();
Self { t_background }
}
pub fn join(self) -> thread::Result<()> {
self.t_background.join()
}
}


@@ -15,7 +15,7 @@ use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::RecvTimeoutError,
Arc,
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
time::Duration,
@@ -30,7 +30,7 @@ impl AccountsHashVerifier {
snapshot_package_receiver: SnapshotPackageReceiver,
snapshot_package_sender: Option<SnapshotPackageSender>,
exit: &Arc<AtomicBool>,
cluster_info: &Arc<ClusterInfo>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
trusted_validators: Option<HashSet<Pubkey>>,
halt_on_trusted_validators_accounts_hash_mismatch: bool,
fault_injection_rate_slots: u64,
@@ -72,7 +72,7 @@ impl AccountsHashVerifier {
fn process_snapshot(
snapshot_package: SnapshotPackage,
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
trusted_validators: &Option<HashSet<Pubkey>>,
halt_on_trusted_validator_accounts_hash_mismatch: bool,
snapshot_package_sender: &Option<SnapshotPackageSender>,
@@ -111,11 +111,14 @@ impl AccountsHashVerifier {
if sender.send(snapshot_package).is_err() {}
}
cluster_info.push_accounts_hashes(hashes.clone());
cluster_info
.write()
.unwrap()
.push_accounts_hashes(hashes.clone());
}
fn should_halt(
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
trusted_validators: &Option<HashSet<Pubkey>>,
slot_to_hash: &mut HashMap<Slot, Hash>,
) -> bool {
@@ -123,9 +126,11 @@ impl AccountsHashVerifier {
let mut highest_slot = 0;
if let Some(trusted_validators) = trusted_validators.as_ref() {
for trusted_validator in trusted_validators {
let is_conflicting = cluster_info.get_accounts_hash_for_node(trusted_validator, |accounts_hashes|
let cluster_info_r = cluster_info.read().unwrap();
if let Some(accounts_hashes) =
cluster_info_r.get_accounts_hash_for_node(trusted_validator)
{
accounts_hashes.iter().any(|(slot, hash)| {
for (slot, hash) in accounts_hashes {
if let Some(reference_hash) = slot_to_hash.get(slot) {
if *hash != *reference_hash {
error!("Trusted validator {} produced conflicting hashes for slot: {} ({} != {})",
@@ -134,21 +139,16 @@ impl AccountsHashVerifier {
hash,
reference_hash,
);
true
return true;
} else {
verified_count += 1;
false
}
} else {
highest_slot = std::cmp::max(*slot, highest_slot);
slot_to_hash.insert(*slot, *hash);
false
}
})
}).unwrap_or(false);
if is_conflicting {
return true;
}
}
}
}
@@ -181,7 +181,7 @@ mod tests {
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let mut trusted_validators = HashSet::new();
let mut slot_to_hash = HashMap::new();
@@ -196,7 +196,8 @@ mod tests {
let hash2 = hash(&[2]);
{
let message = make_accounts_hashes_message(&validator1, vec![(0, hash1)]).unwrap();
cluster_info.push_message(message);
let mut cluster_info_w = cluster_info.write().unwrap();
cluster_info_w.push_message(message);
}
slot_to_hash.insert(0, hash2);
trusted_validators.insert(validator1.pubkey());
@@ -216,7 +217,7 @@ mod tests {
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let trusted_validators = HashSet::new();
let exit = Arc::new(AtomicBool::new(false));
@@ -243,8 +244,9 @@ mod tests {
0,
);
}
let cluster_hashes = cluster_info
.get_accounts_hash_for_node(&keypair.pubkey(), |c| c.clone())
let cluster_info_r = cluster_info.read().unwrap();
let cluster_hashes = cluster_info_r
.get_accounts_hash_for_node(&keypair.pubkey())
.unwrap();
info!("{:?}", cluster_hashes);
assert_eq!(hashes.len(), MAX_SNAPSHOT_HASHES);


@@ -3,6 +3,7 @@
//! can do its processing in parallel with signature verification on the GPU.
use crate::{
cluster_info::ClusterInfo,
packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
poh_service::PohService,
};
@@ -16,11 +17,7 @@ use solana_ledger::{
};
use solana_measure::{measure::Measure, thread_mem_usage};
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn};
use solana_perf::{
cuda_runtime::PinnedVec,
packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
perf_libs,
};
use solana_perf::{cuda_runtime::PinnedVec, perf_libs};
use solana_runtime::{
accounts_db::ErrorCounters,
bank::{Bank, TransactionBalancesSet, TransactionProcessResult},
@@ -41,7 +38,7 @@ use std::{
net::UdpSocket,
sync::atomic::AtomicBool,
sync::mpsc::Receiver,
sync::{Arc, Mutex},
sync::{Arc, Mutex, RwLock},
thread::{self, Builder, JoinHandle},
time::Duration,
time::Instant,
@@ -76,7 +73,7 @@ impl BankingStage {
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
#[allow(clippy::new_ret_no_self)]
pub fn new(
cluster_info: &Arc<ClusterInfo>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
@@ -93,7 +90,7 @@ impl BankingStage {
}
fn new_num_threads(
cluster_info: &Arc<ClusterInfo>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
@@ -104,7 +101,7 @@ impl BankingStage {
// Single thread to generate entries from many banks.
// This thread talks to poh_service and broadcasts the entries once they have been recorded.
// Once an entry has been recorded, its blockhash is registered with the bank.
let my_pubkey = cluster_info.id();
let my_pubkey = cluster_info.read().unwrap().id();
// Many banks that process transactions in parallel.
let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
.map(|i| {
@@ -287,7 +284,7 @@ impl BankingStage {
my_pubkey: &Pubkey,
socket: &std::net::UdpSocket,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
buffered_packets: &mut Vec<PacketsAndOffsets>,
enable_forwarding: bool,
batch_limit: usize,
@@ -331,7 +328,10 @@ impl BankingStage {
next_leader.map_or((), |leader_pubkey| {
let leader_addr = {
cluster_info
.lookup_contact_info(&leader_pubkey, |leader| leader.tpu_forwards)
.read()
.unwrap()
.lookup(&leader_pubkey)
.map(|leader| leader.tpu_forwards)
};
leader_addr.map_or((), |leader_addr| {
@@ -355,7 +355,7 @@ impl BankingStage {
my_pubkey: Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
recv_start: &mut Instant,
enable_forwarding: bool,
id: u32,
@@ -1009,7 +1009,10 @@ pub fn create_test_recorder(
mod tests {
use super::*;
use crate::{
cluster_info::Node, poh_recorder::WorkingBank,
cluster_info::Node,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
packet::to_packets,
poh_recorder::WorkingBank,
transaction_status_service::TransactionStatusService,
};
use crossbeam_channel::unbounded;
@@ -1017,10 +1020,8 @@ mod tests {
use solana_ledger::{
blockstore::entries_to_test_shreds,
entry::{next_entry, Entry, EntrySlice},
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_perf::packet::to_packets;
use solana_runtime::bank::HashAgeKind;
use solana_sdk::{
instruction::InstructionError,
@@ -1046,7 +1047,7 @@ mod tests {
let (exit, poh_recorder, poh_service, _entry_receiever) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
@@ -1086,7 +1087,7 @@ mod tests {
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
@@ -1149,7 +1150,7 @@ mod tests {
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
@@ -1290,7 +1291,7 @@ mod tests {
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info =
ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new_num_threads(
&cluster_info,
&poh_recorder,

core/src/blockstream.rs

@@ -0,0 +1,283 @@
//! The `blockstream` module provides a method for streaming entries out via a
//! local unix socket, to provide client services such as a block explorer with
//! real-time access to entries.
use bincode::serialize;
use chrono::{SecondsFormat, Utc};
use serde_json::json;
use solana_ledger::entry::Entry;
use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey};
use std::cell::RefCell;
use std::io::Result;
use std::path::{Path, PathBuf};
pub trait EntryWriter: std::fmt::Debug {
fn write(&self, payload: String) -> Result<()>;
}
#[derive(Debug, Default)]
pub struct EntryVec {
values: RefCell<Vec<String>>,
}
impl EntryWriter for EntryVec {
fn write(&self, payload: String) -> Result<()> {
self.values.borrow_mut().push(payload);
Ok(())
}
}
impl EntryVec {
pub fn new() -> Self {
EntryVec {
values: RefCell::new(Vec::new()),
}
}
pub fn entries(&self) -> Vec<String> {
self.values.borrow().clone()
}
}
#[derive(Debug)]
pub struct EntrySocket {
unix_socket: PathBuf,
}
impl EntryWriter for EntrySocket {
#[cfg(not(windows))]
fn write(&self, payload: String) -> Result<()> {
use std::io::prelude::*;
use std::net::Shutdown;
use std::os::unix::net::UnixStream;
const MESSAGE_TERMINATOR: &str = "\n";
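// Each payload travels over its own short-lived connection: connect, write a
// single newline-terminated line, then shut down the write half.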
let mut socket = UnixStream::connect(&self.unix_socket)?;
socket.write_all(payload.as_bytes())?;
socket.write_all(MESSAGE_TERMINATOR.as_bytes())?;
socket.shutdown(Shutdown::Write)?;
Ok(())
}
#[cfg(windows)]
fn write(&self, _payload: String) -> Result<()> {
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"EntryWriter::write() not implemented for windows",
))
}
}
pub trait BlockstreamEvents {
fn emit_entry_event(
&self,
slot: Slot,
tick_height: u64,
leader_pubkey: &Pubkey,
entry: &Entry,
) -> Result<()>;
fn emit_block_event(
&self,
slot: Slot,
tick_height: u64,
leader_pubkey: &Pubkey,
blockhash: Hash,
) -> Result<()>;
}
#[derive(Debug)]
pub struct Blockstream<T: EntryWriter> {
pub output: T,
}
impl<T> BlockstreamEvents for Blockstream<T>
where
T: EntryWriter,
{
fn emit_entry_event(
&self,
slot: Slot,
tick_height: u64,
leader_pubkey: &Pubkey,
entry: &Entry,
) -> Result<()> {
let transactions: Vec<Vec<u8>> = serialize_transactions(entry);
let stream_entry = json!({
"num_hashes": entry.num_hashes,
"hash": entry.hash,
"transactions": transactions
});
let json_entry = serde_json::to_string(&stream_entry)?;
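// Example of one emitted line (illustrative values only):
// {"dt":"2020-05-26T00:00:00.000000000Z","t":"entry","s":0,"h":1,"l":"<leader pubkey>","entry":{"num_hashes":1,"hash":"<entry hash>","transactions":[]}}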
let payload = format!(
r#"{{"dt":"{}","t":"entry","s":{},"h":{},"l":"{:?}","entry":{}}}"#,
Utc::now().to_rfc3339_opts(SecondsFormat::Nanos, true),
slot,
tick_height,
leader_pubkey,
json_entry,
);
self.output.write(payload)?;
Ok(())
}
fn emit_block_event(
&self,
slot: Slot,
tick_height: u64,
leader_pubkey: &Pubkey,
blockhash: Hash,
) -> Result<()> {
let payload = format!(
r#"{{"dt":"{}","t":"block","s":{},"h":{},"l":"{:?}","hash":"{:?}"}}"#,
Utc::now().to_rfc3339_opts(SecondsFormat::Nanos, true),
slot,
tick_height,
leader_pubkey,
blockhash,
);
self.output.write(payload)?;
Ok(())
}
}
pub type SocketBlockstream = Blockstream<EntrySocket>;
impl SocketBlockstream {
pub fn new(unix_socket: &Path) -> Self {
Blockstream {
output: EntrySocket {
unix_socket: unix_socket.to_path_buf(),
},
}
}
}
pub type MockBlockstream = Blockstream<EntryVec>;
impl MockBlockstream {
pub fn new(_: &Path) -> Self {
Blockstream {
output: EntryVec::new(),
}
}
pub fn entries(&self) -> Vec<String> {
self.output.entries()
}
}
fn serialize_transactions(entry: &Entry) -> Vec<Vec<u8>> {
entry
.transactions
.iter()
.map(|tx| serialize(&tx).unwrap())
.collect()
}
#[cfg(test)]
mod test {
use super::*;
use chrono::{DateTime, FixedOffset};
use serde_json::Value;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_transaction;
use std::collections::HashSet;
use std::path::PathBuf;
#[test]
fn test_serialize_transactions() {
let entry = Entry::new(&Hash::default(), 1, vec![]);
let empty_vec: Vec<Vec<u8>> = vec![];
assert_eq!(serialize_transactions(&entry), empty_vec);
let keypair0 = Keypair::new();
let keypair1 = Keypair::new();
let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
let tx1 = system_transaction::transfer(&keypair1, &keypair0.pubkey(), 2, Hash::default());
let serialized_tx0 = serialize(&tx0).unwrap();
let serialized_tx1 = serialize(&tx1).unwrap();
let entry = Entry::new(&Hash::default(), 1, vec![tx0, tx1]);
assert_eq!(
serialize_transactions(&entry),
vec![serialized_tx0, serialized_tx1]
);
}
#[test]
fn test_blockstream() -> () {
let blockstream = MockBlockstream::new(&PathBuf::from("test_stream"));
let ticks_per_slot = 5;
let mut blockhash = Hash::default();
let mut entries = Vec::new();
let mut expected_entries = Vec::new();
let tick_height_initial = 1;
let tick_height_final = tick_height_initial + ticks_per_slot + 2;
let mut curr_slot = 0;
let leader_pubkey = Pubkey::new_rand();
for tick_height in tick_height_initial..=tick_height_final {
if tick_height == 5 {
blockstream
.emit_block_event(curr_slot, tick_height, &leader_pubkey, blockhash)
.unwrap();
curr_slot += 1;
}
let entry = Entry::new(&mut blockhash, 1, vec![]); // just ticks
blockhash = entry.hash;
blockstream
.emit_entry_event(curr_slot, tick_height, &leader_pubkey, &entry)
.unwrap();
expected_entries.push(entry.clone());
entries.push(entry);
}
assert_eq!(
blockstream.entries().len() as u64,
// one entry event per tick in tick_height_initial..=tick_height_final
// (ticks_per_slot + 3 of them), plus one block event
ticks_per_slot + 3 + 1
);
let mut j = 0;
let mut matched_entries = 0;
let mut matched_slots = HashSet::new();
let mut matched_blocks = HashSet::new();
for item in blockstream.entries() {
let json: Value = serde_json::from_str(&item).unwrap();
let dt_str = json["dt"].as_str().unwrap();
// Ensure the `dt` field parses as a valid DateTime
let _dt: DateTime<FixedOffset> = DateTime::parse_from_rfc3339(dt_str).unwrap();
let item_type = json["t"].as_str().unwrap();
match item_type {
"block" => {
let hash = json["hash"].to_string();
matched_blocks.insert(hash);
}
"entry" => {
let slot = json["s"].as_u64().unwrap();
matched_slots.insert(slot);
let entry_obj = json["entry"].clone();
let entry: Entry = serde_json::from_value(entry_obj).unwrap();
assert_eq!(entry, expected_entries[j]);
matched_entries += 1;
j += 1;
}
_ => {
assert!(false, "unknown item type {}", item);
}
}
}
assert_eq!(matched_entries, expected_entries.len());
assert_eq!(matched_slots.len(), 2);
assert_eq!(matched_blocks.len(), 1);
}
}
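Since `EntrySocket::write` connects, writes one newline-terminated JSON object, and disconnects per payload, a consumer only has to own the listener side of the socket and parse each line. A minimal consumer sketch, assuming `serde_json` is available and using a placeholder socket path (not part of this module):

use std::io::{BufRead, BufReader};
use std::os::unix::net::UnixListener;

fn main() -> std::io::Result<()> {
    // The validator connects per payload, so the consumer side binds the socket.
    let listener = UnixListener::bind("/tmp/solana-blockstream.sock")?;
    for stream in listener.incoming() {
        for line in BufReader::new(stream?).lines() {
            let line = line?;
            // "t" distinguishes entry events from block (slot-complete) events.
            match serde_json::from_str::<serde_json::Value>(&line) {
                Ok(event) if event["t"] == "entry" => {
                    println!("entry at slot {} tick {}", event["s"], event["h"])
                }
                Ok(event) if event["t"] == "block" => {
                    println!("slot {} complete, blockhash {}", event["s"], event["hash"])
                }
                _ => eprintln!("unrecognized payload: {}", line),
            }
        }
    }
    Ok(())
}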

core/src/blockstream_service.rs

@@ -0,0 +1,228 @@
//! The `blockstream_service` implements optional streaming of entries and block metadata
//! using the `blockstream` module, providing client services such as a block explorer with
//! real-time access to entries.
use crate::blockstream::BlockstreamEvents;
#[cfg(test)]
use crate::blockstream::MockBlockstream as Blockstream;
#[cfg(not(test))]
use crate::blockstream::SocketBlockstream as Blockstream;
use crate::result::{Error, Result};
use solana_ledger::blockstore::Blockstore;
use solana_sdk::pubkey::Pubkey;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::sync::Arc;
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
pub struct BlockstreamService {
t_blockstream: JoinHandle<()>,
}
impl BlockstreamService {
#[allow(clippy::new_ret_no_self)]
pub fn new(
slot_full_receiver: Receiver<(u64, Pubkey)>,
blockstore: Arc<Blockstore>,
unix_socket: &Path,
exit: &Arc<AtomicBool>,
) -> Self {
let mut blockstream = Blockstream::new(unix_socket);
let exit = exit.clone();
let t_blockstream = Builder::new()
.name("solana-blockstream".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
if let Err(e) =
Self::process_entries(&slot_full_receiver, &blockstore, &mut blockstream)
{
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
_ => info!("Error from process_entries: {:?}", e),
}
}
})
.unwrap();
Self { t_blockstream }
}
fn process_entries(
slot_full_receiver: &Receiver<(u64, Pubkey)>,
blockstore: &Arc<Blockstore>,
blockstream: &mut Blockstream,
) -> Result<()> {
let timeout = Duration::new(1, 0);
let (slot, slot_leader) = slot_full_receiver.recv_timeout(timeout)?;
// Slot might not exist due to LedgerCleanupService, check first
let blockstore_meta = blockstore.meta(slot).unwrap();
if let Some(blockstore_meta) = blockstore_meta {
// Return error to main loop. Thread won't exit, will just log the error
let entries = blockstore.get_slot_entries(slot, 0, None)?;
let _parent_slot = if slot == 0 {
None
} else {
Some(blockstore_meta.parent_slot)
};
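// Derive this slot's starting tick height by assuming every prior slot carried
// the same number of ticks as this one.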
let ticks_per_slot = entries.iter().filter(|entry| entry.is_tick()).count() as u64;
let mut tick_height = ticks_per_slot * slot;
for (i, entry) in entries.iter().enumerate() {
if entry.is_tick() {
tick_height += 1;
}
blockstream
.emit_entry_event(slot, tick_height, &slot_leader, &entry)
.unwrap_or_else(|e| {
debug!("Blockstream error: {:?}, {:?}", e, blockstream.output);
});
if i == entries.len() - 1 {
blockstream
.emit_block_event(slot, tick_height, &slot_leader, entry.hash)
.unwrap_or_else(|e| {
debug!("Blockstream error: {:?}, {:?}", e, blockstream.output);
});
}
}
}
Ok(())
}
pub fn join(self) -> thread::Result<()> {
self.t_blockstream.join()
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use bincode::{deserialize, serialize};
use chrono::{DateTime, FixedOffset};
use serde_json::Value;
use solana_ledger::create_new_tmp_ledger;
use solana_ledger::entry::{create_ticks, Entry};
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_transaction;
use std::path::PathBuf;
use std::sync::mpsc::channel;
#[test]
fn test_blockstream_service_process_entries() {
let ticks_per_slot = 5;
let leader_pubkey = Pubkey::new_rand();
// Set up genesis config and blockstore
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(1000);
genesis_config.ticks_per_slot = ticks_per_slot;
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blockstore = Blockstore::open(&ledger_path).unwrap();
// Set up blockstream
let mut blockstream = Blockstream::new(&PathBuf::from("test_stream"));
// Set up dummy channel to receive a full-slot notification
let (slot_full_sender, slot_full_receiver) = channel();
// Create entries - 4 ticks + 1 populated entry + 1 tick
let mut entries = create_ticks(4, 0, Hash::default());
let keypair = Keypair::new();
let mut blockhash = entries[3].hash;
let tx = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, Hash::default());
let entry = Entry::new(&mut blockhash, 1, vec![tx]);
blockhash = entry.hash;
entries.push(entry);
let final_tick = create_ticks(1, 0, blockhash);
entries.extend_from_slice(&final_tick);
let expected_entries = entries.clone();
let expected_tick_heights = [6, 7, 8, 9, 9, 10];
blockstore
.write_entries(
1,
0,
0,
ticks_per_slot,
None,
true,
&Arc::new(Keypair::new()),
entries,
0,
)
.unwrap();
slot_full_sender.send((1, leader_pubkey)).unwrap();
BlockstreamService::process_entries(
&slot_full_receiver,
&Arc::new(blockstore),
&mut blockstream,
)
.unwrap();
assert_eq!(blockstream.entries().len(), 7);
let (entry_events, block_events): (Vec<Value>, Vec<Value>) = blockstream
.entries()
.iter()
.map(|item| {
let json: Value = serde_json::from_str(&item).unwrap();
let dt_str = json["dt"].as_str().unwrap();
// Ensure the `dt` field parses as a valid DateTime
let _dt: DateTime<FixedOffset> = DateTime::parse_from_rfc3339(dt_str).unwrap();
json
})
.partition(|json| {
let item_type = json["t"].as_str().unwrap();
item_type == "entry"
});
for (i, json) in entry_events.iter().enumerate() {
let height = json["h"].as_u64().unwrap();
assert_eq!(height, expected_tick_heights[i]);
let entry_obj = json["entry"].clone();
let tx = entry_obj["transactions"].as_array().unwrap();
let entry: Entry;
if tx.len() == 0 {
entry = serde_json::from_value(entry_obj).unwrap();
} else {
let entry_json = entry_obj.as_object().unwrap();
entry = Entry {
num_hashes: entry_json.get("num_hashes").unwrap().as_u64().unwrap(),
hash: serde_json::from_value(entry_json.get("hash").unwrap().clone()).unwrap(),
transactions: entry_json
.get("transactions")
.unwrap()
.as_array()
.unwrap()
.into_iter()
.enumerate()
.map(|(j, tx)| {
let tx_vec: Vec<u8> = serde_json::from_value(tx.clone()).unwrap();
// Check explicitly that transaction matches bincode-serialized format
assert_eq!(
tx_vec,
serialize(&expected_entries[i].transactions[j]).unwrap()
);
deserialize(&tx_vec).unwrap()
})
.collect(),
};
}
assert_eq!(entry, expected_entries[i]);
}
for json in block_events {
let slot = json["s"].as_u64().unwrap();
assert_eq!(1, slot);
let height = json["h"].as_u64().unwrap();
assert_eq!(2 * ticks_per_slot, height);
}
}
}
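A wiring sketch based only on the constructor and `join()` above — the blockstore is opened elsewhere, the socket path is a placeholder, and the imports are the ones this module already uses:

fn run_blockstream(blockstore: Arc<Blockstore>) {
    let (slot_full_sender, slot_full_receiver) = std::sync::mpsc::channel();
    let exit = Arc::new(AtomicBool::new(false));
    let service = BlockstreamService::new(
        slot_full_receiver,
        blockstore,
        Path::new("/tmp/solana-blockstream.sock"),
        &exit,
    );
    // Whoever tracks slot completion notifies the service like this:
    // slot_full_sender.send((slot, leader_pubkey)).unwrap();
    // Shutdown: setting `exit` stops the loop on its next pass, and dropping the
    // sender disconnects the channel so a blocked recv_timeout breaks immediately.
    exit.store(true, Ordering::Relaxed);
    drop(slot_full_sender);
    service.join().unwrap();
}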

core/src/broadcast_stage.rs

@@ -1,51 +1,30 @@
//! A stage to broadcast data from a leader node to validators
use self::{
broadcast_fake_shreds_run::BroadcastFakeShredsRun, broadcast_metrics::*,
fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
standard_broadcast_run::StandardBroadcastRun,
};
use crate::contact_info::ContactInfo;
use crate::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use crate::weighted_shuffle::weighted_best;
use crate::{
cluster_info::{ClusterInfo, ClusterInfoError},
poh_recorder::WorkingBankEntry,
result::{Error, Result},
};
use crossbeam_channel::{
Receiver as CrossbeamReceiver, RecvTimeoutError as CrossbeamRecvTimeoutError,
Sender as CrossbeamSender,
};
use solana_ledger::{blockstore::Blockstore, shred::Shred, staking_utils};
use solana_measure::measure::Measure;
use self::broadcast_fake_shreds_run::BroadcastFakeShredsRun;
use self::fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun;
use self::standard_broadcast_run::StandardBroadcastRun;
use crate::cluster_info::{ClusterInfo, ClusterInfoError};
use crate::poh_recorder::WorkingBankEntry;
use crate::result::{Error, Result};
use solana_ledger::blockstore::Blockstore;
use solana_ledger::shred::Shred;
use solana_ledger::staking_utils;
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
use solana_runtime::bank::Bank;
use solana_sdk::timing::timestamp;
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use solana_streamer::sendmmsg::send_mmsg;
use std::sync::atomic::AtomicU64;
use std::{
collections::HashMap,
net::UdpSocket,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::{channel, Receiver, RecvError, RecvTimeoutError, Sender},
sync::{Arc, Mutex},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
use solana_sdk::pubkey::Pubkey;
use std::collections::HashMap;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, RecvError, RecvTimeoutError, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::Instant;
pub const NUM_INSERT_THREADS: usize = 2;
mod broadcast_fake_shreds_run;
pub(crate) mod broadcast_metrics;
pub(crate) mod broadcast_utils;
mod fail_entry_verification_broadcast_run;
mod standard_broadcast_run;
pub(crate) const NUM_INSERT_THREADS: usize = 2;
pub(crate) type RetransmitSlotsSender = CrossbeamSender<HashMap<Slot, Arc<Bank>>>;
pub(crate) type RetransmitSlotsReceiver = CrossbeamReceiver<HashMap<Slot, Arc<Bank>>>;
pub(crate) type RecordReceiver = Receiver<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>;
pub(crate) type TransmitReceiver = Receiver<(TransmitShreds, Option<BroadcastShredBatchInfo>)>;
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum BroadcastStageReturnType {
ChannelDisconnected,
@@ -62,20 +41,18 @@ impl BroadcastStageType {
pub fn new_broadcast_stage(
&self,
sock: Vec<UdpSocket>,
cluster_info: Arc<ClusterInfo>,
cluster_info: Arc<RwLock<ClusterInfo>>,
receiver: Receiver<WorkingBankEntry>,
retransmit_slots_receiver: RetransmitSlotsReceiver,
exit_sender: &Arc<AtomicBool>,
blockstore: &Arc<Blockstore>,
shred_version: u16,
) -> BroadcastStage {
let keypair = cluster_info.keypair.clone();
let keypair = cluster_info.read().unwrap().keypair.clone();
match self {
BroadcastStageType::Standard => BroadcastStage::new(
sock,
cluster_info,
receiver,
retransmit_slots_receiver,
exit_sender,
blockstore,
StandardBroadcastRun::new(keypair, shred_version),
@@ -85,7 +62,6 @@ impl BroadcastStageType {
sock,
cluster_info,
receiver,
retransmit_slots_receiver,
exit_sender,
blockstore,
FailEntryVerificationBroadcastRun::new(keypair, shred_version),
@@ -95,7 +71,6 @@ impl BroadcastStageType {
sock,
cluster_info,
receiver,
retransmit_slots_receiver,
exit_sender,
blockstore,
BroadcastFakeShredsRun::new(keypair, 0, shred_version),
@@ -104,24 +79,24 @@ impl BroadcastStageType {
}
}
pub type TransmitShreds = (Option<Arc<HashMap<Pubkey, u64>>>, Arc<Vec<Shred>>);
type TransmitShreds = (Option<Arc<HashMap<Pubkey, u64>>>, Arc<Vec<Shred>>);
trait BroadcastRun {
fn run(
&mut self,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()>;
fn transmit(
&mut self,
receiver: &Arc<Mutex<TransmitReceiver>>,
cluster_info: &ClusterInfo,
&self,
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sock: &UdpSocket,
) -> Result<()>;
fn record(
&mut self,
receiver: &Arc<Mutex<RecordReceiver>>,
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blockstore: &Arc<Blockstore>,
) -> Result<()>;
}
@@ -153,34 +128,32 @@ impl BroadcastStage {
fn run(
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
mut broadcast_stage_run: impl BroadcastRun,
) -> BroadcastStageReturnType {
loop {
let res =
broadcast_stage_run.run(blockstore, receiver, socket_sender, blockstore_sender);
let res = Self::handle_error(res, "run");
let res = Self::handle_error(res);
if let Some(res) = res {
return res;
}
}
}
fn handle_error(r: Result<()>, name: &str) -> Option<BroadcastStageReturnType> {
fn handle_error(r: Result<()>) -> Option<BroadcastStageReturnType> {
if let Err(e) = r {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected)
| Error::SendError
| Error::RecvError(RecvError)
| Error::CrossbeamRecvTimeoutError(CrossbeamRecvTimeoutError::Disconnected) => {
| Error::RecvError(RecvError) => {
return Some(BroadcastStageReturnType::ChannelDisconnected);
}
Error::RecvTimeoutError(RecvTimeoutError::Timeout)
| Error::CrossbeamRecvTimeoutError(CrossbeamRecvTimeoutError::Timeout) => (),
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
Error::ClusterInfoError(ClusterInfoError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
_ => {
inc_new_counter_error!("streamer-broadcaster-error", 1, 1);
error!("{} broadcaster error: {:?}", name, e);
error!("broadcaster error: {:?}", e);
}
}
}
@@ -205,9 +178,8 @@ impl BroadcastStage {
#[allow(clippy::too_many_arguments)]
fn new(
socks: Vec<UdpSocket>,
cluster_info: Arc<ClusterInfo>,
cluster_info: Arc<RwLock<ClusterInfo>>,
receiver: Receiver<WorkingBankEntry>,
retransmit_slots_receiver: RetransmitSlotsReceiver,
exit_sender: &Arc<AtomicBool>,
blockstore: &Arc<Blockstore>,
broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone,
@@ -217,8 +189,6 @@ impl BroadcastStage {
let (socket_sender, socket_receiver) = channel();
let (blockstore_sender, blockstore_receiver) = channel();
let bs_run = broadcast_stage_run.clone();
let socket_sender_ = socket_sender.clone();
let thread_hdl = Builder::new()
.name("solana-broadcaster".to_string())
.spawn(move || {
@@ -226,7 +196,7 @@ impl BroadcastStage {
Self::run(
&btree,
&receiver,
&socket_sender_,
&socket_sender,
&blockstore_sender,
bs_run,
)
@@ -236,13 +206,13 @@ impl BroadcastStage {
let socket_receiver = Arc::new(Mutex::new(socket_receiver));
for sock in socks.into_iter() {
let socket_receiver = socket_receiver.clone();
let mut bs_transmit = broadcast_stage_run.clone();
let bs_transmit = broadcast_stage_run.clone();
let cluster_info = cluster_info.clone();
let t = Builder::new()
.name("solana-broadcaster-transmit".to_string())
.spawn(move || loop {
let res = bs_transmit.transmit(&socket_receiver, &cluster_info, &sock);
let res = Self::handle_error(res, "solana-broadcaster-transmit");
let res = Self::handle_error(res);
if let Some(res) = res {
return res;
}
@@ -253,13 +223,13 @@ impl BroadcastStage {
let blockstore_receiver = Arc::new(Mutex::new(blockstore_receiver));
for _ in 0..NUM_INSERT_THREADS {
let blockstore_receiver = blockstore_receiver.clone();
let mut bs_record = broadcast_stage_run.clone();
let bs_record = broadcast_stage_run.clone();
let btree = blockstore.clone();
let t = Builder::new()
.name("solana-broadcaster-record".to_string())
.spawn(move || loop {
let res = bs_record.record(&blockstore_receiver, &btree);
let res = Self::handle_error(res, "solana-broadcaster-record");
let res = Self::handle_error(res);
if let Some(res) = res {
return res;
}
@@ -268,68 +238,9 @@ impl BroadcastStage {
thread_hdls.push(t);
}
let blockstore = blockstore.clone();
let retransmit_thread = Builder::new()
.name("solana-broadcaster-retransmit".to_string())
.spawn(move || loop {
if let Some(res) = Self::handle_error(
Self::check_retransmit_signals(
&blockstore,
&retransmit_slots_receiver,
&socket_sender,
),
"solana-broadcaster-retransmit-check_retransmit_signals",
) {
return res;
}
})
.unwrap();
thread_hdls.push(retransmit_thread);
Self { thread_hdls }
}
fn check_retransmit_signals(
blockstore: &Blockstore,
retransmit_slots_receiver: &RetransmitSlotsReceiver,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
) -> Result<()> {
let timer = Duration::from_millis(100);
// Check for a retransmit signal
let mut retransmit_slots = retransmit_slots_receiver.recv_timeout(timer)?;
while let Ok(new_retransmit_slots) = retransmit_slots_receiver.try_recv() {
retransmit_slots.extend(new_retransmit_slots);
}
for (_, bank) in retransmit_slots.iter() {
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
let stakes = stakes.map(Arc::new);
let data_shreds = Arc::new(
blockstore
.get_data_shreds_for_slot(bank.slot(), 0)
.expect("My own shreds must be reconstructable"),
);
if !data_shreds.is_empty() {
socket_sender.send(((stakes.clone(), data_shreds), None))?;
}
let coding_shreds = Arc::new(
blockstore
.get_coding_shreds_for_slot(bank.slot(), 0)
.expect("My own shreds must be reconstructable"),
);
if !coding_shreds.is_empty() {
socket_sender.send(((stakes.clone(), coding_shreds), None))?;
}
}
Ok(())
}
pub fn join(self) -> thread::Result<BroadcastStageReturnType> {
for thread_hdl in self.thread_hdls.into_iter() {
let _ = thread_hdl.join();
@@ -338,238 +249,23 @@ impl BroadcastStage {
}
}
fn update_peer_stats(
num_live_peers: i64,
broadcast_len: i64,
last_datapoint_submit: &Arc<AtomicU64>,
) {
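// Rate-limit the datapoint to roughly once per second across threads: only the
// caller that wins the compare_and_swap from `last` to `now` gets to report.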
let now = timestamp();
let last = last_datapoint_submit.load(Ordering::Relaxed);
if now - last > 1000
&& last_datapoint_submit.compare_and_swap(last, now, Ordering::Relaxed) == last
{
datapoint_info!(
"cluster_info-num_nodes",
("live_count", num_live_peers, i64),
("broadcast_count", broadcast_len, i64)
);
}
}
pub fn get_broadcast_peers<S: std::hash::BuildHasher>(
cluster_info: &ClusterInfo,
stakes: Option<Arc<HashMap<Pubkey, u64, S>>>,
) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
use crate::cluster_info;
let mut peers = cluster_info.tvu_peers();
let peers_and_stakes = cluster_info::stake_weight_peers(&mut peers, stakes);
(peers, peers_and_stakes)
}
/// broadcast messages from the leader to layer 1 nodes
/// # Remarks
pub fn broadcast_shreds(
s: &UdpSocket,
shreds: &Arc<Vec<Shred>>,
peers_and_stakes: &[(u64, usize)],
peers: &[ContactInfo],
last_datapoint_submit: &Arc<AtomicU64>,
send_mmsg_total: &mut u64,
) -> Result<()> {
let broadcast_len = peers_and_stakes.len();
if broadcast_len == 0 {
update_peer_stats(1, 1, last_datapoint_submit);
return Ok(());
}
let packets: Vec<_> = shreds
.iter()
.map(|shred| {
let broadcast_index = weighted_best(&peers_and_stakes, shred.seed());
(&shred.payload, &peers[broadcast_index].tvu)
})
.collect();
let mut sent = 0;
let mut send_mmsg_time = Measure::start("send_mmsg");
while sent < packets.len() {
match send_mmsg(s, &packets[sent..]) {
Ok(n) => sent += n,
Err(e) => {
return Err(Error::IO(e));
}
}
}
send_mmsg_time.stop();
*send_mmsg_total += send_mmsg_time.as_us();
let num_live_peers = num_live_peers(&peers);
update_peer_stats(
num_live_peers,
broadcast_len as i64 + 1,
last_datapoint_submit,
);
Ok(())
}
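// Call-site sketch for the two helpers above (hedged example, not part of this
// file): socket binding and shred construction are assumed to happen elsewhere.
fn send_batch(
    cluster_info: &ClusterInfo,
    sock: &UdpSocket,
    shreds: &Arc<Vec<Shred>>,
    stakes: Option<Arc<HashMap<Pubkey, u64>>>,
) -> Result<()> {
    // Stake-weight the current TVU peers once, then fan the whole batch out.
    let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
    let mut send_mmsg_total = 0;
    broadcast_shreds(
        sock,
        shreds,
        &peers_and_stakes,
        &peers,
        &Arc::new(AtomicU64::new(0)), // fresh throttle here; callers reuse one in practice
        &mut send_mmsg_total,
    )
}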
fn distance(a: u64, b: u64) -> u64 {
if a > b {
a - b
} else {
b - a
}
}
fn num_live_peers(peers: &[ContactInfo]) -> i64 {
let mut num_live_peers = 1i64;
peers.iter().for_each(|p| {
// A peer is considered live if they generated their contact info recently
if distance(timestamp(), p.wallclock) <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
num_live_peers += 1;
}
});
num_live_peers
}
#[cfg(test)]
pub mod test {
mod test {
use super::*;
use crate::cluster_info::{ClusterInfo, Node};
use crossbeam_channel::unbounded;
use solana_ledger::{
blockstore::{make_slot_entries, Blockstore},
entry::create_ticks,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
shred::{max_ticks_per_n_shreds, Shredder, RECOMMENDED_FEC_RATE},
};
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::entry::create_ticks;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_runtime::bank::Bank;
use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
};
use std::{
path::Path, sync::atomic::AtomicBool, sync::mpsc::channel, sync::Arc, thread::sleep,
};
pub fn make_transmit_shreds(
slot: Slot,
num: u64,
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
) -> (
Vec<Shred>,
Vec<Shred>,
Vec<TransmitShreds>,
Vec<TransmitShreds>,
) {
let num_entries = max_ticks_per_n_shreds(num, None);
let (data_shreds, _) = make_slot_entries(slot, 0, num_entries);
let keypair = Arc::new(Keypair::new());
let shredder = Shredder::new(slot, 0, RECOMMENDED_FEC_RATE, keypair, 0, 0)
.expect("Expected to create a new shredder");
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..]);
(
data_shreds.clone(),
coding_shreds.clone(),
data_shreds
.into_iter()
.map(|s| (stakes.clone(), Arc::new(vec![s])))
.collect(),
coding_shreds
.into_iter()
.map(|s| (stakes.clone(), Arc::new(vec![s])))
.collect(),
)
}
fn check_all_shreds_received(
transmit_receiver: &TransmitReceiver,
mut data_index: u64,
mut coding_index: u64,
num_expected_data_shreds: u64,
num_expected_coding_shreds: u64,
) {
while let Ok((new_retransmit_slots, _)) = transmit_receiver.try_recv() {
if new_retransmit_slots.1[0].is_data() {
for data_shred in new_retransmit_slots.1.iter() {
assert_eq!(data_shred.index() as u64, data_index);
data_index += 1;
}
} else {
assert_eq!(new_retransmit_slots.1[0].index() as u64, coding_index);
for coding_shred in new_retransmit_slots.1.iter() {
assert_eq!(coding_shred.index() as u64, coding_index);
coding_index += 1;
}
}
}
assert_eq!(num_expected_data_shreds, data_index);
assert_eq!(num_expected_coding_shreds, coding_index);
}
#[test]
fn test_num_live_peers() {
let mut ci = ContactInfo::default();
ci.wallclock = std::u64::MAX;
assert_eq!(num_live_peers(&[ci.clone()]), 1);
ci.wallclock = timestamp() - 1;
assert_eq!(num_live_peers(&[ci.clone()]), 2);
ci.wallclock = timestamp() - CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS - 1;
assert_eq!(num_live_peers(&[ci]), 1);
}
#[test]
fn test_duplicate_retransmit_signal() {
// Setup
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let (transmit_sender, transmit_receiver) = channel();
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
let bank0 = Arc::new(Bank::new(&genesis_config));
// Make some shreds
let updated_slot = 0;
let (all_data_shreds, all_coding_shreds, _, _all_coding_transmit_shreds) =
make_transmit_shreds(updated_slot, 10, None);
let num_data_shreds = all_data_shreds.len();
let num_coding_shreds = all_coding_shreds.len();
assert!(num_data_shreds >= 10);
// Insert all the shreds
blockstore
.insert_shreds(all_data_shreds, None, true)
.unwrap();
blockstore
.insert_shreds(all_coding_shreds, None, true)
.unwrap();
// Insert duplicate retransmit signal, blocks should
// only be retransmitted once
retransmit_slots_sender
.send(vec![(updated_slot, bank0.clone())].into_iter().collect())
.unwrap();
retransmit_slots_sender
.send(vec![(updated_slot, bank0.clone())].into_iter().collect())
.unwrap();
BroadcastStage::check_retransmit_signals(
&blockstore,
&retransmit_slots_receiver,
&transmit_sender,
)
.unwrap();
// Check all the data shreds were received only once
check_all_shreds_received(
&transmit_receiver,
0,
0,
num_data_shreds as u64,
num_coding_shreds as u64,
);
}
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signer};
use std::path::Path;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
struct MockBroadcastStage {
blockstore: Arc<Blockstore>,
@@ -581,7 +277,6 @@ pub mod test {
leader_pubkey: &Pubkey,
ledger_path: &Path,
entry_receiver: Receiver<WorkingBankEntry>,
retransmit_slots_receiver: RetransmitSlotsReceiver,
) -> MockBroadcastStage {
// Make the database ledger
let blockstore = Arc::new(Blockstore::open(ledger_path).unwrap());
@@ -594,22 +289,21 @@ pub mod test {
let broadcast_buddy = Node::new_localhost_with_pubkey(&buddy_keypair.pubkey());
// Fill the cluster_info with the buddy's info
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
cluster_info.insert_info(broadcast_buddy.info);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let exit_sender = Arc::new(AtomicBool::new(false));
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let leader_keypair = cluster_info.keypair.clone();
let leader_keypair = cluster_info.read().unwrap().keypair.clone();
// Start up the broadcast stage
let broadcast_service = BroadcastStage::new(
leader_info.sockets.broadcast,
cluster_info,
entry_receiver,
retransmit_slots_receiver,
&exit_sender,
&blockstore,
StandardBroadcastRun::new(leader_keypair, 0),
@@ -632,12 +326,10 @@ pub mod test {
let leader_keypair = Keypair::new();
let (entry_sender, entry_receiver) = channel();
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
let broadcast_service = setup_dummy_broadcast_service(
&leader_keypair.pubkey(),
&ledger_path,
entry_receiver,
retransmit_slots_receiver,
);
let start_tick_height;
let max_tick_height;
@@ -656,7 +348,6 @@ pub mod test {
.expect("Expect successful send to broadcast service");
}
}
sleep(Duration::from_millis(2000));
trace!(
@@ -673,7 +364,6 @@ pub mod test {
assert_eq!(entries.len(), max_tick_height as usize);
drop(entry_sender);
drop(retransmit_slots_sender);
broadcast_service
.broadcast_service
.join()

core/src/broadcast_stage/broadcast_fake_shreds_run.rs

@@ -28,8 +28,8 @@ impl BroadcastRun for BroadcastFakeShredsRun {
&mut self,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> {
// 1) Pull entries from banking stage
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
@@ -83,32 +83,26 @@ impl BroadcastRun for BroadcastFakeShredsRun {
}
let data_shreds = Arc::new(data_shreds);
blockstore_sender.send((data_shreds.clone(), None))?;
blockstore_sender.send(data_shreds.clone())?;
// 3) Start broadcast step
// `Some` stakes mark the fake shreds
socket_sender.send((
(Some(Arc::new(HashMap::new())), Arc::new(fake_data_shreds)),
None,
))?;
socket_sender.send((
(Some(Arc::new(HashMap::new())), Arc::new(fake_coding_shreds)),
None,
))?;
socket_sender.send((Some(Arc::new(HashMap::new())), Arc::new(fake_data_shreds)))?;
socket_sender.send((Some(Arc::new(HashMap::new())), Arc::new(fake_coding_shreds)))?;
// `None` stakes mark the real shreds
socket_sender.send(((None, data_shreds), None))?;
socket_sender.send(((None, Arc::new(coding_shreds)), None))?;
socket_sender.send((None, data_shreds))?;
socket_sender.send((None, Arc::new(coding_shreds)))?;
Ok(())
}
fn transmit(
&mut self,
receiver: &Arc<Mutex<TransmitReceiver>>,
cluster_info: &ClusterInfo,
&self,
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sock: &UdpSocket,
) -> Result<()> {
for ((stakes, data_shreds), _) in receiver.lock().unwrap().iter() {
let peers = cluster_info.tvu_peers();
for (stakes, data_shreds) in receiver.lock().unwrap().iter() {
let peers = cluster_info.read().unwrap().tvu_peers();
peers.iter().enumerate().for_each(|(i, peer)| {
if i <= self.partition && stakes.is_some() {
// Send fake shreds to the first N peers
@@ -125,11 +119,11 @@ impl BroadcastRun for BroadcastFakeShredsRun {
Ok(())
}
fn record(
&mut self,
receiver: &Arc<Mutex<RecordReceiver>>,
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
for (data_shreds, _) in receiver.lock().unwrap().iter() {
for data_shreds in receiver.lock().unwrap().iter() {
blockstore.insert_shreds(data_shreds.to_vec(), None, true)?;
}
Ok(())
@@ -145,7 +139,7 @@ mod tests {
#[test]
fn test_tvu_peers_ordering() {
let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
let mut cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
));

core/src/broadcast_stage/broadcast_metrics.rs

@@ -1,288 +0,0 @@
use super::*;
pub(crate) trait BroadcastStats {
fn update(&mut self, new_stats: &Self);
fn report_stats(&mut self, slot: Slot, slot_start: Instant);
}
#[derive(Clone)]
pub(crate) struct BroadcastShredBatchInfo {
pub(crate) slot: Slot,
pub(crate) num_expected_batches: Option<usize>,
pub(crate) slot_start_ts: Instant,
}
#[derive(Default, Clone)]
pub(crate) struct ProcessShredsStats {
// Per-slot elapsed time
pub(crate) shredding_elapsed: u64,
pub(crate) receive_elapsed: u64,
}
impl ProcessShredsStats {
pub(crate) fn update(&mut self, new_stats: &ProcessShredsStats) {
self.shredding_elapsed += new_stats.shredding_elapsed;
self.receive_elapsed += new_stats.receive_elapsed;
}
pub(crate) fn reset(&mut self) {
*self = Self::default();
}
}
#[derive(Default, Clone)]
pub(crate) struct TransmitShredsStats {
pub(crate) transmit_elapsed: u64,
pub(crate) send_mmsg_elapsed: u64,
pub(crate) get_peers_elapsed: u64,
pub(crate) num_shreds: usize,
}
impl BroadcastStats for TransmitShredsStats {
fn update(&mut self, new_stats: &TransmitShredsStats) {
self.transmit_elapsed += new_stats.transmit_elapsed;
self.send_mmsg_elapsed += new_stats.send_mmsg_elapsed;
self.get_peers_elapsed += new_stats.get_peers_elapsed;
self.num_shreds += new_stats.num_shreds;
}
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
datapoint_info!(
"broadcast-transmit-shreds-stats",
("slot", slot as i64, i64),
(
"end_to_end_elapsed",
// `slot_start` signals when the first batch of shreds was
// received, used to measure duration of broadcast
slot_start.elapsed().as_micros() as i64,
i64
),
("transmit_elapsed", self.transmit_elapsed as i64, i64),
("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
("num_shreds", self.num_shreds as i64, i64),
);
}
}
#[derive(Default, Clone)]
pub(crate) struct InsertShredsStats {
pub(crate) insert_shreds_elapsed: u64,
pub(crate) num_shreds: usize,
}
impl BroadcastStats for InsertShredsStats {
fn update(&mut self, new_stats: &InsertShredsStats) {
self.insert_shreds_elapsed += new_stats.insert_shreds_elapsed;
self.num_shreds += new_stats.num_shreds;
}
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
datapoint_info!(
"broadcast-insert-shreds-stats",
("slot", slot as i64, i64),
(
"end_to_end_elapsed",
// `slot_start` signals when the first batch of shreds was
// received, used to measure duration of broadcast
slot_start.elapsed().as_micros() as i64,
i64
),
(
"insert_shreds_elapsed",
self.insert_shreds_elapsed as i64,
i64
),
("num_shreds", self.num_shreds as i64, i64),
);
}
}
// Tracks metrics of type `T` across multiple threads
#[derive(Default)]
pub(crate) struct BatchCounter<T: BroadcastStats + Default> {
// The number of batches processed across all threads so far
num_batches: usize,
// Filled in when the last batch of shreds is received,
// signals how many batches of shreds to expect
num_expected_batches: Option<usize>,
broadcast_shred_stats: T,
}
impl<T: BroadcastStats + Default> BatchCounter<T> {
#[cfg(test)]
pub(crate) fn num_batches(&self) -> usize {
self.num_batches
}
}
#[derive(Default)]
pub(crate) struct SlotBroadcastStats<T: BroadcastStats + Default>(HashMap<Slot, BatchCounter<T>>);
impl<T: BroadcastStats + Default> SlotBroadcastStats<T> {
#[cfg(test)]
pub(crate) fn get(&self, slot: Slot) -> Option<&BatchCounter<T>> {
self.0.get(&slot)
}
pub(crate) fn update(&mut self, new_stats: &T, batch_info: &Option<BroadcastShredBatchInfo>) {
if let Some(batch_info) = batch_info {
let mut should_delete = false;
{
let slot_batch_counter = self.0.entry(batch_info.slot).or_default();
slot_batch_counter.broadcast_shred_stats.update(new_stats);
// Only count the ones where `broadcast_shred_batch_info`.is_some(), because
// there could potentially be other `retransmit` slots inserted into the
// transmit pipeline (signaled by ReplayStage) that are not created by the
// main shredding/broadcast pipeline
slot_batch_counter.num_batches += 1;
if let Some(num_expected_batches) = batch_info.num_expected_batches {
slot_batch_counter.num_expected_batches = Some(num_expected_batches);
}
if let Some(num_expected_batches) = slot_batch_counter.num_expected_batches {
if slot_batch_counter.num_batches == num_expected_batches {
slot_batch_counter
.broadcast_shred_stats
.report_stats(batch_info.slot, batch_info.slot_start_ts);
should_delete = true;
}
}
}
if should_delete {
self.0
.remove(&batch_info.slot)
.expect("delete should be successful");
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[derive(Default)]
struct TestStats {
sender: Option<Sender<(usize, Slot, Instant)>>,
count: usize,
}
impl BroadcastStats for TestStats {
fn update(&mut self, new_stats: &TestStats) {
self.count += new_stats.count;
self.sender = new_stats.sender.clone();
}
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
self.sender
.as_ref()
.unwrap()
.send((self.count, slot, slot_start))
.unwrap()
}
}
#[test]
fn test_update() {
let start = Instant::now();
let mut slot_broadcast_stats = SlotBroadcastStats::default();
slot_broadcast_stats.update(
&TransmitShredsStats {
transmit_elapsed: 1,
get_peers_elapsed: 1,
send_mmsg_elapsed: 1,
num_shreds: 1,
},
&Some(BroadcastShredBatchInfo {
slot: 0,
num_expected_batches: Some(2),
slot_start_ts: start.clone(),
}),
);
// Singular update
let slot_0_stats = slot_broadcast_stats.0.get(&0).unwrap();
assert_eq!(slot_0_stats.num_batches, 1);
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
slot_broadcast_stats.update(
&TransmitShredsStats {
transmit_elapsed: 1,
get_peers_elapsed: 1,
send_mmsg_elapsed: 1,
num_shreds: 1,
},
&None,
);
// If BroadcastShredBatchInfo == None, then update should be ignored
let slot_0_stats = slot_broadcast_stats.0.get(&0).unwrap();
assert_eq!(slot_0_stats.num_batches, 1);
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
// If another batch is given, then total number of batches == num_expected_batches == 2,
// so the batch should be purged from the HashMap
slot_broadcast_stats.update(
&TransmitShredsStats {
transmit_elapsed: 1,
get_peers_elapsed: 1,
send_mmsg_elapsed: 1,
num_shreds: 1,
},
&Some(BroadcastShredBatchInfo {
slot: 0,
num_expected_batches: None,
slot_start_ts: start.clone(),
}),
);
assert!(slot_broadcast_stats.0.get(&0).is_none());
}
#[test]
fn test_update_multi_threaded() {
for round in 0..50 {
let start = Instant::now();
let slot_broadcast_stats = Arc::new(Mutex::new(SlotBroadcastStats::default()));
let num_threads = 5;
let slot = 0;
let (sender, receiver) = channel();
let thread_handles: Vec<_> = (0..num_threads)
.into_iter()
.map(|i| {
let slot_broadcast_stats = slot_broadcast_stats.clone();
let sender = Some(sender.clone());
let test_stats = TestStats { sender, count: 1 };
let mut broadcast_batch_info = BroadcastShredBatchInfo {
slot,
num_expected_batches: None,
slot_start_ts: start.clone(),
};
if i == round % num_threads {
broadcast_batch_info.num_expected_batches = Some(num_threads);
}
Builder::new()
.name("test_update_multi_threaded".to_string())
.spawn(move || {
slot_broadcast_stats
.lock()
.unwrap()
.update(&test_stats, &Some(broadcast_batch_info))
})
.unwrap()
})
.collect();
for t in thread_handles {
t.join().unwrap();
}
assert!(slot_broadcast_stats.lock().unwrap().0.get(&slot).is_none());
let (returned_count, returned_slot, returned_instant) = receiver.recv().unwrap();
assert_eq!(returned_count, num_threads);
assert_eq!(returned_slot, slot);
assert_eq!(returned_instant, start);
}
}
}

core/src/broadcast_stage/broadcast_utils.rs

@@ -76,7 +76,7 @@ pub(super) fn recv_slot_entries(receiver: &Receiver<WorkingBankEntry>) -> Result
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_sdk::genesis_config::GenesisConfig;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::system_transaction;

core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs

@@ -23,8 +23,8 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
&mut self,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> {
// 1) Pull entries from banking stage
let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?;
@@ -61,44 +61,38 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
);
let data_shreds = Arc::new(data_shreds);
blockstore_sender.send((data_shreds.clone(), None))?;
blockstore_sender.send(data_shreds.clone())?;
// 3) Start broadcast step
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
let stakes = stakes.map(Arc::new);
socket_sender.send(((stakes.clone(), data_shreds), None))?;
socket_sender.send(((stakes, Arc::new(coding_shreds)), None))?;
socket_sender.send((stakes.clone(), data_shreds))?;
socket_sender.send((stakes, Arc::new(coding_shreds)))?;
Ok(())
}
fn transmit(
&mut self,
receiver: &Arc<Mutex<TransmitReceiver>>,
cluster_info: &ClusterInfo,
&self,
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sock: &UdpSocket,
) -> Result<()> {
let ((stakes, shreds), _) = receiver.lock().unwrap().recv()?;
let (stakes, shreds) = receiver.lock().unwrap().recv()?;
let all_seeds: Vec<[u8; 32]> = shreds.iter().map(|s| s.seed()).collect();
// Broadcast data
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
let mut send_mmsg_total = 0;
broadcast_shreds(
sock,
&shreds,
&peers_and_stakes,
&peers,
&Arc::new(AtomicU64::new(0)),
&mut send_mmsg_total,
)?;
let all_shred_bufs: Vec<Vec<u8>> = shreds.to_vec().into_iter().map(|s| s.payload).collect();
cluster_info
.write()
.unwrap()
.broadcast_shreds(sock, all_shred_bufs, &all_seeds, stakes)?;
Ok(())
}
fn record(
&mut self,
receiver: &Arc<Mutex<RecordReceiver>>,
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
let (all_shreds, _) = receiver.lock().unwrap().recv()?;
let all_shreds = receiver.lock().unwrap().recv()?;
blockstore
.insert_shreds(all_shreds.to_vec(), None, true)
.expect("Failed to insert shreds in blockstore");

core/src/broadcast_stage/standard_broadcast_run.rs

@@ -1,43 +1,53 @@
use super::{
broadcast_utils::{self, ReceiveResults},
*,
};
use super::broadcast_utils::{self, ReceiveResults};
use super::*;
use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
use solana_ledger::{
entry::Entry,
shred::{Shred, Shredder, RECOMMENDED_FEC_RATE, SHRED_TICK_REFERENCE_MASK},
};
use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
use solana_ledger::entry::Entry;
use solana_ledger::shred::{Shred, Shredder, RECOMMENDED_FEC_RATE, SHRED_TICK_REFERENCE_MASK};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::timing::duration_as_us;
use std::collections::HashMap;
use std::time::Duration;
#[derive(Default)]
struct BroadcastStats {
// Per-slot elapsed time
shredding_elapsed: u64,
insert_shreds_elapsed: u64,
broadcast_elapsed: u64,
receive_elapsed: u64,
seed_elapsed: u64,
}
impl BroadcastStats {
fn reset(&mut self) {
self.insert_shreds_elapsed = 0;
self.shredding_elapsed = 0;
self.broadcast_elapsed = 0;
self.receive_elapsed = 0;
self.seed_elapsed = 0;
}
}
#[derive(Clone)]
pub struct StandardBroadcastRun {
process_shreds_stats: ProcessShredsStats,
transmit_shreds_stats: Arc<Mutex<SlotBroadcastStats<TransmitShredsStats>>>,
insert_shreds_stats: Arc<Mutex<SlotBroadcastStats<InsertShredsStats>>>,
pub(super) struct StandardBroadcastRun {
stats: Arc<RwLock<BroadcastStats>>,
unfinished_slot: Option<UnfinishedSlotInfo>,
current_slot_and_parent: Option<(u64, u64)>,
slot_broadcast_start: Option<Instant>,
keypair: Arc<Keypair>,
shred_version: u16,
last_datapoint_submit: Arc<AtomicU64>,
num_batches: usize,
}
impl StandardBroadcastRun {
pub(super) fn new(keypair: Arc<Keypair>, shred_version: u16) -> Self {
Self {
process_shreds_stats: ProcessShredsStats::default(),
transmit_shreds_stats: Arc::new(Mutex::new(SlotBroadcastStats::default())),
insert_shreds_stats: Arc::new(Mutex::new(SlotBroadcastStats::default())),
stats: Arc::new(RwLock::new(BroadcastStats::default())),
unfinished_slot: None,
current_slot_and_parent: None,
slot_broadcast_start: None,
keypair,
shred_version,
last_datapoint_submit: Arc::new(AtomicU64::new(0)),
num_batches: 0,
}
}
@@ -120,7 +130,7 @@ impl StandardBroadcastRun {
#[cfg(test)]
fn test_process_receive_results(
&mut self,
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sock: &UdpSocket,
blockstore: &Arc<Blockstore>,
receive_results: ReceiveResults,
@@ -132,7 +142,6 @@ impl StandardBroadcastRun {
let brecv = Arc::new(Mutex::new(brecv));
//data
let _ = self.transmit(&srecv, cluster_info, sock);
let _ = self.record(&brecv, blockstore);
//coding
let _ = self.transmit(&srecv, cluster_info, sock);
let _ = self.record(&brecv, blockstore);
@@ -142,8 +151,8 @@ impl StandardBroadcastRun {
fn process_receive_results(
&mut self,
blockstore: &Arc<Blockstore>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
receive_results: ReceiveResults,
) -> Result<()> {
let mut receive_elapsed = receive_results.time_elapsed;
@@ -151,13 +160,11 @@ impl StandardBroadcastRun {
let bank = receive_results.bank.clone();
let last_tick_height = receive_results.last_tick_height;
inc_new_counter_info!("broadcast_service-entries_received", num_entries);
let old_broadcast_start = self.slot_broadcast_start;
let old_num_batches = self.num_batches;
if self.current_slot_and_parent.is_none()
|| bank.slot() != self.current_slot_and_parent.unwrap().0
{
self.slot_broadcast_start = Some(Instant::now());
self.num_batches = 0;
let slot = bank.slot();
let parent_slot = bank.parent_slot();
@@ -172,19 +179,19 @@ impl StandardBroadcastRun {
self.check_for_interrupted_slot(bank.ticks_per_slot() as u8);
// 2) Convert entries to shreds and coding shreds
let (shredder, next_shred_index) = self.init_shredder(
blockstore,
(bank.tick_height() % bank.ticks_per_slot()) as u8,
);
let is_last_in_slot = last_tick_height == bank.max_tick_height();
let data_shreds = self.entries_to_data_shreds(
let mut data_shreds = self.entries_to_data_shreds(
&shredder,
next_shred_index,
&receive_results.entries,
is_last_in_slot,
last_tick_height == bank.max_tick_height(),
);
// Insert the first shred so blockstore stores that the leader started this block
// This must be done before the blocks are sent out over the wire.
//Insert the first shred so blockstore stores that the leader started this block
//This must be done before the blocks are sent out over the wire.
if !data_shreds.is_empty() && data_shreds[0].index() == 0 {
let first = vec![data_shreds[0].clone()];
blockstore
@@ -192,56 +199,26 @@ impl StandardBroadcastRun {
.expect("Failed to insert shreds in blockstore");
}
let last_data_shred = data_shreds.len();
if let Some(last_shred) = last_unfinished_slot_shred {
data_shreds.push(last_shred);
}
let to_shreds_elapsed = to_shreds_start.elapsed();
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
let stakes = stakes.map(Arc::new);
// Broadcast the last shred of the interrupted slot if necessary
if let Some(last_shred) = last_unfinished_slot_shred {
let batch_info = Some(BroadcastShredBatchInfo {
slot: last_shred.slot(),
num_expected_batches: Some(old_num_batches + 1),
slot_start_ts: old_broadcast_start.expect(
"Old broadcast start time for previous slot must exist if the previous slot
was interrupted",
),
});
let last_shred = Arc::new(vec![last_shred]);
socket_sender.send(((stakes.clone(), last_shred.clone()), batch_info.clone()))?;
blockstore_sender.send((last_shred, batch_info))?;
}
// Increment by two batches, one for the data batch, one for the coding batch.
self.num_batches += 2;
let num_expected_batches = {
if is_last_in_slot {
Some(self.num_batches)
} else {
None
}
};
let batch_info = Some(BroadcastShredBatchInfo {
slot: bank.slot(),
num_expected_batches,
slot_start_ts: self
.slot_broadcast_start
.clone()
.expect("Start timestamp must exist for a slot if we're broadcasting the slot"),
});
let data_shreds = Arc::new(data_shreds);
socket_sender.send(((stakes.clone(), data_shreds.clone()), batch_info.clone()))?;
blockstore_sender.send((data_shreds.clone(), batch_info.clone()))?;
socket_sender.send((stakes.clone(), data_shreds.clone()))?;
blockstore_sender.send(data_shreds.clone())?;
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
let coding_shreds = Arc::new(coding_shreds);
socket_sender.send(((stakes, coding_shreds.clone()), batch_info.clone()))?;
blockstore_sender.send((coding_shreds, batch_info))?;
self.process_shreds_stats.update(&ProcessShredsStats {
socket_sender.send((stakes, coding_shreds))?;
self.update_broadcast_stats(BroadcastStats {
shredding_elapsed: duration_as_us(&to_shreds_elapsed),
receive_elapsed: duration_as_us(&receive_elapsed),
..BroadcastStats::default()
});
if last_tick_height == bank.max_tick_height() {
self.report_and_reset_stats();
self.unfinished_slot = None;
@@ -250,15 +227,10 @@ impl StandardBroadcastRun {
Ok(())
}
fn insert(
&mut self,
blockstore: &Arc<Blockstore>,
shreds: Arc<Vec<Shred>>,
broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
) -> Result<()> {
fn insert(&self, blockstore: &Arc<Blockstore>, shreds: Arc<Vec<Shred>>) -> Result<()> {
// Insert shreds into blockstore
let insert_shreds_start = Instant::now();
// The first shred is inserted synchronously
//The first shred is inserted synchronously
let data_shreds = if !shreds.is_empty() && shreds[0].index() == 0 {
shreds[1..].to_vec()
} else {
@@ -268,80 +240,66 @@ impl StandardBroadcastRun {
.insert_shreds(data_shreds, None, true)
.expect("Failed to insert shreds in blockstore");
let insert_shreds_elapsed = insert_shreds_start.elapsed();
let new_insert_shreds_stats = InsertShredsStats {
self.update_broadcast_stats(BroadcastStats {
insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed),
num_shreds: shreds.len(),
};
self.update_insertion_metrics(&new_insert_shreds_stats, &broadcast_shred_batch_info);
..BroadcastStats::default()
});
Ok(())
}
fn update_insertion_metrics(
&mut self,
new_insertion_shreds_stats: &InsertShredsStats,
broadcast_shred_batch_info: &Option<BroadcastShredBatchInfo>,
) {
let mut insert_shreds_stats = self.insert_shreds_stats.lock().unwrap();
insert_shreds_stats.update(new_insertion_shreds_stats, broadcast_shred_batch_info);
}
fn broadcast(
&mut self,
&self,
sock: &UdpSocket,
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
shreds: Arc<Vec<Shred>>,
broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
) -> Result<()> {
trace!("Broadcasting {:?} shreds", shreds.len());
// Get the list of peers to broadcast to
let get_peers_start = Instant::now();
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
let get_peers_elapsed = get_peers_start.elapsed();
let seed_start = Instant::now();
let seeds: Vec<[u8; 32]> = shreds.iter().map(|s| s.seed()).collect();
let seed_elapsed = seed_start.elapsed();
// Broadcast the shreds
let transmit_start = Instant::now();
let mut send_mmsg_total = 0;
broadcast_shreds(
sock,
&shreds,
&peers_and_stakes,
&peers,
&self.last_datapoint_submit,
&mut send_mmsg_total,
)?;
let transmit_elapsed = transmit_start.elapsed();
let new_transmit_shreds_stats = TransmitShredsStats {
transmit_elapsed: duration_as_us(&transmit_elapsed),
get_peers_elapsed: duration_as_us(&get_peers_elapsed),
send_mmsg_elapsed: send_mmsg_total,
num_shreds: shreds.len(),
};
let broadcast_start = Instant::now();
let shred_bufs: Vec<Vec<u8>> = shreds.to_vec().into_iter().map(|s| s.payload).collect();
trace!("Broadcasting {:?} shreds", shred_bufs.len());
// Process metrics
self.update_transmit_metrics(&new_transmit_shreds_stats, &broadcast_shred_batch_info);
cluster_info
.write()
.unwrap()
.broadcast_shreds(sock, shred_bufs, &seeds, stakes)?;
let broadcast_elapsed = broadcast_start.elapsed();
self.update_broadcast_stats(BroadcastStats {
broadcast_elapsed: duration_as_us(&broadcast_elapsed),
seed_elapsed: duration_as_us(&seed_elapsed),
..BroadcastStats::default()
});
Ok(())
}
fn update_transmit_metrics(
&mut self,
new_transmit_shreds_stats: &TransmitShredsStats,
broadcast_shred_batch_info: &Option<BroadcastShredBatchInfo>,
) {
let mut transmit_shreds_stats = self.transmit_shreds_stats.lock().unwrap();
transmit_shreds_stats.update(new_transmit_shreds_stats, broadcast_shred_batch_info);
fn update_broadcast_stats(&self, stats: BroadcastStats) {
let mut wstats = self.stats.write().unwrap();
wstats.receive_elapsed += stats.receive_elapsed;
wstats.shredding_elapsed += stats.shredding_elapsed;
wstats.insert_shreds_elapsed += stats.insert_shreds_elapsed;
wstats.broadcast_elapsed += stats.broadcast_elapsed;
wstats.seed_elapsed += stats.seed_elapsed;
}
fn report_and_reset_stats(&mut self) {
let stats = &self.process_shreds_stats;
let stats = self.stats.read().unwrap();
assert!(self.unfinished_slot.is_some());
datapoint_info!(
"broadcast-process-shreds-stats",
"broadcast-bank-stats",
("slot", self.unfinished_slot.unwrap().slot as i64, i64),
("shredding_time", stats.shredding_elapsed as i64, i64),
("insertion_time", stats.insert_shreds_elapsed as i64, i64),
("broadcast_time", stats.broadcast_elapsed as i64, i64),
("receive_time", stats.receive_elapsed as i64, i64),
("seed", stats.seed_elapsed as i64, i64),
(
"num_data_shreds",
"num_shreds",
i64::from(self.unfinished_slot.unwrap().next_shred_index),
i64
),
@@ -351,7 +309,8 @@ impl StandardBroadcastRun {
i64
),
);
self.process_shreds_stats.reset();
drop(stats);
self.stats.write().unwrap().reset();
}
}
@@ -360,8 +319,8 @@ impl BroadcastRun for StandardBroadcastRun {
&mut self,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
socket_sender: &Sender<TransmitShreds>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> {
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
self.process_receive_results(
@@ -372,21 +331,21 @@ impl BroadcastRun for StandardBroadcastRun {
)
}
fn transmit(
&mut self,
receiver: &Arc<Mutex<TransmitReceiver>>,
cluster_info: &ClusterInfo,
&self,
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sock: &UdpSocket,
) -> Result<()> {
let ((stakes, shreds), slot_start_ts) = receiver.lock().unwrap().recv()?;
self.broadcast(sock, cluster_info, stakes, shreds, slot_start_ts)
let (stakes, shreds) = receiver.lock().unwrap().recv()?;
self.broadcast(sock, cluster_info, stakes, shreds)
}
fn record(
&mut self,
receiver: &Arc<Mutex<RecordReceiver>>,
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
let (shreds, slot_start_ts) = receiver.lock().unwrap().recv()?;
self.insert(blockstore, shreds, slot_start_ts)
let shreds = receiver.lock().unwrap().recv()?;
self.insert(blockstore, shreds)
}
}
@@ -394,17 +353,18 @@ impl BroadcastRun for StandardBroadcastRun {
mod test {
use super::*;
use crate::cluster_info::{ClusterInfo, Node};
use solana_ledger::genesis_utils::create_genesis_config;
use crate::genesis_utils::create_genesis_config;
use solana_ledger::{
blockstore::Blockstore, entry::create_ticks, get_tmp_ledger_path,
shred::max_ticks_per_n_shreds,
};
use solana_runtime::bank::Bank;
use solana_sdk::{
clock::Slot,
genesis_config::GenesisConfig,
signature::{Keypair, Signer},
};
use std::sync::Arc;
use std::sync::{Arc, RwLock};
use std::time::Duration;
fn setup(
@@ -412,7 +372,7 @@ mod test {
) -> (
Arc<Blockstore>,
GenesisConfig,
Arc<ClusterInfo>,
Arc<RwLock<ClusterInfo>>,
Arc<Bank>,
Arc<Keypair>,
UdpSocket,
@@ -425,9 +385,9 @@ mod test {
let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
leader_info.info.clone(),
));
)));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut genesis_config = create_genesis_config(10_000).genesis_config;
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot, None) + 1;
@@ -502,40 +462,25 @@ mod test {
// Make sure the slot is not complete
assert!(!blockstore.is_full(0));
// Modify the stats, should reset later
standard_broadcast_run.process_shreds_stats.receive_elapsed = 10;
// Broadcast stats should exist, and 2 batches should have been sent,
// one for data, one for coding
assert_eq!(
standard_broadcast_run
.transmit_shreds_stats
.lock()
.unwrap()
.get(unfinished_slot.slot)
.unwrap()
.num_batches(),
2
);
assert_eq!(
standard_broadcast_run
.insert_shreds_stats
.lock()
.unwrap()
.get(unfinished_slot.slot)
.unwrap()
.num_batches(),
2
);
standard_broadcast_run
.stats
.write()
.unwrap()
.receive_elapsed = 10;
// Try to fetch ticks from blockstore, nothing should break
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), ticks0);
assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
assert_eq!(
blockstore.get_slot_entries(0, num_shreds_per_slot).unwrap(),
blockstore
.get_slot_entries(0, num_shreds_per_slot, None)
.unwrap(),
vec![],
);
// Step 2: Make a transmission for another bank that interrupts the transmission for
// slot 0
let bank2 = Arc::new(Bank::new_from_parent(&bank0, &leader_keypair.pubkey(), 2));
let interrupted_slot = unfinished_slot.slot;
// Interrupting the slot should cause the unfinished_slot and stats to reset
let num_shreds = 1;
assert!(num_shreds < num_shreds_per_slot);
@@ -563,28 +508,16 @@ mod test {
// Check that the stats were reset as well
assert_eq!(
standard_broadcast_run.process_shreds_stats.receive_elapsed,
standard_broadcast_run.stats.read().unwrap().receive_elapsed,
0
);
// Broadcast stats for interrupted slot should be cleared
assert!(standard_broadcast_run
.transmit_shreds_stats
.lock()
.unwrap()
.get(interrupted_slot)
.is_none());
assert!(standard_broadcast_run
.insert_shreds_stats
.lock()
.unwrap()
.get(interrupted_slot)
.is_none());
// Try to fetch the incomplete ticks from blockstore, should succeed
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), ticks0);
assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
assert_eq!(
blockstore.get_slot_entries(0, num_shreds_per_slot).unwrap(),
blockstore
.get_slot_entries(0, num_shreds_per_slot, None)
.unwrap(),
vec![],
);
}
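The hunks above interleave two versions of the broadcast stats plumbing: one tracks per-batch TransmitShredsStats/InsertShredsStats maps, the other accumulates a single BroadcastStats behind a lock and resets it once the slot completes. A minimal, self-contained sketch of that accumulate-then-report pattern (the type and field names here are illustrative stand-ins, not the crate's actual API):

use std::sync::RwLock;

#[derive(Default, Debug)]
struct BroadcastStats {
    receive_elapsed: u64,
    shredding_elapsed: u64,
    insert_shreds_elapsed: u64,
    broadcast_elapsed: u64,
}

struct StatsAggregator {
    stats: RwLock<BroadcastStats>,
}

impl StatsAggregator {
    fn new() -> Self {
        Self { stats: RwLock::new(BroadcastStats::default()) }
    }

    // Each pipeline stage adds its timings into the shared accumulator.
    fn update(&self, delta: BroadcastStats) {
        let mut w = self.stats.write().unwrap();
        w.receive_elapsed += delta.receive_elapsed;
        w.shredding_elapsed += delta.shredding_elapsed;
        w.insert_shreds_elapsed += delta.insert_shreds_elapsed;
        w.broadcast_elapsed += delta.broadcast_elapsed;
    }

    // When the slot completes, report the totals and start again from zero.
    fn report_and_reset(&self, slot: u64) {
        let mut w = self.stats.write().unwrap();
        println!("slot {}: {:?}", slot, *w);
        *w = BroadcastStats::default();
    }
}

fn main() {
    let agg = StatsAggregator::new();
    agg.update(BroadcastStats { receive_elapsed: 10, ..BroadcastStats::default() });
    agg.update(BroadcastStats { broadcast_elapsed: 25, ..BroadcastStats::default() });
    agg.report_and_reset(0);
}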

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,343 +0,0 @@
use crate::{
cluster_info::ClusterInfo, contact_info::ContactInfo, epoch_slots::EpochSlots,
serve_repair::RepairType,
};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::epoch_stakes::NodeIdToVoteAccounts;
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use std::{
collections::{HashMap, HashSet},
sync::{Arc, RwLock},
};
pub type SlotPubkeys = HashMap<Arc<Pubkey>, u64>;
pub type ClusterSlotsMap = RwLock<HashMap<Slot, Arc<RwLock<SlotPubkeys>>>>;
#[derive(Default)]
pub struct ClusterSlots {
cluster_slots: ClusterSlotsMap,
keys: RwLock<HashSet<Arc<Pubkey>>>,
since: RwLock<Option<u64>>,
validator_stakes: RwLock<Arc<NodeIdToVoteAccounts>>,
epoch: RwLock<Option<u64>>,
self_id: RwLock<Pubkey>,
}
impl ClusterSlots {
pub fn lookup(&self, slot: Slot) -> Option<Arc<RwLock<SlotPubkeys>>> {
self.cluster_slots.read().unwrap().get(&slot).cloned()
}
pub fn update(&self, root: Slot, cluster_info: &ClusterInfo, bank_forks: &RwLock<BankForks>) {
self.update_peers(cluster_info, bank_forks);
let since = *self.since.read().unwrap();
let epoch_slots = cluster_info.get_epoch_slots_since(since);
self.update_internal(root, epoch_slots);
}
fn update_internal(&self, root: Slot, epoch_slots: (Vec<EpochSlots>, Option<u64>)) {
let (epoch_slots_list, since) = epoch_slots;
for epoch_slots in epoch_slots_list {
let slots = epoch_slots.to_slots(root);
for slot in &slots {
if *slot <= root {
continue;
}
let pubkey = Arc::new(epoch_slots.from);
let exists = self.keys.read().unwrap().get(&pubkey).is_some();
if !exists {
self.keys.write().unwrap().insert(pubkey.clone());
}
let from = self.keys.read().unwrap().get(&pubkey).unwrap().clone();
let balance = self
.validator_stakes
.read()
.unwrap()
.get(&from)
.map(|v| v.total_stake)
.unwrap_or(0);
let mut slot_pubkeys = self.cluster_slots.read().unwrap().get(slot).cloned();
if slot_pubkeys.is_none() {
let new_slot_pubkeys = Arc::new(RwLock::new(HashMap::default()));
self.cluster_slots
.write()
.unwrap()
.insert(*slot, new_slot_pubkeys.clone());
slot_pubkeys = Some(new_slot_pubkeys);
}
slot_pubkeys
.unwrap()
.write()
.unwrap()
.insert(from.clone(), balance);
}
}
self.cluster_slots.write().unwrap().retain(|x, _| *x > root);
self.keys
.write()
.unwrap()
.retain(|x| Arc::strong_count(x) > 1);
*self.since.write().unwrap() = since;
}
pub fn collect(&self, id: &Pubkey) -> HashSet<Slot> {
self.cluster_slots
.read()
.unwrap()
.iter()
.filter(|(_, keys)| keys.read().unwrap().get(id).is_some())
.map(|(slot, _)| slot)
.cloned()
.collect()
}
fn update_peers(&self, cluster_info: &ClusterInfo, bank_forks: &RwLock<BankForks>) {
let root_bank = bank_forks.read().unwrap().root_bank().clone();
let root_epoch = root_bank.epoch();
let my_epoch = *self.epoch.read().unwrap();
if Some(root_epoch) != my_epoch {
let validator_stakes = root_bank
.epoch_stakes(root_epoch)
.expect(
"Bank must have epoch stakes
for its own epoch",
)
.node_id_to_vote_accounts()
.clone();
*self.validator_stakes.write().unwrap() = validator_stakes;
let id = cluster_info.id();
*self.self_id.write().unwrap() = id;
*self.epoch.write().unwrap() = Some(root_epoch);
}
}
pub fn compute_weights(&self, slot: Slot, repair_peers: &[ContactInfo]) -> Vec<(u64, usize)> {
let slot_peers = self.lookup(slot);
repair_peers
.iter()
.enumerate()
.map(|(i, x)| {
let peer_stake = slot_peers
.as_ref()
.and_then(|v| v.read().unwrap().get(&x.id).cloned())
.unwrap_or(0);
(
1 + peer_stake
+ self
.validator_stakes
.read()
.unwrap()
.get(&x.id)
.map(|v| v.total_stake)
.unwrap_or(0),
i,
)
})
.collect()
}
pub fn generate_repairs_for_missing_slots(
&self,
self_id: &Pubkey,
root: Slot,
) -> Vec<RepairType> {
let my_slots = self.collect(self_id);
self.cluster_slots
.read()
.unwrap()
.keys()
.filter(|x| **x > root)
.filter(|x| !my_slots.contains(*x))
.map(|x| RepairType::HighestShred(*x, 0))
.collect()
}
}
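compute_weights above ranks repair peers as 1 + the stake that peer advertised for the slot + the peer's total epoch stake, so even an unstaked peer keeps a nonzero weight. A rough std-only sketch of that weighting, with plain maps and string keys standing in for the ClusterSlots state and Pubkeys:

use std::collections::HashMap;

// Weight = 1 + stake the peer advertised for this slot + the peer's epoch stake.
// The leading 1 keeps completely unstaked peers selectable.
fn compute_weights(
    slot_peers: &HashMap<String, u64>,       // pubkey -> stake seen confirming the slot
    validator_stakes: &HashMap<String, u64>, // pubkey -> total epoch stake
    repair_peers: &[String],
) -> Vec<(u64, usize)> {
    repair_peers
        .iter()
        .enumerate()
        .map(|(i, id)| {
            let slot_stake = slot_peers.get(id).copied().unwrap_or(0);
            let epoch_stake = validator_stakes.get(id).copied().unwrap_or(0);
            (1 + slot_stake + epoch_stake, i)
        })
        .collect()
}

fn main() {
    let slot_peers: HashMap<_, _> = [("a".to_string(), 50u64)].into_iter().collect();
    let stakes: HashMap<_, _> = [("b".to_string(), 10u64)].into_iter().collect();
    let peers = vec!["a".to_string(), "b".to_string(), "c".to_string()];
    // "a" saw the slot (weight 51), "b" only has epoch stake (11), "c" falls back to 1.
    assert_eq!(compute_weights(&slot_peers, &stakes, &peers), vec![(51, 0), (11, 1), (1, 2)]);
}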
#[cfg(test)]
mod tests {
use super::*;
use solana_runtime::epoch_stakes::NodeVoteAccounts;
#[test]
fn test_default() {
let cs = ClusterSlots::default();
assert!(cs.cluster_slots.read().unwrap().is_empty());
assert!(cs.since.read().unwrap().is_none());
}
#[test]
fn test_update_noop() {
let cs = ClusterSlots::default();
cs.update_internal(0, (vec![], None));
assert!(cs.cluster_slots.read().unwrap().is_empty());
assert!(cs.since.read().unwrap().is_none());
}
#[test]
fn test_update_empty() {
let cs = ClusterSlots::default();
let epoch_slot = EpochSlots::default();
cs.update_internal(0, (vec![epoch_slot], Some(0)));
assert_eq!(*cs.since.read().unwrap(), Some(0));
assert!(cs.lookup(0).is_none());
}
#[test]
fn test_update_rooted() {
//root is 0, so it should clear out the slot
let cs = ClusterSlots::default();
let mut epoch_slot = EpochSlots::default();
epoch_slot.fill(&[0], 0);
cs.update_internal(0, (vec![epoch_slot], Some(0)));
assert_eq!(*cs.since.read().unwrap(), Some(0));
assert!(cs.lookup(0).is_none());
}
#[test]
fn test_update_new_slot() {
let cs = ClusterSlots::default();
let mut epoch_slot = EpochSlots::default();
epoch_slot.fill(&[1], 0);
cs.update_internal(0, (vec![epoch_slot], Some(0)));
assert_eq!(*cs.since.read().unwrap(), Some(0));
assert!(cs.lookup(0).is_none());
assert!(cs.lookup(1).is_some());
assert_eq!(
cs.lookup(1)
.unwrap()
.read()
.unwrap()
.get(&Pubkey::default()),
Some(&0)
);
}
#[test]
fn test_compute_weights() {
let cs = ClusterSlots::default();
let ci = ContactInfo::default();
assert_eq!(cs.compute_weights(0, &[ci]), vec![(1, 0)]);
}
#[test]
fn test_best_peer_2() {
let cs = ClusterSlots::default();
let mut c1 = ContactInfo::default();
let mut c2 = ContactInfo::default();
let mut map = HashMap::new();
let k1 = Pubkey::new_rand();
let k2 = Pubkey::new_rand();
map.insert(Arc::new(k1.clone()), std::u64::MAX / 2);
map.insert(Arc::new(k2.clone()), 0);
cs.cluster_slots
.write()
.unwrap()
.insert(0, Arc::new(RwLock::new(map)));
c1.id = k1;
c2.id = k2;
assert_eq!(
cs.compute_weights(0, &[c1, c2]),
vec![(std::u64::MAX / 2 + 1, 0), (1, 1)]
);
}
#[test]
fn test_best_peer_3() {
let cs = ClusterSlots::default();
let mut c1 = ContactInfo::default();
let mut c2 = ContactInfo::default();
let mut map = HashMap::new();
let k1 = Pubkey::new_rand();
let k2 = Pubkey::new_rand();
map.insert(Arc::new(k2.clone()), 0);
cs.cluster_slots
.write()
.unwrap()
.insert(0, Arc::new(RwLock::new(map)));
//make sure default weights are used as well
let validator_stakes: HashMap<_, _> = vec![(
*Arc::new(k1.clone()),
NodeVoteAccounts {
total_stake: std::u64::MAX / 2,
vote_accounts: vec![Pubkey::default()],
},
)]
.into_iter()
.collect();
*cs.validator_stakes.write().unwrap() = Arc::new(validator_stakes);
c1.id = k1;
c2.id = k2;
assert_eq!(
cs.compute_weights(0, &[c1, c2]),
vec![(std::u64::MAX / 2 + 1, 0), (1, 1)]
);
}
#[test]
fn test_update_new_staked_slot() {
let cs = ClusterSlots::default();
let mut epoch_slot = EpochSlots::default();
epoch_slot.fill(&[1], 0);
let map = Arc::new(
vec![(
Pubkey::default(),
NodeVoteAccounts {
total_stake: 1,
vote_accounts: vec![Pubkey::default()],
},
)]
.into_iter()
.collect(),
);
*cs.validator_stakes.write().unwrap() = map;
cs.update_internal(0, (vec![epoch_slot], None));
assert!(cs.lookup(1).is_some());
assert_eq!(
cs.lookup(1)
.unwrap()
.read()
.unwrap()
.get(&Pubkey::default()),
Some(&1)
);
}
#[test]
fn test_generate_repairs() {
let cs = ClusterSlots::default();
let mut epoch_slot = EpochSlots::default();
epoch_slot.fill(&[1], 0);
cs.update_internal(0, (vec![epoch_slot], None));
let self_id = Pubkey::new_rand();
assert_eq!(
cs.generate_repairs_for_missing_slots(&self_id, 0),
vec![RepairType::HighestShred(1, 0)]
)
}
#[test]
fn test_collect_my_slots() {
let cs = ClusterSlots::default();
let mut epoch_slot = EpochSlots::default();
epoch_slot.fill(&[1], 0);
let self_id = epoch_slot.from;
cs.update_internal(0, (vec![epoch_slot], None));
let slots: Vec<Slot> = cs.collect(&self_id).into_iter().collect();
assert_eq!(slots, vec![1]);
}
#[test]
fn test_generate_repairs_existing() {
let cs = ClusterSlots::default();
let mut epoch_slot = EpochSlots::default();
epoch_slot.fill(&[1], 0);
let self_id = epoch_slot.from;
cs.update_internal(0, (vec![epoch_slot], None));
assert!(cs
.generate_repairs_for_missing_slots(&self_id, 0)
.is_empty());
}
}


@@ -1,7 +1,7 @@
use crate::{consensus::VOTE_THRESHOLD_SIZE, rpc_subscriptions::RpcSubscriptions};
use crate::consensus::VOTE_THRESHOLD_SIZE;
use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_info;
use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
@@ -14,14 +14,6 @@ use std::{
time::Duration,
};
#[derive(Default)]
pub struct CacheSlotInfo {
pub current_slot: Slot,
pub node_root: Slot,
pub largest_confirmed_root: Slot,
pub highest_confirmed_slot: Slot,
}
pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
@@ -61,7 +53,6 @@ pub struct BlockCommitmentCache {
bank: Arc<Bank>,
blockstore: Arc<Blockstore>,
root: Slot,
highest_confirmed_slot: Slot,
}
impl std::fmt::Debug for BlockCommitmentCache {
@@ -86,7 +77,6 @@ impl BlockCommitmentCache {
bank: Arc<Bank>,
blockstore: Arc<Blockstore>,
root: Slot,
highest_confirmed_slot: Slot,
) -> Self {
Self {
block_commitment,
@@ -95,7 +85,6 @@ impl BlockCommitmentCache {
bank,
blockstore,
root,
highest_confirmed_slot,
}
}
@@ -107,7 +96,6 @@ impl BlockCommitmentCache {
bank: Arc::new(Bank::default()),
blockstore,
root: Slot::default(),
highest_confirmed_slot: Slot::default(),
}
}
@@ -135,26 +123,6 @@ impl BlockCommitmentCache {
self.root
}
pub fn highest_confirmed_slot(&self) -> Slot {
self.highest_confirmed_slot
}
fn highest_slot_with_confirmation_count(&self, confirmation_count: usize) -> Slot {
assert!(confirmation_count > 0 && confirmation_count <= MAX_LOCKOUT_HISTORY);
for slot in (self.root()..self.slot()).rev() {
if let Some(count) = self.get_confirmation_count(slot) {
if count >= confirmation_count {
return slot;
}
}
}
self.root
}
fn calculate_highest_confirmed_slot(&self) -> Slot {
self.highest_slot_with_confirmation_count(1)
}
pub fn get_confirmation_count(&self, slot: Slot) -> Option<usize> {
self.get_lockout_count(slot, VOTE_THRESHOLD_SIZE)
}
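The highest_slot_with_confirmation_count shown in this hunk scans from the bank slot down toward the root and returns the first (highest) slot whose confirmation count meets the requested depth, falling back to the root if none qualifies. A small sketch of that scan over a plain map, with hypothetical inputs:

use std::collections::HashMap;

// Highest slot in (root, slot) whose confirmation count reaches `confirmation_count`,
// scanning from the tip downward; falls back to the root if none qualifies.
fn highest_slot_with_confirmation_count(
    confirmations: &HashMap<u64, usize>, // slot -> confirmation count
    root: u64,
    slot: u64,
    confirmation_count: usize,
) -> u64 {
    assert!(confirmation_count > 0);
    for s in (root..slot).rev() {
        if let Some(&count) = confirmations.get(&s) {
            if count >= confirmation_count {
                return s;
            }
        }
    }
    root
}

fn main() {
    let confirmations: HashMap<u64, usize> = [(1, 2), (2, 1), (3, 0)].into_iter().collect();
    // Slot 2 is the highest slot with at least one confirmation.
    assert_eq!(highest_slot_with_confirmation_count(&confirmations, 0, 5, 1), 2);
}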
@@ -191,30 +159,10 @@ impl BlockCommitmentCache {
largest_confirmed_root: Slot::default(),
bank: Arc::new(Bank::default()),
root: Slot::default(),
highest_confirmed_slot: Slot::default(),
}
}
#[cfg(test)]
pub fn new_for_tests_with_blockstore_bank(
blockstore: Arc<Blockstore>,
bank: Arc<Bank>,
root: Slot,
) -> Self {
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
block_commitment.insert(0, BlockCommitment::default());
Self {
block_commitment,
blockstore,
total_stake: 42,
largest_confirmed_root: root,
bank,
root,
highest_confirmed_slot: root,
}
}
pub(crate) fn set_largest_confirmed_root(&mut self, root: Slot) {
pub(crate) fn set_get_largest_confirmed_root(&mut self, root: Slot) {
self.largest_confirmed_root = root;
}
}
@@ -255,7 +203,6 @@ impl AggregateCommitmentService {
pub fn new(
exit: &Arc<AtomicBool>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
subscriptions: Arc<RpcSubscriptions>,
) -> (Sender<CommitmentAggregationData>, Self) {
let (sender, receiver): (
Sender<CommitmentAggregationData>,
@@ -273,7 +220,7 @@ impl AggregateCommitmentService {
}
if let Err(RecvTimeoutError::Disconnected) =
Self::run(&receiver, &block_commitment_cache, &subscriptions, &exit_)
Self::run(&receiver, &block_commitment_cache, &exit_)
{
break;
}
@@ -286,7 +233,6 @@ impl AggregateCommitmentService {
fn run(
receiver: &Receiver<CommitmentAggregationData>,
block_commitment_cache: &RwLock<BlockCommitmentCache>,
subscriptions: &Arc<RpcSubscriptions>,
exit: &Arc<AtomicBool>,
) -> Result<(), RecvTimeoutError> {
loop {
@@ -319,30 +265,16 @@ impl AggregateCommitmentService {
aggregation_data.bank,
block_commitment_cache.read().unwrap().blockstore.clone(),
aggregation_data.root,
aggregation_data.root,
);
new_block_commitment.highest_confirmed_slot =
new_block_commitment.calculate_highest_confirmed_slot();
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
aggregate_commitment_time.stop();
datapoint_info!(
"block-commitment-cache",
(
"aggregate-commitment-ms",
aggregate_commitment_time.as_ms() as i64,
i64
)
inc_new_counter_info!(
"aggregate-commitment-ms",
aggregate_commitment_time.as_ms() as usize
);
subscriptions.notify_subscribers(CacheSlotInfo {
current_slot: w_block_commitment_cache.slot(),
node_root: w_block_commitment_cache.root(),
largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
});
}
}
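The aggregation loop above builds a brand-new BlockCommitmentCache off to the side and swaps it into the shared RwLock with std::mem::swap, so readers never observe a half-updated cache. A stripped-down sketch of that publish-by-swap pattern (the cache type here is a placeholder):

use std::sync::RwLock;

#[derive(Default, Debug)]
struct Cache {
    root: u64,
    total_stake: u64,
}

// Build the replacement outside the lock, then swap it in under one write lock.
fn publish(shared: &RwLock<Cache>, mut fresh: Cache) {
    let mut w = shared.write().unwrap();
    std::mem::swap(&mut *w, &mut fresh);
    // `fresh` now holds the old cache and is dropped here.
}

fn main() {
    let shared = RwLock::new(Cache::default());
    publish(&shared, Cache { root: 42, total_stake: 50 });
    assert_eq!(shared.read().unwrap().root, 42);
}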
@@ -428,11 +360,9 @@ impl AggregateCommitmentService {
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::{
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_sdk::{genesis_config::GenesisConfig, pubkey::Pubkey};
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::get_tmp_ledger_path;
use solana_sdk::pubkey::Pubkey;
use solana_stake_program::stake_state;
use solana_vote_program::vote_state::{self, VoteStateVersions};
@@ -469,7 +399,7 @@ mod tests {
block_commitment.entry(1).or_insert(cache1.clone());
block_commitment.entry(2).or_insert(cache2.clone());
let block_commitment_cache =
BlockCommitmentCache::new(block_commitment, 0, 50, bank, blockstore, 0, 0);
BlockCommitmentCache::new(block_commitment, 0, 50, bank, blockstore, 0);
assert_eq!(block_commitment_cache.get_confirmation_count(0), Some(2));
assert_eq!(block_commitment_cache.get_confirmation_count(1), Some(1));
@@ -503,7 +433,6 @@ mod tests {
bank,
blockstore,
0,
0,
);
assert!(block_commitment_cache.is_confirmed_rooted(0));
@@ -527,114 +456,6 @@ mod tests {
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 1);
}
#[test]
fn test_highest_confirmed_slot() {
let bank = Arc::new(Bank::new(&GenesisConfig::default()));
let bank_slot_5 = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 5));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let total_stake = 50;
// Build cache with confirmation_count 2 given total_stake
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 5);
cache0.increase_confirmation_stake(2, 40);
// Build cache with confirmation_count 1 given total_stake
let mut cache1 = BlockCommitment::default();
cache1.increase_confirmation_stake(1, 40);
cache1.increase_confirmation_stake(2, 5);
// Build cache with confirmation_count 0 given total_stake
let mut cache2 = BlockCommitment::default();
cache2.increase_confirmation_stake(1, 20);
cache2.increase_confirmation_stake(2, 5);
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache0.clone()); // Slot 1, conf 2
block_commitment.entry(2).or_insert(cache1.clone()); // Slot 2, conf 1
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 2);
// Build map with multiple slots at conf 1
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache1.clone()); // Slot 1, conf 1
block_commitment.entry(2).or_insert(cache1.clone()); // Slot 2, conf 1
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 2);
// Build map with slot gaps
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache1.clone()); // Slot 1, conf 1
block_commitment.entry(3).or_insert(cache1.clone()); // Slot 3, conf 1
block_commitment.entry(5).or_insert(cache2.clone()); // Slot 5, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 3);
// Build map with no conf 1 slots, but one higher
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache0.clone()); // Slot 1, conf 2
block_commitment.entry(2).or_insert(cache2.clone()); // Slot 2, conf 0
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 1);
// Build map with no conf 1 or higher slots
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache2.clone()); // Slot 1, conf 0
block_commitment.entry(2).or_insert(cache2.clone()); // Slot 2, conf 0
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 0);
}
#[test]
fn test_aggregate_commitment_for_vote_account_1() {
let ancestors = vec![3, 4, 5, 7, 9, 11];


@@ -1,4 +1,3 @@
use crate::progress_map::ProgressMap;
use chrono::prelude::*;
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
@@ -321,7 +320,7 @@ impl Tower {
}
pub fn check_vote_stake_threshold(
&self,
slot: Slot,
slot: u64,
stake_lockouts: &HashMap<u64, StakeLockout>,
total_staked: u64,
) -> bool {
@@ -332,8 +331,11 @@ impl Tower {
if let Some(fork_stake) = stake_lockouts.get(&vote.slot) {
let lockout = fork_stake.stake as f64 / total_staked as f64;
trace!(
"fork_stake slot: {}, vote slot: {}, lockout: {} fork_stake: {} total_stake: {}",
slot, vote.slot, lockout, fork_stake.stake, total_staked
"fork_stake slot: {} lockout: {} fork_stake: {} total_stake: {}",
slot,
lockout,
fork_stake.stake,
total_staked
);
if vote.confirmation_count as usize > self.threshold_depth {
for old_vote in &self.lockouts.votes {
@@ -353,18 +355,6 @@ impl Tower {
}
}
pub(crate) fn check_switch_threshold(
&self,
_slot: Slot,
_ancestors: &HashMap<Slot, HashSet<u64>>,
_descendants: &HashMap<Slot, HashSet<u64>>,
_progress: &ProgressMap,
_total_epoch_stake: u64,
_epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
) -> bool {
true
}
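check_vote_stake_threshold above compares the stake already locked out on the vote's fork against the total stake: the vote only passes once fork_stake / total_staked clears the configured threshold at the configured depth. A simplified sketch of that ratio check with made-up numbers (the 2/3 figure is only an example threshold):

// A vote at the threshold depth passes only if the stake already committed to that
// fork is more than `threshold_size` of the total stake (e.g. 2/3).
fn passes_vote_threshold(fork_stake: u64, total_staked: u64, threshold_size: f64) -> bool {
    if total_staked == 0 {
        return false;
    }
    let lockout = fork_stake as f64 / total_staked as f64;
    lockout > threshold_size
}

fn main() {
    let threshold = 2.0 / 3.0;
    assert!(passes_vote_threshold(70, 100, threshold));  // 70% of stake on the fork
    assert!(!passes_vote_threshold(60, 100, threshold)); // only 60%, below threshold
}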
/// Update lockouts for all the ancestors
fn update_ancestor_lockouts(
stake_lockouts: &mut HashMap<Slot, StakeLockout>,
@@ -478,12 +468,7 @@ impl Tower {
#[cfg(test)]
pub mod test {
use super::*;
use crate::{
cluster_info_vote_listener::VoteTracker,
cluster_slots::ClusterSlots,
progress_map::ForkProgress,
replay_stage::{HeaviestForkFailures, ReplayStage},
};
use crate::replay_stage::{ForkProgress, ReplayStage};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::{
bank::Bank,
@@ -496,96 +481,107 @@ pub mod test {
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
transaction::Transaction,
};
use solana_vote_program::{
vote_instruction,
vote_state::{Vote, VoteStateVersions},
vote_transaction,
};
use std::collections::HashMap;
use std::collections::{HashMap, VecDeque};
use std::sync::RwLock;
use std::{thread::sleep, time::Duration};
use trees::{tr, Tree, TreeWalk};
use trees::{tr, Node, Tree};
pub(crate) struct VoteSimulator {
pub validator_keypairs: HashMap<Pubkey, ValidatorVoteKeypairs>,
pub node_pubkeys: Vec<Pubkey>,
pub vote_pubkeys: Vec<Pubkey>,
pub bank_forks: RwLock<BankForks>,
pub progress: ProgressMap,
pub(crate) struct VoteSimulator<'a> {
searchable_nodes: HashMap<u64, &'a Node<u64>>,
}
impl VoteSimulator {
pub(crate) fn new(num_keypairs: usize) -> Self {
let (validator_keypairs, node_pubkeys, vote_pubkeys, bank_forks, progress) =
Self::init_state(num_keypairs);
Self {
validator_keypairs,
node_pubkeys,
vote_pubkeys,
bank_forks: RwLock::new(bank_forks),
progress,
}
}
pub(crate) fn fill_bank_forks(
&mut self,
forks: Tree<u64>,
cluster_votes: &HashMap<Pubkey, Vec<u64>>,
) {
let root = forks.root().data;
assert!(self.bank_forks.read().unwrap().get(root).is_some());
let mut walk = TreeWalk::from(forks);
loop {
if let Some(visit) = walk.get() {
let slot = visit.node().data;
self.progress
.entry(slot)
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0));
if self.bank_forks.read().unwrap().get(slot).is_some() {
walk.forward();
continue;
}
let parent = walk.get_parent().unwrap().data;
let parent_bank = self.bank_forks.read().unwrap().get(parent).unwrap().clone();
let new_bank = Bank::new_from_parent(&parent_bank, &Pubkey::default(), slot);
for (pubkey, vote) in cluster_votes.iter() {
if vote.contains(&parent) {
let keypairs = self.validator_keypairs.get(pubkey).unwrap();
let last_blockhash = parent_bank.last_blockhash();
let vote_tx = vote_transaction::new_vote_transaction(
// Must vote > root to be processed
vec![parent],
parent_bank.hash(),
last_blockhash,
&keypairs.node_keypair,
&keypairs.vote_keypair,
&keypairs.vote_keypair,
);
info!("voting {} {}", parent_bank.slot(), parent_bank.hash());
new_bank.process_transaction(&vote_tx).unwrap();
}
}
new_bank.freeze();
self.bank_forks.write().unwrap().insert(new_bank);
walk.forward();
} else {
break;
}
}
impl<'a> VoteSimulator<'a> {
pub(crate) fn new(forks: &'a Tree<u64>) -> Self {
let mut searchable_nodes = HashMap::new();
let root = forks.root();
searchable_nodes.insert(root.data, root);
Self { searchable_nodes }
}
pub(crate) fn simulate_vote(
&mut self,
vote_slot: Slot,
my_pubkey: &Pubkey,
bank_forks: &RwLock<BankForks>,
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
validator_keypairs: &HashMap<Pubkey, ValidatorVoteKeypairs>,
my_keypairs: &ValidatorVoteKeypairs,
progress: &mut HashMap<u64, ForkProgress>,
tower: &mut Tower,
) -> Vec<HeaviestForkFailures> {
// Try to simulate the vote
let my_keypairs = self.validator_keypairs.get(&my_pubkey).unwrap();
) -> Vec<VoteFailures> {
let node = self
.find_node_and_update_simulation(vote_slot)
.expect("Vote to simulate must be for a slot in the tree");
let mut missing_nodes = VecDeque::new();
let mut current = node;
loop {
let current_slot = current.data;
if bank_forks.read().unwrap().get(current_slot).is_some()
|| tower.root().map(|r| current_slot < r).unwrap_or(false)
{
break;
} else {
missing_nodes.push_front(current);
}
if let Some(parent) = current.parent() {
current = parent;
} else {
break;
}
}
// Create any missing banks along the path
for missing_node in missing_nodes {
let missing_slot = missing_node.data;
let parent = missing_node.parent().unwrap().data;
let parent_bank = bank_forks
.read()
.unwrap()
.get(parent)
.expect("parent bank must exist")
.clone();
info!("parent of {} is {}", missing_slot, parent_bank.slot(),);
progress
.entry(missing_slot)
.or_insert_with(|| ForkProgress::new(parent_bank.last_blockhash()));
// Create the missing bank
let new_bank =
Bank::new_from_parent(&parent_bank, &Pubkey::default(), missing_slot);
// Simulate ingesting the cluster's votes for the parent into this bank
for (pubkey, vote) in cluster_votes.iter() {
if vote.contains(&parent_bank.slot()) {
let keypairs = validator_keypairs.get(pubkey).unwrap();
let node_pubkey = keypairs.node_keypair.pubkey();
let vote_pubkey = keypairs.vote_keypair.pubkey();
let last_blockhash = parent_bank.last_blockhash();
let votes = Vote::new(vec![parent_bank.slot()], parent_bank.hash());
info!("voting {} {}", parent_bank.slot(), parent_bank.hash());
let vote_ix = vote_instruction::vote(&vote_pubkey, &vote_pubkey, votes);
let mut vote_tx =
Transaction::new_with_payer(vec![vote_ix], Some(&node_pubkey));
vote_tx.partial_sign(&[&keypairs.node_keypair], last_blockhash);
vote_tx.partial_sign(&[&keypairs.vote_keypair], last_blockhash);
new_bank.process_transaction(&vote_tx).unwrap();
}
}
new_bank.freeze();
bank_forks.write().unwrap().insert(new_bank);
}
// Now try to simulate the vote
let my_pubkey = my_keypairs.node_keypair.pubkey();
let my_vote_pubkey = my_keypairs.vote_keypair.pubkey();
let ancestors = self.bank_forks.read().unwrap().ancestors();
let mut frozen_banks: Vec<_> = self
.bank_forks
let ancestors = bank_forks.read().unwrap().ancestors();
let mut frozen_banks: Vec<_> = bank_forks
.read()
.unwrap()
.frozen_banks()
@@ -598,133 +594,99 @@ pub mod test {
&ancestors,
&mut frozen_banks,
tower,
&mut self.progress,
&VoteTracker::default(),
&ClusterSlots::default(),
&self.bank_forks,
&mut HashSet::new(),
progress,
);
let vote_bank = self
.bank_forks
let bank = bank_forks
.read()
.unwrap()
.get(vote_slot)
.expect("Bank must have been created before vote simulation")
.clone();
// Try to vote on the given slot
let descendants = self.bank_forks.read().unwrap().descendants();
let (_, _, failure_reasons) = ReplayStage::select_vote_and_reset_forks(
&Some(vote_bank.clone()),
&None,
&ancestors,
&descendants,
&self.progress,
&tower,
);
// Make sure this slot isn't locked out or failing threshold
info!("Checking vote: {}", vote_bank.slot());
if !failure_reasons.is_empty() {
return failure_reasons;
let fork_progress = progress
.get(&vote_slot)
.expect("Slot for vote must exist in progress map");
info!("Checking vote: {}", vote_slot);
info!("lockouts: {:?}", fork_progress.fork_stats.stake_lockouts);
let mut failures = vec![];
if fork_progress.fork_stats.is_locked_out {
failures.push(VoteFailures::LockedOut(vote_slot));
}
let vote = tower.new_vote_from_bank(&vote_bank, &my_vote_pubkey).0;
if !fork_progress.fork_stats.vote_threshold {
failures.push(VoteFailures::FailedThreshold(vote_slot));
}
if !failures.is_empty() {
return failures;
}
let vote = tower.new_vote_from_bank(&bank, &my_vote_pubkey).0;
if let Some(new_root) = tower.record_bank_vote(vote) {
ReplayStage::handle_new_root(
new_root,
&self.bank_forks,
&mut self.progress,
&None,
&mut HashSet::new(),
None,
);
ReplayStage::handle_new_root(new_root, bank_forks, progress, &None, None);
}
// Mark the vote for this bank under this node's pubkey so it will be
// integrated into any future child banks
cluster_votes.entry(my_pubkey).or_default().push(vote_slot);
vec![]
}
fn can_progress_on_fork(
&mut self,
my_pubkey: &Pubkey,
tower: &mut Tower,
start_slot: u64,
num_slots: u64,
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
) -> bool {
// Check that within some reasonable time, validator can make a new
// root on this fork
let old_root = tower.root();
for i in 1..num_slots {
// The parent of the tip of the fork
let mut fork_tip_parent = tr(start_slot + i - 1);
// The tip of the fork
fork_tip_parent.push_front(tr(start_slot + i));
self.fill_bank_forks(fork_tip_parent, cluster_votes);
if self
.simulate_vote(i + start_slot, &my_pubkey, tower)
.is_empty()
{
cluster_votes
.entry(*my_pubkey)
.or_default()
.push(start_slot + i);
}
if old_root != tower.root() {
return true;
// Find a node representing the given slot
fn find_node_and_update_simulation(&mut self, slot: u64) -> Option<&'a Node<u64>> {
let mut successful_search_node: Option<&'a Node<u64>> = None;
let mut found_node = None;
for search_node in self.searchable_nodes.values() {
if let Some((target, new_searchable_nodes)) = Self::find_node(search_node, slot) {
successful_search_node = Some(search_node);
found_node = Some(target);
for node in new_searchable_nodes {
self.searchable_nodes.insert(node.data, node);
}
break;
}
}
false
successful_search_node.map(|node| {
self.searchable_nodes.remove(&node.data);
});
found_node
}
fn init_state(
num_keypairs: usize,
) -> (
HashMap<Pubkey, ValidatorVoteKeypairs>,
Vec<Pubkey>,
Vec<Pubkey>,
BankForks,
ProgressMap,
) {
let keypairs: HashMap<_, _> = std::iter::repeat_with(|| {
let node_keypair = Keypair::new();
let vote_keypair = Keypair::new();
let stake_keypair = Keypair::new();
let node_pubkey = node_keypair.pubkey();
(
node_pubkey,
ValidatorVoteKeypairs::new(node_keypair, vote_keypair, stake_keypair),
)
})
.take(num_keypairs)
.collect();
let node_pubkeys: Vec<_> = keypairs
.values()
.map(|keys| keys.node_keypair.pubkey())
.collect();
let vote_pubkeys: Vec<_> = keypairs
.values()
.map(|keys| keys.vote_keypair.pubkey())
.collect();
fn find_node(
node: &'a Node<u64>,
slot: u64,
) -> Option<(&'a Node<u64>, Vec<&'a Node<u64>>)> {
if node.data == slot {
Some((node, node.iter().collect()))
} else {
let mut search_result: Option<(&'a Node<u64>, Vec<&'a Node<u64>>)> = None;
for child in node.iter() {
if let Some((_, ref mut new_searchable_nodes)) = search_result {
new_searchable_nodes.push(child);
continue;
}
search_result = Self::find_node(child, slot);
}
let (bank_forks, progress) = initialize_state(&keypairs, 10_000);
(keypairs, node_pubkeys, vote_pubkeys, bank_forks, progress)
search_result
}
}
}
#[derive(PartialEq, Debug)]
pub(crate) enum VoteFailures {
LockedOut(u64),
FailedThreshold(u64),
}
// Setup BankForks with bank 0 and all the validator accounts
pub(crate) fn initialize_state(
validator_keypairs_map: &HashMap<Pubkey, ValidatorVoteKeypairs>,
stake: u64,
) -> (BankForks, ProgressMap) {
) -> (BankForks, HashMap<u64, ForkProgress>) {
let validator_keypairs: Vec<_> = validator_keypairs_map.values().collect();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
voting_keypair: _,
} = create_genesis_config_with_vote_accounts(1_000_000_000, &validator_keypairs, stake);
} = create_genesis_config_with_vote_accounts(1_000_000_000, &validator_keypairs);
let bank0 = Bank::new(&genesis_config);
@@ -733,11 +695,8 @@ pub mod test {
}
bank0.freeze();
let mut progress = ProgressMap::default();
progress.insert(
0,
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
);
let mut progress = HashMap::new();
progress.insert(0, ForkProgress::new(bank0.last_blockhash()));
(BankForks::new(0, bank0), progress)
}
@@ -761,26 +720,84 @@ pub mod test {
stakes
}
fn can_progress_on_fork(
my_pubkey: &Pubkey,
tower: &mut Tower,
start_slot: u64,
num_slots: u64,
bank_forks: &RwLock<BankForks>,
cluster_votes: &mut HashMap<Pubkey, Vec<u64>>,
keypairs: &HashMap<Pubkey, ValidatorVoteKeypairs>,
progress: &mut HashMap<u64, ForkProgress>,
) -> bool {
// Check that within some reasonable time, validator can make a new
// root on this fork
let old_root = tower.root();
let mut main_fork = tr(start_slot);
let mut tip = main_fork.root_mut();
for i in 1..num_slots {
tip.push_front(tr(start_slot + i));
tip = tip.first_mut().unwrap();
}
let mut voting_simulator = VoteSimulator::new(&main_fork);
for i in 1..num_slots {
voting_simulator.simulate_vote(
i + start_slot,
&bank_forks,
cluster_votes,
&keypairs,
keypairs.get(&my_pubkey).unwrap(),
progress,
tower,
);
if old_root != tower.root() {
return true;
}
}
false
}
#[test]
fn test_simple_votes() {
// Init state
let mut vote_simulator = VoteSimulator::new(1);
let node_pubkey = vote_simulator.node_pubkeys[0];
let mut tower = Tower::new_with_key(&node_pubkey);
let node_keypair = Keypair::new();
let vote_keypair = Keypair::new();
let stake_keypair = Keypair::new();
let node_pubkey = node_keypair.pubkey();
let mut keypairs = HashMap::new();
keypairs.insert(
node_pubkey,
ValidatorVoteKeypairs::new(node_keypair, vote_keypair, stake_keypair),
);
// Initialize BankForks
let (bank_forks, mut progress) = initialize_state(&keypairs);
let bank_forks = RwLock::new(bank_forks);
// Create the tree of banks
let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
// Set the voting behavior
let mut cluster_votes = HashMap::new();
let mut voting_simulator = VoteSimulator::new(&forks);
let votes = vec![0, 1, 2, 3, 4, 5];
cluster_votes.insert(node_pubkey, votes.clone());
vote_simulator.fill_bank_forks(forks, &cluster_votes);
// Simulate the votes
let mut tower = Tower::new_with_key(&node_pubkey);
let mut cluster_votes = HashMap::new();
for vote in votes {
assert!(vote_simulator
.simulate_vote(vote, &node_pubkey, &mut tower,)
assert!(voting_simulator
.simulate_vote(
vote,
&bank_forks,
&mut cluster_votes,
&keypairs,
keypairs.get(&node_pubkey).unwrap(),
&mut progress,
&mut tower,
)
.is_empty());
}
@@ -792,14 +809,21 @@ pub mod test {
#[test]
fn test_double_partition() {
// Init state
let mut vote_simulator = VoteSimulator::new(2);
let node_pubkey = vote_simulator.node_pubkeys[0];
let vote_pubkey = vote_simulator.vote_pubkeys[0];
let mut tower = Tower::new_with_key(&node_pubkey);
solana_logger::setup();
let node_keypair = Keypair::new();
let vote_keypair = Keypair::new();
let stake_keypair = Keypair::new();
let node_pubkey = node_keypair.pubkey();
let vote_pubkey = vote_keypair.pubkey();
let num_slots_to_try = 200;
// Create the tree of banks
let mut keypairs = HashMap::new();
info!("my_pubkey: {}", node_pubkey);
keypairs.insert(
node_pubkey,
ValidatorVoteKeypairs::new(node_keypair, vote_keypair, stake_keypair),
);
// Create the tree of banks in a BankForks object
let forks = tr(0)
/ (tr(1)
/ (tr(2)
@@ -816,37 +840,56 @@ pub mod test {
/ (tr(44)
// Minor fork 2
/ (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
/ (tr(110) / (tr(110 + 2 * num_slots_to_try))))))))))))));
/ (tr(110)))))))))))));
// Set the successful voting behavior
let mut cluster_votes = HashMap::new();
let mut my_votes: Vec<Slot> = vec![];
let next_unlocked_slot = 110;
// Set the voting behavior
let mut voting_simulator = VoteSimulator::new(&forks);
let mut votes: Vec<Slot> = vec![];
// Vote on the first minor fork
my_votes.extend((0..=14).into_iter());
votes.extend((0..=14).into_iter());
// Come back to the main fork
my_votes.extend((43..=44).into_iter());
votes.extend((43..=44).into_iter());
// Vote on the second minor fork
my_votes.extend((45..=50).into_iter());
// Vote to come back to main fork
my_votes.push(next_unlocked_slot);
cluster_votes.insert(node_pubkey, my_votes.clone());
// Make the other validator vote fork to pass the threshold checks
let other_votes = my_votes.clone();
cluster_votes.insert(vote_simulator.node_pubkeys[1], other_votes);
vote_simulator.fill_bank_forks(forks, &cluster_votes);
votes.extend((45..=50).into_iter());
// Simulate the votes.
for vote in &my_votes {
let mut cluster_votes: HashMap<Pubkey, Vec<Slot>> = HashMap::new();
let (bank_forks, mut progress) = initialize_state(&keypairs);
let bank_forks = RwLock::new(bank_forks);
// Simulate the votes. Should fail on trying to come back to the main fork
// at 106 exclusively due to threshold failure
let mut tower = Tower::new_with_key(&node_pubkey);
for vote in &votes {
// All these votes should be ok
assert!(vote_simulator
.simulate_vote(*vote, &node_pubkey, &mut tower,)
assert!(voting_simulator
.simulate_vote(
*vote,
&bank_forks,
&mut cluster_votes,
&keypairs,
keypairs.get(&node_pubkey).unwrap(),
&mut progress,
&mut tower,
)
.is_empty());
}
// Try to come back to main fork
let next_unlocked_slot = 110;
assert!(voting_simulator
.simulate_vote(
next_unlocked_slot,
&bank_forks,
&mut cluster_votes,
&keypairs,
keypairs.get(&node_pubkey).unwrap(),
&mut progress,
&mut tower,
)
.is_empty());
info!("local tower: {:#?}", tower.lockouts.votes);
let vote_accounts = vote_simulator
.bank_forks
let vote_accounts = bank_forks
.read()
.unwrap()
.get(next_unlocked_slot)
@@ -856,17 +899,15 @@ pub mod test {
let state = VoteState::from(&observed.1).unwrap();
info!("observed tower: {:#?}", state.votes);
let num_slots_to_try = 200;
cluster_votes
.get_mut(&vote_simulator.node_pubkeys[1])
.unwrap()
.extend(next_unlocked_slot + 1..next_unlocked_slot + num_slots_to_try);
assert!(vote_simulator.can_progress_on_fork(
assert!(can_progress_on_fork(
&node_pubkey,
&mut tower,
next_unlocked_slot,
num_slots_to_try,
200,
&bank_forks,
&mut cluster_votes,
&keypairs,
&mut progress
));
}


@@ -292,7 +292,7 @@ impl CrdsGossipPull {
failed
}
// build a set of filters of the current crds table
// num_filters - used to increase the likelyhood of a value in crds being added to some filter
// num_filters - used to increase the likelihood of a value in crds being added to some filter
pub fn build_crds_filters(&self, crds: &Crds, bloom_size: usize) -> Vec<CrdsFilter> {
let num = cmp::max(
CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS,


@@ -1,6 +1,4 @@
use crate::contact_info::ContactInfo;
use crate::deprecated;
use crate::epoch_slots::EpochSlots;
use bincode::{serialize, serialized_size};
use solana_sdk::sanitize::{Sanitize, SanitizeError};
use solana_sdk::timing::timestamp;
@@ -23,8 +21,8 @@ pub const MAX_SLOT: u64 = 1_000_000_000_000_000;
pub type VoteIndex = u8;
pub const MAX_VOTES: VoteIndex = 32;
pub type EpochSlotsIndex = u8;
pub const MAX_EPOCH_SLOTS: EpochSlotsIndex = 255;
pub type EpochSlotIndex = u8;
pub const MAX_EPOCH_SLOTS: EpochSlotIndex = 1;
/// CrdsValue that is replicated across the cluster
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@@ -65,19 +63,49 @@ impl Signable for CrdsValue {
/// CrdsData that defines the different types of items CrdsValues can hold
/// * Merge Strategy - Latest wallclock is picked
/// * LowestSlot index is deprecated
#[allow(clippy::large_enum_variant)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum CrdsData {
ContactInfo(ContactInfo),
Vote(VoteIndex, Vote),
LowestSlot(u8, LowestSlot),
EpochSlots(EpochSlotIndex, EpochSlots),
SnapshotHashes(SnapshotHash),
AccountsHashes(SnapshotHash),
EpochSlots(EpochSlotsIndex, EpochSlots),
NewEpochSlotsPlaceholder, // Reserve this enum entry for the v1.1 version of EpochSlots
Version(Version),
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum CompressionType {
Uncompressed,
GZip,
BZip2,
}
impl Default for CompressionType {
fn default() -> Self {
Self::Uncompressed
}
}
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct EpochIncompleteSlots {
pub first: Slot,
pub compression: CompressionType,
pub compressed_list: Vec<u8>,
}
impl Sanitize for EpochIncompleteSlots {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.first >= MAX_SLOT {
return Err(SanitizeError::InvalidValue);
}
//rest of the data doesn't matter since we no longer decompress
//these values
Ok(())
}
}
impl Sanitize for CrdsData {
fn sanitize(&self) -> Result<(), SanitizeError> {
match self {
@@ -88,12 +116,6 @@ impl Sanitize for CrdsData {
}
val.sanitize()
}
CrdsData::LowestSlot(ix, val) => {
if *ix as usize >= 1 {
return Err(SanitizeError::ValueOutOfBounds);
}
val.sanitize()
}
CrdsData::SnapshotHashes(val) => val.sanitize(),
CrdsData::AccountsHashes(val) => val.sanitize(),
CrdsData::EpochSlots(ix, val) => {
@@ -102,6 +124,7 @@ impl Sanitize for CrdsData {
}
val.sanitize()
}
CrdsData::NewEpochSlotsPlaceholder => Err(SanitizeError::InvalidValue), // Not supported on v1.0
CrdsData::Version(version) => version.sanitize(),
}
}
@@ -137,17 +160,18 @@ impl SnapshotHash {
}
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct LowestSlot {
pub struct EpochSlots {
pub from: Pubkey,
root: Slot, //deprecated
root: Slot,
pub lowest: Slot,
slots: BTreeSet<Slot>, //deprecated
stash: Vec<deprecated::EpochIncompleteSlots>, //deprecated
slots: BTreeSet<Slot>,
stash: Vec<EpochIncompleteSlots>,
pub wallclock: u64,
}
impl LowestSlot {
impl EpochSlots {
pub fn new(from: Pubkey, lowest: Slot, wallclock: u64) -> Self {
Self {
from,
@@ -160,7 +184,7 @@ impl LowestSlot {
}
}
impl Sanitize for LowestSlot {
impl Sanitize for EpochSlots {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
@@ -168,15 +192,15 @@ impl Sanitize for LowestSlot {
if self.lowest >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
if self.root != 0 {
return Err(SanitizeError::InvalidValue);
if self.root >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
if !self.slots.is_empty() {
return Err(SanitizeError::InvalidValue);
}
if !self.stash.is_empty() {
return Err(SanitizeError::InvalidValue);
for slot in &self.slots {
if *slot >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
}
self.stash.sanitize()?;
self.from.sanitize()
}
}
@@ -241,10 +265,10 @@ impl Version {
pub enum CrdsValueLabel {
ContactInfo(Pubkey),
Vote(VoteIndex, Pubkey),
LowestSlot(Pubkey),
EpochSlots(Pubkey),
SnapshotHashes(Pubkey),
EpochSlots(EpochSlotsIndex, Pubkey),
AccountsHashes(Pubkey),
NewEpochSlotsPlaceholder,
Version(Pubkey),
}
@@ -253,10 +277,10 @@ impl fmt::Display for CrdsValueLabel {
match self {
CrdsValueLabel::ContactInfo(_) => write!(f, "ContactInfo({})", self.pubkey()),
CrdsValueLabel::Vote(ix, _) => write!(f, "Vote({}, {})", ix, self.pubkey()),
CrdsValueLabel::LowestSlot(_) => write!(f, "LowestSlot({})", self.pubkey()),
CrdsValueLabel::SnapshotHashes(_) => write!(f, "SnapshotHash({})", self.pubkey()),
CrdsValueLabel::EpochSlots(ix, _) => write!(f, "EpochSlots({}, {})", ix, self.pubkey()),
CrdsValueLabel::EpochSlots(_) => write!(f, "EpochSlots({})", self.pubkey()),
CrdsValueLabel::SnapshotHashes(_) => write!(f, "SnapshotHashes({})", self.pubkey()),
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
CrdsValueLabel::NewEpochSlotsPlaceholder => write!(f, "NewEpochSlotsPlaceholder"),
CrdsValueLabel::Version(_) => write!(f, "Version({})", self.pubkey()),
}
}
@@ -267,10 +291,10 @@ impl CrdsValueLabel {
match self {
CrdsValueLabel::ContactInfo(p) => *p,
CrdsValueLabel::Vote(_, p) => *p,
CrdsValueLabel::LowestSlot(p) => *p,
CrdsValueLabel::EpochSlots(p) => *p,
CrdsValueLabel::SnapshotHashes(p) => *p,
CrdsValueLabel::EpochSlots(_, p) => *p,
CrdsValueLabel::AccountsHashes(p) => *p,
CrdsValueLabel::NewEpochSlotsPlaceholder => Pubkey::default(),
CrdsValueLabel::Version(p) => *p,
}
}
@@ -296,10 +320,10 @@ impl CrdsValue {
match &self.data {
CrdsData::ContactInfo(contact_info) => contact_info.wallclock,
CrdsData::Vote(_, vote) => vote.wallclock,
CrdsData::LowestSlot(_, obj) => obj.wallclock,
CrdsData::EpochSlots(_, vote) => vote.wallclock,
CrdsData::SnapshotHashes(hash) => hash.wallclock,
CrdsData::AccountsHashes(hash) => hash.wallclock,
CrdsData::EpochSlots(_, p) => p.wallclock,
CrdsData::NewEpochSlotsPlaceholder => 0,
CrdsData::Version(version) => version.wallclock,
}
}
@@ -307,10 +331,10 @@ impl CrdsValue {
match &self.data {
CrdsData::ContactInfo(contact_info) => contact_info.id,
CrdsData::Vote(_, vote) => vote.from,
CrdsData::LowestSlot(_, slots) => slots.from,
CrdsData::EpochSlots(_, slots) => slots.from,
CrdsData::SnapshotHashes(hash) => hash.from,
CrdsData::AccountsHashes(hash) => hash.from,
CrdsData::EpochSlots(_, p) => p.from,
CrdsData::NewEpochSlotsPlaceholder => Pubkey::default(),
CrdsData::Version(version) => version.from,
}
}
@@ -318,10 +342,10 @@ impl CrdsValue {
match &self.data {
CrdsData::ContactInfo(_) => CrdsValueLabel::ContactInfo(self.pubkey()),
CrdsData::Vote(ix, _) => CrdsValueLabel::Vote(*ix, self.pubkey()),
CrdsData::LowestSlot(_, _) => CrdsValueLabel::LowestSlot(self.pubkey()),
CrdsData::EpochSlots(_, _) => CrdsValueLabel::EpochSlots(self.pubkey()),
CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()),
CrdsData::AccountsHashes(_) => CrdsValueLabel::AccountsHashes(self.pubkey()),
CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
CrdsData::NewEpochSlotsPlaceholder => CrdsValueLabel::NewEpochSlotsPlaceholder,
CrdsData::Version(_) => CrdsValueLabel::Version(self.pubkey()),
}
}
@@ -345,9 +369,9 @@ impl CrdsValue {
}
}
pub fn lowest_slot(&self) -> Option<&LowestSlot> {
pub fn epoch_slots(&self) -> Option<&EpochSlots> {
match &self.data {
CrdsData::LowestSlot(_, slots) => Some(slots),
CrdsData::EpochSlots(_, slots) => Some(slots),
_ => None,
}
}
@@ -366,13 +390,6 @@ impl CrdsValue {
}
}
pub fn epoch_slots(&self) -> Option<&EpochSlots> {
match &self.data {
CrdsData::EpochSlots(_, slots) => Some(slots),
_ => None,
}
}
pub fn version(&self) -> Option<&Version> {
match &self.data {
CrdsData::Version(version) => Some(version),
@@ -384,13 +401,12 @@ impl CrdsValue {
pub fn record_labels(key: &Pubkey) -> Vec<CrdsValueLabel> {
let mut labels = vec![
CrdsValueLabel::ContactInfo(*key),
CrdsValueLabel::LowestSlot(*key),
CrdsValueLabel::EpochSlots(*key),
CrdsValueLabel::SnapshotHashes(*key),
CrdsValueLabel::AccountsHashes(*key),
CrdsValueLabel::Version(*key),
];
labels.extend((0..MAX_VOTES).map(|ix| CrdsValueLabel::Vote(ix, *key)));
labels.extend((0..MAX_EPOCH_SLOTS).map(|ix| CrdsValueLabel::EpochSlots(ix, *key)));
labels
}
@@ -438,19 +454,17 @@ mod test {
#[test]
fn test_labels() {
let mut hits = [false; 5 + MAX_VOTES as usize + MAX_EPOCH_SLOTS as usize];
let mut hits = [false; 5 + MAX_VOTES as usize];
// this method should cover all the possible labels
for v in &CrdsValue::record_labels(&Pubkey::default()) {
match v {
CrdsValueLabel::ContactInfo(_) => hits[0] = true,
CrdsValueLabel::LowestSlot(_) => hits[1] = true,
CrdsValueLabel::EpochSlots(_) => hits[1] = true,
CrdsValueLabel::SnapshotHashes(_) => hits[2] = true,
CrdsValueLabel::AccountsHashes(_) => hits[3] = true,
CrdsValueLabel::Version(_) => hits[4] = true,
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 5] = true,
CrdsValueLabel::EpochSlots(ix, _) => {
hits[*ix as usize + MAX_VOTES as usize + 5] = true
}
CrdsValueLabel::NewEpochSlotsPlaceholder => unreachable!(),
}
}
assert!(hits.iter().all(|x| *x));
@@ -470,39 +484,13 @@ mod test {
let key = v.clone().vote().unwrap().from;
assert_eq!(v.label(), CrdsValueLabel::Vote(0, key));
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(
let v = CrdsValue::new_unsigned(CrdsData::EpochSlots(
0,
LowestSlot::new(Pubkey::default(), 0, 0),
EpochSlots::new(Pubkey::default(), 0, 0),
));
assert_eq!(v.wallclock(), 0);
let key = v.clone().lowest_slot().unwrap().from;
assert_eq!(v.label(), CrdsValueLabel::LowestSlot(key));
}
#[test]
fn test_lowest_slot_sanitize() {
let ls = LowestSlot::new(Pubkey::default(), 0, 0);
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(0, ls.clone()));
assert_eq!(v.sanitize(), Ok(()));
let mut o = ls.clone();
o.root = 1;
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(0, o.clone()));
assert_eq!(v.sanitize(), Err(SanitizeError::InvalidValue));
let o = ls.clone();
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(1, o.clone()));
assert_eq!(v.sanitize(), Err(SanitizeError::ValueOutOfBounds));
let mut o = ls.clone();
o.slots.insert(1);
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(0, o.clone()));
assert_eq!(v.sanitize(), Err(SanitizeError::InvalidValue));
let mut o = ls.clone();
o.stash.push(deprecated::EpochIncompleteSlots::default());
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(0, o.clone()));
assert_eq!(v.sanitize(), Err(SanitizeError::InvalidValue));
let key = v.clone().epoch_slots().unwrap().from;
assert_eq!(v.label(), CrdsValueLabel::EpochSlots(key));
}
#[test]
@@ -519,9 +507,9 @@ mod test {
Vote::new(&keypair.pubkey(), test_tx(), timestamp()),
));
verify_signatures(&mut v, &keypair, &wrong_keypair);
v = CrdsValue::new_unsigned(CrdsData::LowestSlot(
v = CrdsValue::new_unsigned(CrdsData::EpochSlots(
0,
LowestSlot::new(keypair.pubkey(), 0, timestamp()),
EpochSlots::new(keypair.pubkey(), 0, timestamp()),
));
verify_signatures(&mut v, &keypair, &wrong_keypair);
}
@@ -543,9 +531,9 @@ mod test {
fn test_max_epoch_slots_index() {
let keypair = Keypair::new();
let item = CrdsValue::new_signed(
CrdsData::EpochSlots(
MAX_EPOCH_SLOTS,
EpochSlots::new(keypair.pubkey(), timestamp()),
CrdsData::Vote(
MAX_VOTES,
Vote::new(&keypair.pubkey(), test_tx(), timestamp()),
),
&keypair,
);
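Most of the CRDS types in this file follow the same Sanitize pattern: reject wallclocks and slots that exceed fixed upper bounds before the value is accepted into gossip. A minimal illustration of that pattern with a toy value type; the MAX_WALLCLOCK constant below is a placeholder, only MAX_SLOT's value is taken from the diff above:

// Placeholder bounds standing in for the real crate's MAX_WALLCLOCK / MAX_SLOT.
const MAX_WALLCLOCK: u64 = 1_000_000_000_000_000;
const MAX_SLOT: u64 = 1_000_000_000_000_000;

#[derive(Debug, PartialEq)]
enum SanitizeError {
    ValueOutOfBounds,
}

trait Sanitize {
    fn sanitize(&self) -> Result<(), SanitizeError>;
}

struct ToyValue {
    wallclock: u64,
    slot: u64,
}

impl Sanitize for ToyValue {
    // Same shape as the LowestSlot/EpochSlots checks: every bounded field is
    // validated before the value is stored or gossiped.
    fn sanitize(&self) -> Result<(), SanitizeError> {
        if self.wallclock >= MAX_WALLCLOCK {
            return Err(SanitizeError::ValueOutOfBounds);
        }
        if self.slot >= MAX_SLOT {
            return Err(SanitizeError::ValueOutOfBounds);
        }
        Ok(())
    }
}

fn main() {
    assert_eq!(ToyValue { wallclock: 1, slot: 1 }.sanitize(), Ok(()));
    assert_eq!(
        ToyValue { wallclock: MAX_WALLCLOCK, slot: 0 }.sanitize(),
        Err(SanitizeError::ValueOutOfBounds)
    );
}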


@@ -1,21 +0,0 @@
use solana_sdk::clock::Slot;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
enum CompressionType {
Uncompressed,
GZip,
BZip2,
}
impl Default for CompressionType {
fn default() -> Self {
Self::Uncompressed
}
}
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub(crate) struct EpochIncompleteSlots {
first: Slot,
compression: CompressionType,
compressed_list: Vec<u8>,
}


@@ -1,517 +0,0 @@
use crate::cluster_info::MAX_CRDS_OBJECT_SIZE;
use crate::crds_value::MAX_SLOT;
use crate::crds_value::MAX_WALLCLOCK;
use bincode::serialized_size;
use bv::BitVec;
use flate2::{Compress, Compression, Decompress, FlushCompress, FlushDecompress};
use solana_sdk::clock::Slot;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::sanitize::{Sanitize, SanitizeError};
const MAX_SLOTS_PER_ENTRY: usize = 2048 * 8;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Uncompressed {
pub first_slot: Slot,
pub num: usize,
pub slots: BitVec<u8>,
}
impl Sanitize for Uncompressed {
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
if self.first_slot >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
if self.num >= MAX_SLOTS_PER_ENTRY {
return Err(SanitizeError::ValueOutOfBounds);
}
Ok(())
}
}
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
pub struct Flate2 {
pub first_slot: Slot,
pub num: usize,
pub compressed: Vec<u8>,
}
impl Sanitize for Flate2 {
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
if self.first_slot >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
if self.num >= MAX_SLOTS_PER_ENTRY {
return Err(SanitizeError::ValueOutOfBounds);
}
Ok(())
}
}
#[derive(Debug, PartialEq)]
pub enum Error {
CompressError,
DecompressError,
}
pub type Result<T> = std::result::Result<T, Error>;
impl std::convert::From<flate2::CompressError> for Error {
fn from(_e: flate2::CompressError) -> Error {
Error::CompressError
}
}
impl std::convert::From<flate2::DecompressError> for Error {
fn from(_e: flate2::DecompressError) -> Error {
Error::DecompressError
}
}
impl Flate2 {
fn deflate(mut unc: Uncompressed) -> Result<Self> {
let mut compressed = Vec::with_capacity(unc.slots.block_capacity());
let mut compressor = Compress::new(Compression::best(), false);
let first_slot = unc.first_slot;
let num = unc.num;
unc.slots.shrink_to_fit();
let bits = unc.slots.into_boxed_slice();
compressor.compress_vec(&bits, &mut compressed, FlushCompress::Finish)?;
let rv = Self {
first_slot,
num,
compressed,
};
let _ = rv.inflate()?;
Ok(rv)
}
pub fn inflate(&self) -> Result<Uncompressed> {
//add some head room for the decompressor which might spill more bits
let mut uncompressed = Vec::with_capacity(32 + (self.num + 4) / 8);
let mut decompress = Decompress::new(false);
decompress.decompress_vec(&self.compressed, &mut uncompressed, FlushDecompress::Finish)?;
Ok(Uncompressed {
first_slot: self.first_slot,
num: self.num,
slots: BitVec::from_bits(&uncompressed),
})
}
}
impl Uncompressed {
pub fn new(max_size: usize) -> Self {
Self {
num: 0,
first_slot: 0,
slots: BitVec::new_fill(false, 8 * max_size as u64),
}
}
pub fn to_slots(&self, min_slot: Slot) -> Vec<Slot> {
let mut rv = vec![];
let start = if min_slot < self.first_slot {
0 as usize
} else {
(min_slot - self.first_slot) as usize
};
for i in start..self.num {
if i >= self.slots.len() as usize {
break;
}
if self.slots.get(i as u64) {
rv.push(self.first_slot + i as Slot);
}
}
rv
}
pub fn add(&mut self, slots: &[Slot]) -> usize {
for (i, s) in slots.iter().enumerate() {
if self.num == 0 {
self.first_slot = *s;
}
if self.num >= MAX_SLOTS_PER_ENTRY {
return i;
}
if *s < self.first_slot {
return i;
}
if *s - self.first_slot >= self.slots.capacity() {
return i;
}
self.slots.set(*s - self.first_slot, true);
self.num = std::cmp::max(self.num, 1 + (*s - self.first_slot) as usize);
}
slots.len()
}
}
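The Uncompressed representation records slots as bits offset from first_slot: add() anchors the bitmap at the first slot it sees, sets one bit per slot, and reports how many of the input slots it consumed before hitting one that falls outside the bitmap. A minimal standalone sketch of the same offset-bitmap idea, using a plain Vec<bool> rather than the crate's BitVec and omitting the MAX_SLOTS_PER_ENTRY cap:

```rust
// Illustrative sketch only, not the crate's API.
struct SlotBitmap {
    first_slot: u64,
    num: usize,      // 1 + highest offset that has been set
    bits: Vec<bool>, // capacity fixed at construction time
}

impl SlotBitmap {
    fn new(capacity: usize) -> Self {
        Self { first_slot: 0, num: 0, bits: vec![false; capacity] }
    }

    fn add(&mut self, slots: &[u64]) -> usize {
        for (i, &s) in slots.iter().enumerate() {
            if self.num == 0 {
                self.first_slot = s; // the first slot anchors the bitmap
            }
            if s < self.first_slot || (s - self.first_slot) as usize >= self.bits.len() {
                return i; // out of range: report how many slots were consumed
            }
            self.bits[(s - self.first_slot) as usize] = true;
            self.num = self.num.max(1 + (s - self.first_slot) as usize);
        }
        slots.len()
    }

    fn to_slots(&self, min_slot: u64) -> Vec<u64> {
        let start = min_slot.saturating_sub(self.first_slot) as usize;
        (start..self.num)
            .filter(|&i| self.bits[i])
            .map(|i| self.first_slot + i as u64)
            .collect()
    }
}

fn main() {
    let mut bitmap = SlotBitmap::new(16);
    assert_eq!(bitmap.add(&[1, 10, 2]), 3);
    assert_eq!(bitmap.to_slots(2), vec![2, 10]);
    // A slot that does not fit stops the add early:
    assert_eq!(bitmap.add(&[100]), 0);
}
```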
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum CompressedSlots {
Flate2(Flate2),
Uncompressed(Uncompressed),
}
impl Sanitize for CompressedSlots {
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
match self {
CompressedSlots::Uncompressed(a) => a.sanitize(),
CompressedSlots::Flate2(b) => b.sanitize(),
}
}
}
impl Default for CompressedSlots {
fn default() -> Self {
CompressedSlots::new(0)
}
}
impl CompressedSlots {
fn new(max_size: usize) -> Self {
CompressedSlots::Uncompressed(Uncompressed::new(max_size))
}
pub fn first_slot(&self) -> Slot {
match self {
CompressedSlots::Uncompressed(a) => a.first_slot,
CompressedSlots::Flate2(b) => b.first_slot,
}
}
pub fn num_slots(&self) -> usize {
match self {
CompressedSlots::Uncompressed(a) => a.num,
CompressedSlots::Flate2(b) => b.num,
}
}
pub fn add(&mut self, slots: &[Slot]) -> usize {
match self {
CompressedSlots::Uncompressed(vals) => vals.add(slots),
CompressedSlots::Flate2(_) => 0,
}
}
pub fn to_slots(&self, min_slot: Slot) -> Result<Vec<Slot>> {
match self {
CompressedSlots::Uncompressed(vals) => Ok(vals.to_slots(min_slot)),
CompressedSlots::Flate2(vals) => {
let unc = vals.inflate()?;
Ok(unc.to_slots(min_slot))
}
}
}
pub fn deflate(&mut self) -> Result<()> {
match self {
CompressedSlots::Uncompressed(vals) => {
let unc = vals.clone();
let compressed = Flate2::deflate(unc)?;
let mut new = CompressedSlots::Flate2(compressed);
std::mem::swap(self, &mut new);
Ok(())
}
CompressedSlots::Flate2(_) => Ok(()),
}
}
}
#[derive(Serialize, Deserialize, Clone, Default, PartialEq)]
pub struct EpochSlots {
pub from: Pubkey,
pub slots: Vec<CompressedSlots>,
pub wallclock: u64,
}
impl Sanitize for EpochSlots {
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
self.from.sanitize()?;
self.slots.sanitize()
}
}
use std::fmt;
impl fmt::Debug for EpochSlots {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let num_slots: usize = self.slots.iter().map(|s| s.num_slots()).sum();
let lowest_slot = self
.slots
.iter()
.map(|s| s.first_slot())
.fold(0, std::cmp::min);
write!(
f,
"EpochSlots {{ from: {} num_slots: {} lowest_slot: {} wallclock: {} }}",
self.from, num_slots, lowest_slot, self.wallclock
)
}
}
impl EpochSlots {
pub fn new(from: Pubkey, now: u64) -> Self {
Self {
from,
wallclock: now,
slots: vec![],
}
}
pub fn fill(&mut self, slots: &[Slot], now: u64) -> usize {
let mut num = 0;
self.wallclock = std::cmp::max(now, self.wallclock + 1);
while num < slots.len() {
num += self.add(&slots[num..]);
if num < slots.len() {
if self.deflate().is_err() {
return num;
}
let space = self.max_compressed_slot_size();
if space > 0 {
let cslot = CompressedSlots::new(space as usize);
self.slots.push(cslot);
} else {
return num;
}
}
}
num
}
pub fn add(&mut self, slots: &[Slot]) -> usize {
let mut num = 0;
for s in &mut self.slots {
num += s.add(&slots[num..]);
if num >= slots.len() {
break;
}
}
num
}
pub fn deflate(&mut self) -> Result<()> {
for s in self.slots.iter_mut() {
s.deflate()?;
}
Ok(())
}
pub fn max_compressed_slot_size(&self) -> isize {
let len_header = serialized_size(self).unwrap();
let len_slot = serialized_size(&CompressedSlots::default()).unwrap();
MAX_CRDS_OBJECT_SIZE as isize - (len_header + len_slot) as isize
}
pub fn first_slot(&self) -> Option<Slot> {
self.slots.iter().map(|s| s.first_slot()).min()
}
pub fn to_slots(&self, min_slot: Slot) -> Vec<Slot> {
self.slots
.iter()
.filter(|s| min_slot < s.first_slot() + s.num_slots() as u64)
.filter_map(|s| s.to_slots(min_slot).ok())
.flatten()
.collect()
}
}
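EpochSlots::fill() packs slots into the existing buckets, and whenever it runs out of room it deflates what it has and opens a new bucket sized to the remaining CRDS budget via max_compressed_slot_size(). A short usage sketch, assuming EpochSlots and Slot are in scope as in the test module below (the test name is hypothetical; the behavior mirrors the existing fill-range tests):

```rust
#[test]
fn epoch_slots_fill_usage_sketch() {
    let mut epoch_slots = EpochSlots::default();
    let slots: Vec<Slot> = (0..5000).collect();
    // fill() reports how many of the input slots actually fit within the CRDS budget.
    assert_eq!(epoch_slots.fill(&slots, /*now=*/ 1), 5000);
    // Buckets are deflated as they fill, but to_slots() reassembles them transparently.
    assert_eq!(epoch_slots.to_slots(4999), vec![4999]);
}
```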
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_epoch_slots_max_size() {
let epoch_slots = EpochSlots::default();
assert!(epoch_slots.max_compressed_slot_size() > 0);
}
#[test]
fn test_epoch_slots_uncompressed_add_1() {
let mut slots = Uncompressed::new(1);
assert_eq!(slots.slots.capacity(), 8);
assert_eq!(slots.add(&[1]), 1);
assert_eq!(slots.to_slots(1), vec![1]);
assert!(slots.to_slots(2).is_empty());
}
#[test]
fn test_epoch_slots_to_slots_overflow() {
let mut slots = Uncompressed::new(1);
slots.num = 100;
assert!(slots.to_slots(0).is_empty());
}
#[test]
fn test_epoch_slots_uncompressed_add_2() {
let mut slots = Uncompressed::new(1);
assert_eq!(slots.add(&[1, 2]), 2);
assert_eq!(slots.to_slots(1), vec![1, 2]);
}
#[test]
fn test_epoch_slots_uncompressed_add_3a() {
let mut slots = Uncompressed::new(1);
assert_eq!(slots.add(&[1, 3, 2]), 3);
assert_eq!(slots.to_slots(1), vec![1, 2, 3]);
}
#[test]
fn test_epoch_slots_uncompressed_add_3b() {
let mut slots = Uncompressed::new(1);
assert_eq!(slots.add(&[1, 10, 2]), 1);
assert_eq!(slots.to_slots(1), vec![1]);
}
#[test]
fn test_epoch_slots_uncompressed_add_3c() {
let mut slots = Uncompressed::new(2);
assert_eq!(slots.add(&[1, 10, 2]), 3);
assert_eq!(slots.to_slots(1), vec![1, 2, 10]);
assert_eq!(slots.to_slots(2), vec![2, 10]);
assert_eq!(slots.to_slots(3), vec![10]);
assert!(slots.to_slots(11).is_empty());
}
#[test]
fn test_epoch_slots_compressed() {
let mut slots = Uncompressed::new(100);
slots.add(&[1, 701, 2]);
assert_eq!(slots.num, 701);
let compressed = Flate2::deflate(slots).unwrap();
assert_eq!(compressed.first_slot, 1);
assert_eq!(compressed.num, 701);
assert!(compressed.compressed.len() < 32);
let slots = compressed.inflate().unwrap();
assert_eq!(slots.first_slot, 1);
assert_eq!(slots.num, 701);
assert_eq!(slots.to_slots(1), vec![1, 2, 701]);
}
#[test]
fn test_epoch_slots_sanitize() {
let mut slots = Uncompressed::new(100);
slots.add(&[1, 701, 2]);
assert_eq!(slots.num, 701);
assert!(slots.sanitize().is_ok());
let mut o = slots.clone();
o.first_slot = MAX_SLOT;
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
let mut o = slots.clone();
o.num = MAX_SLOTS_PER_ENTRY;
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
let compressed = Flate2::deflate(slots).unwrap();
assert!(compressed.sanitize().is_ok());
let mut o = compressed.clone();
o.first_slot = MAX_SLOT;
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
let mut o = compressed.clone();
o.num = MAX_SLOTS_PER_ENTRY;
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
let mut slots = EpochSlots::default();
let range: Vec<Slot> = (0..5000).into_iter().collect();
assert_eq!(slots.fill(&range, 1), 5000);
assert_eq!(slots.wallclock, 1);
assert!(slots.sanitize().is_ok());
let mut o = slots.clone();
o.wallclock = MAX_WALLCLOCK;
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
}
#[test]
fn test_epoch_slots_fill_range() {
let range: Vec<Slot> = (0..5000).into_iter().collect();
let mut slots = EpochSlots::default();
assert_eq!(slots.fill(&range, 1), 5000);
assert_eq!(slots.wallclock, 1);
assert_eq!(slots.to_slots(0), range);
assert_eq!(slots.to_slots(4999), vec![4999]);
assert!(slots.to_slots(5000).is_empty());
}
#[test]
fn test_epoch_slots_fill_sparse_range() {
let range: Vec<Slot> = (0..5000).into_iter().map(|x| x * 3).collect();
let mut slots = EpochSlots::default();
assert_eq!(slots.fill(&range, 2), 5000);
assert_eq!(slots.wallclock, 2);
assert_eq!(slots.slots.len(), 3);
assert_eq!(slots.slots[0].first_slot(), 0);
assert_ne!(slots.slots[0].num_slots(), 0);
let next = slots.slots[0].num_slots() as u64 + slots.slots[0].first_slot();
assert!(slots.slots[1].first_slot() >= next);
assert_ne!(slots.slots[1].num_slots(), 0);
assert_ne!(slots.slots[2].num_slots(), 0);
assert_eq!(slots.to_slots(0), range);
assert_eq!(slots.to_slots(4999 * 3), vec![4999 * 3]);
}
#[test]
fn test_epoch_slots_fill_large_sparse_range() {
let range: Vec<Slot> = (0..5000).into_iter().map(|x| x * 7).collect();
let mut slots = EpochSlots::default();
assert_eq!(slots.fill(&range, 2), 5000);
assert_eq!(slots.to_slots(0), range);
}
#[test]
fn test_epoch_slots_fill_uncompressed_random_range() {
use rand::Rng;
for _ in 0..10 {
let mut range: Vec<Slot> = vec![];
for _ in 0..5000 {
let last = *range.last().unwrap_or(&0);
range.push(last + rand::thread_rng().gen_range(1, 5));
}
let sz = EpochSlots::default().max_compressed_slot_size();
let mut slots = Uncompressed::new(sz as usize);
let sz = slots.add(&range);
let slots = slots.to_slots(0);
assert_eq!(slots.len(), sz);
assert_eq!(slots[..], range[..sz]);
}
}
#[test]
fn test_epoch_slots_fill_compressed_random_range() {
use rand::Rng;
for _ in 0..10 {
let mut range: Vec<Slot> = vec![];
for _ in 0..5000 {
let last = *range.last().unwrap_or(&0);
range.push(last + rand::thread_rng().gen_range(1, 5));
}
let sz = EpochSlots::default().max_compressed_slot_size();
let mut slots = Uncompressed::new(sz as usize);
let sz = slots.add(&range);
let mut slots = CompressedSlots::Uncompressed(slots);
slots.deflate().unwrap();
let slots = slots.to_slots(0).unwrap();
assert_eq!(slots.len(), sz);
assert_eq!(slots[..], range[..sz]);
}
}
#[test]
fn test_epoch_slots_fill_random_range() {
use rand::Rng;
for _ in 0..10 {
let mut range: Vec<Slot> = vec![];
for _ in 0..5000 {
let last = *range.last().unwrap_or(&0);
range.push(last + rand::thread_rng().gen_range(1, 5));
}
let mut slots = EpochSlots::default();
let sz = slots.fill(&range, 1);
let last = range[sz - 1];
assert_eq!(
last,
slots.slots.last().unwrap().first_slot()
+ slots.slots.last().unwrap().num_slots() as u64
- 1
);
for s in &slots.slots {
assert!(s.to_slots(0).is_ok());
}
let slots = slots.to_slots(0);
assert_eq!(slots[..], range[..slots.len()]);
assert_eq!(sz, slots.len())
}
}
}

View File

@@ -1,14 +1,14 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
use crate::banking_stage::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET;
use crate::packet::PacketsRecycler;
use crate::poh_recorder::PohRecorder;
use crate::result::{Error, Result};
use crate::streamer::{self, PacketReceiver, PacketSender};
use solana_measure::thread_mem_usage;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
use solana_perf::packet::PacketsRecycler;
use solana_perf::recycler::Recycler;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_streamer::streamer::{self, PacketReceiver, PacketSender};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, RecvTimeoutError};

View File

@@ -0,0 +1 @@
pub use solana_ledger::genesis_utils::*;

View File

@@ -2,13 +2,13 @@
use crate::cluster_info::{ClusterInfo, VALIDATOR_PORT_RANGE};
use crate::contact_info::ContactInfo;
use crate::streamer;
use rand::{thread_rng, Rng};
use solana_client::thin_client::{create_client, ThinClient};
use solana_ledger::bank_forks::BankForks;
use solana_perf::recycler::Recycler;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signer};
use solana_streamer::streamer;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
@@ -22,7 +22,7 @@ pub struct GossipService {
impl GossipService {
pub fn new(
cluster_info: &Arc<ClusterInfo>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
gossip_socket: UdpSocket,
exit: &Arc<AtomicBool>,
@@ -31,7 +31,7 @@ impl GossipService {
let gossip_socket = Arc::new(gossip_socket);
trace!(
"GossipService: id: {}, listening on: {:?}",
&cluster_info.id(),
&cluster_info.read().unwrap().my_data().id,
gossip_socket.local_addr().unwrap()
);
let t_receiver = streamer::receiver(
@@ -92,7 +92,7 @@ pub fn discover(
let (gossip_service, ip_echo, spy_ref) =
make_gossip_node(entrypoint, &exit, my_gossip_addr, my_shred_version);
let id = spy_ref.id();
let id = spy_ref.read().unwrap().keypair.pubkey();
info!("Entrypoint: {:?}", entrypoint);
info!("Node Id: {:?}", id);
if let Some(my_gossip_addr) = my_gossip_addr {
@@ -116,7 +116,7 @@ pub fn discover(
info!(
"discover success in {}s...\n{}",
secs,
spy_ref.contact_info_trace()
spy_ref.read().unwrap().contact_info_trace()
);
return Ok((tvu_peers, storage_peers));
}
@@ -124,12 +124,15 @@ pub fn discover(
if !tvu_peers.is_empty() {
info!(
"discover failed to match criteria by timeout...\n{}",
spy_ref.contact_info_trace()
spy_ref.read().unwrap().contact_info_trace()
);
return Ok((tvu_peers, storage_peers));
}
info!("discover failed...\n{}", spy_ref.contact_info_trace());
info!(
"discover failed...\n{}",
spy_ref.read().unwrap().contact_info_trace()
);
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"Discover failed",
@@ -176,7 +179,7 @@ pub fn get_multi_client(nodes: &[ContactInfo]) -> (ThinClient, usize) {
}
fn spy(
spy_ref: Arc<ClusterInfo>,
spy_ref: Arc<RwLock<ClusterInfo>>,
num_nodes: Option<usize>,
timeout: Option<u64>,
find_node_by_pubkey: Option<Pubkey>,
@@ -194,8 +197,13 @@ fn spy(
}
}
tvu_peers = spy_ref.all_tvu_peers().into_iter().collect::<Vec<_>>();
storage_peers = spy_ref.all_storage_peers();
tvu_peers = spy_ref
.read()
.unwrap()
.all_tvu_peers()
.into_iter()
.collect::<Vec<_>>();
storage_peers = spy_ref.read().unwrap().all_storage_peers();
let mut nodes: Vec<_> = tvu_peers.iter().chain(storage_peers.iter()).collect();
nodes.sort();
@@ -227,7 +235,10 @@ fn spy(
met_criteria = true;
}
if i % 20 == 0 {
info!("discovering...\n{}", spy_ref.contact_info_trace());
info!(
"discovering...\n{}",
spy_ref.read().unwrap().contact_info_trace()
);
}
sleep(Duration::from_millis(
crate::cluster_info::GOSSIP_SLEEP_MILLIS,
@@ -249,18 +260,18 @@ fn make_gossip_node(
exit: &Arc<AtomicBool>,
gossip_addr: Option<&SocketAddr>,
shred_version: u16,
) -> (GossipService, Option<TcpListener>, Arc<ClusterInfo>) {
) -> (GossipService, Option<TcpListener>, Arc<RwLock<ClusterInfo>>) {
let keypair = Arc::new(Keypair::new());
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr, shred_version)
} else {
ClusterInfo::spy_node(&keypair.pubkey(), shred_version)
};
let cluster_info = ClusterInfo::new(node, keypair);
let mut cluster_info = ClusterInfo::new(node, keypair);
if let Some(entrypoint) = entrypoint {
cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
}
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let gossip_service = GossipService::new(&cluster_info.clone(), None, gossip_socket, &exit);
(gossip_service, ip_echo, cluster_info)
}
@@ -270,7 +281,7 @@ mod tests {
use super::*;
use crate::cluster_info::{ClusterInfo, Node};
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::sync::{Arc, RwLock};
#[test]
#[ignore]
@@ -279,7 +290,7 @@ mod tests {
let exit = Arc::new(AtomicBool::new(false));
let tn = Node::new_localhost();
let cluster_info = ClusterInfo::new_with_invalid_keypair(tn.info.clone());
let c = Arc::new(cluster_info);
let c = Arc::new(RwLock::new(cluster_info));
let d = GossipService::new(&c, None, tn.sockets.gossip, &exit);
exit.store(true, Ordering::Relaxed);
d.join().unwrap();
@@ -293,16 +304,16 @@ mod tests {
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let peer0_info = ContactInfo::new_localhost(&peer0, 0);
let peer1_info = ContactInfo::new_localhost(&peer1, 0);
let cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
let mut cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
cluster_info.insert_info(peer0_info.clone());
cluster_info.insert_info(peer1_info);
let spy_ref = Arc::new(cluster_info);
let spy_ref = Arc::new(RwLock::new(cluster_info));
let (met_criteria, secs, tvu_peers, _) = spy(spy_ref.clone(), None, Some(1), None, None);
assert_eq!(met_criteria, false);
assert_eq!(secs, 1);
assert_eq!(tvu_peers, spy_ref.tvu_peers());
assert_eq!(tvu_peers, spy_ref.read().unwrap().tvu_peers());
// Find num_nodes
let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, None, None);
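In this branch ClusterInfo is shared as Arc<RwLock<ClusterInfo>>, so gossip threads take a read lock for queries (e.g. spy_ref.read().unwrap().contact_info_trace()) and a write lock to mutate. A standalone sketch of that sharing pattern; the type and field names here are illustrative, not the ClusterInfo API:

```rust
use std::sync::{Arc, RwLock};
use std::thread;

#[derive(Default)]
struct SharedInfo {
    peers: Vec<String>,
}

fn main() {
    let shared = Arc::new(RwLock::new(SharedInfo::default()));

    // One thread takes a write lock to mutate the shared state.
    let writer = {
        let shared = shared.clone();
        thread::spawn(move || {
            shared.write().unwrap().peers.push("peer-1".to_string());
        })
    };
    writer.join().unwrap();

    // Readers only need a read lock, mirroring spy_ref.read().unwrap().all_tvu_peers().
    assert_eq!(shared.read().unwrap().peers.len(), 1);
}
```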

View File

@@ -29,9 +29,8 @@ pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
// Delay between purges to cooperate with other blockstore users
pub const DEFAULT_DELAY_BETWEEN_PURGES: Duration = Duration::from_millis(500);
pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
@@ -62,6 +61,7 @@ impl LedgerCleanupService {
max_ledger_slots,
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
Some(DEFAULT_DELAY_BETWEEN_PURGES),
) {
match e {
RecvTimeoutError::Disconnected => break,
@@ -77,8 +77,8 @@ impl LedgerCleanupService {
blockstore: &Arc<Blockstore>,
root: Slot,
max_ledger_shreds: u64,
) -> (u64, Slot, Slot, u64) {
let mut shreds = Vec::new();
) -> (bool, Slot, Slot, u64) {
let mut total_slots = Vec::new();
let mut iterate_time = Measure::start("iterate_time");
let mut total_shreds = 0;
let mut first_slot = 0;
@@ -89,33 +89,43 @@ impl LedgerCleanupService {
}
// Not exact since non-full slots will have holes
total_shreds += meta.received;
shreds.push((slot, meta.received));
total_slots.push((slot, meta.received));
if slot > root {
break;
}
}
iterate_time.stop();
info!(
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
max_ledger_shreds,
shreds.len(),
"first_slot={} total_slots={} total_shreds={} max_ledger_shreds={}, {}",
first_slot,
total_slots.len(),
total_shreds,
max_ledger_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
return (0, 0, 0, total_shreds);
return (false, 0, 0, total_shreds);
}
let mut cur_shreds = 0;
let mut lowest_slot_to_clean = shreds[0].0;
for (slot, num_shreds) in shreds.iter().rev() {
cur_shreds += *num_shreds as u64;
if cur_shreds > max_ledger_shreds {
lowest_slot_to_clean = *slot;
let mut num_shreds_to_clean = 0;
let mut lowest_cleanup_slot = total_slots[0].0;
for (slot, num_shreds) in total_slots.iter().rev() {
num_shreds_to_clean += *num_shreds as u64;
if num_shreds_to_clean > max_ledger_shreds {
lowest_cleanup_slot = *slot;
break;
}
}
(cur_shreds, lowest_slot_to_clean, first_slot, total_shreds)
(true, lowest_cleanup_slot, first_slot, total_shreds)
}
fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
Ok(root)
}
fn cleanup_ledger(
@@ -124,58 +134,63 @@ impl LedgerCleanupService {
max_ledger_shreds: u64,
last_purge_slot: &mut u64,
purge_interval: u64,
delay_between_purges: Option<Duration>,
) -> Result<(), RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
let root = Self::receive_new_roots(new_root_receiver)?;
if root - *last_purge_slot <= purge_interval {
return Ok(());
}
if root - *last_purge_slot > purge_interval {
let disk_utilization_pre = blockstore.storage_size();
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: last_root={}, last_purge_slot={}, purge_interval={}, disk_utilization={:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
);
*last_purge_slot = root;
let (slots_to_clean, lowest_cleanup_slot, first_slot, total_shreds) =
Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);
if slots_to_clean {
info!(
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
"purging data from slots {} to {}",
first_slot, lowest_cleanup_slot
);
*last_purge_slot = root;
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot, total_shreds) =
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
if num_shreds_to_clean > 0 {
debug!(
"cleaning up to: {} shreds: {} first: {}",
lowest_slot_to_clean, num_shreds_to_clean, first_slot
);
loop {
let current_lowest =
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
let purge_complete = Arc::new(AtomicBool::new(false));
let blockstore = blockstore.clone();
let purge_complete1 = purge_complete.clone();
let _t_purge = Builder::new()
.name("solana-ledger-purge".to_string())
.spawn(move || {
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
slot_update_time.stop();
let mut clean_time = Measure::start("ledger_clean");
blockstore.purge_slots(first_slot, Some(current_lowest));
clean_time.stop();
debug!(
"ledger purge {} -> {}: {} {}",
first_slot, current_lowest, slot_update_time, clean_time
let mut purge_time = Measure::start("purge_slots_with_delay");
blockstore.purge_slots_with_delay(
first_slot,
lowest_cleanup_slot,
delay_between_purges,
);
first_slot += DEFAULT_PURGE_BATCH_SIZE;
if current_lowest == lowest_slot_to_clean {
break;
}
thread::sleep(Duration::from_millis(500));
purge_time.stop();
info!("{}", purge_time);
purge_complete1.store(true, Ordering::Relaxed);
})
.unwrap();
// Keep pulling roots off `new_root_receiver` while purging to avoid channel buildup
while !purge_complete.load(Ordering::Relaxed) {
if let Err(err) = Self::receive_new_roots(new_root_receiver) {
debug!("receive_new_roots: {}", err);
}
thread::sleep(Duration::from_secs(1));
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post, total_shreds);
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post, total_shreds);
Ok(())
}
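The reworked cleanup_ledger() hands the purge to a dedicated thread and keeps servicing new_root_receiver until the purge completes, so the root channel cannot back up during a long purge. A standalone sketch of that drain-while-working pattern; the names are illustrative and this is not the actual service:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    let (sender, receiver) = channel::<u64>();
    let done = Arc::new(AtomicBool::new(false));

    // Worker thread stands in for the slow purge_slots_with_delay() call.
    let worker = {
        let done = done.clone();
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(200));
            done.store(true, Ordering::Relaxed);
        })
    };

    sender.send(1).unwrap();
    // Keep pulling roots off the channel while the worker runs, retaining only the newest.
    let mut latest_root = 0;
    while !done.load(Ordering::Relaxed) {
        while let Ok(root) = receiver.try_recv() {
            latest_root = root;
        }
        thread::sleep(Duration::from_millis(10));
    }
    worker.join().unwrap();
    println!("latest root seen while purging: {}", latest_root);
}
```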
@@ -219,8 +234,15 @@ mod tests {
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
.unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
5,
&mut last_purge_slot,
10,
None,
)
.unwrap();
//check that 0-40 don't exist
blockstore
@@ -273,6 +295,7 @@ mod tests {
initial_slots,
&mut last_purge_slot,
10,
None,
)
.unwrap();
time.stop();
@@ -315,6 +338,7 @@ mod tests {
max_ledger_shreds,
&mut next_purge_batch,
10,
None,
)
.unwrap();

View File

@@ -5,18 +5,17 @@
//! command-line tools to spin up validators and a Rust library
//!
pub mod accounts_background_service;
pub mod accounts_hash_verifier;
pub mod banking_stage;
pub mod broadcast_stage;
pub mod cluster_info_vote_listener;
pub mod commitment;
mod deprecated;
pub mod shred_fetch_stage;
#[macro_use]
pub mod contact_info;
pub mod blockstream;
pub mod blockstream_service;
pub mod cluster_info;
pub mod cluster_slots;
pub mod consensus;
pub mod crds;
pub mod crds_gossip;
@@ -24,16 +23,17 @@ pub mod crds_gossip_error;
pub mod crds_gossip_pull;
pub mod crds_gossip_push;
pub mod crds_value;
pub mod epoch_slots;
pub mod fetch_stage;
pub mod gen_keys;
pub mod genesis_utils;
pub mod gossip_service;
pub mod ledger_cleanup_service;
pub mod local_vote_signer_service;
pub mod non_circulating_supply;
pub mod packet;
pub mod poh_recorder;
pub mod poh_service;
pub mod progress_map;
pub mod recvmmsg;
pub mod repair_response;
pub mod repair_service;
pub mod replay_stage;
@@ -46,6 +46,7 @@ pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;
pub mod rpc_subscriptions;
pub mod sendmmsg;
pub mod serve_repair;
pub mod serve_repair_service;
pub mod sigverify;
@@ -53,17 +54,14 @@ pub mod sigverify_shreds;
pub mod sigverify_stage;
pub mod snapshot_packager_service;
pub mod storage_stage;
pub mod streamer;
pub mod tpu;
pub mod transaction_status_service;
pub mod tvu;
pub mod validator;
pub mod verified_vote_packets;
pub mod weighted_shuffle;
pub mod window_service;
#[macro_use]
extern crate solana_bpf_loader_program;
#[macro_use]
extern crate solana_budget_program;

View File

@@ -8,7 +8,7 @@ pub struct NonCirculatingSupply {
pub accounts: Vec<Pubkey>,
}
pub fn calculate_non_circulating_supply(bank: Arc<Bank>) -> NonCirculatingSupply {
pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSupply {
debug!("Updating Bank supply, epoch: {}", bank.epoch());
let mut non_circulating_accounts_set: HashSet<Pubkey> = HashSet::new();
@@ -74,6 +74,7 @@ solana_sdk::pubkeys!(
"CHmdL15akDcJgBkY6BP3hzs98Dqr6wbdDC5p8odvtSbq",
"FR84wZQy3Y3j2gWz6pgETUiUoJtreMEuWfbg6573UCj9",
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
"3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
]
);
@@ -149,7 +150,7 @@ mod tests {
(num_genesis_accounts + num_non_circulating_accounts + num_stake_accounts) * balance
);
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
(num_non_circulating_accounts + num_stake_accounts) * balance
@@ -164,7 +165,7 @@ mod tests {
for key in non_circulating_accounts {
bank.store_account(&key, &Account::new(new_balance, 0, &Pubkey::default()));
}
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
(num_non_circulating_accounts * new_balance) + (num_stake_accounts * balance)
@@ -179,7 +180,7 @@ mod tests {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(bank.epoch(), 1);
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
num_non_circulating_accounts * new_balance
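calculate_non_circulating_supply() now borrows the Arc instead of taking it by value, so the test callers above drop their .clone() calls. A tiny standalone sketch of the signature change, with Bank as a stand-in type:

```rust
use std::sync::Arc;

struct Bank {
    epoch: u64,
}

// Borrowing &Arc<Bank> lets callers pass a reference without cloning the Arc;
// the callee can still clone it if it needs shared ownership.
fn calculate(bank: &Arc<Bank>) -> u64 {
    bank.epoch
}

fn main() {
    let bank = Arc::new(Bank { epoch: 1 });
    assert_eq!(calculate(&bank), 1); // was calculate(bank.clone()) before
}
```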

View File

@@ -72,7 +72,7 @@ mod tests {
#[test]
fn test_packets_set_addr() {
// test that the address is actually being updated
let send_addr: SocketAddr = "127.0.0.1:123".parse().unwrap();
let send_addr = socketaddr!([127, 0, 0, 1], 123);
let packets = vec![Packet::default()];
let mut msgs = Packets::new(packets);
msgs.set_addr(&send_addr);

View File

@@ -511,8 +511,8 @@ impl PohRecorder {
#[cfg(test)]
mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use bincode::serialize;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{blockstore::Blockstore, blockstore_meta::SlotMeta, get_tmp_ledger_path};
use solana_perf::test_tx::test_tx;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;

View File

@@ -118,8 +118,8 @@ impl PohService {
#[cfg(test)]
mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use crate::poh_recorder::WorkingBank;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_perf::test_tx::test_tx;

View File

@@ -1,622 +0,0 @@
use crate::{
cluster_info_vote_listener::SlotVoteTracker, cluster_slots::SlotPubkeys,
consensus::StakeLockout, replay_stage::SUPERMINORITY_THRESHOLD,
};
use solana_ledger::{
bank_forks::BankForks,
blockstore_processor::{ConfirmationProgress, ConfirmationTiming},
};
use solana_runtime::bank::Bank;
use solana_sdk::{account::Account, clock::Slot, hash::Hash, pubkey::Pubkey};
use std::{
collections::{HashMap, HashSet},
rc::Rc,
sync::{Arc, RwLock},
};
#[derive(Default)]
pub(crate) struct ReplaySlotStats(ConfirmationTiming);
impl std::ops::Deref for ReplaySlotStats {
type Target = ConfirmationTiming;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for ReplaySlotStats {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl ReplaySlotStats {
pub fn report_stats(&self, slot: Slot, num_entries: usize, num_shreds: u64) {
datapoint_info!(
"replay-slot-stats",
("slot", slot as i64, i64),
("fetch_entries_time", self.fetch_elapsed as i64, i64),
(
"fetch_entries_fail_time",
self.fetch_fail_elapsed as i64,
i64
),
("entry_verification_time", self.verify_elapsed as i64, i64),
("replay_time", self.replay_elapsed as i64, i64),
(
"replay_total_elapsed",
self.started.elapsed().as_micros() as i64,
i64
),
("total_entries", num_entries as i64, i64),
("total_shreds", num_shreds as i64, i64),
);
}
}
#[derive(Debug)]
pub(crate) struct ValidatorStakeInfo {
pub validator_vote_pubkey: Pubkey,
pub stake: u64,
pub total_epoch_stake: u64,
}
impl Default for ValidatorStakeInfo {
fn default() -> Self {
Self {
stake: 0,
validator_vote_pubkey: Pubkey::default(),
total_epoch_stake: 1,
}
}
}
impl ValidatorStakeInfo {
pub fn new(validator_vote_pubkey: Pubkey, stake: u64, total_epoch_stake: u64) -> Self {
Self {
validator_vote_pubkey,
stake,
total_epoch_stake,
}
}
}
pub(crate) struct ForkProgress {
pub(crate) is_dead: bool,
pub(crate) fork_stats: ForkStats,
pub(crate) propagated_stats: PropagatedStats,
pub(crate) replay_stats: ReplaySlotStats,
pub(crate) replay_progress: ConfirmationProgress,
// Note `num_blocks_on_fork` and `num_dropped_blocks_on_fork` only
// count new blocks replayed since last restart, which won't include
// blocks already existing in the ledger/before snapshot at start,
// so these stats do not span all of time
pub(crate) num_blocks_on_fork: u64,
pub(crate) num_dropped_blocks_on_fork: u64,
}
impl ForkProgress {
pub fn new(
last_entry: Hash,
prev_leader_slot: Option<Slot>,
validator_stake_info: Option<ValidatorStakeInfo>,
num_blocks_on_fork: u64,
num_dropped_blocks_on_fork: u64,
) -> Self {
let (
is_leader_slot,
propagated_validators_stake,
propagated_validators,
is_propagated,
total_epoch_stake,
) = validator_stake_info
.map(|info| {
(
true,
info.stake,
vec![Rc::new(info.validator_vote_pubkey)]
.into_iter()
.collect(),
{
if info.total_epoch_stake == 0 {
true
} else {
info.stake as f64 / info.total_epoch_stake as f64
> SUPERMINORITY_THRESHOLD
}
},
info.total_epoch_stake,
)
})
.unwrap_or((false, 0, HashSet::new(), false, 0));
Self {
is_dead: false,
fork_stats: ForkStats::default(),
replay_stats: ReplaySlotStats::default(),
replay_progress: ConfirmationProgress::new(last_entry),
num_blocks_on_fork,
num_dropped_blocks_on_fork,
propagated_stats: PropagatedStats {
prev_leader_slot,
is_leader_slot,
propagated_validators_stake,
propagated_validators,
is_propagated,
total_epoch_stake,
..PropagatedStats::default()
},
}
}
pub fn new_from_bank(
bank: &Bank,
my_pubkey: &Pubkey,
voting_pubkey: &Pubkey,
prev_leader_slot: Option<Slot>,
num_blocks_on_fork: u64,
num_dropped_blocks_on_fork: u64,
) -> Self {
let validator_fork_info = {
if bank.collector_id() == my_pubkey {
let stake = bank.epoch_vote_account_stake(voting_pubkey);
Some(ValidatorStakeInfo::new(
*voting_pubkey,
stake,
bank.total_epoch_stake(),
))
} else {
None
}
};
Self::new(
bank.last_blockhash(),
prev_leader_slot,
validator_fork_info,
num_blocks_on_fork,
num_dropped_blocks_on_fork,
)
}
}
#[derive(Debug, Clone, Default)]
pub(crate) struct ForkStats {
pub(crate) weight: u128,
pub(crate) fork_weight: u128,
pub(crate) total_staked: u64,
pub(crate) block_height: u64,
pub(crate) has_voted: bool,
pub(crate) is_recent: bool,
pub(crate) is_empty: bool,
pub(crate) vote_threshold: bool,
pub(crate) is_locked_out: bool,
pub(crate) stake_lockouts: HashMap<u64, StakeLockout>,
pub(crate) confirmation_reported: bool,
pub(crate) computed: bool,
}
#[derive(Clone, Default)]
pub(crate) struct PropagatedStats {
pub(crate) propagated_validators: HashSet<Rc<Pubkey>>,
pub(crate) propagated_node_ids: HashSet<Rc<Pubkey>>,
pub(crate) propagated_validators_stake: u64,
pub(crate) is_propagated: bool,
pub(crate) is_leader_slot: bool,
pub(crate) prev_leader_slot: Option<Slot>,
pub(crate) slot_vote_tracker: Option<Arc<RwLock<SlotVoteTracker>>>,
pub(crate) cluster_slot_pubkeys: Option<Arc<RwLock<SlotPubkeys>>>,
pub(crate) total_epoch_stake: u64,
}
impl PropagatedStats {
pub fn add_vote_pubkey(
&mut self,
vote_pubkey: &Pubkey,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
stake: u64,
) {
if !self.propagated_validators.contains(vote_pubkey) {
let mut cached_pubkey: Option<Rc<Pubkey>> = all_pubkeys.get(vote_pubkey).cloned();
if cached_pubkey.is_none() {
let new_pubkey = Rc::new(*vote_pubkey);
all_pubkeys.insert(new_pubkey.clone());
cached_pubkey = Some(new_pubkey);
}
let vote_pubkey = cached_pubkey.unwrap();
self.propagated_validators.insert(vote_pubkey);
self.propagated_validators_stake += stake;
}
}
pub fn add_node_pubkey(
&mut self,
node_pubkey: &Pubkey,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
bank: &Bank,
) {
if !self.propagated_node_ids.contains(node_pubkey) {
let node_vote_accounts = bank
.epoch_vote_accounts_for_node_id(&node_pubkey)
.map(|v| &v.vote_accounts);
if let Some(node_vote_accounts) = node_vote_accounts {
self.add_node_pubkey_internal(
node_pubkey,
all_pubkeys,
node_vote_accounts,
bank.epoch_vote_accounts(bank.epoch())
.expect("Epoch stakes for bank's own epoch must exist"),
);
}
}
}
fn add_node_pubkey_internal(
&mut self,
node_pubkey: &Pubkey,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
vote_account_pubkeys: &[Pubkey],
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
) {
let mut cached_pubkey: Option<Rc<Pubkey>> = all_pubkeys.get(node_pubkey).cloned();
if cached_pubkey.is_none() {
let new_pubkey = Rc::new(*node_pubkey);
all_pubkeys.insert(new_pubkey.clone());
cached_pubkey = Some(new_pubkey);
}
let node_pubkey = cached_pubkey.unwrap();
self.propagated_node_ids.insert(node_pubkey);
for vote_account_pubkey in vote_account_pubkeys.iter() {
let stake = epoch_vote_accounts
.get(vote_account_pubkey)
.map(|(stake, _)| *stake)
.unwrap_or(0);
self.add_vote_pubkey(vote_account_pubkey, all_pubkeys, stake);
}
}
}
#[derive(Default)]
pub(crate) struct ProgressMap {
progress_map: HashMap<Slot, ForkProgress>,
}
impl std::ops::Deref for ProgressMap {
type Target = HashMap<Slot, ForkProgress>;
fn deref(&self) -> &Self::Target {
&self.progress_map
}
}
impl std::ops::DerefMut for ProgressMap {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.progress_map
}
}
impl ProgressMap {
pub fn insert(&mut self, slot: Slot, fork_progress: ForkProgress) {
self.progress_map.insert(slot, fork_progress);
}
pub fn get_propagated_stats(&self, slot: Slot) -> Option<&PropagatedStats> {
self.progress_map
.get(&slot)
.map(|fork_progress| &fork_progress.propagated_stats)
}
pub fn get_propagated_stats_mut(&mut self, slot: Slot) -> Option<&mut PropagatedStats> {
self.progress_map
.get_mut(&slot)
.map(|fork_progress| &mut fork_progress.propagated_stats)
}
pub fn get_fork_stats(&self, slot: Slot) -> Option<&ForkStats> {
self.progress_map
.get(&slot)
.map(|fork_progress| &fork_progress.fork_stats)
}
pub fn get_fork_stats_mut(&mut self, slot: Slot) -> Option<&mut ForkStats> {
self.progress_map
.get_mut(&slot)
.map(|fork_progress| &mut fork_progress.fork_stats)
}
pub fn is_propagated(&self, slot: Slot) -> bool {
let leader_slot_to_check = self.get_latest_leader_slot(slot);
// prev_leader_slot doesn't exist either because it is already rooted,
// or because this validator hasn't yet been scheduled as a leader.
// In both cases the latest leader slot is vacuously confirmed.
leader_slot_to_check
.map(|leader_slot_to_check| {
// If the leader's stats are None (not in the
// progress map), this means that prev_leader_slot is
// rooted, so return true
self.get_propagated_stats(leader_slot_to_check)
.map(|stats| stats.is_propagated)
.unwrap_or(true)
})
.unwrap_or(true)
}
pub fn get_latest_leader_slot(&self, slot: Slot) -> Option<Slot> {
let propagated_stats = self
.get_propagated_stats(slot)
.expect("All frozen banks must exist in the Progress map");
if propagated_stats.is_leader_slot {
Some(slot)
} else {
propagated_stats.prev_leader_slot
}
}
pub fn get_bank_prev_leader_slot(&self, bank: &Bank) -> Option<Slot> {
let parent_slot = bank.parent_slot();
self.get_propagated_stats(parent_slot)
.map(|stats| {
if stats.is_leader_slot {
Some(parent_slot)
} else {
stats.prev_leader_slot
}
})
.unwrap_or(None)
}
pub fn handle_new_root(&mut self, bank_forks: &BankForks) {
self.progress_map
.retain(|k, _| bank_forks.get(*k).is_some());
}
pub fn log_propagated_stats(&self, slot: Slot, bank_forks: &RwLock<BankForks>) {
if let Some(stats) = self.get_propagated_stats(slot) {
info!(
"Propagated stats:
total staked: {},
observed staked: {},
vote pubkeys: {:?},
node_pubkeys: {:?},
slot: {},
epoch: {:?}",
stats.total_epoch_stake,
stats.propagated_validators_stake,
stats.propagated_validators,
stats.propagated_node_ids,
slot,
bank_forks.read().unwrap().get(slot).map(|x| x.epoch()),
);
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_add_vote_pubkey() {
let mut stats = PropagatedStats::default();
let mut all_pubkeys = HashSet::new();
let mut vote_pubkey = Pubkey::new_rand();
all_pubkeys.insert(Rc::new(vote_pubkey.clone()));
// Add a vote pubkey, the number of references in all_pubkeys
// should be 2
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 1);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 1);
assert_eq!(Rc::strong_count(all_pubkeys.get(&vote_pubkey).unwrap()), 2);
// Adding it again should change no state since the key already existed
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 1);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 1);
// Adding another pubkey should succeed
vote_pubkey = Pubkey::new_rand();
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 3);
assert_eq!(Rc::strong_count(all_pubkeys.get(&vote_pubkey).unwrap()), 2);
}
#[test]
fn test_add_node_pubkey_internal() {
let num_vote_accounts = 10;
let staked_vote_accounts = 5;
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(|| Pubkey::new_rand())
.take(num_vote_accounts)
.collect();
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
.iter()
.skip(num_vote_accounts - staked_vote_accounts)
.map(|pubkey| (*pubkey, (1, Account::default())))
.collect();
let mut stats = PropagatedStats::default();
let mut all_pubkeys = HashSet::new();
let mut node_pubkey = Pubkey::new_rand();
all_pubkeys.insert(Rc::new(node_pubkey.clone()));
// Add a node pubkey; the number of references in all_pubkeys
// should be 2
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
staked_vote_accounts as u64
);
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
// Adding it again should not change any state
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
staked_vote_accounts as u64
);
// Adding another pubkey with the same vote accounts should succeed, but stake
// shouldn't increase
node_pubkey = Pubkey::new_rand();
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
staked_vote_accounts as u64
);
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
// Adding another pubkey with different vote accounts should succeed
// and increase stake
node_pubkey = Pubkey::new_rand();
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(|| Pubkey::new_rand())
.take(num_vote_accounts)
.collect();
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
.iter()
.skip(num_vote_accounts - staked_vote_accounts)
.map(|pubkey| (*pubkey, (1, Account::default())))
.collect();
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
2 * staked_vote_accounts as u64
);
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
}
#[test]
fn test_is_propagated_status_on_construction() {
// If the given ValidatorStakeInfo == None, then this is not
// a leader slot and is_propagated == false
let progress = ForkProgress::new(Hash::default(), Some(9), None, 0, 0);
assert!(!progress.propagated_stats.is_propagated);
// If the stake is zero, then threshold is always achieved
let progress = ForkProgress::new(
Hash::default(),
Some(9),
Some(ValidatorStakeInfo {
total_epoch_stake: 0,
..ValidatorStakeInfo::default()
}),
0,
0,
);
assert!(progress.propagated_stats.is_propagated);
// If the stake is non zero, then threshold is not achieved unless
// validator has enough stake by itself to pass threshold
let progress = ForkProgress::new(
Hash::default(),
Some(9),
Some(ValidatorStakeInfo {
total_epoch_stake: 2,
..ValidatorStakeInfo::default()
}),
0,
0,
);
assert!(!progress.propagated_stats.is_propagated);
// Give the validator enough stake by itself to pass threshold
let progress = ForkProgress::new(
Hash::default(),
Some(9),
Some(ValidatorStakeInfo {
stake: 1,
total_epoch_stake: 2,
..ValidatorStakeInfo::default()
}),
0,
0,
);
assert!(progress.propagated_stats.is_propagated);
// Check that the default ValidatorStakeInfo::default() constructs a ForkProgress
// with is_propagated == false, otherwise propagation tests will fail to run
// the proper checks (most will auto-pass without checking anything)
let progress = ForkProgress::new(
Hash::default(),
Some(9),
Some(ValidatorStakeInfo::default()),
0,
0,
);
assert!(!progress.propagated_stats.is_propagated);
}
#[test]
fn test_is_propagated() {
let mut progress_map = ProgressMap::default();
// Insert new ForkProgress for slot 10 (not a leader slot) and its
// previous leader slot 9 (leader slot)
progress_map.insert(10, ForkProgress::new(Hash::default(), Some(9), None, 0, 0));
progress_map.insert(
9,
ForkProgress::new(
Hash::default(),
None,
Some(ValidatorStakeInfo::default()),
0,
0,
),
);
// None of these slot have parents which are confirmed
assert!(!progress_map.is_propagated(9));
assert!(!progress_map.is_propagated(10));
// Insert new ForkProgress for slot 8 with no previous leader.
// The previous leader before 8, slot 7, does not exist in
// progress map, so is_propagated(8) should return true as
// this implies the parent is rooted
progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None, 0, 0));
assert!(progress_map.is_propagated(8));
// If we set the is_propagated = true, is_propagated should return true
progress_map
.get_propagated_stats_mut(9)
.unwrap()
.is_propagated = true;
assert!(progress_map.is_propagated(9));
assert!(progress_map.get(&9).unwrap().propagated_stats.is_propagated);
// Because slot 9 is now confirmed, then slot 10 is also confirmed b/c 9
// is the last leader slot before 10
assert!(progress_map.is_propagated(10));
// If we make slot 10 a leader slot though, even though its previous
// leader slot 9 has been confirmed, slot 10 itself is not confirmed
progress_map
.get_propagated_stats_mut(10)
.unwrap()
.is_leader_slot = true;
assert!(!progress_map.is_propagated(10));
}
}

View File

@@ -2,7 +2,6 @@
//! regularly finds missing shreds in the ledger and sends repair requests for those shreds
use crate::{
cluster_info::ClusterInfo,
cluster_slots::ClusterSlots,
result::Result,
serve_repair::{RepairType, ServeRepair},
};
@@ -10,11 +9,13 @@ use solana_ledger::{
bank_forks::BankForks,
blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta},
};
use solana_sdk::clock::DEFAULT_SLOTS_PER_EPOCH;
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
collections::HashMap,
collections::{BTreeSet, HashSet},
iter::Iterator,
net::UdpSocket,
ops::Bound::{Included, Unbounded},
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
thread::sleep,
@@ -48,6 +49,9 @@ pub const MAX_REPAIR_LENGTH: usize = 512;
pub const REPAIR_MS: u64 = 100;
pub const MAX_ORPHANS: usize = 5;
const MAX_COMPLETED_SLOT_CACHE_LEN: usize = 256;
const COMPLETED_SLOT_CACHE_FLUSH_TRIGGER: usize = 512;
pub enum RepairStrategy {
RepairRange(RepairSlotRange),
RepairAll {
@@ -80,9 +84,8 @@ impl RepairService {
blockstore: Arc<Blockstore>,
exit: Arc<AtomicBool>,
repair_socket: Arc<UdpSocket>,
cluster_info: Arc<ClusterInfo>,
cluster_info: Arc<RwLock<ClusterInfo>>,
repair_strategy: RepairStrategy,
cluster_slots: Arc<ClusterSlots>,
) -> Self {
let t_repair = Builder::new()
.name("solana-repair-service".to_string())
@@ -91,9 +94,8 @@ impl RepairService {
&blockstore,
&exit,
&repair_socket,
cluster_info,
&cluster_info,
repair_strategy,
&cluster_slots,
)
})
.unwrap();
@@ -102,27 +104,33 @@ impl RepairService {
}
fn run(
blockstore: &Blockstore,
exit: &AtomicBool,
repair_socket: &UdpSocket,
cluster_info: Arc<ClusterInfo>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
repair_socket: &Arc<UdpSocket>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
repair_strategy: RepairStrategy,
cluster_slots: &Arc<ClusterSlots>,
) {
let serve_repair = ServeRepair::new(cluster_info.clone());
let id = cluster_info.id();
if let RepairStrategy::RepairAll { .. } = repair_strategy {
Self::initialize_lowest_slot(id, blockstore, &cluster_info);
let mut epoch_slots: BTreeSet<Slot> = BTreeSet::new();
let mut old_incomplete_slots: BTreeSet<Slot> = BTreeSet::new();
let id = cluster_info.read().unwrap().id();
if let RepairStrategy::RepairAll {
ref epoch_schedule, ..
} = repair_strategy
{
let current_root = blockstore.last_root();
Self::initialize_epoch_slots(
id,
blockstore,
&mut epoch_slots,
&old_incomplete_slots,
current_root,
epoch_schedule,
cluster_info,
);
}
let mut repair_stats = RepairStats::default();
let mut last_stats = Instant::now();
if let RepairStrategy::RepairAll {
ref completed_slots_receiver,
..
} = repair_strategy
{
Self::initialize_epoch_slots(blockstore, &cluster_info, completed_slots_receiver);
}
loop {
if exit.load(Ordering::Relaxed) {
break;
@@ -141,28 +149,29 @@ impl RepairService {
RepairStrategy::RepairAll {
ref completed_slots_receiver,
ref bank_forks,
..
} => {
let new_root = blockstore.last_root();
let lowest_slot = blockstore.lowest_slot();
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
Self::update_completed_slots(completed_slots_receiver, &cluster_info);
cluster_slots.update(new_root, &cluster_info, bank_forks);
Self::update_epoch_slots(
id,
new_root,
lowest_slot,
&mut epoch_slots,
&mut old_incomplete_slots,
&cluster_info,
completed_slots_receiver,
);
Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH)
}
}
};
if let Ok(repairs) = repairs {
let mut cache = HashMap::new();
repairs.into_iter().for_each(|repair_request| {
if let Ok((to, req)) = serve_repair.repair_request(
&cluster_slots,
repair_request,
&mut cache,
&mut repair_stats,
) {
if let Ok((to, req)) =
serve_repair.repair_request(&repair_request, &mut repair_stats)
{
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
info!("{} repair req send_to({}) error {:?}", id, to, e);
0
@@ -299,57 +308,150 @@ impl RepairService {
}
}
fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) {
fn get_completed_slots_past_root(
blockstore: &Blockstore,
slots_in_gossip: &mut BTreeSet<Slot>,
root: Slot,
epoch_schedule: &EpochSchedule,
) {
let last_confirmed_epoch = epoch_schedule.get_leader_schedule_epoch(root);
let last_epoch_slot = epoch_schedule.get_last_slot_in_epoch(last_confirmed_epoch);
let meta_iter = blockstore
.slot_meta_iterator(root + 1)
.expect("Couldn't get db iterator");
for (current_slot, meta) in meta_iter {
if current_slot > last_epoch_slot {
break;
}
if meta.is_full() {
slots_in_gossip.insert(current_slot);
}
}
}
fn initialize_epoch_slots(
id: Pubkey,
blockstore: &Blockstore,
slots_in_gossip: &mut BTreeSet<Slot>,
old_incomplete_slots: &BTreeSet<Slot>,
root: Slot,
epoch_schedule: &EpochSchedule,
cluster_info: &RwLock<ClusterInfo>,
) {
Self::get_completed_slots_past_root(blockstore, slots_in_gossip, root, epoch_schedule);
// Safe to set into gossip because by this time, the leader schedule cache should
// also be updated with the latest root (done in blockstore_processor) and thus
// will provide a schedule to window_service for any incoming shreds up to the
// last_confirmed_epoch.
cluster_info.push_lowest_slot(id, blockstore.lowest_slot());
cluster_info.write().unwrap().push_epoch_slots(
id,
root,
blockstore.lowest_slot(),
slots_in_gossip.clone(),
old_incomplete_slots,
);
}
fn update_completed_slots(
// Update the gossiped structure used for the "Repairmen" repair protocol. See docs
// for details.
fn update_epoch_slots(
id: Pubkey,
latest_known_root: Slot,
lowest_slot: Slot,
completed_slot_cache: &mut BTreeSet<Slot>,
incomplete_slot_stash: &mut BTreeSet<Slot>,
cluster_info: &RwLock<ClusterInfo>,
completed_slots_receiver: &CompletedSlotsReceiver,
cluster_info: &ClusterInfo,
) {
let mut slots: Vec<Slot> = vec![];
while let Ok(mut more) = completed_slots_receiver.try_recv() {
slots.append(&mut more);
let mut should_update = false;
while let Ok(completed_slots) = completed_slots_receiver.try_recv() {
for slot in completed_slots {
let last_slot_in_stash = *incomplete_slot_stash.iter().next_back().unwrap_or(&0);
let removed_from_stash = incomplete_slot_stash.remove(&slot);
// If the newly completed slot was not being tracked in stash, and is > last
// slot being tracked in stash, add it to cache. Also, update gossip
if !removed_from_stash && slot >= last_slot_in_stash {
should_update |= completed_slot_cache.insert(slot);
}
// If the slot was removed from stash, update gossip
should_update |= removed_from_stash;
}
}
slots.sort();
if !slots.is_empty() {
cluster_info.push_epoch_slots(&slots);
if should_update {
if completed_slot_cache.len() >= COMPLETED_SLOT_CACHE_FLUSH_TRIGGER {
Self::stash_old_incomplete_slots(completed_slot_cache, incomplete_slot_stash);
let lowest_completed_slot_in_cache =
*completed_slot_cache.iter().next().unwrap_or(&0);
Self::prune_incomplete_slot_stash(
incomplete_slot_stash,
lowest_completed_slot_in_cache,
);
}
cluster_info.write().unwrap().push_epoch_slots(
id,
latest_known_root,
lowest_slot,
completed_slot_cache.clone(),
incomplete_slot_stash,
);
}
}
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &ClusterInfo) {
cluster_info.push_lowest_slot(*id, lowest_slot);
fn stash_old_incomplete_slots(cache: &mut BTreeSet<Slot>, stash: &mut BTreeSet<Slot>) {
if cache.len() > MAX_COMPLETED_SLOT_CACHE_LEN {
let mut prev = *cache.iter().next().expect("Expected to find some slot");
cache.remove(&prev);
while cache.len() >= MAX_COMPLETED_SLOT_CACHE_LEN {
let next = *cache.iter().next().expect("Expected to find some slot");
cache.remove(&next);
// Prev slot and next slot are not included in incomplete slot list.
(prev + 1..next).for_each(|slot| {
stash.insert(slot);
});
prev = next;
}
}
}
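stash_old_incomplete_slots() trims the oldest entries out of the completed-slot cache once it exceeds MAX_COMPLETED_SLOT_CACHE_LEN and records any gaps between consecutive trimmed slots as incomplete. A standalone sketch of that step; MAX_LEN and the function name are illustrative stand-ins:

```rust
use std::collections::BTreeSet;

const MAX_LEN: usize = 4;

fn stash_old(cache: &mut BTreeSet<u64>, stash: &mut BTreeSet<u64>) {
    if cache.len() > MAX_LEN {
        let mut prev = *cache.iter().next().unwrap();
        cache.remove(&prev);
        while cache.len() >= MAX_LEN {
            let next = *cache.iter().next().unwrap();
            cache.remove(&next);
            // Slots strictly between two trimmed completed slots were never completed.
            (prev + 1..next).for_each(|slot| {
                stash.insert(slot);
            });
            prev = next;
        }
    }
}

fn main() {
    let mut cache: BTreeSet<u64> = vec![0, 2, 3, 4, 5, 6].into_iter().collect();
    let mut stash = BTreeSet::new();
    stash_old(&mut cache, &mut stash);
    assert!(stash.contains(&1));          // the gap between 0 and 2 is stashed
    assert_eq!(cache.len(), MAX_LEN - 1); // the cache is trimmed below its limit
}
```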
fn initialize_epoch_slots(
blockstore: &Blockstore,
cluster_info: &ClusterInfo,
completed_slots_receiver: &CompletedSlotsReceiver,
fn prune_incomplete_slot_stash(
stash: &mut BTreeSet<Slot>,
lowest_completed_slot_in_cache: Slot,
) {
let root = blockstore.last_root();
let mut slots: Vec<_> = blockstore
if let Some(oldest_incomplete_slot) = stash.iter().next() {
// Prune old slots in batches to reduce overhead. Pruning starts once the oldest
// stashed slot is more than 1.5 epochs older than the lowest completed slot in the
// cache, and then everything older than 1 epoch is dropped, so roughly half an
// epoch of slots is pruned per batch.
if oldest_incomplete_slot + DEFAULT_SLOTS_PER_EPOCH + DEFAULT_SLOTS_PER_EPOCH / 2
< lowest_completed_slot_in_cache
{
let oldest_slot_to_retain =
lowest_completed_slot_in_cache.saturating_sub(DEFAULT_SLOTS_PER_EPOCH);
*stash = stash
.range((Included(&oldest_slot_to_retain), Unbounded))
.cloned()
.collect();
}
}
}
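prune_incomplete_slot_stash() only acts once the oldest stashed slot lags the lowest completed slot in the cache by more than 1.5 epochs, and it then retains only the most recent epoch's worth, so each batch drops roughly half an epoch of slots. A standalone sketch of the arithmetic; SLOTS_PER_EPOCH here is an illustrative stand-in for DEFAULT_SLOTS_PER_EPOCH:

```rust
use std::collections::BTreeSet;

const SLOTS_PER_EPOCH: u64 = 100;

fn prune(stash: &mut BTreeSet<u64>, lowest_completed_slot_in_cache: u64) {
    if let Some(&oldest) = stash.iter().next() {
        // Trigger only when the oldest stashed slot lags by more than 1.5 epochs.
        if oldest + SLOTS_PER_EPOCH + SLOTS_PER_EPOCH / 2 < lowest_completed_slot_in_cache {
            let oldest_to_retain =
                lowest_completed_slot_in_cache.saturating_sub(SLOTS_PER_EPOCH);
            // Keep only the slots within one epoch of the lowest completed slot.
            *stash = stash.range(oldest_to_retain..).cloned().collect();
        }
    }
}

fn main() {
    let mut stash: BTreeSet<u64> = (0..200).collect();
    prune(&mut stash, 140); // 0 + 150 < 140 is false: nothing pruned yet
    assert_eq!(stash.len(), 200);
    prune(&mut stash, 151); // 0 + 150 < 151: retain slots >= 151 - 100 = 51
    assert_eq!(*stash.iter().next().unwrap(), 51);
}
```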
#[allow(dead_code)]
fn find_incomplete_slots(blockstore: &Blockstore, root: Slot) -> HashSet<Slot> {
blockstore
.live_slots_iterator(root)
.filter_map(|(slot, slot_meta)| {
if slot_meta.is_full() {
if !slot_meta.is_full() {
Some(slot)
} else {
None
}
})
.collect();
while let Ok(mut more) = completed_slots_receiver.try_recv() {
slots.append(&mut more);
}
slots.sort();
slots.dedup();
if !slots.is_empty() {
cluster_info.push_epoch_slots(&slots);
}
.collect()
}
pub fn join(self) -> thread::Result<()> {
@@ -361,11 +463,15 @@ impl RepairService {
mod test {
use super::*;
use crate::cluster_info::Node;
use itertools::Itertools;
use rand::seq::SliceRandom;
use rand::{thread_rng, Rng};
use solana_ledger::blockstore::{
make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
};
use solana_ledger::shred::max_ticks_per_n_shreds;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use std::thread::Builder;
#[test]
pub fn test_repair_orphan() {
@@ -581,15 +687,342 @@ mod test {
}
#[test]
pub fn test_update_lowest_slot() {
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info.clone());
RepairService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
let lowest = cluster_info
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
lowest_slot.clone()
})
.unwrap();
assert_eq!(lowest.lowest, 5);
pub fn test_get_completed_slots_past_root() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 10;
let root = 10;
let fork1 = vec![5, 7, root, 15, 20, 21];
let fork1_shreds: Vec<_> = make_chaining_slot_entries(&fork1, num_entries_per_slot)
.into_iter()
.flat_map(|(shreds, _)| shreds)
.collect();
let fork2 = vec![8, 12];
let fork2_shreds = make_chaining_slot_entries(&fork2, num_entries_per_slot);
// Remove the last shred from each slot to make an incomplete slot
let fork2_incomplete_shreds: Vec<_> = fork2_shreds
.into_iter()
.flat_map(|(mut shreds, _)| {
shreds.pop();
shreds
})
.collect();
let mut full_slots = BTreeSet::new();
blockstore.insert_shreds(fork1_shreds, None, false).unwrap();
blockstore
.insert_shreds(fork2_incomplete_shreds, None, false)
.unwrap();
// Test that only slots > root from fork1 were included
let epoch_schedule = EpochSchedule::custom(32, 32, false);
RepairService::get_completed_slots_past_root(
&blockstore,
&mut full_slots,
root,
&epoch_schedule,
);
let mut expected: BTreeSet<_> = fork1.into_iter().filter(|x| *x > root).collect();
assert_eq!(full_slots, expected);
// Test that slots past the last confirmed epoch boundary don't get included
let last_epoch = epoch_schedule.get_leader_schedule_epoch(root);
let last_slot = epoch_schedule.get_last_slot_in_epoch(last_epoch);
let fork3 = vec![last_slot, last_slot + 1];
let fork3_shreds: Vec<_> = make_chaining_slot_entries(&fork3, num_entries_per_slot)
.into_iter()
.flat_map(|(shreds, _)| shreds)
.collect();
blockstore.insert_shreds(fork3_shreds, None, false).unwrap();
RepairService::get_completed_slots_past_root(
&blockstore,
&mut full_slots,
root,
&epoch_schedule,
);
expected.insert(last_slot);
assert_eq!(full_slots, expected);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_update_epoch_slots() {
let blockstore_path = get_tmp_ledger_path!();
{
// Create blockstore
let (blockstore, _, completed_slots_receiver) =
Blockstore::open_with_signal(&blockstore_path).unwrap();
let blockstore = Arc::new(blockstore);
let mut root = 0;
let num_slots = 100;
let entries_per_slot = 5;
let blockstore_ = blockstore.clone();
// Spin up thread to write to blockstore
let writer = Builder::new()
.name("writer".to_string())
.spawn(move || {
let slots: Vec<_> = (1..num_slots + 1).collect();
let mut shreds: Vec<_> = make_chaining_slot_entries(&slots, entries_per_slot)
.into_iter()
.flat_map(|(shreds, _)| shreds)
.collect();
shreds.shuffle(&mut thread_rng());
let mut i = 0;
let max_step = entries_per_slot * 4;
let repair_interval_ms = 10;
let mut rng = rand::thread_rng();
let num_shreds = shreds.len();
while i < num_shreds {
let step = rng.gen_range(1, max_step + 1) as usize;
let step = std::cmp::min(step, num_shreds - i);
let shreds_to_insert = shreds.drain(..step).collect_vec();
blockstore_
.insert_shreds(shreds_to_insert, None, false)
.unwrap();
sleep(Duration::from_millis(repair_interval_ms));
i += step;
}
})
.unwrap();
let mut completed_slots = BTreeSet::new();
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
let cluster_info = RwLock::new(ClusterInfo::new_with_invalid_keypair(
node_info.info.clone(),
));
let mut old_incomplete_slots: BTreeSet<Slot> = BTreeSet::new();
while completed_slots.len() < num_slots as usize {
RepairService::update_epoch_slots(
Pubkey::default(),
root,
blockstore.lowest_slot(),
&mut completed_slots,
&mut old_incomplete_slots,
&cluster_info,
&completed_slots_receiver,
);
}
let mut expected: BTreeSet<_> = (1..num_slots + 1).collect();
assert_eq!(completed_slots, expected);
// Update with new root, should filter out the slots <= root
root = num_slots / 2;
let (shreds, _) = make_slot_entries(num_slots + 2, num_slots + 1, entries_per_slot);
blockstore.insert_shreds(shreds, None, false).unwrap();
RepairService::update_epoch_slots(
Pubkey::default(),
root,
0,
&mut completed_slots,
&mut old_incomplete_slots,
&cluster_info,
&completed_slots_receiver,
);
expected.insert(num_slots + 2);
assert_eq!(completed_slots, expected);
writer.join().unwrap();
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
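
The writer thread above trickles shuffled shreds into the blockstore in random-sized batches so the polling side is exercised against partial inserts. A small standalone sketch of that trickle-insert pattern, assuming the rand 0.7 API used in this diff (`insert_batch` is a hypothetical stand-in for `blockstore.insert_shreds()`):

use rand::{seq::SliceRandom, thread_rng, Rng};

// Shuffle the items, then repeatedly drain a random-sized batch from the front
// and hand it to the caller-supplied insert function.
fn trickle_insert<T>(mut items: Vec<T>, max_step: usize, mut insert_batch: impl FnMut(Vec<T>)) {
    let mut rng = thread_rng();
    items.shuffle(&mut rng);
    while !items.is_empty() {
        let step = rng.gen_range(1, max_step + 1).min(items.len());
        let batch: Vec<T> = items.drain(..step).collect();
        insert_batch(batch);
    }
}

fn main() {
    let mut total = 0;
    trickle_insert((0..100).collect(), 7, |batch| total += batch.len());
    assert_eq!(total, 100);
}
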
#[test]
fn test_stash_old_incomplete_slots() {
let mut cache: BTreeSet<Slot> = BTreeSet::new();
let mut stash: BTreeSet<Slot> = BTreeSet::new();
// When cache is empty.
RepairService::stash_old_incomplete_slots(&mut cache, &mut stash);
assert_eq!(stash.len(), 0);
// Insert some slots in cache ( < MAX_COMPLETED_SLOT_CACHE_LEN + 1)
cache.insert(101);
cache.insert(102);
cache.insert(104);
cache.insert(105);
// Not enough slots in cache. So stash should remain empty.
RepairService::stash_old_incomplete_slots(&mut cache, &mut stash);
assert_eq!(stash.len(), 0);
assert_eq!(cache.len(), 4);
// Insert slots in cache ( = MAX_COMPLETED_SLOT_CACHE_LEN)
let mut cache: BTreeSet<Slot> = BTreeSet::new();
(0..MAX_COMPLETED_SLOT_CACHE_LEN as u64)
.into_iter()
.for_each(|slot| {
cache.insert(slot);
});
// Not enough slots in cache. So stash should remain empty.
RepairService::stash_old_incomplete_slots(&mut cache, &mut stash);
assert_eq!(stash.len(), 0);
assert_eq!(cache.len(), MAX_COMPLETED_SLOT_CACHE_LEN);
// Insert 1 more to cross the threshold
cache.insert(MAX_COMPLETED_SLOT_CACHE_LEN as u64);
RepairService::stash_old_incomplete_slots(&mut cache, &mut stash);
// Stash is still empty, as no missing slots
assert_eq!(stash.len(), 0);
// It removed some entries from cache
assert_eq!(cache.len(), MAX_COMPLETED_SLOT_CACHE_LEN - 1);
// Insert more slots to create a missing slot
let mut cache: BTreeSet<Slot> = BTreeSet::new();
cache.insert(0);
(2..=MAX_COMPLETED_SLOT_CACHE_LEN as u64 + 2)
.into_iter()
.for_each(|slot| {
cache.insert(slot);
});
RepairService::stash_old_incomplete_slots(&mut cache, &mut stash);
// Stash is not empty
assert!(stash.contains(&1));
// It removed some entries from cache
assert_eq!(cache.len(), MAX_COMPLETED_SLOT_CACHE_LEN - 1);
// Test multiple missing slots at dispersed locations
let mut cache: BTreeSet<Slot> = BTreeSet::new();
(0..MAX_COMPLETED_SLOT_CACHE_LEN as u64 * 2)
.into_iter()
.for_each(|slot| {
cache.insert(slot);
});
cache.remove(&10);
cache.remove(&11);
cache.remove(&28);
cache.remove(&29);
cache.remove(&148);
cache.remove(&149);
cache.remove(&150);
cache.remove(&151);
RepairService::stash_old_incomplete_slots(&mut cache, &mut stash);
// Stash is not empty
assert!(stash.contains(&10));
assert!(stash.contains(&11));
assert!(stash.contains(&28));
assert!(stash.contains(&29));
assert!(stash.contains(&148));
assert!(stash.contains(&149));
assert!(stash.contains(&150));
assert!(stash.contains(&151));
assert!(!stash.contains(&147));
assert!(!stash.contains(&152));
// It removed some entries from cache
assert_eq!(cache.len(), MAX_COMPLETED_SLOT_CACHE_LEN - 1);
(MAX_COMPLETED_SLOT_CACHE_LEN + 1..MAX_COMPLETED_SLOT_CACHE_LEN * 2)
.into_iter()
.for_each(|slot| {
let slot: u64 = slot as u64;
assert!(cache.contains(&slot));
});
}
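
The assertions above expect every slot number missing between consecutive cached slots to land in the stash. A minimal standalone sketch of that gap-finding idea (illustrative names, not the crate's actual implementation):

use std::collections::BTreeSet;

type Slot = u64;

// Collect every slot number missing between consecutive entries of an ordered
// cache, which is what the test expects the stash to end up holding.
fn missing_slots(cache: &BTreeSet<Slot>) -> BTreeSet<Slot> {
    let mut gaps = BTreeSet::new();
    let mut iter = cache.iter().copied();
    if let Some(mut prev) = iter.next() {
        for slot in iter {
            gaps.extend(prev + 1..slot); // everything strictly between prev and slot
            prev = slot;
        }
    }
    gaps
}

fn main() {
    let cache: BTreeSet<Slot> = [0, 2, 3, 4, 7].iter().copied().collect();
    let expected: BTreeSet<Slot> = [1, 5, 6].iter().copied().collect();
    assert_eq!(missing_slots(&cache), expected);
}
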
#[test]
fn test_prune_incomplete_slot_stash() {
// Prune empty stash
let mut stash: BTreeSet<Slot> = BTreeSet::new();
RepairService::prune_incomplete_slot_stash(&mut stash, 0);
assert!(stash.is_empty());
// Prune stash with slots < DEFAULT_SLOTS_PER_EPOCH
stash.insert(0);
stash.insert(10);
stash.insert(11);
stash.insert(50);
assert_eq!(stash.len(), 4);
RepairService::prune_incomplete_slot_stash(&mut stash, 100);
assert_eq!(stash.len(), 4);
// Prune stash with slots > DEFAULT_SLOTS_PER_EPOCH, but < 1.5 * DEFAULT_SLOTS_PER_EPOCH
stash.insert(DEFAULT_SLOTS_PER_EPOCH + 50);
assert_eq!(stash.len(), 5);
RepairService::prune_incomplete_slot_stash(&mut stash, DEFAULT_SLOTS_PER_EPOCH + 100);
assert_eq!(stash.len(), 5);
// Prune stash with slots > 1.5 * DEFAULT_SLOTS_PER_EPOCH
stash.insert(DEFAULT_SLOTS_PER_EPOCH + DEFAULT_SLOTS_PER_EPOCH / 2);
assert_eq!(stash.len(), 6);
RepairService::prune_incomplete_slot_stash(
&mut stash,
DEFAULT_SLOTS_PER_EPOCH + DEFAULT_SLOTS_PER_EPOCH / 2 + 1,
);
assert_eq!(stash.len(), 2);
}
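
The prune test drops old entries once the stash reaches far enough behind the supplied slot while keeping the newest ones; `BTreeSet::split_off` expresses that kind of cutoff compactly. A rough sketch under the assumption that the caller supplies the cutoff slot (the real code derives its threshold from the slot and DEFAULT_SLOTS_PER_EPOCH):

use std::collections::BTreeSet;

type Slot = u64;

// Hypothetical pruning helper: keep only stashed slots at or above `cutoff`.
// `split_off(&k)` moves every element >= k into the returned set, so the old
// entries left behind in `stash` are simply dropped.
fn prune_stash(stash: &mut BTreeSet<Slot>, cutoff: Slot) {
    let keep = stash.split_off(&cutoff);
    *stash = keep;
}

fn main() {
    let mut stash: BTreeSet<Slot> = [0, 10, 11, 50, 500_000].iter().copied().collect();
    prune_stash(&mut stash, 100);
    assert_eq!(stash.len(), 1);
    assert!(stash.contains(&500_000));
}
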
#[test]
fn test_find_incomplete_slots() {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 100;
let (mut shreds, _) = make_slot_entries(0, 0, num_entries_per_slot);
assert!(shreds.len() > 1);
let (shreds4, _) = make_slot_entries(4, 0, num_entries_per_slot);
shreds.extend(shreds4);
blockstore.insert_shreds(shreds, None, false).unwrap();
// Nothing is incomplete
assert!(RepairService::find_incomplete_slots(&blockstore, 0).is_empty());
// Insert a slot 5 that chains to an incomplete orphan slot 3
let (shreds5, _) = make_slot_entries(5, 3, num_entries_per_slot);
blockstore.insert_shreds(shreds5, None, false).unwrap();
assert_eq!(
RepairService::find_incomplete_slots(&blockstore, 0),
vec![3].into_iter().collect()
);
// Insert another incomplete orphan slot 2 that is the parent of slot 3.
// Both should be incomplete
let (shreds3, _) = make_slot_entries(3, 2, num_entries_per_slot);
blockstore
.insert_shreds(shreds3[1..].to_vec(), None, false)
.unwrap();
assert_eq!(
RepairService::find_incomplete_slots(&blockstore, 0),
vec![2, 3].into_iter().collect()
);
// Insert an incomplete slot 6 that chains to the root 0,
// should also be incomplete
let (shreds6, _) = make_slot_entries(6, 0, num_entries_per_slot);
blockstore
.insert_shreds(shreds6[1..].to_vec(), None, false)
.unwrap();
assert_eq!(
RepairService::find_incomplete_slots(&blockstore, 0),
vec![2, 3, 6].into_iter().collect()
);
// Complete slot 3, should no longer be marked incomplete
blockstore
.insert_shreds(shreds3[..].to_vec(), None, false)
.unwrap();
assert_eq!(
RepairService::find_incomplete_slots(&blockstore, 0),
vec![2, 6].into_iter().collect()
);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
}

File diff suppressed because it is too large

View File

@@ -2,9 +2,10 @@
use crate::{
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
cluster_slots::ClusterSlots,
packet::Packets,
repair_service::RepairStrategy,
result::{Error, Result},
streamer::PacketReceiver,
window_service::{should_retransmit_and_persist, WindowService},
};
use crossbeam_channel::Receiver as CrossbeamReceiver;
@@ -16,16 +17,11 @@ use solana_ledger::{
};
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_error;
use solana_perf::packet::Packets;
use solana_sdk::clock::Slot;
use solana_sdk::epoch_schedule::EpochSchedule;
use solana_sdk::timing::timestamp;
use solana_streamer::streamer::PacketReceiver;
use std::{
cmp,
collections::{BTreeMap, HashMap},
net::UdpSocket,
sync::atomic::{AtomicBool, AtomicU64, Ordering},
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::channel,
sync::mpsc::RecvTimeoutError,
sync::Mutex,
@@ -38,123 +34,13 @@ use std::{
// it doesn't pull up too much work.
const MAX_PACKET_BATCH_SIZE: usize = 100;
#[derive(Default)]
struct RetransmitStats {
total_packets: AtomicU64,
total_batches: AtomicU64,
total_time: AtomicU64,
repair_total: AtomicU64,
discard_total: AtomicU64,
retransmit_total: AtomicU64,
last_ts: AtomicU64,
compute_turbine_peers_total: AtomicU64,
packets_by_slot: Mutex<BTreeMap<Slot, usize>>,
packets_by_source: Mutex<BTreeMap<String, usize>>,
}
#[allow(clippy::too_many_arguments)]
fn update_retransmit_stats(
stats: &Arc<RetransmitStats>,
total_time: u64,
total_packets: usize,
retransmit_total: u64,
discard_total: u64,
repair_total: u64,
compute_turbine_peers_total: u64,
peers_len: usize,
packets_by_slot: HashMap<Slot, usize>,
packets_by_source: HashMap<String, usize>,
) {
stats.total_time.fetch_add(total_time, Ordering::Relaxed);
stats
.total_packets
.fetch_add(total_packets as u64, Ordering::Relaxed);
stats
.retransmit_total
.fetch_add(retransmit_total, Ordering::Relaxed);
stats
.repair_total
.fetch_add(repair_total, Ordering::Relaxed);
stats
.discard_total
.fetch_add(discard_total, Ordering::Relaxed);
stats
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers_total, Ordering::Relaxed);
stats.total_batches.fetch_add(1, Ordering::Relaxed);
{
let mut stats_packets_by_slot = stats.packets_by_slot.lock().unwrap();
for (slot, count) in packets_by_slot {
*stats_packets_by_slot.entry(slot).or_insert(0) += count;
}
}
{
let mut stats_packets_by_source = stats.packets_by_source.lock().unwrap();
for (source, count) in packets_by_source {
*stats_packets_by_source.entry(source).or_insert(0) += count;
}
}
let now = timestamp();
let last = stats.last_ts.load(Ordering::Relaxed);
if now - last > 2000 && stats.last_ts.compare_and_swap(last, now, Ordering::Relaxed) == last {
datapoint_info!("retransmit-num_nodes", ("count", peers_len, i64));
datapoint_info!(
"retransmit-stage",
(
"total_time",
stats.total_time.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_batches",
stats.total_batches.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_packets",
stats.total_packets.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"retransmit_total",
stats.retransmit_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"compute_turbine",
stats.compute_turbine_peers_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"repair_total",
stats.repair_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"discard_total",
stats.discard_total.swap(0, Ordering::Relaxed) as i64,
i64
),
);
let mut packets_by_slot = stats.packets_by_slot.lock().unwrap();
info!("retransmit: packets_by_slot: {:#?}", packets_by_slot);
packets_by_slot.clear();
drop(packets_by_slot);
let mut packets_by_source = stats.packets_by_source.lock().unwrap();
info!("retransmit: packets_by_source: {:#?}", packets_by_source);
packets_by_source.clear();
}
}
fn retransmit(
bank_forks: &Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
cluster_info: &ClusterInfo,
cluster_info: &Arc<RwLock<ClusterInfo>>,
r: &Arc<Mutex<PacketReceiver>>,
sock: &UdpSocket,
id: u32,
stats: &Arc<RetransmitStats>,
) -> Result<()> {
let timer = Duration::new(1, 0);
let r_lock = r.lock().unwrap();
@@ -176,14 +62,15 @@ fn retransmit(
let mut peers_len = 0;
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
let stakes = stakes.map(Arc::new);
let (peers, stakes_and_index) = cluster_info.sorted_retransmit_peers_and_stakes(stakes);
let my_id = cluster_info.id();
let (peers, stakes_and_index) = cluster_info
.read()
.unwrap()
.sorted_retransmit_peers_and_stakes(stakes);
let me = cluster_info.read().unwrap().my_data();
let mut discard_total = 0;
let mut repair_total = 0;
let mut retransmit_total = 0;
let mut compute_turbine_peers_total = 0;
let mut packets_by_slot: HashMap<Slot, usize> = HashMap::new();
let mut packets_by_source: HashMap<String, usize> = HashMap::new();
for mut packets in packet_v {
for packet in packets.packets.iter_mut() {
// skip discarded packets and repair packets
@@ -200,7 +87,7 @@ fn retransmit(
let mut compute_turbine_peers = Measure::start("turbine_start");
let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
&my_id,
&me.id,
&peers,
&stakes_and_index,
packet.meta.seed,
@@ -218,12 +105,7 @@ fn retransmit(
let neighbors: Vec<_> = neighbors.into_iter().map(|index| &peers[index]).collect();
let children: Vec<_> = children.into_iter().map(|index| &peers[index]).collect();
compute_turbine_peers.stop();
compute_turbine_peers_total += compute_turbine_peers.as_us();
*packets_by_slot.entry(packet.meta.slot).or_insert(0) += 1;
*packets_by_source
.entry(packet.meta.addr().to_string())
.or_insert(0) += 1;
compute_turbine_peers_total += compute_turbine_peers.as_ms();
let leader =
leader_schedule_cache.slot_leader_at(packet.meta.slot, Some(r_bank.as_ref()));
@@ -235,7 +117,7 @@ fn retransmit(
ClusterInfo::retransmit_to(&children, packet, leader, sock, true)?;
}
retransmit_time.stop();
retransmit_total += retransmit_time.as_us();
retransmit_total += retransmit_time.as_ms();
}
}
timer_start.stop();
@@ -246,19 +128,16 @@ fn retransmit(
retransmit_total,
id,
);
update_retransmit_stats(
stats,
timer_start.as_us(),
total_packets,
retransmit_total,
discard_total,
repair_total,
compute_turbine_peers_total,
peers_len,
packets_by_slot,
packets_by_source,
datapoint_debug!("cluster_info-num_nodes", ("count", peers_len, i64));
datapoint_debug!(
"retransmit-stage",
("total_time", timer_start.as_ms() as i64, i64),
("total_packets", total_packets as i64, i64),
("retransmit_total", retransmit_total as i64, i64),
("compute_turbine", compute_turbine_peers_total as i64, i64),
("repair_total", i64::from(repair_total), i64),
("discard_total", i64::from(discard_total), i64),
);
Ok(())
}
@@ -274,10 +153,9 @@ pub fn retransmitter(
sockets: Arc<Vec<UdpSocket>>,
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
cluster_info: Arc<ClusterInfo>,
cluster_info: Arc<RwLock<ClusterInfo>>,
r: Arc<Mutex<PacketReceiver>>,
) -> Vec<JoinHandle<()>> {
let stats = Arc::new(RetransmitStats::default());
(0..sockets.len())
.map(|s| {
let sockets = sockets.clone();
@@ -285,7 +163,6 @@ pub fn retransmitter(
let leader_schedule_cache = leader_schedule_cache.clone();
let r = r.clone();
let cluster_info = cluster_info.clone();
let stats = stats.clone();
Builder::new()
.name("solana-retransmitter".to_string())
@@ -299,7 +176,6 @@ pub fn retransmitter(
&r,
&sockets[s],
s as u32,
&stats,
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
@@ -329,7 +205,7 @@ impl RetransmitStage {
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
blockstore: Arc<Blockstore>,
cluster_info: &Arc<ClusterInfo>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
@@ -338,7 +214,6 @@ impl RetransmitStage {
epoch_schedule: EpochSchedule,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
cluster_slots: Arc<ClusterSlots>,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();
@@ -381,7 +256,6 @@ impl RetransmitStage {
);
rv && is_connected
},
cluster_slots,
);
let thread_hdls = t_retransmit;
@@ -404,11 +278,11 @@ impl RetransmitStage {
mod tests {
use super::*;
use crate::contact_info::ContactInfo;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use crate::packet::{self, Meta, Packet, Packets};
use solana_ledger::blockstore_processor::{process_blockstore, ProcessOptions};
use solana_ledger::create_new_tmp_ledger;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_net_utils::find_available_port_in_range;
use solana_perf::packet::{Meta, Packet, Packets};
use solana_sdk::pubkey::Pubkey;
use std::net::{IpAddr, Ipv4Addr};
@@ -439,11 +313,11 @@ mod tests {
.unwrap();
let other = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(other);
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(other);
cluster_info.insert_info(me);
let retransmit_socket = Arc::new(vec![UdpSocket::bind("0.0.0.0:0").unwrap()]);
let cluster_info = Arc::new(cluster_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let (retransmit_sender, retransmit_receiver) = channel();
let t_retransmit = retransmitter(
@@ -459,7 +333,7 @@ mod tests {
// it should send this over the sockets.
retransmit_sender.send(packets).unwrap();
let mut packets = Packets::new(vec![]);
solana_streamer::packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert_eq!(packets.packets[0].meta.repair, false);
@@ -475,7 +349,7 @@ mod tests {
let packets = Packets::new(vec![repair, Packet::default()]);
retransmit_sender.send(packets).unwrap();
let mut packets = Packets::new(vec![]);
solana_streamer::packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert_eq!(packets.packets[0].meta.repair, false);
}
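
The RetransmitStats code in this diff accumulates per-batch counters into shared atomics and drains them with swap(0) when a datapoint is emitted. A small self-contained sketch of that counter pattern (illustrative names, not the actual RetransmitStats API):

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// Workers fetch_add into shared atomics; the reporter drains them with
// swap(0), so increments that arrive between reports are never lost.
#[derive(Default)]
struct Stats {
    total_packets: AtomicU64,
    retransmit_total: AtomicU64,
}

fn record(stats: &Arc<Stats>, packets: u64, retransmit_us: u64) {
    stats.total_packets.fetch_add(packets, Ordering::Relaxed);
    stats.retransmit_total.fetch_add(retransmit_us, Ordering::Relaxed);
}

fn report(stats: &Arc<Stats>) -> (u64, u64) {
    (
        stats.total_packets.swap(0, Ordering::Relaxed),
        stats.retransmit_total.swap(0, Ordering::Relaxed),
    )
}

fn main() {
    let stats = Arc::new(Stats::default());
    record(&stats, 100, 250);
    record(&stats, 50, 125);
    assert_eq!(report(&stats), (150, 375));
    assert_eq!(report(&stats), (0, 0)); // counters were reset by the first report
}
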

View File

@@ -5,6 +5,7 @@ use crate::{
commitment::{BlockCommitmentArray, BlockCommitmentCache},
contact_info::ContactInfo,
non_circulating_supply::calculate_non_circulating_supply,
packet::PACKET_DATA_SIZE,
rpc_error::RpcCustomError,
storage_stage::StorageState,
validator::ValidatorExit,
@@ -12,19 +13,11 @@ use crate::{
use bincode::serialize;
use jsonrpc_core::{Error, Metadata, Result};
use jsonrpc_derive::rpc;
use solana_client::{
rpc_config::*,
rpc_request::{
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, NUM_LARGEST_ACCOUNTS,
},
rpc_response::*,
};
use solana_client::{rpc_config::*, rpc_response::*};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_ledger::{
bank_forks::BankForks, blockstore::Blockstore, blockstore_db::BlockstoreError,
};
use solana_perf::packet::PACKET_DATA_SIZE;
use solana_runtime::{accounts::AccountAddressFilter, bank::Bank};
use solana_sdk::{
clock::{Slot, UnixTimestamp},
@@ -51,6 +44,10 @@ use std::{
time::{Duration, Instant},
};
const MAX_QUERY_ITEMS: usize = 256;
const MAX_SLOT_RANGE: u64 = 10_000;
const NUM_LARGEST_ACCOUNTS: usize = 20;
type RpcResponse<T> = Result<Response<T>>;
fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
@@ -81,42 +78,28 @@ impl JsonRpcRequestProcessor {
fn bank(&self, commitment: Option<CommitmentConfig>) -> Result<Arc<Bank>> {
debug!("RPC commitment_config: {:?}", commitment);
let r_bank_forks = self.bank_forks.read().unwrap();
match commitment {
Some(commitment_config) if commitment_config.commitment == CommitmentLevel::Recent => {
let bank = r_bank_forks.working_bank();
debug!("RPC using working_bank: {:?}", bank.slot());
Ok(bank)
}
Some(commitment_config) if commitment_config.commitment == CommitmentLevel::Root => {
let slot = r_bank_forks.root();
debug!("RPC using node root: {:?}", slot);
Ok(r_bank_forks.get(slot).cloned().unwrap())
}
Some(commitment_config) if commitment_config.commitment == CommitmentLevel::Single => {
let slot = self
.block_commitment_cache
.read()
.unwrap()
.highest_confirmed_slot();
debug!("RPC using confirmed slot: {:?}", slot);
Ok(r_bank_forks.get(slot).cloned().unwrap())
}
_ => {
let cluster_root = self
.block_commitment_cache
.read()
.unwrap()
.largest_confirmed_root();
debug!("RPC using block: {:?}", cluster_root);
r_bank_forks.get(cluster_root).cloned().ok_or_else(|| {
RpcCustomError::NonexistentClusterRoot {
cluster_root,
node_root: r_bank_forks.root(),
}
.into()
})
}
if commitment.is_some() && commitment.unwrap().commitment == CommitmentLevel::Recent {
let bank = r_bank_forks.working_bank();
debug!("RPC using working_bank: {:?}", bank.slot());
Ok(bank)
} else if commitment.is_some() && commitment.unwrap().commitment == CommitmentLevel::Root {
let slot = r_bank_forks.root();
debug!("RPC using node root: {:?}", slot);
Ok(r_bank_forks.get(slot).cloned().unwrap())
} else {
let cluster_root = self
.block_commitment_cache
.read()
.unwrap()
.largest_confirmed_root();
debug!("RPC using block: {:?}", cluster_root);
r_bank_forks.get(cluster_root).cloned().ok_or_else(|| {
RpcCustomError::NonexistentClusterRoot {
cluster_root,
node_root: r_bank_forks.root(),
}
.into()
})
}
}
@@ -294,7 +277,7 @@ impl JsonRpcRequestProcessor {
let config = config.unwrap_or_default();
let bank = self.bank(config.commitment)?;
let (addresses, address_filter) = if let Some(filter) = config.filter {
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
let addresses = non_circulating_supply.accounts.into_iter().collect();
let address_filter = match filter {
RpcLargestAccountsFilter::Circulating => AccountAddressFilter::Exclude,
@@ -318,7 +301,7 @@ impl JsonRpcRequestProcessor {
fn get_supply(&self, commitment: Option<CommitmentConfig>) -> RpcResponse<RpcSupply> {
let bank = self.bank(commitment)?;
let non_circulating_supply = calculate_non_circulating_supply(bank.clone());
let non_circulating_supply = calculate_non_circulating_supply(&bank);
let total_supply = bank.capitalization();
new_response(
&bank,
@@ -551,10 +534,7 @@ impl JsonRpcRequestProcessor {
signature: Signature,
commitment: Option<CommitmentConfig>,
) -> Option<transaction::Result<()>> {
self.bank(commitment)
.ok()?
.get_signature_status_slot(&signature)
.map(|(_, status)| status)
self.bank(commitment).ok()?.get_signature_status(&signature)
}
pub fn get_signature_statuses(
@@ -691,23 +671,27 @@ impl JsonRpcRequestProcessor {
}
}
fn get_tpu_addr(cluster_info: &ClusterInfo) -> Result<SocketAddr> {
let contact_info = cluster_info.my_contact_info();
fn get_tpu_addr(cluster_info: &Arc<RwLock<ClusterInfo>>) -> Result<SocketAddr> {
let contact_info = cluster_info.read().unwrap().my_data();
Ok(contact_info.tpu)
}
fn verify_pubkey(input: String) -> Result<Pubkey> {
input.parse().map_err(|_e| Error::invalid_request())
input
.parse()
.map_err(|e| Error::invalid_params(format!("{:?}", e)))
}
fn verify_signature(input: &str) -> Result<Signature> {
input.parse().map_err(|_e| Error::invalid_request())
input
.parse()
.map_err(|e| Error::invalid_params(format!("{:?}", e)))
}
#[derive(Clone)]
pub struct Meta {
pub request_processor: Arc<RwLock<JsonRpcRequestProcessor>>,
pub cluster_info: Arc<ClusterInfo>,
pub cluster_info: Arc<RwLock<ClusterInfo>>,
pub genesis_hash: Hash,
}
impl Metadata for Meta {}
@@ -882,6 +866,14 @@ pub trait RpcSol {
#[rpc(meta, name = "sendTransaction")]
fn send_transaction(&self, meta: Self::Metadata, data: String) -> Result<String>;
#[rpc(meta, name = "simulateTransaction")]
fn simulate_transaction(
&self,
meta: Self::Metadata,
data: String,
config: Option<RpcSimulateTransactionConfig>,
) -> RpcResponse<TransactionStatus>;
#[rpc(meta, name = "getSlotLeader")]
fn get_slot_leader(
&self,
@@ -1064,7 +1056,7 @@ impl RpcSol for RpcSolImpl {
}
fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result<Vec<RpcContactInfo>> {
let cluster_info = &meta.cluster_info;
let cluster_info = meta.cluster_info.read().unwrap();
fn valid_address_or_none(addr: &SocketAddr) -> Option<SocketAddr> {
if ContactInfo::is_valid_address(addr) {
Some(*addr)
@@ -1072,12 +1064,12 @@ impl RpcSol for RpcSolImpl {
None
}
}
let my_shred_version = cluster_info.my_shred_version();
let shred_version = cluster_info.my_data().shred_version;
Ok(cluster_info
.all_peers()
.iter()
.filter_map(|(contact_info, _)| {
if my_shred_version == contact_info.shred_version
if shred_version == contact_info.shred_version
&& ContactInfo::is_valid_address(&contact_info.gossip)
{
Some(RpcContactInfo {
@@ -1230,10 +1222,10 @@ impl RpcSol for RpcSolImpl {
signature_strs: Vec<String>,
config: Option<RpcSignatureStatusConfig>,
) -> RpcResponse<Vec<Option<TransactionStatus>>> {
if signature_strs.len() > MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS {
if signature_strs.len() > MAX_QUERY_ITEMS {
return Err(Error::invalid_params(format!(
"Too many inputs provided; max {}",
MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS
MAX_QUERY_ITEMS
)));
}
let mut signatures: Vec<Signature> = vec![];
@@ -1378,41 +1370,67 @@ impl RpcSol for RpcSolImpl {
}
fn send_transaction(&self, meta: Self::Metadata, data: String) -> Result<String> {
let data = bs58::decode(data).into_vec().unwrap();
if data.len() >= PACKET_DATA_SIZE {
info!(
"send_transaction: transaction too large: {} bytes (max: {} bytes)",
data.len(),
PACKET_DATA_SIZE
);
return Err(Error::invalid_request());
}
let tx: Transaction = bincode::config()
.limit(PACKET_DATA_SIZE as u64)
.deserialize(&data)
.map_err(|err| {
info!("send_transaction: deserialize error: {:?}", err);
Error::invalid_request()
})?;
let (wire_transaction, transaction) = deserialize_bs58_transaction(data)?;
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let tpu_addr = get_tpu_addr(&meta.cluster_info)?;
trace!("send_transaction: leader is {:?}", &tpu_addr);
transactions_socket
.send_to(&data, tpu_addr)
.send_to(&wire_transaction, tpu_addr)
.map_err(|err| {
info!("send_transaction: send_to error: {:?}", err);
Error::internal_error()
})?;
let signature = tx.signatures[0].to_string();
let signature = transaction.signatures[0].to_string();
trace!(
"send_transaction: sent {} bytes, signature={}",
data.len(),
wire_transaction.len(),
signature
);
Ok(signature)
}
fn simulate_transaction(
&self,
meta: Self::Metadata,
data: String,
config: Option<RpcSimulateTransactionConfig>,
) -> RpcResponse<TransactionStatus> {
let (_, transaction) = deserialize_bs58_transaction(data)?;
let config = config.unwrap_or(RpcSimulateTransactionConfig { sig_verify: false });
let bank = &*meta.request_processor.read().unwrap().bank(None)?;
assert!(bank.is_frozen());
let mut result = if config.sig_verify {
transaction.verify()
} else {
Ok(())
};
if result.is_ok() {
let transactions = [transaction];
let batch = bank.prepare_batch(&transactions, None);
let (
_loaded_accounts,
executed,
_retryable_transactions,
_transaction_count,
_signature_count,
) = bank.load_and_execute_transactions(&batch, solana_sdk::clock::MAX_PROCESSING_AGE);
result = executed[0].0.clone();
}
new_response(
&bank,
TransactionStatus {
slot: bank.slot(),
confirmations: Some(0),
status: result.clone(),
err: result.err(),
},
)
}
fn get_slot_leader(
&self,
meta: Self::Metadata,
@@ -1550,16 +1568,16 @@ impl RpcSol for RpcSolImpl {
end_slot: Slot,
) -> Result<Vec<String>> {
let pubkey = verify_pubkey(pubkey_str)?;
if end_slot < start_slot {
if end_slot <= start_slot {
return Err(Error::invalid_params(format!(
"start_slot {} must be less than or equal to end_slot {}",
"start_slot {} must be smaller than end_slot {}",
start_slot, end_slot
)));
}
if end_slot - start_slot > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE {
if end_slot - start_slot > MAX_SLOT_RANGE {
return Err(Error::invalid_params(format!(
"Slot range too large; max {}",
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE
MAX_SLOT_RANGE
)));
}
meta.request_processor
@@ -1582,22 +1600,44 @@ impl RpcSol for RpcSolImpl {
}
}
fn deserialize_bs58_transaction(bs58_transaction: String) -> Result<(Vec<u8>, Transaction)> {
let wire_transaction = bs58::decode(bs58_transaction)
.into_vec()
.map_err(|e| Error::invalid_params(format!("{:?}", e)))?;
if wire_transaction.len() >= PACKET_DATA_SIZE {
let err = format!(
"transaction too large: {} bytes (max: {} bytes)",
wire_transaction.len(),
PACKET_DATA_SIZE
);
info!("{}", err);
return Err(Error::invalid_params(&err));
}
bincode::config()
.limit(PACKET_DATA_SIZE as u64)
.deserialize(&wire_transaction)
.map_err(|err| {
info!("transaction deserialize error: {:?}", err);
Error::invalid_params(&err.to_string())
})
.map(|transaction| (wire_transaction, transaction))
}
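
For context, the bs58 payload this helper decodes is produced on the client side by bincode-serializing the transaction and base58-encoding the bytes. A round-trip sketch assuming the bincode 1.x, bs58, and serde (derive) crates, with `Dummy` standing in for a real solana_sdk Transaction:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Dummy {
    payload: Vec<u8>,
}

fn main() {
    let tx = Dummy { payload: vec![1, 2, 3] };
    // client side: bincode-serialize, then base58-encode
    let wire = bincode::serialize(&tx).unwrap();
    let encoded = bs58::encode(&wire).into_string();
    // server side: base58-decode, then bincode-deserialize (as in the helper above)
    let decoded_wire = bs58::decode(&encoded).into_vec().unwrap();
    let decoded: Dummy = bincode::deserialize(&decoded_wire).unwrap();
    assert_eq!(decoded, tx);
}
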
#[cfg(test)]
pub mod tests {
use super::*;
use crate::{
commitment::BlockCommitment, contact_info::ContactInfo,
commitment::BlockCommitment,
contact_info::ContactInfo,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
non_circulating_supply::non_circulating_accounts,
replay_stage::tests::create_test_transactions_and_populate_blockstore,
};
use bincode::deserialize;
use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
use jsonrpc_core::{ErrorCode, MetaIoHandler, Output, Response, Value};
use solana_ledger::{
blockstore::entries_to_test_shreds,
blockstore_processor::fill_blockstore_slot_with_ticks,
entry::next_entry_mut,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
blockstore::entries_to_test_shreds, blockstore_processor::fill_blockstore_slot_with_ticks,
entry::next_entry_mut, get_tmp_ledger_path,
};
use solana_sdk::{
fee_calculator::DEFAULT_BURN_PERCENT,
@@ -1679,7 +1719,6 @@ pub mod tests {
bank.clone(),
blockstore.clone(),
0,
0,
)));
// Add timestamp vote to blockstore
@@ -1760,12 +1799,17 @@ pub mod tests {
StorageState::default(),
validator_exit,
)));
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::default(),
)));
cluster_info.insert_info(ContactInfo::new_with_pubkey_socketaddr(
&leader_pubkey,
&socketaddr!("127.0.0.1:1234"),
));
cluster_info
.write()
.unwrap()
.insert_info(ContactInfo::new_with_pubkey_socketaddr(
&leader_pubkey,
&socketaddr!("127.0.0.1:1234"),
));
let mut io = MetaIoHandler::default();
let rpc = RpcSolImpl;
@@ -2239,6 +2283,133 @@ pub mod tests {
assert_eq!(expected, result);
}
#[test]
fn test_rpc_simulate_transaction() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler {
io,
meta,
blockhash,
alice,
bank,
..
} = start_rpc_handler_with_tx(&bob_pubkey);
let mut tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash);
let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string();
tx.signatures[0] = Signature::default();
let tx_badsig_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string();
bank.freeze(); // Ensure the root bank is frozen, `start_rpc_handler_with_tx()` doesn't do this
// Good signature with sigVerify=true
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#,
tx_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot": 0,"status":{"Ok":null},"err":null}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
// Bad signature with sigVerify=true
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#,
tx_badsig_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot":0,"status":{"Err":"SignatureFailure"},"err":"SignatureFailure"}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
// Bad signature with sigVerify=false
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": false}}]}}"#,
tx_badsig_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot": 0,"status":{"Ok":null},"err":null}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
// Bad signature with default sigVerify setting (false)
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}"]}}"#,
tx_badsig_serialized_encoded,
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"confirmations":0,"slot": 0,"status":{"Ok":null},"err":null}
},
"id": 1,
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
}
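
The raw requests in this test follow the shape params: [<bs58 transaction>, {"sigVerify": <bool>}]. A sketch of assembling that request body programmatically, assuming the serde_json crate; the placeholder string stands in for a bs58-encoded, bincode-serialized transaction as used above:

use serde_json::json;

// Build the simulateTransaction JSON-RPC request body exercised by the test.
fn simulate_transaction_request(tx_serialized_encoded: &str, sig_verify: bool) -> String {
    json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "simulateTransaction",
        "params": [tx_serialized_encoded, { "sigVerify": sig_verify }]
    })
    .to_string()
}

fn main() {
    let req = simulate_transaction_request("BASE58_ENCODED_TX", true);
    println!("{}", req);
}
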
#[test]
#[should_panic]
fn test_rpc_simulate_transaction_panic_on_unfrozen_bank() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler {
io,
meta,
blockhash,
alice,
bank,
..
} = start_rpc_handler_with_tx(&bob_pubkey);
let tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash);
let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string();
assert!(!bank.is_frozen());
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#,
tx_serialized_encoded,
);
// should panic because `bank` is not frozen
let _ = io.handle_request_sync(&req, meta.clone());
}
#[test]
fn test_rpc_confirm_tx() {
let bob_pubkey = Pubkey::new_rand();
@@ -2550,26 +2721,24 @@ pub mod tests {
);
Arc::new(RwLock::new(request_processor))
},
cluster_info: Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default())),
cluster_info: Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::default(),
))),
genesis_hash: Hash::default(),
};
let req = r#"{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["37u9WtQpcm6ULa3Vmu7ySnANv"]}"#;
let res = io.handle_request_sync(req, meta.clone());
let expected =
r#"{"jsonrpc":"2.0","error":{"code":-32600,"message":"Invalid request"},"id":1}"#;
let expected: Response =
serde_json::from_str(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
let res = io.handle_request_sync(req, meta);
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let error = &json["error"];
assert_eq!(error["code"], ErrorCode::InvalidParams.code());
}
#[test]
fn test_rpc_get_tpu_addr() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")),
));
)));
assert_eq!(
get_tpu_addr(&cluster_info),
Ok(socketaddr!("127.0.0.1:1234"))
@@ -2583,7 +2752,7 @@ pub mod tests {
let bad_pubkey = "a1b2c3d4";
assert_eq!(
verify_pubkey(bad_pubkey.to_string()),
Err(Error::invalid_request())
Err(Error::invalid_params("WrongSize"))
);
}
@@ -2597,7 +2766,7 @@ pub mod tests {
let bad_signature = "a1b2c3d4";
assert_eq!(
verify_signature(&bad_signature.to_string()),
Err(Error::invalid_request())
Err(Error::invalid_params("WrongSize"))
);
}
@@ -2738,7 +2907,6 @@ pub mod tests {
bank_forks.read().unwrap().working_bank(),
blockstore.clone(),
0,
0,
)));
let mut config = JsonRpcConfig::default();
@@ -2940,7 +3108,7 @@ pub mod tests {
block_commitment_cache
.write()
.unwrap()
.set_largest_confirmed_root(8);
.set_get_largest_confirmed_root(8);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0]}}"#);
@@ -3001,7 +3169,7 @@ pub mod tests {
block_commitment_cache
.write()
.unwrap()
.set_largest_confirmed_root(7);
.set_get_largest_confirmed_root(7);
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());

Some files were not shown because too many files have changed in this diff