Compare commits

...

174 Commits

Author SHA1 Message Date
Michael Vines
6fbad24477 getEpochInfo RPC endpoint now includes the current block height 2020-07-21 20:21:39 -07:00
mergify[bot]
4bd018e68b Use OrderedIterator in TransactionStatusService (#11149) (#11150)
* Split out get-first-err for unit testing

* Add failing test

* Add missing ordering

(cherry picked from commit 6c38369042)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-07-21 17:49:38 -06:00
mergify[bot]
c73e40a351 Add Bank support for "upgrade epochs" where all non-vote transactions will be rejected (bp #11082) (#11109)
* Add Bank support for "upgrade epochs" where all non-vote transactions will be rejected

(cherry picked from commit e5d8c4383f)

# Conflicts:
#	runtime/src/bank.rs

* Fix merge conflict

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-07-17 12:34:19 -07:00
Michael Vines
8f790e3153 Add --expected-bank-hash stub for v1.2 command-line compatibility 2020-07-16 13:18:34 -07:00
Michael Vines
1972d8b5c0 Update CRATES_IO_TOKEN 2020-07-15 17:55:13 -07:00
mergify[bot]
bc2fd56516 Gate nonce-overwrite change (#11081) (#11086)
(cherry picked from commit 1da9f9f05a)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-07-15 18:22:33 -06:00
Trent Nelson
00916b2ca6 CLI: Fix explicitly plumb vote_account through create-vote-account
(cherry picked from commit 14ac233d01)
2020-07-14 10:28:59 -06:00
Trent Nelson
006a5c5c88 CLI: Fix explicitly plumb withdraw_authority through vote-update-validator
(cherry picked from commit 2284699889)
2020-07-14 10:28:59 -06:00
Trent Nelson
6666e54a1f CLI: Fix explicitly plumb withdraw_authority through vote-update-commission
(cherry picked from commit 3392ecc310)
2020-07-14 10:28:59 -06:00
Trent Nelson
d6ea4f50c9 Mode gate RecentBlockhashes/BlockhashQueue sync
(cherry picked from commit 5741002a32)
2020-07-13 22:47:39 -06:00
Trent Nelson
a0965e1eba Synchronize BlockhashQueue and RecentBlockhashes sysvar update
(cherry picked from commit 5357ff6d60)
2020-07-13 22:47:39 -06:00
Trent Nelson
7ca65341e6 Factor locked portion of Bank::update_recent_blockhashes() out to helper
(cherry picked from commit 9cc379af6c)
2020-07-13 22:47:39 -06:00
Tyera Eulberg
141a5928c4 Add failing test
(cherry picked from commit 942c019d50)
2020-07-13 22:47:39 -06:00
Michael Vines
5f0584b6e8 Add vote-update-commission subcommand 2020-07-13 11:45:33 -06:00
Michael Vines
b7fb739cd9 Revert "Add EncodedTransaction::Raw"
This reverts commit 60b1bcddb5.
2020-07-12 08:56:36 -07:00
Michael Vines
5a4a238029 rebase 2020-07-10 22:52:51 -07:00
Michael Vines
01987f8f89 getConfirmedBlocks now has an upper limit on slot range
(cherry picked from commit aef6bf272e)

# Conflicts:
#	core/src/rpc.rs
2020-07-10 22:52:51 -07:00
Tyera Eulberg
82caa50781 Add fee-calculator logging (#11001) 2020-07-10 18:04:49 -06:00
Michael Vines
60b1bcddb5 Add EncodedTransaction::Raw 2020-07-10 14:55:07 -07:00
mergify[bot]
dce7739b75 Add block time placeholder to getConfirmedBlock (#10989)
(cherry picked from commit 491f5ae61a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-07-10 11:02:43 -07:00
mergify[bot]
1c703af6a2 Fix nonce fee_calculator overwrite (bp #10973) (#10975)
* Fix nonce fee_calculator overwrite (#10973)

* Add failing test

* Pass fee_calculator to prepare_if_nonce_account; only overwrite in error case

(cherry picked from commit 25228ca957)

* v1.1 transaction builder

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-07-09 16:57:18 -06:00
Michael Vines
f49de3b1ad Add --snapshot-version stub to ease migration to 1.2.0 (#10921) 2020-07-05 21:22:31 -07:00
mergify[bot]
5c1b79f500 net.sh: Refactor node initialization wait (#10819) (#10823)
* remote-node.sh: Factor out init wait to own script

* remote-node.sh: Allow nodes to initialize asynchronously

* testnet-automation: Plumb --async-node-init

(cherry picked from commit 7021e1c584)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-06-26 16:28:55 +00:00
mergify[bot]
da04616fd4 Fix race in ci/run-sanity.sh (#10796) (#10801)
(cherry picked from commit 4dc9f378b8)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-06-25 05:54:08 +00:00
mergify[bot]
8653c86284 Fix broken image link (#10496) (#10794)
automerge

(cherry picked from commit 75b8c2c4e3)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-06-25 01:41:35 +00:00
mergify[bot]
809e4cbf25 Remote Wallet: Stricter derivation path component parsing (#10725) (#10739)
(cherry picked from commit 842cab2739)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-06-22 18:27:46 +00:00
mergify[bot]
1aef482972 ledger-tool: Ignore SIGUSR1 (#10730) (#10731)
Prevents warehouse archive calls getting KO'd by logrotate

(cherry picked from commit d42247c652)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-06-21 19:31:25 +00:00
mergify[bot]
248ab3a6ec Don't bother api.github.com on pull requests to avoid getting rate limited (#10709)
(cherry picked from commit c0389ef82f)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-06-18 23:55:09 +00:00
Trent Nelson
ec1f2b4f90 Bump version to v1.1.20 (#10705) 2020-06-18 23:29:23 +00:00
Michael Vines
c853632fc4 Add stub address_labels field for 1.3 compatibility (#10696) 2020-06-18 11:05:48 -07:00
mergify[bot]
e651209f73 Update testnet shred version (#10684) (#10685)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 9c22a6007d)

Co-authored-by: carllin <wumu727@gmail.com>
2020-06-18 07:59:20 +00:00
mergify[bot]
641f439a45 Update testnet shred version (#10681) (#10682)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit dae8bc477b)

Co-authored-by: carllin <wumu727@gmail.com>
2020-06-18 07:44:58 +00:00
Michael Vines
a2486f8094 Remove strict from automerge, add rebase opt in 2020-06-17 20:53:55 -07:00
mergify[bot]
d48bd80619 Plumb --warp-slot through net scripts (bp #10639) (#10642)
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>

Co-authored-by: Trent Nelson <trent@solana.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2020-06-18 01:44:58 +00:00
mergify[bot]
4ff70a05f1 ignore break (#10666) (#10668)
(cherry picked from commit a5f82c995e)

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
2020-06-17 22:46:24 +00:00
mergify[bot]
7831cef9a7 Wait until bank is frozen before sending RPC notifications (bp #10654) (#10662)
* Wait until bank is frozen before sending RPC notifications (#10654)

(cherry picked from commit 39984cdcc3)

# Conflicts:
#	core/src/replay_stage.rs

* Update replay_stage.rs

Co-authored-by: Justin Starry <justin@solana.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2020-06-17 20:35:16 +00:00
mergify[bot]
7dd22d6601 Factor out testnet automation SW version resolution (#10659)
(cherry picked from commit a15f60a291)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-06-17 17:19:02 +00:00
mergify[bot]
3bb0388299 Add address to non-circulating supply (#10647)
(cherry picked from commit 5673343f49)

Co-authored-by: publish-docs.sh <maintainers@solana.com>
2020-06-17 05:43:23 +00:00
mergify[bot]
a0a2c61856 Allow pre-existing stake accounts in multinode-demo/delegate-stake.sh (#10635)
(cherry picked from commit ae0d5ba201)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-06-16 22:53:52 +00:00
mergify[bot]
4afa64c20d Plumb --wait-for-supermajority through scripts (#10611) (#10613)
(cherry picked from commit 348bf78cd1)

Co-authored-by: Trent Nelson <trent@solana.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2020-06-16 17:55:16 +00:00
mergify[bot]
be6edb950c Add generic is_parsable() input validator (bp #10599) (#10620)
* Add generic is_parsable() input validator.
Allow input validators to accept &str, &String and String parameters.

(cherry picked from commit daa2e6363f)

# Conflicts:
#	clap-utils/src/input_validators.rs

* Fix conflict

Co-authored-by: Kristofer Peterson <kris@tranception.com>
Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-06-16 11:15:29 +00:00
Michael Vines
62bc83ef39 Add mergify automerge rules 2020-06-15 09:14:39 -07:00
Michael Vines
f26824f2b5 Bump version to v1.1.19 2020-06-14 20:18:45 -07:00
mergify[bot]
bc808d785b Fix udp port check retry and check all udp ports (bp #10385) (#10576)
automerge
2020-06-14 17:36:17 -07:00
mergify[bot]
a5e91f8b14 Fix perf-libs version detection (#10571) (#10573)
automerge
2020-06-14 13:50:10 -07:00
mergify[bot]
79b1d49e42 Fix fanout gossip bench (bp #10509) (#10555)
* Fix fanout gossip bench (#10509)

* Gossip benchmark

* Rayon tweaking

* push pulls

* fanout to max nodes

* fixup! fanout to max nodes

* fixup! fixup! fanout to max nodes

* update

* multi vote test

* fixup prune

* fast propagation

* fixups

* compute up to 95%

* test for specific tx

* stats

* stats

* fixed tests

* rename

* track a lagging view of which nodes have the local node in their active set in the local received_cache

* test fixups

* dups are old now

* dont prune your own origin

* send vote to tpu

* tests

* fixed tests

* fixed test

* update

* ignore scale

* lint

* fixup

* fixup

* fixup

* cleanup

Co-authored-by: Stephen Akridge <sakridge@gmail.com>
(cherry picked from commit ba83e4ca50)

* Merge fixes

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
2020-06-14 06:28:43 -07:00
mergify[bot]
5c5207b7c4 Use git diff instead of git show for --check (#10566) (#10567)
automerge
2020-06-14 06:28:33 -07:00
mergify[bot]
6280ea1b6e Check the whole range of commits in the topic branch (bp #10560) (#10563)
automerge
2020-06-14 04:46:09 -07:00
sakridge
f016ccdbb5 Dial down gossip threadpool (#10540) 2020-06-13 22:48:32 -07:00
Dan Albert
a528e966e6 Add Trust Wallet security info (#10516)
automerge

(cherry picked from commit 914f285914)
2020-06-12 22:14:34 -07:00
mergify[bot]
4be9d926c8 Add FdGYQ... to non-circulation withdrawer authority list (#10542) (#10545)
automerge
2020-06-12 18:14:27 -07:00
Michael Vines
94e162b0f0 Refine build condition 2020-06-12 17:03:48 -07:00
mergify[bot]
26ca3c6d6d Update non-circulating pubkeys (#10524) (#10526)
automerge

(cherry picked from commit fb8612be49)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-06-11 16:58:28 -07:00
mergify[bot]
729b997392 Improve BPF SDK dependency caching (#10434) (#10517)
automerge
2020-06-11 14:12:16 -07:00
Michael Vines
37b381f47f Force CI_REPO_SLUG 2020-06-11 13:14:42 -07:00
Michael Vines
0115bfa2ea Bump version to v1.1.18 2020-06-10 22:42:05 -07:00
mergify[bot]
3f60fe62c2 Add StakeInstruction::Merge (#10503) (#10506)
automerge
2020-06-10 20:04:03 -07:00
mergify[bot]
ea44e64d21 Add VoteInstruction::UpdateCommission (#10497)
automerge
2020-06-10 12:54:09 -07:00
mergify[bot]
8e1c2d2df4 Add back missing pull_response success counter (#10491) (#10500)
automerge
2020-06-10 10:45:27 -07:00
sakridge
a79702c62c Optimize process pull responses (#10460) (#10484)
* Batch process pull responses

* Generate pull requests at 1/2 rate

* Do filtering work of process_pull_response in read lock

Only take write lock to insert if needed.
2020-06-09 20:02:46 -07:00
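The locking pattern described in the bullets above, filtering under a shared read lock and escalating to the write lock only when an insert is actually needed, can be sketched generically. A minimal illustration (hypothetical code, not the actual gossip CRDT):

use std::collections::HashMap;
use std::sync::RwLock;

// Sketch of "do filtering work of process_pull_response in read lock;
// only take write lock to insert if needed". Names are illustrative.
fn insert_if_new(table: &RwLock<HashMap<u64, String>>, key: u64, value: String) {
    // Cheap membership check under the shared read lock.
    if table.read().unwrap().contains_key(&key) {
        return; // already present: no write-lock contention at all
    }
    // Take the exclusive lock only when an insert is likely; entry()
    // re-checks under the write lock in case another thread raced us.
    table.write().unwrap().entry(key).or_insert(value);
}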
mergify[bot]
3c94084177 Add SendTransactionService (#10470)
automerge
2020-06-09 18:13:50 -07:00
mergify[bot]
7d448eb1a9 Add --warp-slot argument to |solana-ledger-tool create-snapshot| (#10473)
automerge
2020-06-09 11:09:32 -07:00
sakridge
a705764ca7 v1.1 gossip lock optimizations (#10459)
* Skip gossip requests with different shred version and split lock (#10240)


(cherry picked from commit 3f508b37fd)

* More cluster stats and add epoch stakes cache in retransmit stage (#10345)

* More cluster info metrics for push request/response counts

* Cache staked peers for the epoch

(cherry picked from commit ef37b82ffa)

* Cache tvu peers for broadcast (#10373)


(cherry picked from commit 2cf719ac2c)

* Add pull request count metrics (#10421)


(cherry picked from commit 3d2230f1a9)
2020-06-08 17:05:55 -07:00
mergify[bot]
3110def6c3 Remove lock around JsonRpcRequestProcessor (bp #10417) (#10450)
automerge
2020-06-08 16:45:49 -07:00
Michael Vines
afc89beefa Bump version to v1.1.17 2020-06-08 10:32:26 -07:00
Michael Vines
d5d5e8797b Bump new_system_program_activation_epoch by 2 2020-06-08 09:40:39 -07:00
mergify[bot]
09f0624887 Adjust RPC simulateTransaction endpoint to match v1.2 (#10443)
automerge
2020-06-06 21:08:27 -07:00
mergify[bot]
52c20a5c38 Add Certus One as a trusted validator for testnet (#10433) (#10437)
automerge
2020-06-05 16:49:21 -07:00
mergify[bot]
3c38df9be0 Avoid AccountInUse errors when simulating transactions (#10391) (#10419)
automerge
2020-06-04 20:41:12 -07:00
Michael Vines
da038e626a v1.1: ledger_cleanup_service: compact at a slower rate than purging (#10415)
automerge
2020-06-04 20:30:31 -07:00
mergify[bot]
9cfbf8a94d Deactivate legacy_system_instruction_processor at epoch 58/38 (preview/stable) (#10406) (#10407)
automerge
2020-06-04 01:21:43 -07:00
Michael Vines
fbcbd37650 v1.1: Enable rolling update of "Permit paying oneself" / "No longer allow create-account to add funds to an existing account" (#10394)
automerge
2020-06-03 16:34:59 -07:00
mergify[bot]
dca932fe45 Don't share same snapshot dir for secondary access (bp #10384) (#10386)
automerge
2020-06-03 06:34:19 -07:00
mergify[bot]
8d89eac32f Support opening an in-use rocksdb as secondary (bp #10209) (#10381)
automerge
2020-06-02 23:51:43 -07:00
Michael Vines
862fd63bb4 Update system_instruction_processor.rs 2020-06-02 23:35:31 -07:00
Greg Fitzgerald
578d77495a No longer allow create-account to add funds to an existing account (#10192) 2020-06-02 23:35:31 -07:00
Ryo Onodera
537d135005 Add --max-genesis-archive-unpacked-size to capitalization (#10380)
automerge
2020-06-02 21:39:14 -07:00
Michael Vines
5ade9b9f02 Revert "Reduce UNLOCK_NONCE_SLOT to ensure it is active on all three clusters (#10223)" (#10370)
automerge
2020-06-02 12:42:03 -07:00
mergify[bot]
e023719c58 Add preflight checks to sendTransaction RPC method (bp #10338) (#10362)
automerge
2020-06-01 22:45:51 -07:00
mergify[bot]
a278f745f8 Reduce stable jobs (#10344) (#10346)
automerge
2020-05-31 22:40:51 -07:00
mergify[bot]
640bb9cb95 Permit paying oneself (#10337) (#10341)
automerge
2020-05-31 13:18:34 -07:00
mergify[bot]
c344a878b6 validator: Added --health-check-slot-distance (bp #10324) (#10330)
automerge
2020-05-29 17:49:09 -07:00
mergify[bot]
9b63f7a50f Improve Rpc inflation tooling (bp #10309) (#10321)
automerge
2020-05-29 17:35:10 -07:00
Trent Nelson
b128087445 Backport of #9161 to v1.1 branch (#10327)
automerge
2020-05-29 16:34:36 -07:00
Tyera Eulberg
72755fcd19 Add mechanism to get blockhash's last valid slot (#10239) (#10318)
automerge
2020-05-29 11:27:45 -07:00
mergify[bot]
24937e63d4 verify_reachable_ports: Handle errors without expect() (#10298) (#10304)
automerge
2020-05-28 16:12:08 -07:00
mergify[bot]
995759faf5 Add commitment parameter to getFeeCalculatorForBlockhash (#10255) (#10296) (#10302)
automerge
2020-05-28 15:26:39 -07:00
mergify[bot]
db60bd30dc Feign RPC health while in a --wait-for-supermajority holding pattern (#10295) (#10300)
automerge
2020-05-28 13:53:05 -07:00
carllin
bc86ee8d13 Fix run_orphan DOS (#10290)
Co-authored-by: Carl <carl@solana.com>
2020-05-28 11:29:13 -07:00
mergify[bot]
93506b22e7 Include GenesisConfig inflation in Display (#10282) (#10288)
automerge
2020-05-28 00:54:13 -07:00
mergify[bot]
1e53760a65 Use correct --url (#10284) (#10286)
automerge
2020-05-27 22:15:47 -07:00
Michael Vines
24c796b434 Bump version to 1.1.16 2020-05-27 18:13:17 -07:00
carllin
2cdd3f835f log leader (#10280)
Co-authored-by: Carl <carl@solana.com>
2020-05-27 18:07:31 -07:00
Michael Vines
c4e04f70d0 Adjust mainnet-beta shred version 2020-05-27 17:10:53 -07:00
Michael Vines
5d971472b2 Purge next slots to avoid a blockstore_processor panic on restart (#10277) 2020-05-27 17:10:27 -07:00
Michael Vines
f1201502d4 Bump version to 1.1.15 2020-05-26 21:22:34 -07:00
Tyera Eulberg
fd5222ad21 V1.1 single gossip commitment (#10263)
automerge
2020-05-26 21:16:46 -07:00
mergify[bot]
768a5f2b40 Cluster info metrics (#10215) (#10235)
automerge
2020-05-26 17:28:14 -07:00
mergify[bot]
87b57b53f9 Wait for one slot to be produced (#10257) (#10258)
automerge
2020-05-26 17:27:36 -07:00
Michael Vines
55a64c8945 Activate eager rent collection and BPF loader on mainnet-beta epoch 34 (#10231) 2020-05-26 10:28:29 -07:00
Ryo Onodera
e8c6233c6e Adjust owner hashing activation slot (#10243)
automerge
2020-05-26 01:21:22 -07:00
Michael Vines
f51b214449 Adjust include_owner_in_hash to match mainnet-beta v1.0 activation (#10230)
automerge
2020-05-25 12:31:15 -07:00
mergify[bot]
8fe8a5717e Clean up RPCClient retry handling: only retry on 429, after a little sleep (#10182) (#10184)
automerge
2020-05-25 11:41:46 -07:00
Michael Vines
9adf8b4fc8 Reduce UNLOCK_NONCE_SLOT to ensure it is active on all three clusters (#10223)
automerge
2020-05-25 01:08:30 -07:00
mergify[bot]
82772f95a1 LedgerCleanupService no longer causes an OOM and actually purges (bp #10199) (#10221)
automerge
2020-05-24 23:24:45 -07:00
mergify[bot]
0b5d3df251 Optimize banking processing of AccountInUse (#10154) (#10193)
automerge
2020-05-24 11:46:10 -07:00
Ryo Onodera
e63fdba252 Test ledger-tool commands in run-sanity.sh (#10211)
automerge
2020-05-24 06:07:24 -07:00
mergify[bot]
5e65b7cbd9 Retry a couple times before declaring a UDP port unreachable (#10181) (#10191)
automerge
2020-05-22 15:57:18 -07:00
Michael Vines
68d0fe2dbc Update another non-circulating account 2020-05-22 15:11:19 -07:00
mergify[bot]
3aad5f563e Add another non-circulating account (#10186) (#10190)
automerge
2020-05-22 14:59:21 -07:00
mergify[bot]
ccfe09e460 Fixup deserialize_bs58_transaction, and make a few error types more targeted (#10171) (#10177)
automerge
2020-05-21 19:09:24 -07:00
mergify[bot]
6fd57fafc8 REST API now returns supply in SOL rather than lamports (#10170) (#10174)
automerge

(cherry picked from commit 18be7a7966)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-21 16:54:12 -07:00
mergify[bot]
c7d857583f Revert "Add AVX2 runtime checks (#10033)" (#10167) (#10169)
This reverts commit cf8eb7700b.

(cherry picked from commit 486168b796)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-21 13:19:47 -07:00
mergify[bot]
e29b7876ad Add v0 REST APIs for circulating and total supply (bp #10102) (#10160)
automerge
2020-05-20 21:51:25 -07:00
mergify[bot]
de479ebda9 transaction-history now searches over the entire history by default (#10145) (#10153)
automerge
2020-05-20 15:32:21 -07:00
mergify[bot]
d3447f2f41 Fixup subscription docs (#10146) (#10148)
automerge
2020-05-20 12:23:07 -07:00
mergify[bot]
d9e14b4a82 Fix another unstable test after eager rent (#10120) (#10143)
automerge
2020-05-20 10:22:11 -07:00
mergify[bot]
94b97e4b56 Ignore test_tvu_exit (#10134) (#10138)
automerge
2020-05-20 00:57:34 -07:00
mergify[bot]
abd977b819 Fix erasure (bp #10095) (#10127)
automerge
2020-05-19 22:21:35 -07:00
mergify[bot]
36eafa56a3 Rename getCirculatingSupply to getSupply in JSON API doc (#10121) (#10123)
automerge
2020-05-19 15:47:39 -07:00
mergify[bot]
06a63549c1 Add SimulateTransaction RPC endpoint (#10106) (#10116)
automerge
2020-05-19 14:25:06 -07:00
carllin
a4047bb9c8 Fix deserialize reference tick (#10111)
Co-authored-by: Carl <carl@solana.com>
2020-05-19 13:55:37 -07:00
Michael Vines
a235423000 Cargo.lock 2020-05-19 08:14:29 -07:00
Michael Vines
726eadc64b Bump version to 1.1.14 2020-05-18 15:15:26 -07:00
mergify[bot]
4d18144232 Update accounts whitelist (#10100) (#10104)
automerge
2020-05-18 14:42:02 -07:00
mergify[bot]
342cf90ce1 Trigger RPC notifications after block commitment cache update (#10077) (#10101)
automerge
2020-05-18 13:34:18 -07:00
mergify[bot]
3ec109a0e4 Bump solana-rbpf to v0.1.28 (#9976) (#9983)
automerge
2020-05-18 00:10:05 -07:00
Michael Vines
2634402fef Bump version to 1.1.13 2020-05-17 16:35:36 -07:00
carllin
997f317c23 v1.1: Add nonce to shreds repairs, add shred data size to header (#10076)
* Add nonce to shreds/repairs

* Add data shred size to header

* Align nonce unlock with epoch 47

Co-authored-by: Carl <carl@solana.com>
2020-05-17 13:36:15 -07:00
mergify[bot]
7bc915c0d1 Abort if the open fd limit cannot be increased (bp #10064) (#10074)
automerge
2020-05-15 14:35:29 -07:00
mergify[bot]
8651f058eb Add docs section to upgrade Solana App on Ledger Live (#10070) (#10072)
automerge
2020-05-15 11:30:32 -07:00
mergify[bot]
b6d6ff786a Forge a confirmed root before halting for RPC inspection (#10061) (#10067)
automerge
2020-05-15 10:30:02 -07:00
mergify[bot]
b9a80152df Fix unstable test after eager rent collection (#10031) (#10060)
automerge
2020-05-15 01:25:48 -07:00
Ryo Onodera
e9dda5ebd7 v1.1: Eager rent collection (#10028)
* Introduce eager rent collection (#9527)

* Switch AccountsIndex.account_maps from HashMap to BTreeMap

* Introduce eager rent collection

* Start to add tests

* Avoid too short eager rent collection cycles

* Add more tests

* Add more tests...

* Refactor!!!!!!

* Refactoring follow up

* More tiny cleanups

* Don't rewrite 0-lamport accounts to be deterministic

* Refactor a bit

* Do hard fork, restore tests, and perf. mitigation

* Fix build...

* Refactor and add switch over for testnet (TdS)

* Use to_be_bytes

* cleanup

* More tiny cleanup

* Rebase cleanup

* Set Bank::genesis_hash when resuming from snapshot

* Reorder fns and clean ups

* Better naming and commenting

* Yet more naming clarifications

* Make prefix width strictly uniform for 2-base partition_count

* Fix typo...

* Revert cluster-dependent gate

* kick ci?

* kick ci?

* kick ci?

(cherry picked from commit 1eb40c3fe0)

# Conflicts:
#	core/tests/bank_forks.rs
#	ledger/src/bank_forks_utils.rs
#	ledger/src/snapshot_utils.rs
#	runtime/src/bank.rs

* Fix merge conflicts

* Add gating

* Add Danger comment...

* Delay activation epoch

* Add gating for stable as well

* fmt...

* fmt!!!!
2020-05-15 15:38:31 +09:00
mergify[bot]
5f0be1793c Add Ledger error codes (#10056) (#10059)
automerge
2020-05-14 23:13:18 -07:00
mergify[bot]
2d8533075d Base58 (#10052) (#10055)
automerge
2020-05-14 18:06:27 -07:00
mergify[bot]
bf382c6069 Remove inline from all BPF C functions (bp #10038) (#10039)
automerge
2020-05-14 14:47:04 -07:00
mergify[bot]
366e426f2b Clean up Ledger instructions (#10047) (#10049)
automerge
2020-05-14 13:08:34 -07:00
mergify[bot]
fa34e6e419 solana-gossip spy can now specify a shred version (#10040) (#10042)
automerge
2020-05-13 21:17:12 -07:00
mergify[bot]
ab9fe5e9ad Add AVX2 runtime checks (#10033) (#10035)
automerge
2020-05-13 13:43:06 -07:00
Ryo Onodera
3474419111 Revert "[NO-MERGE; needs gating logic] Introduce eager rent collection (bp #9527) (#10022)" (#10026)
This reverts commit ff21251416.
2020-05-13 22:51:59 +09:00
mergify[bot]
ff21251416 [NO-MERGE; needs gating logic] Introduce eager rent collection (bp #9527) (#10022)
automerge
2020-05-13 06:12:45 -07:00
mergify[bot]
7e6bbc7b77 Introduce type alias Ancestors (#9699) (#10018)
automerge
2020-05-13 01:46:38 -07:00
mergify[bot]
82783b18ea Rpc: optionally filter getLargestAccounts by circulating/nonCirculating (#10007) (#10014)
automerge
2020-05-12 21:54:44 -07:00
mergify[bot]
b7c6f38665 Enable disk metrics (#10009) (#10010)
automerge
2020-05-12 16:45:26 -07:00
mergify[bot]
11da07eca7 Update testnet shred version (#10000) (#10002)
automerge
2020-05-11 23:35:14 -07:00
Michael Vines
b6b779d2c4 Use CommitmentConfig::root() when checking accounts, CommitmentConfig::max() may not be available yet 2020-05-11 22:55:04 -07:00
mergify[bot]
1c85d62fe4 Fix crash when CI_COMMIT=HEAD (#9994) (#9998)
automerge

(cherry picked from commit 28d1f7c5e7)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-11 22:53:48 -07:00
Michael Vines
867a213cd3 Bump version to v1.1.12 2020-05-11 22:10:03 -07:00
Michael Vines
c51a18a887 getClusterNodes RPC API now includes the node software version (#9993) 2020-05-11 21:38:19 -07:00
mergify[bot]
206ff02be9 Fix up a couple cli commands that fail when a node is in the --wait-for-supermajority state (#9985) (#9991)
automerge

(cherry picked from commit 3b9dc50541)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-05-11 19:48:59 -07:00
Michael Vines
8d7e90e9b8 Advertise node version in gossip (#9986)
automerge
2020-05-11 17:45:19 -07:00
mergify[bot]
eb11db3e3e Check slot cleaned up for RPC blockstore/slot queries (#9982) (#9989)
automerge
2020-05-11 16:49:22 -07:00
mergify[bot]
8d8ad84527 Add retransmit packets_by_slot metrics (#9975) (#9984)
automerge
2020-05-11 15:25:40 -07:00
Dan Albert
fa059bb3c3 Add windows instructions to CLI install docs (#9987)
automerge
2020-05-11 14:50:26 -07:00
mergify[bot]
9652e832c2 Write non-error output to stdout (#9960) (#9972)
automerge
2020-05-11 10:18:15 -07:00
mergify[bot]
52e27712e1 Retransmit and shred fetch metrics (#9965) (#9969)
automerge
2020-05-10 23:15:15 -07:00
mergify[bot]
c00ec26a3b Cli: Add solana supply command; hide total-supply (bp #9956) (#9963)
automerge
2020-05-10 18:04:46 -07:00
mergify[bot]
50eba96b58 More logging around failure (#9967) (#9968)
automerge
2020-05-10 17:23:30 -07:00
mergify[bot]
e7c0629951 Remove RpcClient code duplication (#9952) (#9961)
automerge
2020-05-10 10:36:56 -07:00
mergify[bot]
a08235da9a send_and_confirm_transaction() no longer needs a keypair (#9950) (#9962)
automerge
2020-05-10 10:14:31 -07:00
mergify[bot]
b213004157 Rpc: Add getCirculatingSupply endpoint, redux (#9953) (#9955)
automerge
2020-05-09 12:32:08 -07:00
Jack May
92562b4349 Pull in hardened BPF virtual machine (#9931) 2020-05-08 16:06:22 -07:00
Jack May
01c490d354 Rename BPF helper to syscall (#9819)
automerge
2020-05-08 16:06:22 -07:00
Ryo Onodera
cfdc0eb99e Maintain sysvar balances for consistent market cap. (#9942)
automerge
2020-05-08 12:15:37 -07:00
mergify[bot]
0b7b3c9f20 Support ad-hoc genesis args in run.sh (#9697) (#9940)
automerge
2020-05-08 08:29:29 -07:00
Michael Vines
5cd685ed3a Bump version to v1.1.11 2020-05-07 16:57:43 -07:00
Ryo Onodera
9498f11d46 v1.1: Include account.owner into account hash (#9918)
automerge
2020-05-07 13:00:52 -07:00
mergify[bot]
558324b861 Refactor RPC subscriptions account handling (#9888) (#9912)
automerge
2020-05-07 01:14:58 -07:00
Tyera Eulberg
9a5fc3513a Add using OutputFormat enum to --sign-only transactions (#9650) (#9911)
automerge
2020-05-06 23:19:36 -07:00
carllin
b7c6e139e6 Revert (#9908)
automerge
2020-05-06 22:28:19 -07:00
mergify[bot]
a9d2fa6aad Cli: Update OutputFormat method to return a String to restore consistency (#9904) (#9905)
automerge
2020-05-06 20:51:45 -07:00
Michael Vines
056a9952c3 Cargo.lock 2020-05-06 16:35:26 -07:00
Michael Vines
fc21c857a3 Bump version to v1.1.10 2020-05-06 16:04:41 -07:00
270 changed files with 17689 additions and 8821 deletions


@@ -1,12 +1,12 @@
{
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
"environment": {
"CODECOV_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:JnxhrIxh09AvqdJgrVSYmb7PxSrh19aE:07WzVExCHEd1lJ1m8QizRRthGri+WBNeZRKjjEvsy5eo4gv3HD7zVEm42tVTGkqITKkBNQ==]",
"CRATES_IO_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:d0jJqC32/axwzq/N7kMRmpxKhnRrhtpt:zvcPHwkOzGnjhNkAQSejwdy1Jkr9wR1qXFFCnfIjyt/XQYubzB1tLkoly/qdmeb5]",
"GEOLOCATION_API_KEY": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R4gfB6Ey4i50HyfLt4UZDLBqg3qHEUye:UfZCOgt8XI6Y2g+ivCRVoS1fjFycFs7/GSevvCqh1B50mG0+hzpEyzXQLuKG5OeI]",
"GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
"INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
"INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
"INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]"
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]",
"CRATES_IO_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:GGRTYDjMXksevzR6kq4Jx+FaIQZz50RU:xkbwDxcgoCyU+aT2tiI9mymigrEl6YiOr3axe3aX70ELIBKbCdPGilXP/wixvKi94g2u]",
"GEOLOCATION_API_KEY": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:U2PZLi5MU3Ru/zK1SilianEeizcMvxml:AJKf2OAtDHmJh0KyXrBnNnistItZvVVP3cZ7ZLtrVupjmWN/PzmKwSsXeCNObWS+]",
"GITHUB_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:0NJNlpD/O19mvOakCGBYDhIDfySxWFSC:Dz4NXv9x6ncRQ1u9sVoWOcqmkg0sI09qmefghB0GXZgPcFGgn6T0mw7ynNnbUvjyH8dLruKHauk=]",
"INFLUX_DATABASE": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:SzwHIeOVpmbTcGQOGngoFgYumsLZJUGq:t7Rpk49njsWvoM+ztv5Uwuiz]",
"INFLUX_PASSWORD": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:/MUs+q7pdGrUjzwcq+6pgIFxur4hxdqu:am22z2E2dtmw1f1J1Mq5JLcUHZsrEjQAJ0pp21M4AZeJbNO6bVb44d9zSkHj7xdN6U+GNlCk+wU=]",
"INFLUX_USERNAME": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:XjghH20xGVWro9B+epGlJaJcW8Wze0Bi:ZIdOtXudTY5TqKseDU7gVvQXfmXV99Xh]"
}
}


@@ -1,9 +1,40 @@
# Validate your changes with:
#
# $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate
# $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate/
#
# https://doc.mergify.io/
pull_request_rules:
- name: automatic merge (squash) on CI success
conditions:
- status-success=buildkite/solana
#- status-success=Travis CI - Pull Request
- status-success=ci-gate
- label=automerge
- author≠@dont-squash-my-commits
actions:
merge:
method: squash
# Join the dont-squash-my-commits group if you don't want your commits squashed
- name: automatic merge (rebase) on CI success
conditions:
- status-success=buildkite/solana
#- status-success=Travis CI - Pull Request
- status-success=ci-gate
- label=automerge
- author=@dont-squash-my-commits
actions:
merge:
method: rebase
- name: remove automerge label on CI failure
conditions:
- label=automerge
- "#status-failure!=0"
actions:
label:
remove:
- automerge
comment:
message: automerge label removed due to a CI failure
- name: remove outdated reviews
conditions:
- base=master


@@ -18,6 +18,8 @@ branches:
- master
- /^v\d+\.\d+/
if: type IN (api, cron) OR tag IS present
notifications:
slack:
on_success: change

Cargo.lock (generated, 1168 changed lines): diff suppressed because it is too large.


@@ -58,6 +58,7 @@ members = [
"transaction-status",
"upload-perf",
"net-utils",
"version",
"vote-signer",
"cli",
"rayon-threadlimit",


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.1.9"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.1.9" }
solana-runtime = { path = "../runtime", version = "1.1.9" }
solana-measure = { path = "../measure", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-runtime = { path = "../runtime", version = "1.1.20" }
solana-measure = { path = "../measure", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
rand = "0.7.0"
clap = "2.33.0"
crossbeam-channel = "0.4"


@@ -1,9 +1,11 @@
use clap::{value_t, App, Arg};
use rayon::prelude::*;
use solana_measure::measure::Measure;
use solana_runtime::accounts::{create_test_accounts, update_accounts, Accounts};
use solana_runtime::{
accounts::{create_test_accounts, update_accounts, Accounts},
accounts_index::Ancestors,
};
use solana_sdk::pubkey::Pubkey;
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
@@ -76,7 +78,7 @@ fn main() {
num_slots,
create_time
);
let mut ancestors: HashMap<u64, usize> = vec![(0, 0)].into_iter().collect();
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..num_slots {
ancestors.insert(i as u64, i - 1);
accounts.add_root(i as u64);
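The hunk above swaps the ad-hoc HashMap<u64, usize> for the Ancestors alias introduced in #9699. A minimal sketch of what the alias amounts to, assuming it is the plain type alias its usage here implies:

use std::collections::HashMap;

// Hypothetical reproduction of solana_runtime::accounts_index::Ancestors:
// maps a slot to its position in the fork, as used for account lookups.
pub type Ancestors = HashMap<u64, usize>;

fn main() {
    let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
    ancestors.insert(1, 0); // slot 1 builds on the entry at index 0
    assert!(ancestors.contains_key(&1));
}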


@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "1.1.9"
version = "1.1.20"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,23 +15,23 @@ ed25519-dalek = "=1.0.0-pre.3"
log = "0.4.8"
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-client = { path = "../client", version = "1.1.9" }
solana-storage-program = { path = "../programs/storage", version = "1.1.9" }
solana-client = { path = "../client", version = "1.1.20" }
solana-storage-program = { path = "../programs/storage", version = "1.1.20" }
thiserror = "1.0"
serde = "1.0.105"
serde_json = "1.0.48"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.1.9" }
solana-chacha = { path = "../chacha", version = "1.1.9" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.9" }
solana-ledger = { path = "../ledger", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-perf = { path = "../perf", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-core = { path = "../core", version = "1.1.9" }
solana-streamer = { path = "../streamer", version = "1.1.9" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.9" }
solana-metrics = { path = "../metrics", version = "1.1.9" }
solana-net-utils = { path = "../net-utils", version = "1.1.20" }
solana-chacha = { path = "../chacha", version = "1.1.20" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.20" }
solana-ledger = { path = "../ledger", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-perf = { path = "../perf", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-core = { path = "../core", version = "1.1.20" }
solana-streamer = { path = "../streamer", version = "1.1.20" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.20" }
solana-metrics = { path = "../metrics", version = "1.1.20" }
[dev-dependencies]
hex = "0.4.2"


@@ -13,8 +13,7 @@ use solana_core::{
cluster_slots::ClusterSlots,
contact_info::ContactInfo,
gossip_service::GossipService,
repair_service,
repair_service::{RepairService, RepairSlotRange, RepairStats, RepairStrategy},
repair_service::{self, RepairService, RepairSlotRange, RepairStats, RepairStrategy},
serve_repair::ServeRepair,
shred_fetch_stage::ShredFetchStage,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
@@ -697,16 +696,10 @@ impl Archiver {
RpcClient::new_socket(rpc_peers[node_index].rpc)
};
Ok(rpc_client
.send(
&RpcRequest::GetSlotsPerSegment,
.send::<u64>(
RpcRequest::GetSlotsPerSegment,
serde_json::json!([client_commitment]),
0,
)
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
ArchiverError::ClientError(err)
})?
.as_u64()
.unwrap())
} else {
Err(ArchiverError::NoRpcPeers)
@@ -749,21 +742,10 @@ impl Archiver {
let node_index = thread_rng().gen_range(0, rpc_peers.len());
RpcClient::new_socket(rpc_peers[node_index].rpc)
};
let response = rpc_client
.send(
&RpcRequest::GetStorageTurn,
serde_json::value::Value::Null,
0,
)
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
ArchiverError::ClientError(err)
})?;
let RpcStorageTurn {
blockhash: storage_blockhash,
slot: turn_slot,
} = serde_json::from_value::<RpcStorageTurn>(response)
.map_err(ArchiverError::JsonError)?;
} = rpc_client.send(RpcRequest::GetStorageTurn, serde_json::value::Value::Null)?;
let turn_blockhash = storage_blockhash.parse().map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -842,7 +824,7 @@ impl Archiver {
.into_iter()
.filter_map(|repair_request| {
serve_repair
.map_repair_request(&repair_request, &mut repair_stats)
.map_repair_request(&repair_request, &mut repair_stats, Some(0))
.map(|result| ((archiver_info.gossip, result), repair_request))
.ok()
})
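The first two hunks in this file reflect an RpcClient::send signature change: the retry-count argument is gone and the response deserializes directly into a caller-chosen type. A hedged sketch of the new call shape, assuming the generic signature the diff implies:

use solana_client::{client_error::ClientError, rpc_client::RpcClient, rpc_request::RpcRequest};

// Assumes: fn send<T: DeserializeOwned>(&self, RpcRequest, Value) -> Result<T, ClientError>,
// as implied by the `.send::<u64>(...)` call above.
fn slots_per_segment(client: &RpcClient, params: serde_json::Value) -> Result<u64, ClientError> {
    // The target type (u64 here) drives deserialization of the response.
    client.send(RpcRequest::GetSlotsPerSegment, params)
}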


@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "1.1.9"
version = "1.1.20"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,12 +11,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.7.0"
solana-chacha = { path = "../chacha", version = "1.1.9" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.9" }
solana-ledger = { path = "../ledger", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-perf = { path = "../perf", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-chacha = { path = "../chacha", version = "1.1.20" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.20" }
solana-ledger = { path = "../ledger", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-perf = { path = "../perf", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
[dev-dependencies]
hex = "0.4.2"


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.1.9"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,13 +10,13 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.10.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.9" }
solana-core = { path = "../core", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-metrics = { path = "../metrics", version = "1.1.9" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.9" }
solana-net-utils = { path = "../net-utils", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-core = { path = "../core", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-metrics = { path = "../metrics", version = "1.1.20" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.1.20" }
solana-net-utils = { path = "../net-utils", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
[package.metadata.docs.rs]


@@ -2,24 +2,26 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.1.9"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.9" }
solana-streamer = { path = "../streamer", version = "1.1.9" }
solana-perf = { path = "../perf", version = "1.1.9" }
solana-ledger = { path = "../ledger", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-runtime = { path = "../runtime", version = "1.1.9" }
solana-measure = { path = "../measure", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.1.20" }
solana-streamer = { path = "../streamer", version = "1.1.20" }
solana-perf = { path = "../perf", version = "1.1.20" }
solana-ledger = { path = "../ledger", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-runtime = { path = "../runtime", version = "1.1.20" }
solana-measure = { path = "../measure", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-version = { path = "../version", version = "1.1.20" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,3 +1,4 @@
use clap::{crate_description, crate_name, value_t, App, Arg};
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
@@ -64,15 +65,22 @@ fn check_txs(
no_bank
}
fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
fn make_accounts_txs(
total_num_transactions: usize,
hash: Hash,
same_payer: bool,
) -> Vec<Transaction> {
let to_pubkey = Pubkey::new_rand();
let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
(0..txes)
let payer_key = Keypair::new();
let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash);
(0..total_num_transactions)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new_rand();
if !same_payer {
new.message.account_keys[0] = Pubkey::new_rand();
}
new.message.account_keys[1] = Pubkey::new_rand();
new.signatures = vec![Signature::new(&sig[0..64])];
new
@@ -96,13 +104,61 @@ fn bytes_as_usize(bytes: &[u8]) -> usize {
bytes[0] as usize | (bytes[1] as usize) << 8
}
#[allow(clippy::cognitive_complexity)]
fn main() {
solana_logger::setup();
let num_threads = BankingStage::num_threads() as usize;
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())
.arg(
Arg::with_name("num_chunks")
.long("num-chunks")
.takes_value(true)
.value_name("SIZE")
.help("Number of transaction chunks."),
)
.arg(
Arg::with_name("packets_per_chunk")
.long("packets-per-chunk")
.takes_value(true)
.value_name("SIZE")
.help("Packets per chunk"),
)
.arg(
Arg::with_name("skip_sanity")
.long("skip-sanity")
.takes_value(false)
.help("Skip transaction sanity execution"),
)
.arg(
Arg::with_name("same_payer")
.long("same-payer")
.takes_value(false)
.help("Use the same payer for transfers"),
)
.arg(
Arg::with_name("iterations")
.long("iterations")
.takes_value(true)
.help("Number of iterations"),
)
.arg(
Arg::with_name("num_threads")
.long("num-threads")
.takes_value(true)
.help("Number of iterations"),
)
.get_matches();
let num_threads =
value_t!(matches, "num_threads", usize).unwrap_or(BankingStage::num_threads() as usize);
// a multiple of packet chunk duplicates to avoid races
const CHUNKS: usize = 8 * 2;
const PACKETS_PER_BATCH: usize = 192;
let txes = PACKETS_PER_BATCH * num_threads * CHUNKS;
let num_chunks = value_t!(matches, "num_chunks", usize).unwrap_or(16);
let packets_per_chunk = value_t!(matches, "packets_per_chunk", usize).unwrap_or(192);
let iterations = value_t!(matches, "iterations", usize).unwrap_or(1000);
let total_num_transactions = num_chunks * num_threads * packets_per_chunk;
let mint_total = 1_000_000_000_000;
let GenesisConfigInfo {
genesis_config,
@@ -116,34 +172,44 @@ fn main() {
let mut bank_forks = BankForks::new(0, bank0);
let mut bank = bank_forks.working_bank();
info!("threads: {} txs: {}", num_threads, txes);
info!("threads: {} txs: {}", num_threads, total_num_transactions);
let mut transactions = make_accounts_txs(txes, &mint_keypair, genesis_config.hash());
let same_payer = matches.is_present("same_payer");
let mut transactions =
make_accounts_txs(total_num_transactions, genesis_config.hash(), same_payer);
// fund all the accounts
transactions.iter().for_each(|tx| {
let fund = system_transaction::transfer(
let mut fund = system_transaction::transfer(
&mint_keypair,
&tx.message.account_keys[0],
mint_total / txes as u64,
mint_total / total_num_transactions as u64,
genesis_config.hash(),
);
// Ignore any pesky duplicate signature errors in the case we are using single-payer
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
fund.signatures = vec![Signature::new(&sig[0..64])];
let x = bank.process_transaction(&fund);
x.unwrap();
});
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions");
});
bank.clear_signatures();
//sanity check, make sure all the transactions can execute in parallel
let res = bank.process_transactions(&transactions);
for r in res {
assert!(r.is_ok(), "sanity parallel execution");
let skip_sanity = matches.is_present("skip_sanity");
if !skip_sanity {
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
});
bank.clear_signatures();
//sanity check, make sure all the transactions can execute in parallel
let res = bank.process_transactions(&transactions);
for r in res {
assert!(r.is_ok(), "sanity parallel execution error: {:?}", r);
}
bank.clear_signatures();
}
bank.clear_signatures();
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), packets_per_chunk);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
@@ -162,7 +228,7 @@ fn main() {
);
poh_recorder.lock().unwrap().set_bank(&bank);
let chunk_len = verified.len() / CHUNKS;
let chunk_len = verified.len() / num_chunks;
let mut start = 0;
// This is so that the signal_receiver does not go out of scope after the closure.
@@ -171,17 +237,17 @@ fn main() {
let signal_receiver = Arc::new(signal_receiver);
let mut total_us = 0;
let mut tx_total_us = 0;
let base_tx_count = bank.transaction_count();
let mut txs_processed = 0;
let mut root = 1;
let collector = Pubkey::new_rand();
const ITERS: usize = 1_000;
let config = Config {
packets_per_batch: PACKETS_PER_BATCH,
packets_per_batch: packets_per_chunk,
chunk_len,
num_threads,
};
let mut total_sent = 0;
for _ in 0..ITERS {
for _ in 0..iterations {
let now = Instant::now();
let mut sent = 0;
@@ -222,7 +288,11 @@ fn main() {
sleep(Duration::from_millis(5));
}
}
if check_txs(&signal_receiver, txes / CHUNKS, &poh_recorder) {
if check_txs(
&signal_receiver,
total_num_transactions / num_chunks,
&poh_recorder,
) {
debug!(
"resetting bank {} tx count: {} txs_proc: {}",
bank.slot(),
@@ -274,7 +344,7 @@ fn main() {
debug!(
"time: {} us checked: {} sent: {}",
duration_as_us(&now.elapsed()),
txes / CHUNKS,
total_num_transactions / num_chunks,
sent,
);
total_sent += sent;
@@ -285,20 +355,26 @@ fn main() {
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
}
start += chunk_len;
start %= verified.len();
}
let txs_processed = bank_forks.working_bank().transaction_count();
debug!("processed: {} base: {}", txs_processed, base_tx_count);
eprintln!(
"{{'name': 'banking_bench_total', 'median': '{}'}}",
"{{'name': 'banking_bench_total', 'median': '{:.2}'}}",
(1000.0 * 1000.0 * total_sent as f64) / (total_us as f64),
);
eprintln!(
"{{'name': 'banking_bench_tx_total', 'median': '{}'}}",
"{{'name': 'banking_bench_tx_total', 'median': '{:.2}'}}",
(1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64),
);
eprintln!(
"{{'name': 'banking_bench_success_tx_total', 'median': '{:.2}'}}",
(1000.0 * 1000.0 * (txs_processed - base_tx_count) as f64) / (total_us as f64),
);
drop(verified_sender);
drop(vote_sender);
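The new flags above all use the same value_t! fallback idiom; in isolation it looks like this (hypothetical standalone program against clap 2.x, the version pinned in the manifests):

use clap::{value_t, App, Arg};

fn main() {
    let matches = App::new("demo")
        .arg(Arg::with_name("num_chunks").long("num-chunks").takes_value(true))
        .get_matches();
    // Falls back to the default when the flag is absent or fails to parse,
    // mirroring the banking-bench argument handling above.
    let num_chunks = value_t!(matches, "num_chunks", usize).unwrap_or(16);
    println!("num_chunks = {}", num_chunks);
}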


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.1.9"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,20 +18,20 @@ rand = "0.7.0"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.9" }
solana-core = { path = "../core", version = "1.1.9" }
solana-genesis = { path = "../genesis", version = "1.1.9" }
solana-client = { path = "../client", version = "1.1.9" }
solana-faucet = { path = "../faucet", version = "1.1.9" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-metrics = { path = "../metrics", version = "1.1.9" }
solana-net-utils = { path = "../net-utils", version = "1.1.9" }
solana-runtime = { path = "../runtime", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-core = { path = "../core", version = "1.1.20" }
solana-genesis = { path = "../genesis", version = "1.1.20" }
solana-client = { path = "../client", version = "1.1.20" }
solana-faucet = { path = "../faucet", version = "1.1.20" }
solana-exchange-program = { path = "../programs/exchange", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-metrics = { path = "../metrics", version = "1.1.20" }
solana-net-utils = { path = "../net-utils", version = "1.1.20" }
solana-runtime = { path = "../runtime", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.1.9" }
solana-local-cluster = { path = "../local-cluster", version = "1.1.20" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,17 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.1.9"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.9" }
solana-streamer = { path = "../streamer", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-net-utils = { path = "../net-utils", version = "1.1.9" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-streamer = { path = "../streamer", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-net-utils = { path = "../net-utils", version = "1.1.20" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.1.9"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,24 +14,24 @@ log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.1.9" }
solana-core = { path = "../core", version = "1.1.9" }
solana-genesis = { path = "../genesis", version = "1.1.9" }
solana-client = { path = "../client", version = "1.1.9" }
solana-faucet = { path = "../faucet", version = "1.1.9" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-core = { path = "../core", version = "1.1.20" }
solana-genesis = { path = "../genesis", version = "1.1.20" }
solana-client = { path = "../client", version = "1.1.20" }
solana-faucet = { path = "../faucet", version = "1.1.20" }
#solana-librapay = { path = "../programs/librapay", version = "1.1.8", optional = true }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-metrics = { path = "../metrics", version = "1.1.9" }
solana-measure = { path = "../measure", version = "1.1.9" }
solana-net-utils = { path = "../net-utils", version = "1.1.9" }
solana-runtime = { path = "../runtime", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-metrics = { path = "../metrics", version = "1.1.20" }
solana-measure = { path = "../measure", version = "1.1.20" }
solana-net-utils = { path = "../net-utils", version = "1.1.20" }
solana-runtime = { path = "../runtime", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
#solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.8", optional = true }
[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.1.9" }
solana-local-cluster = { path = "../local-cluster", version = "1.1.20" }
#[features]
#move = ["solana-librapay", "solana-move-loader-program"]


@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "1.1.9"
version = "1.1.20"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,12 +10,12 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.9" }
solana-chacha = { path = "../chacha", version = "1.1.9" }
solana-ledger = { path = "../ledger", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-perf = { path = "../perf", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.1.20" }
solana-chacha = { path = "../chacha", version = "1.1.20" }
solana-ledger = { path = "../ledger", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-perf = { path = "../perf", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
[dev-dependencies]
hex-literal = "0.2.1"


@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "1.1.9"
version = "1.1.20"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"


@@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "1.1.9"
version = "1.1.20"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2018"
log = "0.4.8"
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.9" }
solana-ledger = { path = "../ledger", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-perf = { path = "../perf", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.1.20" }
solana-ledger = { path = "../ledger", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-perf = { path = "../perf", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
[dev-dependencies]
hex-literal = "0.2.1"


@@ -5,6 +5,9 @@
# Release tags use buildkite-release.yml instead
steps:
- command: "ci/test-sanity.sh"
name: "sanity"
timeout_in_minutes: 5
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
name: "checks"
timeout_in_minutes: 20


@@ -2,8 +2,10 @@
set -e
cd "$(dirname "$0")/.."
# shellcheck source=multinode-demo/common.sh
source multinode-demo/common.sh
rm -f config/run/init-completed
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
timeout 15 ./run.sh &
pid=$!
@@ -17,6 +19,16 @@ while [[ ! -f config/run/init-completed ]]; do
fi
done
snapshot_slot=1
# wait a bit longer than snapshot_slot
while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -le $((snapshot_slot + 1)) ]]; do
sleep 1
done
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
wait $pid
$solana_ledger_tool create-snapshot --ledger config/ledger "$snapshot_slot" config/snapshot-ledger
cp config/ledger/genesis.tar.bz2 config/snapshot-ledger
$solana_ledger_tool verify --ledger config/snapshot-ledger


@@ -10,9 +10,6 @@ source ci/rust-version.sh nightly
export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
# Look for failed mergify.io backports
_ git show HEAD --check --oneline
_ cargo +"$rust_stable" fmt --all -- --check
# Clippy gets stuck for unknown reasons if sdk-c is included in the build, so check it separately.
@@ -23,10 +20,8 @@ _ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnin
_ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ docs/build.sh
_ ci/check-ssh-keys.sh
{
cd programs/bpf

ci/test-sanity.sh (new executable file, 27 lines)

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
source ci/_
(
echo --- git diff --check
set -x
# Look for failed mergify.io backports by searching leftover conflict markers
# Also check for any trailing whitespaces!
if [[ -n $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
base_branch=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
else
base_branch=$BUILDKITE_BRANCH
fi
git fetch origin "$base_branch"
git diff "$(git merge-base HEAD "origin/$base_branch")..HEAD" --check --oneline
)
echo
_ ci/nits.sh
_ ci/check-ssh-keys.sh
echo --- ok

View File

@@ -39,9 +39,9 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
rm -rf target/xargo # Issue #3105
# Limit compiler jobs to reduce memory usage
# on machines with 1gb/thread of memory
# on machines with 2gb/thread of memory
NPROC=$(nproc)
NPROC=$((NPROC>16 ? 16 : NPROC))
NPROC=$((NPROC>14 ? 14 : NPROC))
echo "Executing $testName"
case $testName in

View File

@@ -23,10 +23,14 @@ if [[ -z $CI_TAG ]]; then
exit 1
fi
if [[ -z $CI_REPO_SLUG ]]; then
echo Error: CI_REPO_SLUG not defined
exit 1
fi
# Force CI_REPO_SLUG since sometimes
# BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the
# artifact upload to fail
CI_REPO_SLUG=solana-labs/solana
#if [[ -z $CI_REPO_SLUG ]]; then
# echo Error: CI_REPO_SLUG not defined
# exit 1
#fi
releaseId=$( \
curl -s "https://api.github.com/repos/$CI_REPO_SLUG/releases/tags/$CI_TAG" \

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.1.9"
version = "1.1.20"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"

View File

@@ -8,11 +8,15 @@ pub const COMMITMENT_ARG: ArgConstant<'static> = ArgConstant {
};
pub fn commitment_arg<'a, 'b>() -> Arg<'a, 'b> {
commitment_arg_with_default("recent")
}
pub fn commitment_arg_with_default<'a, 'b>(default_value: &'static str) -> Arg<'a, 'b> {
Arg::with_name(COMMITMENT_ARG.name)
.long(COMMITMENT_ARG.long)
.takes_value(true)
.possible_values(&["recent", "root", "max"])
.default_value("recent")
.default_value(default_value)
.value_name("COMMITMENT_LEVEL")
.help(COMMITMENT_ARG.help)
}
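The change above turns the hard-coded `recent` default into a parameter, so each subcommand can pick its own commitment level while sharing one flag definition. A minimal sketch of the intended wiring, assuming clap 2.33 and only the helpers shown in this diff:

```rust
use clap::{App, SubCommand};
use solana_clap_utils::commitment::{commitment_arg, commitment_arg_with_default};

fn balance_subcommand<'a, 'b>() -> App<'a, 'b> {
    // `balance` opts into the strongest default ("max"), as the cli.rs diff
    // below does...
    SubCommand::with_name("balance").arg(commitment_arg_with_default("max"))
}

fn epoch_subcommand<'a, 'b>() -> App<'a, 'b> {
    // ...while most query commands keep the old "recent" default.
    SubCommand::with_name("epoch").arg(commitment_arg())
}
```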

View File

@@ -6,50 +6,86 @@ use solana_sdk::{
pubkey::Pubkey,
signature::{read_keypair_file, Signature},
};
use std::fmt::Display;
use std::str::FromStr;
fn is_parsable_generic<U, T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
U: FromStr,
U::Err: Display,
{
string
.as_ref()
.parse::<U>()
.map(|_| ())
.map_err(|err| format!("error parsing '{}': {}", string, err))
}
// Return an error if string cannot be parsed as type T.
// Takes a String to avoid second type parameter when used as a clap validator
pub fn is_parsable<T>(string: String) -> Result<(), String>
where
T: FromStr,
T::Err: Display,
{
is_parsable_generic::<T, String>(string)
}
// Return an error if a pubkey cannot be parsed.
pub fn is_pubkey(string: String) -> Result<(), String> {
match string.parse::<Pubkey>() {
Ok(_) => Ok(()),
Err(err) => Err(format!("{}", err)),
}
pub fn is_pubkey<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
is_parsable_generic::<Pubkey, _>(string)
}
// Return an error if a hash cannot be parsed.
pub fn is_hash(string: String) -> Result<(), String> {
match string.parse::<Hash>() {
Ok(_) => Ok(()),
Err(err) => Err(format!("{}", err)),
}
pub fn is_hash<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
is_parsable_generic::<Hash, _>(string)
}
// Return an error if a keypair file cannot be parsed.
pub fn is_keypair(string: String) -> Result<(), String> {
read_keypair_file(&string)
pub fn is_keypair<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
read_keypair_file(string.as_ref())
.map(|_| ())
.map_err(|err| format!("{}", err))
}
// Return an error if a keypair file cannot be parsed
pub fn is_keypair_or_ask_keyword(string: String) -> Result<(), String> {
if string.as_str() == ASK_KEYWORD {
pub fn is_keypair_or_ask_keyword<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
if string.as_ref() == ASK_KEYWORD {
return Ok(());
}
read_keypair_file(&string)
read_keypair_file(string.as_ref())
.map(|_| ())
.map_err(|err| format!("{}", err))
}
// Return an error if string cannot be parsed as pubkey string or keypair file location
pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
is_pubkey(string.clone()).or_else(|_| is_keypair(string))
pub fn is_pubkey_or_keypair<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
is_pubkey(string.as_ref()).or_else(|_| is_keypair(string))
}
// Return an error if string cannot be parsed as a pubkey string, or a valid Signer that can
// produce a pubkey()
pub fn is_valid_pubkey(string: String) -> Result<(), String> {
match parse_keypair_path(&string) {
pub fn is_valid_pubkey<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
match parse_keypair_path(string.as_ref()) {
KeypairUrl::Filepath(path) => is_keypair(path),
_ => Ok(()),
}
@@ -63,13 +99,19 @@ pub fn is_valid_pubkey(string: String) -> Result<(), String> {
// when paired with an offline `--signer` argument to provide a Presigner (pubkey + signature).
// Clap validators can't check multiple fields at once, so the verification that a `--signer` is
// also provided and correct happens in parsing, not in validation.
pub fn is_valid_signer(string: String) -> Result<(), String> {
pub fn is_valid_signer<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
is_valid_pubkey(string)
}
// Return an error if string cannot be parsed as pubkey=signature string
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
let mut signer = string.split('=');
pub fn is_pubkey_sig<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
let mut signer = string.as_ref().split('=');
match Pubkey::from_str(
signer
.next()
@@ -90,8 +132,11 @@ pub fn is_pubkey_sig(string: String) -> Result<(), String> {
}
// Return an error if a url cannot be parsed.
pub fn is_url(string: String) -> Result<(), String> {
match url::Url::parse(&string) {
pub fn is_url<T>(string: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
match url::Url::parse(string.as_ref()) {
Ok(url) => {
if url.has_host() {
Ok(())
@@ -103,20 +148,26 @@ pub fn is_url(string: String) -> Result<(), String> {
}
}
pub fn is_slot(slot: String) -> Result<(), String> {
slot.parse::<Slot>()
.map(|_| ())
.map_err(|e| format!("{}", e))
pub fn is_slot<T>(slot: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
is_parsable_generic::<Slot, _>(slot)
}
pub fn is_port(port: String) -> Result<(), String> {
port.parse::<u16>()
.map(|_| ())
.map_err(|e| format!("{}", e))
pub fn is_port<T>(port: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
is_parsable_generic::<u16, _>(port)
}
pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
pub fn is_valid_percentage<T>(percentage: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
percentage
.as_ref()
.parse::<u8>()
.map_err(|e| {
format!(
@@ -136,8 +187,11 @@ pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
})
}
pub fn is_amount(amount: String) -> Result<(), String> {
if amount.parse::<u64>().is_ok() || amount.parse::<f64>().is_ok() {
pub fn is_amount<T>(amount: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
if amount.as_ref().parse::<u64>().is_ok() || amount.as_ref().parse::<f64>().is_ok() {
Ok(())
} else {
Err(format!(
@@ -147,14 +201,20 @@ pub fn is_amount(amount: String) -> Result<(), String> {
}
}
pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
DateTime::parse_from_rfc3339(&value)
pub fn is_rfc3339_datetime<T>(value: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
DateTime::parse_from_rfc3339(value.as_ref())
.map(|_| ())
.map_err(|e| format!("{}", e))
}
pub fn is_derivation(value: String) -> Result<(), String> {
let value = value.replace("'", "");
pub fn is_derivation<T>(value: T) -> Result<(), String>
where
T: AsRef<str> + Display,
{
let value = value.as_ref().replace("'", "");
let mut parts = value.split('/');
let account = parts.next().unwrap();
account
@@ -186,14 +246,14 @@ mod tests {
#[test]
fn test_is_derivation() {
assert_eq!(is_derivation("2".to_string()), Ok(()));
assert_eq!(is_derivation("0".to_string()), Ok(()));
assert_eq!(is_derivation("65537".to_string()), Ok(()));
assert_eq!(is_derivation("0/2".to_string()), Ok(()));
assert_eq!(is_derivation("0'/2'".to_string()), Ok(()));
assert!(is_derivation("a".to_string()).is_err());
assert!(is_derivation("4294967296".to_string()).is_err());
assert!(is_derivation("a/b".to_string()).is_err());
assert!(is_derivation("0/4294967296".to_string()).is_err());
assert_eq!(is_derivation("2"), Ok(()));
assert_eq!(is_derivation("0"), Ok(()));
assert_eq!(is_derivation("65537"), Ok(()));
assert_eq!(is_derivation("0/2"), Ok(()));
assert_eq!(is_derivation("0'/2'"), Ok(()));
assert!(is_derivation("a").is_err());
assert!(is_derivation("4294967296").is_err());
assert!(is_derivation("a/b").is_err());
assert!(is_derivation("0/4294967296").is_err());
}
}
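Loosening the validators from `String` to any `T: AsRef<str> + Display` lets tests call them with plain `&str` (as in `test_is_derivation` above) while clap still gets the `Fn(String) -> Result<(), String>` it expects, because each generic function monomorphizes to its `String` instance at the `.validator(...)` call site. A sketch, assuming clap 2.33:

```rust
use clap::{App, Arg};
use solana_clap_utils::input_validators::{is_amount, is_valid_pubkey};

fn pay_app<'a, 'b>() -> App<'a, 'b> {
    App::new("pay")
        .arg(
            Arg::with_name("to")
                .index(1)
                .takes_value(true)
                // The String instance of the generic fn is inferred here.
                .validator(is_valid_pubkey),
        )
        .arg(
            Arg::with_name("amount")
                .index(2)
                .takes_value(true)
                .validator(is_amount),
        )
}

#[test]
fn validators_accept_str() {
    assert!(is_amount("1.5").is_ok()); // no .to_string() needed anymore
    assert!(is_amount("one").is_err());
}
```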

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.9"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

View File

@@ -1,6 +1,6 @@
// Wallet settings that can be configured for long-term use
use serde_derive::{Deserialize, Serialize};
use std::io;
use std::{collections::HashMap, io};
use url::Url;
lazy_static! {
@@ -17,6 +17,8 @@ pub struct Config {
pub json_rpc_url: String,
pub websocket_url: String,
pub keypair_path: String,
#[serde(default)]
pub address_labels: HashMap<String, String>,
}
impl Default for Config {
@@ -36,6 +38,7 @@ impl Default for Config {
json_rpc_url,
websocket_url,
keypair_path,
address_labels: HashMap::new(),
}
}
}
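`#[serde(default)]` is what keeps existing config files loadable: a file written before this change has no `address_labels` key, and without the attribute deserialization would fail with a missing-field error instead of falling back to an empty map. A sketch, assuming the crate's YAML-based config file format:

```rust
// `Config` as defined in the diff above.
fn load_old_config() {
    // A config file written by an older release: no `address_labels` key.
    let old_config = r#"
json_rpc_url: "http://127.0.0.1:8899"
websocket_url: "ws://127.0.0.1:8900"
keypair_path: "/home/user/.config/solana/id.json"
"#;
    let config: Config = serde_yaml::from_str(old_config).unwrap();
    // #[serde(default)] supplies HashMap::new() rather than erroring.
    assert!(config.address_labels.is_empty());
}
```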

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.9"
version = "1.1.20"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,28 +27,28 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-budget-program = { path = "../programs/budget", version = "1.1.9" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.9" }
solana-cli-config = { path = "../cli-config", version = "1.1.9" }
solana-client = { path = "../client", version = "1.1.9" }
solana-config-program = { path = "../programs/config", version = "1.1.9" }
solana-faucet = { path = "../faucet", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-net-utils = { path = "../net-utils", version = "1.1.9" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.9" }
solana-runtime = { path = "../runtime", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-stake-program = { path = "../programs/stake", version = "1.1.9" }
solana-storage-program = { path = "../programs/storage", version = "1.1.9" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.9" }
solana-vote-program = { path = "../programs/vote", version = "1.1.9" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.9" }
solana-budget-program = { path = "../programs/budget", version = "1.1.20" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-cli-config = { path = "../cli-config", version = "1.1.20" }
solana-client = { path = "../client", version = "1.1.20" }
solana-config-program = { path = "../programs/config", version = "1.1.20" }
solana-faucet = { path = "../faucet", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-net-utils = { path = "../net-utils", version = "1.1.20" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.20" }
solana-runtime = { path = "../runtime", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-stake-program = { path = "../programs/stake", version = "1.1.20" }
solana-storage-program = { path = "../programs/storage", version = "1.1.20" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.20" }
solana-vote-program = { path = "../programs/vote", version = "1.1.20" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.20" }
thiserror = "1.0.13"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.1.9" }
solana-budget-program = { path = "../programs/budget", version = "1.1.9" }
solana-core = { path = "../core", version = "1.1.20" }
solana-budget-program = { path = "../programs/budget", version = "1.1.20" }
tempfile = "3.1.0"
[[bin]]

View File

@@ -1,7 +1,7 @@
use crate::{
cli_output::{CliAccount, OutputFormat},
cli_output::{CliAccount, CliSignOnlyData, CliSignature, OutputFormat},
cluster_query::*,
display::{println_name_value, println_signers},
display::{new_spinner_progress_bar, println_name_value, println_transaction},
nonce::{self, *},
offline::{blockhash_query::BlockhashQuery, *},
stake::*,
@@ -16,12 +16,17 @@ use num_traits::FromPrimitive;
use serde_json::{self, json, Value};
use solana_budget_program::budget_instruction::{self, BudgetError};
use solana_clap_utils::{
input_parsers::*, input_validators::*, keypair::signer_from_path, offline::SIGN_ONLY_ARG,
commitment::{commitment_arg_with_default, COMMITMENT_ARG},
input_parsers::*,
input_validators::*,
keypair::signer_from_path,
offline::SIGN_ONLY_ARG,
ArgConstant,
};
use solana_client::{
client_error::{ClientErrorKind, Result as ClientResult},
rpc_client::RpcClient,
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
rpc_response::{RpcAccount, RpcKeyedAccount},
};
#[cfg(not(test))]
@@ -31,7 +36,7 @@ use solana_faucet::faucet_mock::request_airdrop_transaction;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
bpf_loader,
clock::{Epoch, Slot},
clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND},
commitment_config::CommitmentConfig,
fee_calculator::FeeCalculator,
hash::Hash,
@@ -42,6 +47,7 @@ use solana_sdk::{
program_utils::DecodeError,
pubkey::{Pubkey, MAX_SEED_LEN},
signature::{Keypair, Signature, Signer, SignerError},
signers::Signers,
system_instruction::{self, SystemError},
system_program,
transaction::{Transaction, TransactionError},
@@ -55,6 +61,7 @@ use solana_transaction_status::{EncodedTransaction, TransactionEncoding};
use solana_vote_program::vote_state::VoteAuthorize;
use std::{
error,
fmt::Write as FmtWrite,
fs::File,
io::{Read, Write},
net::{IpAddr, SocketAddr},
@@ -200,6 +207,14 @@ pub enum CliCommand {
GetSlot {
commitment_config: CommitmentConfig,
},
LargestAccounts {
commitment_config: CommitmentConfig,
filter: Option<RpcLargestAccountsFilter>,
},
Supply {
commitment_config: CommitmentConfig,
print_accounts: bool,
},
TotalSupply {
commitment_config: CommitmentConfig,
},
@@ -230,8 +245,8 @@ pub enum CliCommand {
},
TransactionHistory {
address: Pubkey,
end_slot: Option<Slot>, // None == latest slot
slot_limit: u64,
end_slot: Option<Slot>, // None == latest slot
slot_limit: Option<u64>, // None == search full history
},
// Nonce commands
AuthorizeNonceAccount {
@@ -367,6 +382,7 @@ pub enum CliCommand {
},
// Vote Commands
CreateVoteAccount {
vote_account: SignerIndex,
seed: Option<String>,
identity_account: SignerIndex,
authorized_voter: Option<Pubkey>,
@@ -392,6 +408,12 @@ pub enum CliCommand {
VoteUpdateValidator {
vote_account_pubkey: Pubkey,
new_identity_account: SignerIndex,
withdraw_authority: SignerIndex,
},
VoteUpdateCommission {
vote_account_pubkey: Pubkey,
commission: u8,
withdraw_authority: SignerIndex,
},
// Wallet Commands
Address,
@@ -404,6 +426,7 @@ pub enum CliCommand {
Balance {
pubkey: Option<Pubkey>,
use_lamports_unit: bool,
commitment_config: CommitmentConfig,
},
Cancel(Pubkey),
Confirm(Signature),
@@ -568,6 +591,7 @@ impl Default for CliConfig<'_> {
command: CliCommand::Balance {
pubkey: Some(Pubkey::default()),
use_lamports_unit: false,
commitment_config: CommitmentConfig::default(),
},
json_rpc_url: Self::default_json_rpc_url(),
websocket_url: Self::default_websocket_url(),
@@ -611,6 +635,8 @@ pub fn parse_command(
}),
("epoch", Some(matches)) => parse_get_epoch(matches),
("slot", Some(matches)) => parse_get_slot(matches),
("largest-accounts", Some(matches)) => parse_largest_accounts(matches),
("supply", Some(matches)) => parse_supply(matches),
("total-supply", Some(matches)) => parse_total_supply(matches),
("transaction-count", Some(matches)) => parse_get_transaction_count(matches),
("leader-schedule", Some(_matches)) => Ok(CliCommandInfo {
@@ -709,6 +735,9 @@ pub fn parse_command(
("vote-update-validator", Some(matches)) => {
parse_vote_update_validator(matches, default_signer_path, wallet_manager)
}
("vote-update-commission", Some(matches)) => {
parse_vote_update_commission(matches, default_signer_path, wallet_manager)
}
("vote-authorize-voter", Some(matches)) => parse_vote_authorize(
matches,
default_signer_path,
@@ -781,6 +810,7 @@ pub fn parse_command(
}
("balance", Some(matches)) => {
let pubkey = pubkey_of_signer(matches, "pubkey", wallet_manager)?;
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
let signers = if pubkey.is_some() {
vec![]
} else {
@@ -795,6 +825,7 @@ pub fn parse_command(
command: CliCommand::Balance {
pubkey,
use_lamports_unit: matches.is_present("lamports"),
commitment_config,
},
signers,
})
@@ -818,7 +849,7 @@ pub fn parse_command(
},
("decode-transaction", Some(matches)) => {
let encoded_transaction = EncodedTransaction::Binary(
matches.value_of("base85_transaction").unwrap().to_string(),
matches.value_of("base58_transaction").unwrap().to_string(),
);
if let Some(transaction) = encoded_transaction.decode() {
Ok(CliCommandInfo {
@@ -1049,7 +1080,7 @@ pub fn get_blockhash_and_fee_calculator(
})
}
pub fn return_signers(tx: &Transaction) -> ProcessResult {
pub fn return_signers(tx: &Transaction, config: &CliConfig) -> ProcessResult {
let verify_results = tx.verify_with_results();
let mut signers = Vec::new();
let mut absent = Vec::new();
@@ -1068,15 +1099,14 @@ pub fn return_signers(tx: &Transaction) -> ProcessResult {
}
});
println_signers(&tx.message.recent_blockhash, &signers, &absent, &bad_sig);
let cli_command = CliSignOnlyData {
blockhash: tx.message.recent_blockhash.to_string(),
signers,
absent,
bad_sig,
};
Ok(json!({
"blockhash": tx.message.recent_blockhash.to_string(),
"signers": &signers,
"absent": &absent,
"badSig": &bad_sig,
})
.to_string())
Ok(config.output_format.formatted_string(&cli_command))
}
pub fn parse_create_address_with_seed(
@@ -1164,7 +1194,7 @@ fn process_airdrop(
}
};
request_and_confirm_airdrop(&rpc_client, faucet_addr, &pubkey, lamports)?;
request_and_confirm_airdrop(&rpc_client, faucet_addr, &pubkey, lamports, &config)?;
let current_balance = rpc_client
.retry_get_balance(&pubkey, 5)?
@@ -1178,19 +1208,17 @@ fn process_balance(
config: &CliConfig,
pubkey: &Option<Pubkey>,
use_lamports_unit: bool,
commitment_config: CommitmentConfig,
) -> ProcessResult {
let pubkey = if let Some(pubkey) = pubkey {
*pubkey
} else {
config.pubkey()?
};
let balance = rpc_client.retry_get_balance(&pubkey, 5)?;
match balance {
Some(lamports) => Ok(build_balance_message(lamports, use_lamports_unit, true)),
None => Err(
CliError::RpcRequestError("Received result of an unexpected type".to_string()).into(),
),
}
let balance = rpc_client
.get_balance_with_commitment(&pubkey, commitment_config)?
.value;
Ok(build_balance_message(balance, use_lamports_unit, true))
}
fn process_confirm(
@@ -1214,7 +1242,7 @@ fn process_confirm(
"\nTransaction executed in slot {}:",
confirmed_transaction.slot
);
crate::display::println_transaction(
println_transaction(
&confirmed_transaction
.transaction
.transaction
@@ -1244,7 +1272,7 @@ fn process_confirm(
}
fn process_decode_transaction(transaction: &Transaction) -> ProcessResult {
crate::display::println_transaction(transaction, &None, "");
println_transaction(transaction, &None, "");
Ok("".to_string())
}
@@ -1265,21 +1293,118 @@ fn process_show_account(
use_lamports_unit,
};
config.output_format.formatted_print(&cli_account);
let mut account_string = config.output_format.formatted_string(&cli_account);
if config.output_format == OutputFormat::Display {
if let Some(output_file) = output_file {
let mut f = File::create(output_file)?;
f.write_all(&data)?;
println!();
println!("Wrote account data to {}", output_file);
writeln!(&mut account_string)?;
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
} else if !data.is_empty() {
use pretty_hex::*;
println!("{:?}", data.hex_dump());
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
}
}
Ok("".to_string())
Ok(account_string)
}
fn send_and_confirm_transactions_with_spinner<T: Signers>(
rpc_client: &RpcClient,
mut transactions: Vec<Transaction>,
signer_keys: &T,
) -> Result<(), Box<dyn error::Error>> {
let progress_bar = new_spinner_progress_bar();
let mut send_retries = 5;
loop {
let mut status_retries = 15;
// Send all transactions
let mut transactions_signatures = vec![];
let num_transactions = transactions.len();
for transaction in transactions {
if cfg!(not(test)) {
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
// when all the write transactions modify the same program account (eg, deploying a
// new program)
sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
}
let signature = rpc_client
.send_transaction_with_config(
&transaction,
RpcSendTransactionConfig {
skip_preflight: true,
},
)
.ok();
transactions_signatures.push((transaction, signature));
progress_bar.set_message(&format!(
"[{}/{}] Transactions sent",
transactions_signatures.len(),
num_transactions
));
}
// Collect statuses for all the transactions, drop those that are confirmed
while status_retries > 0 {
status_retries -= 1;
progress_bar.set_message(&format!(
"[{}/{}] Transactions confirmed",
num_transactions - transactions_signatures.len(),
num_transactions
));
if cfg!(not(test)) {
// Retry twice a second
sleep(Duration::from_millis(500));
}
transactions_signatures = transactions_signatures
.into_iter()
.filter(|(_transaction, signature)| {
if let Some(signature) = signature {
if let Ok(status) = rpc_client.get_signature_status(&signature) {
if rpc_client
.get_num_blocks_since_signature_confirmation(&signature)
.unwrap_or(0)
> 1
{
return false;
} else {
return match status {
None => true,
Some(result) => result.is_err(),
};
}
}
}
true
})
.collect();
if transactions_signatures.is_empty() {
return Ok(());
}
}
if send_retries == 0 {
return Err("Transactions failed".into());
}
send_retries -= 1;
// Re-sign any failed transactions with a new blockhash and retry
let (blockhash, _fee_calculator) = rpc_client
.get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
transactions = vec![];
for (mut transaction, _) in transactions_signatures.into_iter() {
transaction.try_sign(signer_keys, blockhash)?;
transactions.push(transaction);
}
}
}
fn process_deploy(
@@ -1343,17 +1468,24 @@ fn process_deploy(
)?;
trace!("Creating program account");
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut create_account_tx, &signers);
log_instruction_custom_error::<SystemError>(result)
.map_err(|_| CliError::DynamicProgramError("Program allocate space failed".to_string()))?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&create_account_tx);
log_instruction_custom_error::<SystemError>(result, &config).map_err(|_| {
CliError::DynamicProgramError("Program account allocation failed".to_string())
})?;
trace!("Writing program data");
rpc_client.send_and_confirm_transactions(write_transactions, &signers)?;
send_and_confirm_transactions_with_spinner(&rpc_client, write_transactions, &signers).map_err(
|_| CliError::DynamicProgramError("Data writes to program account failed".to_string()),
)?;
trace!("Finalizing program account");
rpc_client
.send_and_confirm_transaction_with_spinner(&mut finalize_tx, &signers)
.send_and_confirm_transaction_with_spinner_and_config(
&finalize_tx,
RpcSendTransactionConfig {
skip_preflight: true,
},
)
.map_err(|e| {
CliError::DynamicProgramError(format!("Program finalize transaction failed: {}", e))
})?;
@@ -1405,7 +1537,7 @@ fn process_pay(
if sign_only {
tx.try_partial_sign(&config.signers, blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1418,9 +1550,8 @@ fn process_pay(
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
} else if *witnesses == None {
let dt = timestamp.unwrap();
@@ -1445,7 +1576,7 @@ fn process_pay(
let mut tx = Transaction::new_unsigned(message);
if sign_only {
tx.try_partial_sign(&[config.signers[0], &contract_state], blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&[config.signers[0], &contract_state], blockhash)?;
check_account_for_fee(
@@ -1454,14 +1585,10 @@ fn process_pay(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(
&mut tx,
&[config.signers[0], &contract_state],
);
let signature_str = log_instruction_custom_error::<BudgetError>(result)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
let signature = log_instruction_custom_error::<BudgetError>(result, &config)?;
Ok(json!({
"signature": signature_str,
"signature": signature,
"processId": format!("{}", contract_state.pubkey()),
})
.to_string())
@@ -1491,23 +1618,19 @@ fn process_pay(
let mut tx = Transaction::new_unsigned(message);
if sign_only {
tx.try_partial_sign(&[config.signers[0], &contract_state], blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&[config.signers[0], &contract_state], blockhash)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(
&mut tx,
&[config.signers[0], &contract_state],
);
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
check_account_for_fee(
rpc_client,
&config.signers[0].pubkey(),
&fee_calculator,
&tx.message,
)?;
let signature_str = log_instruction_custom_error::<BudgetError>(result)?;
let signature = log_instruction_custom_error::<BudgetError>(result, &config)?;
Ok(json!({
"signature": signature_str,
"signature": signature,
"processId": format!("{}", contract_state.pubkey()),
})
.to_string())
@@ -1533,9 +1656,8 @@ fn process_cancel(rpc_client: &RpcClient, config: &CliConfig, pubkey: &Pubkey) -
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
log_instruction_custom_error::<BudgetError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<BudgetError>(result, &config)
}
fn process_time_elapsed(
@@ -1557,9 +1679,8 @@ fn process_time_elapsed(
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
log_instruction_custom_error::<BudgetError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<BudgetError>(result, &config)
}
#[allow(clippy::too_many_arguments)]
@@ -1578,11 +1699,6 @@ fn process_transfer(
) -> ProcessResult {
let from = config.signers[from];
check_unique_pubkeys(
(&from.pubkey(), "cli keypair".to_string()),
(to, "to".to_string()),
)?;
let (recent_blockhash, fee_calculator) =
blockhash_query.get_blockhash_and_fee_calculator(rpc_client)?;
let ixs = vec![system_instruction::transfer(&from.pubkey(), to, lamports)];
@@ -1604,7 +1720,7 @@ fn process_transfer(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1620,9 +1736,9 @@ fn process_transfer(
let result = if no_wait {
rpc_client.send_transaction(&tx)
} else {
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers)
rpc_client.send_and_confirm_transaction_with_spinner(&tx)
};
log_instruction_custom_error::<SystemError>(result)
log_instruction_custom_error::<SystemError>(result, &config)
}
}
@@ -1644,9 +1760,8 @@ fn process_witness(
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
log_instruction_custom_error::<BudgetError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<BudgetError>(result, &config)
}
pub fn process_command(config: &CliConfig) -> ProcessResult {
@@ -1704,6 +1819,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::GetSlot { commitment_config } => {
process_get_slot(&rpc_client, *commitment_config)
}
CliCommand::LargestAccounts {
commitment_config,
filter,
} => process_largest_accounts(&rpc_client, config, *commitment_config, filter.clone()),
CliCommand::Supply {
commitment_config,
print_accounts,
} => process_supply(&rpc_client, config, *commitment_config, *print_accounts),
CliCommand::TotalSupply { commitment_config } => {
process_total_supply(&rpc_client, *commitment_config)
}
@@ -2046,6 +2169,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
// Create vote account
CliCommand::CreateVoteAccount {
vote_account,
seed,
identity_account,
authorized_voter,
@@ -2054,6 +2178,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
} => process_create_vote_account(
&rpc_client,
config,
*vote_account,
seed,
*identity_account,
authorized_voter,
@@ -2098,11 +2223,24 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account,
withdraw_authority,
} => process_vote_update_validator(
&rpc_client,
config,
&vote_account_pubkey,
*new_identity_account,
*withdraw_authority,
),
CliCommand::VoteUpdateCommission {
vote_account_pubkey,
commission,
withdraw_authority,
} => process_vote_update_commission(
&rpc_client,
config,
&vote_account_pubkey,
*commission,
*withdraw_authority,
),
// Wallet Commands
@@ -2134,7 +2272,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::Balance {
pubkey,
use_lamports_unit,
} => process_balance(&rpc_client, config, &pubkey, *use_lamports_unit),
commitment_config,
} => process_balance(
&rpc_client,
config,
&pubkey,
*use_lamports_unit,
*commitment_config,
),
// Cancel a contract by contract Pubkey
CliCommand::Cancel(pubkey) => process_cancel(&rpc_client, config, &pubkey),
// Confirm the last client transaction by signature
@@ -2262,6 +2407,7 @@ pub fn request_and_confirm_airdrop(
faucet_addr: &SocketAddr,
to_pubkey: &Pubkey,
lamports: u64,
config: &CliConfig,
) -> ProcessResult {
let (blockhash, _fee_calculator) = rpc_client.get_recent_blockhash()?;
let keypair = {
@@ -2275,12 +2421,15 @@ pub fn request_and_confirm_airdrop(
sleep(Duration::from_secs(1));
}
}?;
let mut tx = keypair.airdrop_transaction();
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[&keypair]);
log_instruction_custom_error::<SystemError>(result)
let tx = keypair.airdrop_transaction();
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn log_instruction_custom_error<E>(result: ClientResult<Signature>) -> ProcessResult
pub fn log_instruction_custom_error<E>(
result: ClientResult<Signature>,
config: &CliConfig,
) -> ProcessResult
where
E: 'static + std::error::Error + DecodeError<E> + FromPrimitive,
{
@@ -2297,7 +2446,12 @@ where
}
Err(err.into())
}
Ok(sig) => Ok(sig.to_string()),
Ok(sig) => {
let signature = CliSignature {
signature: sig.clone().to_string(),
};
Ok(config.output_format.formatted_string(&signature))
}
}
}
@@ -2394,7 +2548,8 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.long("lamports")
.takes_value(false)
.help("Display balance in lamports instead of SOL"),
),
)
.arg(commitment_arg_with_default("max")),
)
.subcommand(
SubCommand::with_name("cancel")
@@ -2423,9 +2578,9 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
)
.subcommand(
SubCommand::with_name("decode-transaction")
.about("Decode a base-85 binary transaction")
.about("Decode a base-58 binary transaction")
.arg(
Arg::with_name("base85_transaction")
Arg::with_name("base58_transaction")
.index(1)
.value_name("BASE58_TRANSACTION")
.takes_value(true)
@@ -2805,7 +2960,8 @@ mod tests {
CliCommandInfo {
command: CliCommand::Balance {
pubkey: Some(keypair.pubkey()),
use_lamports_unit: false
use_lamports_unit: false,
commitment_config: CommitmentConfig::default(),
},
signers: vec![],
}
@@ -2821,7 +2977,8 @@ mod tests {
CliCommandInfo {
command: CliCommand::Balance {
pubkey: Some(keypair.pubkey()),
use_lamports_unit: true
use_lamports_unit: true,
commitment_config: CommitmentConfig::default(),
},
signers: vec![],
}
@@ -2835,7 +2992,8 @@ mod tests {
CliCommandInfo {
command: CliCommand::Balance {
pubkey: None,
use_lamports_unit: true
use_lamports_unit: true,
commitment_config: CommitmentConfig::default(),
},
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -3305,12 +3463,14 @@ mod tests {
config.command = CliCommand::Balance {
pubkey: None,
use_lamports_unit: true,
commitment_config: CommitmentConfig::default(),
};
assert_eq!(process_command(&config).unwrap(), "50 lamports");
config.command = CliCommand::Balance {
pubkey: None,
use_lamports_unit: false,
commitment_config: CommitmentConfig::default(),
};
assert_eq!(process_command(&config).unwrap(), "0.00000005 SOL");
@@ -3326,6 +3486,7 @@ mod tests {
let bob_pubkey = bob_keypair.pubkey();
let identity_keypair = Keypair::new();
config.command = CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 2,
authorized_voter: Some(bob_pubkey),
@@ -3351,6 +3512,7 @@ mod tests {
config.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey: bob_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
};
let result = process_command(&config);
assert!(result.is_ok());
@@ -3544,12 +3706,14 @@ mod tests {
config.command = CliCommand::Balance {
pubkey: None,
use_lamports_unit: false,
commitment_config: CommitmentConfig::default(),
};
assert!(process_command(&config).is_err());
let bob_keypair = Keypair::new();
let identity_keypair = Keypair::new();
config.command = CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 2,
authorized_voter: Some(bob_pubkey),
@@ -3569,6 +3733,7 @@ mod tests {
config.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey: bob_pubkey,
new_identity_account: 1,
withdraw_authority: 1,
};
assert!(process_command(&config).is_err());
@@ -3839,6 +4004,8 @@ mod tests {
}
}
let mut config = CliConfig::default();
config.output_format = OutputFormat::JsonCompact;
let present: Box<dyn Signer> = Box::new(keypair_from_seed(&[2u8; 32]).unwrap());
let absent: Box<dyn Signer> = Box::new(NullSigner::new(&Pubkey::new(&[3u8; 32])));
let bad: Box<dyn Signer> = Box::new(BadSigner::new(Pubkey::new(&[4u8; 32])));
@@ -3857,7 +4024,7 @@ mod tests {
let signers = vec![present.as_ref(), absent.as_ref(), bad.as_ref()];
let blockhash = Hash::new(&[7u8; 32]);
tx.try_partial_sign(&signers, blockhash).unwrap();
let res = return_signers(&tx).unwrap();
let res = return_signers(&tx, &config).unwrap();
let sign_only = parse_sign_only_reply_string(&res);
assert_eq!(sign_only.blockhash, blockhash);
assert_eq!(sign_only.present_signers[0].0, present.pubkey());
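The thread running through this file is that `process_*` helpers no longer print: they return a string rendered once through `config.output_format`, so JSON output works uniformly and tests can assert on the result (as the `return_signers` test above does). A minimal sketch of the new call-site shape, reusing only names from this diff:

```rust
use solana_sdk::transaction::Transaction;

// Render a sign-only reply as compact JSON, mirroring the test above.
fn sign_only_json(tx: &Transaction) -> ProcessResult {
    let mut config = CliConfig::default();
    config.output_format = OutputFormat::JsonCompact;
    // Emits {"blockhash": "...", "signers": [...], ...} via CliSignOnlyData,
    // which parse_sign_only_reply_string can read back for offline signing.
    return_signers(tx, &config)
}
```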

View File

@@ -4,9 +4,12 @@ use console::{style, Emoji};
use inflector::cases::titlecase::to_title_case;
use serde::Serialize;
use serde_json::{Map, Value};
use solana_client::rpc_response::{RpcEpochInfo, RpcKeyedAccount, RpcVoteAccountInfo};
use solana_client::rpc_response::{
RpcAccountBalance, RpcEpochInfo, RpcKeyedAccount, RpcSupply, RpcVoteAccountInfo,
};
use solana_sdk::{
clock::{self, Epoch, Slot, UnixTimestamp},
native_token::lamports_to_sol,
stake_history::StakeHistoryEntry,
};
use solana_stake_program::stake_state::{Authorized, Lockup};
@@ -26,20 +29,14 @@ pub enum OutputFormat {
}
impl OutputFormat {
pub fn formatted_print<T>(&self, item: &T)
pub fn formatted_string<T>(&self, item: &T) -> String
where
T: Serialize + fmt::Display,
{
match self {
OutputFormat::Display => {
println!("{}", item);
}
OutputFormat::Json => {
println!("{}", serde_json::to_string_pretty(item).unwrap());
}
OutputFormat::JsonCompact => {
println!("{}", serde_json::to_value(item).unwrap());
}
OutputFormat::Display => format!("{}", item),
OutputFormat::Json => serde_json::to_string_pretty(item).unwrap(),
OutputFormat::JsonCompact => serde_json::to_value(item).unwrap().to_string(),
}
}
}
@@ -862,3 +859,124 @@ impl fmt::Display for CliBlockTime {
)
}
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct CliSignOnlyData {
pub blockhash: String,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub signers: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub absent: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub bad_sig: Vec<String>,
}
impl fmt::Display for CliSignOnlyData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Blockhash:", &self.blockhash)?;
if !self.signers.is_empty() {
writeln!(f, "{}", style("Signers (Pubkey=Signature):").bold())?;
for signer in self.signers.iter() {
writeln!(f, " {}", signer)?;
}
}
if !self.absent.is_empty() {
writeln!(f, "{}", style("Absent Signers (Pubkey):").bold())?;
for pubkey in self.absent.iter() {
writeln!(f, " {}", pubkey)?;
}
}
if !self.bad_sig.is_empty() {
writeln!(f, "{}", style("Bad Signatures (Pubkey):").bold())?;
for pubkey in self.bad_sig.iter() {
writeln!(f, " {}", pubkey)?;
}
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct CliSignature {
pub signature: String,
}
impl fmt::Display for CliSignature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Signature:", &self.signature)?;
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct CliAccountBalances {
pub accounts: Vec<RpcAccountBalance>,
}
impl fmt::Display for CliAccountBalances {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"{}",
style(format!("{:<44} {}", "Address", "Balance",)).bold()
)?;
for account in &self.accounts {
writeln!(
f,
"{:<44} {}",
account.address,
&format!("{} SOL", lamports_to_sol(account.lamports))
)?;
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct CliSupply {
pub total: u64,
pub circulating: u64,
pub non_circulating: u64,
pub non_circulating_accounts: Vec<String>,
#[serde(skip_serializing)]
pub print_accounts: bool,
}
impl From<RpcSupply> for CliSupply {
fn from(rpc_supply: RpcSupply) -> Self {
Self {
total: rpc_supply.total,
circulating: rpc_supply.circulating,
non_circulating: rpc_supply.non_circulating,
non_circulating_accounts: rpc_supply.non_circulating_accounts,
print_accounts: false,
}
}
}
impl fmt::Display for CliSupply {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Total:", &format!("{} SOL", lamports_to_sol(self.total)))?;
writeln_name_value(
f,
"Circulating:",
&format!("{} SOL", lamports_to_sol(self.circulating)),
)?;
writeln_name_value(
f,
"Non-Circulating:",
&format!("{} SOL", lamports_to_sol(self.non_circulating)),
)?;
if self.print_accounts {
writeln!(f)?;
writeln_name_value(f, "Non-Circulating Accounts:", " ")?;
for account in &self.non_circulating_accounts {
writeln!(f, " {}", account)?;
}
}
Ok(())
}
}
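`formatted_string` only asks for `Serialize + Display`, so adding a new CLI output type means deriving `Serialize` for the JSON modes and hand-writing `Display` for the human mode; no printing logic changes. A sketch with a hypothetical `CliSlot` type (not part of this diff):

```rust
use serde::Serialize;
use std::fmt;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct CliSlot {
    pub slot: u64, // hypothetical example type
}

impl fmt::Display for CliSlot {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "Slot: {}", self.slot)
    }
}

fn demo() {
    let item = CliSlot { slot: 42 };
    // Compact JSON via serde_json::to_value(..).to_string():
    assert_eq!(
        OutputFormat::JsonCompact.formatted_string(&item),
        r#"{"slot":42}"#
    );
    // OutputFormat::Display yields the Display impl: "Slot: 42\n"
}
```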

View File

@@ -1,11 +1,10 @@
use crate::{
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
cli_output::*,
display::println_name_value,
display::{new_spinner_progress_bar, println_name_value},
};
use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use solana_clap_utils::{
commitment::{commitment_arg, COMMITMENT_ARG},
input_parsers::*,
@@ -15,6 +14,7 @@ use solana_clap_utils::{
use solana_client::{
pubsub_client::{PubsubClient, SlotInfoMessage},
rpc_client::RpcClient,
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
@@ -121,8 +121,36 @@ impl ClusterQuerySubCommands for App<'_, '_> {
SubCommand::with_name("epoch").about("Get current epoch")
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("largest-accounts").about("Get addresses of largest cluster accounts")
.arg(
Arg::with_name("circulating")
.long("circulating")
.takes_value(false)
.help("Filter address list to only circulating accounts")
)
.arg(
Arg::with_name("non_circulating")
.long("non-circulating")
.takes_value(false)
.conflicts_with("circulating")
.help("Filter address list to only non-circulating accounts")
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("supply").about("Get information about the cluster supply of SOL")
.arg(
Arg::with_name("print_accounts")
.long("print-accounts")
.takes_value(false)
.help("Print list of non-circualting account addresses")
)
.arg(commitment_arg()),
)
.subcommand(
SubCommand::with_name("total-supply").about("Get total number of SOL")
.setting(AppSettings::Hidden)
.arg(commitment_arg()),
)
.subcommand(
@@ -346,6 +374,36 @@ pub fn parse_get_epoch(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliEr
})
}
pub fn parse_largest_accounts(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
let filter = if matches.is_present("circulating") {
Some(RpcLargestAccountsFilter::Circulating)
} else if matches.is_present("non_circulating") {
Some(RpcLargestAccountsFilter::NonCirculating)
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::LargestAccounts {
commitment_config,
filter,
},
signers: vec![],
})
}
pub fn parse_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
let print_accounts = matches.is_present("print_accounts");
Ok(CliCommandInfo {
command: CliCommand::Supply {
commitment_config,
print_accounts,
},
signers: vec![],
})
}
pub fn parse_total_supply(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
Ok(CliCommandInfo {
@@ -398,8 +456,7 @@ pub fn parse_transaction_history(
) -> Result<CliCommandInfo, CliError> {
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
let end_slot = value_t!(matches, "end_slot", Slot).ok();
let slot_limit = value_t!(matches, "limit", u64)
.unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE);
let slot_limit = value_t!(matches, "limit", u64).ok();
Ok(CliCommandInfo {
command: CliCommand::TransactionHistory {
@@ -411,15 +468,6 @@ pub fn parse_transaction_history(
})
}
/// Creates a new progress bar for processing that will take an unknown amount of time
fn new_spinner_progress_bar() -> ProgressBar {
let progress_bar = ProgressBar::new(42);
progress_bar
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
progress_bar.enable_steady_tick(100);
progress_bar
}
pub fn process_catchup(
rpc_client: &RpcClient,
node_pubkey: &Pubkey,
@@ -534,8 +582,7 @@ pub fn process_cluster_date(rpc_client: &RpcClient, config: &CliConfig) -> Proce
slot: result.context.slot,
timestamp: clock.unix_timestamp,
};
config.output_format.formatted_print(&block_time);
Ok("".to_string())
Ok(config.output_format.formatted_string(&block_time))
} else {
Err(format!("AccountNotFound: pubkey={}", sysvar::clock::id()).into())
}
@@ -602,8 +649,7 @@ pub fn process_get_block_time(
};
let timestamp = rpc_client.get_block_time(slot)?;
let block_time = CliBlockTime { slot, timestamp };
config.output_format.formatted_print(&block_time);
Ok("".to_string())
Ok(config.output_format.formatted_string(&block_time))
}
pub fn process_get_epoch_info(
@@ -614,8 +660,7 @@ pub fn process_get_epoch_info(
let epoch_info: CliEpochInfo = rpc_client
.get_epoch_info_with_commitment(commitment_config.clone())?
.into();
config.output_format.formatted_print(&epoch_info);
Ok("".to_string())
Ok(config.output_format.formatted_string(&epoch_info))
}
pub fn process_get_genesis_hash(rpc_client: &RpcClient) -> ProcessResult {
@@ -656,7 +701,7 @@ pub fn process_show_block_production(
slot_limit: Option<u64>,
) -> ProcessResult {
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::max())?;
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::root())?;
let epoch = epoch.unwrap_or(epoch_info.epoch);
if epoch > epoch_info.epoch {
@@ -715,7 +760,7 @@ pub fn process_show_block_production(
progress_bar.set_message(&format!("Fetching leader schedule for epoch {}...", epoch));
let leader_schedule = rpc_client
.get_leader_schedule_with_commitment(Some(start_slot), CommitmentConfig::max())?;
.get_leader_schedule_with_commitment(Some(start_slot), CommitmentConfig::root())?;
if leader_schedule.is_none() {
return Err(format!("Unable to fetch leader schedule for slot {}", start_slot).into());
}
@@ -797,8 +842,35 @@ pub fn process_show_block_production(
individual_slot_status,
verbose: config.verbose,
};
config.output_format.formatted_print(&block_production);
Ok("".to_string())
Ok(config.output_format.formatted_string(&block_production))
}
pub fn process_largest_accounts(
rpc_client: &RpcClient,
config: &CliConfig,
commitment_config: CommitmentConfig,
filter: Option<RpcLargestAccountsFilter>,
) -> ProcessResult {
let accounts = rpc_client
.get_largest_accounts_with_config(RpcLargestAccountsConfig {
commitment: Some(commitment_config),
filter,
})?
.value;
let largest_accounts = CliAccountBalances { accounts };
Ok(config.output_format.formatted_string(&largest_accounts))
}
pub fn process_supply(
rpc_client: &RpcClient,
config: &CliConfig,
commitment_config: CommitmentConfig,
print_accounts: bool,
) -> ProcessResult {
let supply_response = rpc_client.supply_with_commitment(commitment_config.clone())?;
let mut supply: CliSupply = supply_response.value.into();
supply.print_accounts = print_accounts;
Ok(config.output_format.formatted_string(&supply))
}
pub fn process_total_supply(
@@ -1127,10 +1199,9 @@ pub fn process_show_stakes(
}
}
}
config
Ok(config
.output_format
.formatted_print(&CliStakeVec::new(stake_accounts));
Ok("".to_string())
.formatted_string(&CliStakeVec::new(stake_accounts)))
}
pub fn process_show_validators(
@@ -1174,15 +1245,14 @@ pub fn process_show_validators(
delinquent_validators,
use_lamports_unit,
};
config.output_format.formatted_print(&cli_validators);
Ok("".to_string())
Ok(config.output_format.formatted_string(&cli_validators))
}
pub fn process_transaction_history(
rpc_client: &RpcClient,
address: &Pubkey,
end_slot: Option<Slot>, // None == use latest slot
slot_limit: u64,
slot_limit: Option<u64>,
) -> ProcessResult {
let end_slot = {
if let Some(end_slot) = end_slot {
@@ -1191,18 +1261,30 @@ pub fn process_transaction_history(
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
}
};
let start_slot = end_slot.saturating_sub(slot_limit);
let mut start_slot = match slot_limit {
Some(slot_limit) => end_slot.saturating_sub(slot_limit),
None => rpc_client.minimum_ledger_slot()?,
};
println!(
"Transactions affecting {} within slots [{},{}]",
address, start_slot, end_slot
);
let signatures =
rpc_client.get_confirmed_signatures_for_address(address, start_slot, end_slot)?;
for signature in &signatures {
println!("{}", signature);
let mut transaction_count = 0;
while start_slot < end_slot {
let signatures = rpc_client.get_confirmed_signatures_for_address(
address,
start_slot,
(start_slot + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE).min(end_slot),
)?;
for signature in &signatures {
println!("{}", signature);
}
transaction_count += signatures.len();
start_slot += MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;
}
Ok(format!("{} transactions found", signatures.len(),))
Ok(format!("{} transactions found", transaction_count))
}
#[cfg(test)]
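The rewritten `process_transaction_history` walks the slot range in windows of `MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE` instead of issuing a single capped query, so a `slot_limit` of `None` can cover the full ledger back to `minimum_ledger_slot`. The chunking pattern in isolation, as a sketch with a stubbed fetch:

```rust
// `fetch` stands in for get_confirmed_signatures_for_address.
fn count_in_windows(
    mut start_slot: u64,
    end_slot: u64,
    window: u64,
    fetch: impl Fn(u64, u64) -> usize,
) -> usize {
    let mut total = 0;
    while start_slot < end_slot {
        // Clamp the upper edge so the final chunk never overshoots end_slot.
        let chunk_end = (start_slot + window).min(end_slot);
        total += fetch(start_slot, chunk_end);
        start_slot += window;
    }
    total
}
```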

View File

@@ -1,5 +1,6 @@
use crate::cli::SettingType;
use console::style;
use indicatif::{ProgressBar, ProgressStyle};
use solana_sdk::{
hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
transaction::Transaction,
@@ -200,3 +201,12 @@ pub fn println_transaction(
}
}
}
/// Creates a new progress bar for processing that will take an unknown amount of time
pub fn new_spinner_progress_bar() -> ProgressBar {
let progress_bar = ProgressBar::new(42);
progress_bar
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
progress_bar.enable_steady_tick(100);
progress_bar
}
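Moving the spinner helper into display.rs lets cli.rs and cluster_query.rs share it. Typical usage, assuming indicatif's standard `ProgressBar` API:

```rust
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Sending transactions...");
// ... long-running work, updating the message as it progresses ...
progress_bar.finish_and_clear(); // remove the spinner when done
```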

View File

@@ -2,8 +2,7 @@ use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches
use console::style;
use solana_clap_utils::{
input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, offline::SIGN_ONLY_ARG,
DisplayError,
input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, DisplayError,
};
use solana_cli::{
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliSigners},
@@ -262,13 +261,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?;
config.signers = signers.iter().map(|s| s.as_ref()).collect();
let result = process_command(&config)?;
let (_, submatches) = matches.subcommand();
let sign_only = submatches
.map(|m| m.is_present(SIGN_ONLY_ARG.name))
.unwrap_or(false);
if !sign_only {
println!("{}", result);
}
println!("{}", result);
};
Ok(())
}

View File

@@ -462,8 +462,8 @@ pub fn process_authorize_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<NonceError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<NonceError>(result, &config)
}
pub fn process_create_nonce_account(
@@ -539,8 +539,8 @@ pub fn process_create_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn process_get_nonce(rpc_client: &RpcClient, nonce_account_pubkey: &Pubkey) -> ProcessResult {
@@ -580,9 +580,8 @@ pub fn process_new_nonce(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client
.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0], nonce_authority]);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn process_show_nonce_account(
@@ -606,8 +605,7 @@ pub fn process_show_nonce_account(
nonce_account.authority = Some(data.authority.to_string());
}
config.output_format.formatted_print(&nonce_account);
Ok("".to_string())
Ok(config.output_format.formatted_string(&nonce_account))
};
match state_from_account(&nonce_account)? {
State::Uninitialized => print_account(None),
@@ -641,8 +639,8 @@ pub fn process_withdraw_from_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<NonceError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<NonceError>(result, &config)
}
#[cfg(test)]
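The nonce changes above repeat the migration applied throughout this diff: the transaction is fully signed up front with `try_sign`, so the client helper now takes `&Transaction` instead of `&mut Transaction` plus a signer set, and `log_instruction_custom_error` takes the config so its success path can honor the output format. The distilled call-site shape, using only names from this diff:

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{system_instruction::SystemError, transaction::Transaction};

// `tx` is already signed by the caller; no signers are passed here.
fn send_it(rpc_client: &RpcClient, config: &CliConfig, tx: &Transaction) -> ProcessResult {
    let result = rpc_client.send_and_confirm_transaction_with_spinner(tx);
    log_instruction_custom_error::<SystemError>(result, config)
}
```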

View File

@@ -79,32 +79,47 @@ pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let present_signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
let signer_strings = object.get("absent").unwrap().as_array().unwrap();
let absent_signers = signer_strings
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
let signer_strings = object.get("badSig").unwrap().as_array().unwrap();
let bad_signers = signer_strings
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
let mut present_signers: Vec<(Pubkey, Signature)> = Vec::new();
let signer_strings = object.get("signers");
if let Some(sig_strings) = signer_strings {
present_signers = sig_strings
.as_array()
.unwrap()
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
}
let mut absent_signers: Vec<Pubkey> = Vec::new();
let signer_strings = object.get("absent");
if let Some(sig_strings) = signer_strings {
absent_signers = sig_strings
.as_array()
.unwrap()
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
}
let mut bad_signers: Vec<Pubkey> = Vec::new();
let signer_strings = object.get("badSig");
if let Some(sig_strings) = signer_strings {
bad_signers = sig_strings
.as_array()
.unwrap()
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
}
SignOnly {
blockhash,
present_signers,

View File

@@ -881,7 +881,7 @@ pub fn process_create_stake_account(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -894,8 +894,8 @@ pub fn process_create_stake_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
}
@@ -946,7 +946,7 @@ pub fn process_stake_authorize(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -959,8 +959,8 @@ pub fn process_stake_authorize(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
}
}
@@ -1000,7 +1000,7 @@ pub fn process_deactivate_stake_account(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1013,8 +1013,8 @@ pub fn process_deactivate_stake_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
}
}
@@ -1063,7 +1063,7 @@ pub fn process_withdraw_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1076,8 +1076,8 @@ pub fn process_withdraw_stake(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
}
@@ -1197,7 +1197,7 @@ pub fn process_split_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1210,8 +1210,8 @@ pub fn process_split_stake(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
}
}
@@ -1254,7 +1254,7 @@ pub fn process_stake_set_lockup(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1267,8 +1267,8 @@ pub fn process_stake_set_lockup(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
}
}
@@ -1341,8 +1341,7 @@ pub fn process_show_stake_account(
match stake_account.state() {
Ok(stake_state) => {
let state = build_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
config.output_format.formatted_print(&state);
Ok("".to_string())
Ok(config.output_format.formatted_string(&state))
}
Err(err) => Err(CliError::RpcRequestError(format!(
"Account data could not be deserialized to stake state: {}",
@@ -1370,8 +1369,7 @@ pub fn process_show_stake_history(
entries,
use_lamports_unit,
};
config.output_format.formatted_print(&stake_history_output);
Ok("".to_string())
Ok(config.output_format.formatted_string(&stake_history_output))
}
#[allow(clippy::too_many_arguments)]
@@ -1464,7 +1462,7 @@ pub fn process_delegate_stake(
if sign_only {
tx.try_partial_sign(&config.signers, recent_blockhash)?;
return_signers(&tx)
return_signers(&tx, &config)
} else {
tx.try_sign(&config.signers, recent_blockhash)?;
if let Some(nonce_account) = &nonce_account {
@@ -1477,8 +1475,8 @@ pub fn process_delegate_stake(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<StakeError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<StakeError>(result, &config)
}
}


@@ -242,8 +242,8 @@ pub fn process_create_storage_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn process_claim_storage_reward(
@@ -266,7 +266,7 @@ pub fn process_claim_storage_reward(
&fee_calculator,
&tx.message,
)?;
let signature = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
let signature = rpc_client.send_and_confirm_transaction_with_spinner(&tx)?;
Ok(signature.to_string())
}


@@ -367,7 +367,7 @@ pub fn process_set_validator_info(
&fee_calculator,
&tx.message,
)?;
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&tx)?;
println!("Success! Validator info published at: {:?}", info_pubkey);
println!("{}", signature_str);
@@ -410,10 +410,9 @@ pub fn process_get_validator_info(
info: validator_info,
});
}
config
Ok(config
.output_format
.formatted_print(&CliValidatorInfoVec::new(validator_info_list));
Ok("".to_string())
.formatted_string(&CliValidatorInfoVec::new(validator_info_list)))
}
#[cfg(test)]


@@ -174,6 +174,37 @@ impl VoteSubCommands for App<'_, '_> {
.help("Authorized withdrawer keypair"),
)
)
.subcommand(
SubCommand::with_name("vote-update-commission")
.about("Update the vote account's commission")
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
.value_name("VOTE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_valid_pubkey)
.help("Vote account to update"),
)
.arg(
Arg::with_name("commission")
.index(2)
.value_name("PERCENTAGE")
.takes_value(true)
.required(true)
.validator(is_valid_percentage)
.help("The new commission")
)
.arg(
Arg::with_name("authorized_withdrawer")
.index(3)
.value_name("AUTHORIZED_KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_valid_signer)
.help("Authorized withdrawer keypair"),
)
)
.subcommand(
SubCommand::with_name("vote-account")
.about("Show the contents of a vote account")
@@ -242,7 +273,7 @@ pub fn parse_create_vote_account(
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
let (vote_account, vote_account_pubkey) = signer_of(matches, "vote_account", wallet_manager)?;
let seed = matches.value_of("seed").map(|s| s.to_string());
let (identity_account, identity_pubkey) =
signer_of(matches, "identity_account", wallet_manager)?;
@@ -260,6 +291,7 @@ pub fn parse_create_vote_account(
Ok(CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account: signer_info.index_of(vote_account_pubkey).unwrap(),
seed,
identity_account: signer_info.index_of(identity_pubkey).unwrap(),
authorized_voter,
@@ -309,7 +341,8 @@ pub fn parse_vote_update_validator(
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let (new_identity_account, new_identity_pubkey) =
signer_of(matches, "new_identity_account", wallet_manager)?;
let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
let (authorized_withdrawer, authorized_withdrawer_pubkey) =
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
let payer_provided = None;
let signer_info = generate_unique_signers(
@@ -323,6 +356,36 @@ pub fn parse_vote_update_validator(
command: CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account: signer_info.index_of(new_identity_pubkey).unwrap(),
withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
},
signers: signer_info.signers,
})
}
pub fn parse_vote_update_commission(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let (authorized_withdrawer, authorized_withdrawer_pubkey) =
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
let commission = value_t_or_exit!(matches, "commission", u8);
let payer_provided = None;
let signer_info = generate_unique_signers(
vec![payer_provided, authorized_withdrawer],
matches,
default_signer_path,
wallet_manager,
)?;
Ok(CliCommandInfo {
command: CliCommand::VoteUpdateCommission {
vote_account_pubkey,
commission,
withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(),
},
signers: signer_info.signers,
})
@@ -381,13 +444,14 @@ pub fn parse_withdraw_from_vote_account(
pub fn process_create_vote_account(
rpc_client: &RpcClient,
config: &CliConfig,
vote_account: SignerIndex,
seed: &Option<String>,
identity_account: SignerIndex,
authorized_voter: &Option<Pubkey>,
authorized_withdrawer: &Option<Pubkey>,
commission: u8,
) -> ProcessResult {
let vote_account = config.signers[1];
let vote_account = config.signers[vote_account];
let vote_account_pubkey = vote_account.pubkey();
let vote_account_address = if let Some(seed) = seed {
Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
@@ -457,8 +521,8 @@ pub fn process_create_vote_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn process_vote_authorize(
@@ -497,9 +561,8 @@ pub fn process_vote_authorize(
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
log_instruction_custom_error::<VoteError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<VoteError>(result, &config)
}
pub fn process_vote_update_validator(
@@ -507,8 +570,9 @@ pub fn process_vote_update_validator(
config: &CliConfig,
vote_account_pubkey: &Pubkey,
new_identity_account: SignerIndex,
withdraw_authority: SignerIndex,
) -> ProcessResult {
let authorized_withdrawer = config.signers[1];
let authorized_withdrawer = config.signers[withdraw_authority];
let new_identity_account = config.signers[new_identity_account];
let new_identity_pubkey = new_identity_account.pubkey();
check_unique_pubkeys(
@@ -531,8 +595,36 @@ pub fn process_vote_update_validator(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<VoteError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<VoteError>(result, &config)
}
pub fn process_vote_update_commission(
rpc_client: &RpcClient,
config: &CliConfig,
vote_account_pubkey: &Pubkey,
commission: u8,
withdraw_authority: SignerIndex,
) -> ProcessResult {
let authorized_withdrawer = config.signers[withdraw_authority];
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let ixs = vec![vote_instruction::update_commission(
vote_account_pubkey,
&authorized_withdrawer.pubkey(),
commission,
)];
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
rpc_client,
&config.signers[0].pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
log_instruction_custom_error::<VoteError>(result, &config)
}
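process_vote_update_commission above follows the single-shot transaction pattern shared by the other process_* functions in this branch. A condensed sketch using the names already in scope in this file (the submit helper itself is hypothetical, not part of the codebase):

```rust
// Hypothetical condensation of the submit flow used by the process_* functions.
fn submit(
    rpc_client: &RpcClient,
    config: &CliConfig,
    ixs: Vec<Instruction>,
) -> ProcessResult {
    // 1) Fetch a recent blockhash and the fee schedule in one RPC call.
    let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
    // 2) Build an unsigned transaction with signer 0 as the fee payer.
    let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
    let mut tx = Transaction::new_unsigned(message);
    // 3) Sign with every configured signer.
    tx.try_sign(&config.signers, recent_blockhash)?;
    // 4) Verify the payer can cover the fee before broadcasting.
    check_account_for_fee(
        rpc_client,
        &config.signers[0].pubkey(),
        &fee_calculator,
        &tx.message,
    )?;
    // 5) Send, confirm with a spinner, and map program errors for display.
    let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
    log_instruction_custom_error::<VoteError>(result, &config)
}
```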
fn get_vote_account(
@@ -606,8 +698,7 @@ pub fn process_show_vote_account(
use_lamports_unit,
};
config.output_format.formatted_print(&vote_account_data);
Ok("".to_string())
Ok(config.output_format.formatted_string(&vote_account_data))
}
pub fn process_withdraw_from_vote_account(
@@ -637,9 +728,8 @@ pub fn process_withdraw_from_vote_account(
&fee_calculator,
&transaction.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut transaction, &config.signers);
log_instruction_custom_error::<VoteError>(result)
let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction);
log_instruction_custom_error::<VoteError>(result, &config)
}
#[cfg(test)]
@@ -732,6 +822,7 @@ mod tests {
parse_command(&test_create_vote_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 2,
authorized_voter: None,
@@ -760,6 +851,7 @@ mod tests {
parse_command(&test_create_vote_account2, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 2,
authorized_voter: None,
@@ -792,6 +884,7 @@ mod tests {
parse_command(&test_create_vote_account3, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 2,
authorized_voter: Some(authed),
@@ -822,6 +915,7 @@ mod tests {
parse_command(&test_create_vote_account4, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 2,
authorized_voter: None,
@@ -849,6 +943,7 @@ mod tests {
command: CliCommand::VoteUpdateValidator {
vote_account_pubkey: pubkey,
new_identity_account: 2,
withdraw_authority: 1,
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
@@ -858,6 +953,28 @@ mod tests {
}
);
let test_update_commission = test_commands.clone().get_matches_from(vec![
"test",
"vote-update-commission",
&pubkey_string,
"42",
&keypair_file,
]);
assert_eq!(
parse_command(&test_update_commission, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::VoteUpdateCommission {
vote_account_pubkey: pubkey,
commission: 42,
withdraw_authority: 1,
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
Box::new(read_keypair_file(&keypair_file).unwrap()),
],
}
);
// Test WithdrawFromVoteAccount subcommand
let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![
"test",


@@ -1,5 +1,6 @@
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -119,6 +120,7 @@ fn full_battery_tests(
&faucet_addr,
&config_payer.signers[0].pubkey(),
2000,
&config_payer,
)
.unwrap();
check_balance(2000, &rpc_client, &config_payer.signers[0].pubkey());
@@ -275,6 +277,7 @@ fn test_create_account_with_seed() {
let offline_nonce_authority_signer = keypair_from_seed(&[1u8; 32]).unwrap();
let online_nonce_creator_signer = keypair_from_seed(&[2u8; 32]).unwrap();
let to_address = Pubkey::new(&[3u8; 32]);
let config = CliConfig::default();
// Setup accounts
let rpc_client = RpcClient::new_socket(leader_data.rpc);
@@ -283,6 +286,7 @@ fn test_create_account_with_seed() {
&faucet_addr,
&offline_nonce_authority_signer.pubkey(),
42,
&config,
)
.unwrap();
request_and_confirm_airdrop(
@@ -290,6 +294,7 @@ fn test_create_account_with_seed() {
&faucet_addr,
&online_nonce_creator_signer.pubkey(),
4242,
&config,
)
.unwrap();
check_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
@@ -344,6 +349,7 @@ fn test_create_account_with_seed() {
nonce_authority: 0,
fee_payer: 0,
};
authority_config.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&authority_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
let authority_presigner = sign_only.presigner_of(&authority_pubkey).unwrap();


@@ -2,6 +2,7 @@ use chrono::prelude::*;
use serde_json::Value;
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig, PayCommand},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -69,6 +70,7 @@ fn test_cli_timestamp_tx() {
&faucet_addr,
&config_payer.signers[0].pubkey(),
50,
&config_witness,
)
.unwrap();
check_balance(50, &rpc_client, &config_payer.signers[0].pubkey());
@@ -78,6 +80,7 @@ fn test_cli_timestamp_tx() {
&faucet_addr,
&config_witness.signers[0].pubkey(),
1,
&config_witness,
)
.unwrap();
@@ -154,6 +157,7 @@ fn test_cli_witness_tx() {
&faucet_addr,
&config_payer.signers[0].pubkey(),
50,
&config_witness,
)
.unwrap();
request_and_confirm_airdrop(
@@ -161,6 +165,7 @@ fn test_cli_witness_tx() {
&faucet_addr,
&config_witness.signers[0].pubkey(),
1,
&config_witness,
)
.unwrap();
@@ -234,6 +239,7 @@ fn test_cli_cancel_tx() {
&faucet_addr,
&config_payer.signers[0].pubkey(),
50,
&config_witness,
)
.unwrap();
@@ -307,6 +313,7 @@ fn test_offline_pay_tx() {
&faucet_addr,
&config_offline.signers[0].pubkey(),
50,
&config_offline,
)
.unwrap();
@@ -315,6 +322,7 @@ fn test_offline_pay_tx() {
&faucet_addr,
&config_online.signers[0].pubkey(),
50,
&config_offline,
)
.unwrap();
check_balance(50, &rpc_client, &config_offline.signers[0].pubkey());
@@ -328,6 +336,7 @@ fn test_offline_pay_tx() {
sign_only: true,
..PayCommand::default()
});
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
check_balance(50, &rpc_client, &config_offline.signers[0].pubkey());
@@ -388,6 +397,7 @@ fn test_nonced_pay_tx() {
&faucet_addr,
&config.signers[0].pubkey(),
50 + minimum_nonce_balance,
&config,
)
.unwrap();
check_balance(


@@ -1,5 +1,6 @@
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -59,6 +60,7 @@ fn test_stake_delegation_force() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -66,6 +68,7 @@ fn test_stake_delegation_force() {
let vote_keypair = Keypair::new();
config.signers = vec![&default_signer, &vote_keypair];
config.command = CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 0,
authorized_voter: None,
@@ -155,6 +158,7 @@ fn test_seed_stake_delegation_and_deactivation() {
&faucet_addr,
&config_validator.signers[0].pubkey(),
100_000,
&config_validator,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
@@ -245,6 +249,7 @@ fn test_stake_delegation_and_deactivation() {
&faucet_addr,
&config_validator.signers[0].pubkey(),
100_000,
&config_validator,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
@@ -341,6 +346,7 @@ fn test_offline_stake_delegation_and_deactivation() {
&faucet_addr,
&config_validator.signers[0].pubkey(),
100_000,
&config_offline,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
@@ -350,6 +356,7 @@ fn test_offline_stake_delegation_and_deactivation() {
&faucet_addr,
&config_offline.signers[0].pubkey(),
100_000,
&config_validator,
)
.unwrap();
check_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey());
@@ -385,6 +392,7 @@ fn test_offline_stake_delegation_and_deactivation() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
@@ -470,6 +478,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -579,6 +588,7 @@ fn test_stake_authorize() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -596,6 +606,7 @@ fn test_stake_authorize() {
&faucet_addr,
&config_offline.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -703,6 +714,7 @@ fn test_stake_authorize() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sign_reply = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_reply);
assert!(sign_only.has_all_signers());
@@ -841,13 +853,16 @@ fn test_stake_authorize_with_fee_payer() {
config_offline.command = CliCommand::ClusterVersion;
process_command(&config_offline).unwrap_err();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &default_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &default_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &payer_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &payer_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &payer_pubkey);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
@@ -901,6 +916,7 @@ fn test_stake_authorize_with_fee_payer() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sign_reply = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_reply);
assert!(sign_only.has_all_signers());
@@ -966,11 +982,13 @@ fn test_stake_split() {
&faucet_addr,
&config.signers[0].pubkey(),
500_000,
&config,
)
.unwrap();
check_balance(500_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
@@ -1038,6 +1056,7 @@ fn test_stake_split() {
lamports: 2 * minimum_stake_balance,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
@@ -1114,11 +1133,13 @@ fn test_stake_set_lockup() {
&faucet_addr,
&config.signers[0].pubkey(),
500_000,
&config,
)
.unwrap();
check_balance(500_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create stake account, identity is authority
@@ -1292,6 +1313,7 @@ fn test_stake_set_lockup() {
nonce_authority: 0,
fee_payer: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
@@ -1364,11 +1386,13 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
&faucet_addr,
&config.signers[0].pubkey(),
200_000,
&config,
)
.unwrap();
check_balance(200_000, &rpc_client, &config.signers[0].pubkey());
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 100_000, &config)
.unwrap();
check_balance(100_000, &rpc_client, &offline_pubkey);
// Create nonce account
@@ -1410,6 +1434,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
fee_payer: 0,
from: 0,
};
config_offline.output_format = OutputFormat::JsonCompact;
let sig_response = process_command(&config_offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
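Each offline test above now sets OutputFormat::JsonCompact before the sign-only run: process_command returns whatever the configured format produces, and parse_sign_only_reply_string expects compact JSON. The recurring shape, condensed into one function (using the test file's imports):

```rust
// Condensed from the tests above: the sign-only leg of an offline flow.
fn sign_only_leg(mut config_offline: CliConfig) -> SignOnly {
    config_offline.output_format = OutputFormat::JsonCompact; // machine-readable reply
    let sig_response = process_command(&config_offline).unwrap(); // sign-only run
    parse_sign_only_reply_string(&sig_response) // blockhash + signer lists
}
```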


@@ -1,5 +1,6 @@
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
cli_output::OutputFormat,
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
@@ -59,7 +60,8 @@ fn test_transfer() {
let sender_pubkey = config.signers[0].pubkey();
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &sender_pubkey, 50_000).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &sender_pubkey, 50_000, &config)
.unwrap();
check_balance(50_000, &rpc_client, &sender_pubkey);
check_balance(0, &rpc_client, &recipient_pubkey);
@@ -87,7 +89,7 @@ fn test_transfer() {
process_command(&offline).unwrap_err();
let offline_pubkey = offline.signers[0].pubkey();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 50).unwrap();
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_pubkey, 50, &config).unwrap();
check_balance(50, &rpc_client, &offline_pubkey);
// Offline transfer
@@ -103,6 +105,7 @@ fn test_transfer() {
nonce_authority: 0,
fee_payer: 0,
};
offline.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&offline).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(sign_only.has_all_signers());
@@ -247,16 +250,24 @@ fn test_transfer_multisession_signing() {
let offline_from_signer = keypair_from_seed(&[2u8; 32]).unwrap();
let offline_fee_payer_signer = keypair_from_seed(&[3u8; 32]).unwrap();
let from_null_signer = NullSigner::new(&offline_from_signer.pubkey());
let config = CliConfig::default();
// Setup accounts
let rpc_client = RpcClient::new_socket(leader_data.rpc);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_from_signer.pubkey(), 43)
.unwrap();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&offline_from_signer.pubkey(),
43,
&config,
)
.unwrap();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&offline_fee_payer_signer.pubkey(),
3,
&config,
)
.unwrap();
check_balance(43, &rpc_client, &offline_from_signer.pubkey());
@@ -283,6 +294,7 @@ fn test_transfer_multisession_signing() {
nonce_authority: 0,
fee_payer: 0,
};
fee_payer_config.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&fee_payer_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(!sign_only.has_all_signers());
@@ -308,6 +320,7 @@ fn test_transfer_multisession_signing() {
nonce_authority: 0,
fee_payer: 0,
};
from_config.output_format = OutputFormat::JsonCompact;
let sign_only_reply = process_command(&from_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(sign_only.has_all_signers());


@@ -48,6 +48,7 @@ fn test_vote_authorize_and_withdraw() {
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
&config,
)
.unwrap();
@@ -56,6 +57,7 @@ fn test_vote_authorize_and_withdraw() {
let vote_account_pubkey = vote_account_keypair.pubkey();
config.signers = vec![&default_signer, &vote_account_keypair];
config.command = CliCommand::CreateVoteAccount {
vote_account: 1,
seed: None,
identity_account: 0,
authorized_voter: None,
@@ -110,6 +112,7 @@ fn test_vote_authorize_and_withdraw() {
config.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account: 2,
withdraw_authority: 1,
};
process_command(&config).unwrap();


@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.1.9"
version = "1.1.20"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-transaction-status = { path = "../transaction-status", version = "1.1.9" }
solana-net-utils = { path = "../net-utils", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-vote-program = { path = "../programs/vote", version = "1.1.9" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.20" }
solana-net-utils = { path = "../net-utils", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-vote-program = { path = "../programs/vote", version = "1.1.20" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -31,7 +31,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.0.5"
jsonrpc-http-server = "14.0.6"
solana-logger = { path = "../logger", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.20" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -50,28 +50,29 @@ impl Into<TransportError> for ClientErrorKind {
#[derive(Error, Debug)]
#[error("{kind}")]
pub struct ClientError {
command: Option<&'static str>,
request: Option<rpc_request::RpcRequest>,
#[source]
kind: ClientErrorKind,
}
impl ClientError {
pub fn new_with_command(kind: ClientErrorKind, command: &'static str) -> Self {
pub fn new_with_request(kind: ClientErrorKind, request: rpc_request::RpcRequest) -> Self {
Self {
command: Some(command),
request: Some(request),
kind,
}
}
pub fn into_with_command(self, command: &'static str) -> Self {
pub fn into_with_request(self, request: rpc_request::RpcRequest) -> Self {
Self {
command: Some(command),
request: Some(request),
..self
}
}
pub fn command(&self) -> Option<&'static str> {
self.command
pub fn request(&self) -> Option<&rpc_request::RpcRequest> {
self.request.as_ref()
}
pub fn kind(&self) -> &ClientErrorKind {
@@ -82,7 +83,7 @@ impl ClientError {
impl From<ClientErrorKind> for ClientError {
fn from(kind: ClientErrorKind) -> Self {
Self {
command: None,
request: None,
kind,
}
}
@@ -91,7 +92,7 @@ impl From<ClientErrorKind> for ClientError {
impl From<TransportError> for ClientError {
fn from(err: TransportError) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -106,7 +107,7 @@ impl Into<TransportError> for ClientError {
impl From<std::io::Error> for ClientError {
fn from(err: std::io::Error) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -115,7 +116,7 @@ impl From<std::io::Error> for ClientError {
impl From<reqwest::Error> for ClientError {
fn from(err: reqwest::Error) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -124,7 +125,7 @@ impl From<reqwest::Error> for ClientError {
impl From<rpc_request::RpcError> for ClientError {
fn from(err: rpc_request::RpcError) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -133,7 +134,7 @@ impl From<rpc_request::RpcError> for ClientError {
impl From<serde_json::error::Error> for ClientError {
fn from(err: serde_json::error::Error) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -142,7 +143,7 @@ impl From<serde_json::error::Error> for ClientError {
impl From<SignerError> for ClientError {
fn from(err: SignerError) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
@@ -151,7 +152,7 @@ impl From<SignerError> for ClientError {
impl From<TransactionError> for ClientError {
fn from(err: TransactionError) -> Self {
Self {
command: None,
request: None,
kind: err.into(),
}
}
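With command: Option<&'static str> replaced by request: Option<RpcRequest>, callers can recover which RPC method failed in a typed way. A minimal sketch using the new accessor (the helper name is illustrative):

```rust
// Illustrative helper: RpcRequest implements Display (see rpc_request.rs
// below), so the failing method name can be rendered directly.
fn describe_failure(err: &ClientError) -> String {
    match err.request() {
        Some(request) => format!("`{}` failed: {}", request, err),
        None => format!("client error: {}", err),
    }
}
```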


@@ -1,10 +1,5 @@
use crate::{client_error::Result, rpc_request::RpcRequest};
pub(crate) trait GenericRpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: serde_json::Value,
retries: usize,
) -> Result<serde_json::Value>;
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value>;
}


@@ -8,6 +8,7 @@ pub mod perf_utils;
pub mod pubsub_client;
pub mod rpc_client;
pub mod rpc_client_request;
pub mod rpc_config;
pub mod rpc_request;
pub mod rpc_response;
pub mod thin_client;


@@ -38,13 +38,8 @@ impl MockRpcClientRequest {
}
impl GenericRpcClientRequest for MockRpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: serde_json::Value,
_retries: usize,
) -> Result<serde_json::Value> {
if let Some(value) = self.mocks.write().unwrap().remove(request) {
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
if let Some(value) = self.mocks.write().unwrap().remove(&request) {
return Ok(value);
}
if self.url == "fails" {

File diff suppressed because it is too large.


@@ -4,8 +4,7 @@ use crate::{
rpc_request::{RpcError, RpcRequest},
};
use log::*;
use reqwest::{self, header::CONTENT_TYPE};
use solana_sdk::clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT};
use reqwest::{self, header::CONTENT_TYPE, StatusCode};
use std::{thread::sleep, time::Duration};
pub struct RpcClientRequest {
@@ -29,17 +28,13 @@ impl RpcClientRequest {
}
impl GenericRpcClientRequest for RpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: serde_json::Value,
mut retries: usize,
) -> Result<serde_json::Value> {
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
// Concurrent requests are not supported so reuse the same request id for all requests
let request_id = 1;
let request_json = request.build_request_json(request_id, params);
let mut too_many_requests_retries = 5;
loop {
match self
.client
@@ -50,6 +45,19 @@ impl GenericRpcClientRequest for RpcClientRequest {
{
Ok(response) => {
if !response.status().is_success() {
if response.status() == StatusCode::TOO_MANY_REQUESTS
&& too_many_requests_retries > 0
{
too_many_requests_retries -= 1;
debug!(
"Server responded with {:?}, {} retries left",
response, too_many_requests_retries
);
// Sleep for 500ms to give the server a break
sleep(Duration::from_millis(500));
continue;
}
return Err(response.error_for_status().unwrap_err().into());
}
@@ -63,17 +71,8 @@ impl GenericRpcClientRequest for RpcClientRequest {
}
return Ok(json["result"].clone());
}
Err(e) => {
info!("{:?} failed, {} retries left: {:?}", request, retries, e);
if retries == 0 {
return Err(e.into());
}
retries -= 1;
// Sleep for approximately half a slot
sleep(Duration::from_millis(
500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND,
));
Err(err) => {
return Err(err.into());
}
}
}
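The old generic retries parameter is gone; the only retry that remains is the bounded back-off on HTTP 429 shown above. In isolation, the pattern looks like this (a simplified sketch with reqwest's blocking client, not the crate's exact code):

```rust
use reqwest::{blocking::Client, StatusCode};
use std::{thread::sleep, time::Duration};

// Sketch of the bounded retry-on-429 loop used above.
fn post_with_429_retry(client: &Client, url: &str, body: String) -> reqwest::Result<String> {
    let mut too_many_requests_retries = 5;
    loop {
        let response = client.post(url).body(body.clone()).send()?;
        if response.status() == StatusCode::TOO_MANY_REQUESTS && too_many_requests_retries > 0 {
            too_many_requests_retries -= 1;
            sleep(Duration::from_millis(500)); // give the server a break
            continue;
        }
        // Any other non-2xx status becomes an error; 2xx yields the body.
        return response.error_for_status()?.text();
    }
}
```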

client/src/rpc_config.rs (new file, 45 lines)

@@ -0,0 +1,45 @@
use solana_sdk::{clock::Epoch, commitment_config::CommitmentConfig};
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureStatusConfig {
pub search_transaction_history: Option<bool>,
// DEPRECATED
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSendTransactionConfig {
pub skip_preflight: bool,
}
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionConfig {
pub sig_verify: bool,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcLargestAccountsFilter {
Circulating,
NonCirculating,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcLargestAccountsConfig {
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub filter: Option<RpcLargestAccountsFilter>,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcInflationConfig {
pub epoch: Option<Epoch>,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
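Because of rename_all = "camelCase" and the flattened commitment field, these configs serialize as flat camelCase JSON objects. A sketch of the wire shape (the exact commitment keys depend on CommitmentConfig's own serialization):

```rust
fn main() {
    let config = RpcLargestAccountsConfig {
        commitment: None,
        filter: Some(RpcLargestAccountsFilter::Circulating),
    };
    // With commitment: None the flattened fields are omitted entirely,
    // leaving: {"filter":"circulating"}
    println!("{}", serde_json::to_string(&config).unwrap());
}
```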


@@ -1,7 +1,8 @@
use serde_json::{json, Value};
use std::fmt;
use thiserror::Error;
#[derive(Debug, PartialEq, Eq, Hash)]
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub enum RpcRequest {
DeregisterNode,
ValidatorExit,
@@ -15,14 +16,18 @@ pub enum RpcRequest {
GetConfirmedTransaction,
GetEpochInfo,
GetEpochSchedule,
GetGenesisHash,
GetIdentity,
GetInflation,
GetLeaderSchedule,
GetProgramAccounts,
GetRecentBlockhash,
GetFeeCalculatorForBlockhash,
GetFeeRateGovernor,
GetFees,
GetGenesisHash,
GetIdentity,
GetInflationGovernor,
GetInflationRate,
GetLargestAccounts,
GetLeaderSchedule,
GetMinimumBalanceForRentExemption,
GetProgramAccounts,
GetRecentBlockhash,
GetSignatureStatuses,
GetSlot,
GetSlotLeader,
@@ -30,24 +35,21 @@ pub enum RpcRequest {
GetStorageTurnRate,
GetSlotsPerSegment,
GetStoragePubkeysForSlot,
GetSupply,
GetTotalSupply,
GetTransactionCount,
GetVersion,
GetVoteAccounts,
MinimumLedgerSlot,
RegisterNode,
RequestAirdrop,
SendTransaction,
SimulateTransaction,
SignVote,
GetMinimumBalanceForRentExemption,
MinimumLedgerSlot,
}
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
impl RpcRequest {
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
let jsonrpc = "2.0";
impl fmt::Display for RpcRequest {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let method = match self {
RpcRequest::DeregisterNode => "deregisterNode",
RpcRequest::ValidatorExit => "validatorExit",
@@ -61,14 +63,18 @@ impl RpcRequest {
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
RpcRequest::GetEpochInfo => "getEpochInfo",
RpcRequest::GetEpochSchedule => "getEpochSchedule",
RpcRequest::GetGenesisHash => "getGenesisHash",
RpcRequest::GetIdentity => "getIdentity",
RpcRequest::GetInflation => "getInflation",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
RpcRequest::GetFees => "getFees",
RpcRequest::GetGenesisHash => "getGenesisHash",
RpcRequest::GetIdentity => "getIdentity",
RpcRequest::GetInflationGovernor => "getInflationGovernor",
RpcRequest::GetInflationRate => "getInflationRate",
RpcRequest::GetLargestAccounts => "getLargestAccounts",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
RpcRequest::GetSlot => "getSlot",
RpcRequest::GetSlotLeader => "getSlotLeader",
@@ -76,21 +82,35 @@ impl RpcRequest {
RpcRequest::GetStorageTurnRate => "getStorageTurnRate",
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
RpcRequest::GetSupply => "getSupply",
RpcRequest::GetTotalSupply => "getTotalSupply",
RpcRequest::GetTransactionCount => "getTransactionCount",
RpcRequest::GetVersion => "getVersion",
RpcRequest::GetVoteAccounts => "getVoteAccounts",
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
RpcRequest::RegisterNode => "registerNode",
RpcRequest::RequestAirdrop => "requestAirdrop",
RpcRequest::SendTransaction => "sendTransaction",
RpcRequest::SimulateTransaction => "simulateTransaction",
RpcRequest::SignVote => "signVote",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
};
write!(f, "{}", method)
}
}
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
pub const MAX_GET_CONFIRMED_BLOCKS_RANGE: u64 = 500_000;
impl RpcRequest {
pub(crate) fn build_request_json(self, id: u64, params: Value) -> Value {
let jsonrpc = "2.0";
json!({
"jsonrpc": jsonrpc,
"id": id,
"method": method,
"method": format!("{}", self),
"params": params,
})
}
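Splitting the method-name table into a Display impl leaves build_request_json a thin wrapper. A crate-internal sketch (build_request_json is pub(crate)), mirroring the test below:

```rust
#[test]
fn display_drives_method_name() {
    // Display yields the JSON-RPC method name...
    assert_eq!(RpcRequest::GetEpochInfo.to_string(), "getEpochInfo");
    // ...and build_request_json embeds it in the envelope:
    // {"jsonrpc":"2.0","id":1,"method":"getEpochInfo","params":null}
    let body = RpcRequest::GetEpochInfo.build_request_json(1, serde_json::Value::Null);
    assert_eq!(body["method"], "getEpochInfo");
}
```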
@@ -129,10 +149,6 @@ mod tests {
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getEpochInfo");
let test_request = RpcRequest::GetInflation;
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getInflation");
let test_request = RpcRequest::GetRecentBlockhash;
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getRecentBlockhash");


@@ -3,6 +3,7 @@ use solana_sdk::{
account::Account,
clock::{Epoch, Slot},
fee_calculator::{FeeCalculator, FeeRateGovernor},
inflation::Inflation,
pubkey::Pubkey,
transaction::{Result, TransactionError},
};
@@ -35,6 +36,14 @@ pub struct RpcBlockhashFeeCalculator {
pub fee_calculator: FeeCalculator,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcFees {
pub blockhash: String,
pub fee_calculator: FeeCalculator,
pub last_valid_slot: Slot,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcFeeCalculator {
@@ -47,6 +56,37 @@ pub struct RpcFeeRateGovernor {
pub fee_rate_governor: FeeRateGovernor,
}
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcInflationGovernor {
pub initial: f64,
pub terminal: f64,
pub taper: f64,
pub foundation: f64,
pub foundation_term: f64,
}
impl From<Inflation> for RpcInflationGovernor {
fn from(inflation: Inflation) -> Self {
Self {
initial: inflation.initial,
terminal: inflation.terminal,
taper: inflation.taper,
foundation: inflation.foundation,
foundation_term: inflation.foundation_term,
}
}
}
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcInflationRate {
pub total: f64,
pub validator: f64,
pub foundation: f64,
pub epoch: Epoch,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcKeyedAccount {
@@ -108,6 +148,8 @@ pub struct RpcContactInfo {
pub tpu: Option<SocketAddr>,
/// JSON RPC port
pub rpc: Option<SocketAddr>,
/// Software version
pub version: Option<String>,
}
/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
@@ -127,6 +169,9 @@ pub struct RpcEpochInfo {
/// The absolute current slot
pub absolute_slot: Slot,
/// The current block height
pub block_height: u64,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
@@ -186,6 +231,13 @@ pub struct RpcSignatureConfirmation {
pub status: Result<()>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionResult {
pub err: Option<TransactionError>,
pub logs: Option<Vec<String>>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcStorageTurn {
@@ -199,3 +251,12 @@ pub struct RpcAccountBalance {
pub address: String,
pub lamports: u64,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSupply {
pub total: u64,
pub circulating: u64,
pub non_circulating: u64,
pub non_circulating_accounts: Vec<String>,
}
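As with the other response types, rename_all = "camelCase" fixes the wire shape of the new getSupply value. A round-trip sketch with illustrative numbers:

```rust
#[test]
fn rpc_supply_wire_shape() {
    // Illustrative getSupply value; field names follow the camelCase rename.
    let json = r#"{
        "total": 1000,
        "circulating": 800,
        "nonCirculating": 200,
        "nonCirculatingAccounts": ["<base58 pubkey>"]
    }"#;
    let supply: RpcSupply = serde_json::from_str(json).unwrap();
    assert_eq!(supply.non_circulating, 200);
}
```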


@@ -440,7 +440,7 @@ impl SyncClient for ThinClient {
match recent_blockhash {
Ok(Response { value, .. }) => {
self.optimizer.report(index, duration_as_ms(&now.elapsed()));
Ok(value)
Ok((value.0, value.1))
}
Err(e) => {
self.optimizer.report(index, std::u64::MAX);


@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.1.9"
version = "1.1.20"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -21,6 +21,7 @@ byteorder = "1.3.4"
chrono = { version = "0.4.11", features = ["serde"] }
core_affinity = "0.5.10"
crossbeam-channel = "0.4"
ed25519-dalek = "=1.0.0-pre.3"
fs_extra = "1.1.0"
flate2 = "1.0"
indexmap = "1.3"
@@ -41,36 +42,37 @@ regex = "1.3.6"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.9" }
solana-budget-program = { path = "../programs/budget", version = "1.1.9" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.9" }
solana-client = { path = "../client", version = "1.1.9" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.9" }
solana-faucet = { path = "../faucet", version = "1.1.9" }
ed25519-dalek = "=1.0.0-pre.3"
solana-ledger = { path = "../ledger", version = "1.1.9" }
solana-logger = { path = "../logger", version = "1.1.9" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.9" }
solana-metrics = { path = "../metrics", version = "1.1.9" }
solana-measure = { path = "../measure", version = "1.1.9" }
solana-net-utils = { path = "../net-utils", version = "1.1.9" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.9" }
solana-perf = { path = "../perf", version = "1.1.9" }
solana-runtime = { path = "../runtime", version = "1.1.9" }
solana-sdk = { path = "../sdk", version = "1.1.9" }
solana-stake-program = { path = "../programs/stake", version = "1.1.9" }
solana-storage-program = { path = "../programs/storage", version = "1.1.9" }
solana-streamer = { path = "../streamer", version = "1.1.9" }
solana-vote-program = { path = "../programs/vote", version = "1.1.9" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.9" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.9" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.1.20" }
solana-budget-program = { path = "../programs/budget", version = "1.1.20" }
solana-clap-utils = { path = "../clap-utils", version = "1.1.20" }
solana-client = { path = "../client", version = "1.1.20" }
solana-transaction-status = { path = "../transaction-status", version = "1.1.20" }
solana-faucet = { path = "../faucet", version = "1.1.20" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.1.20" }
solana-ledger = { path = "../ledger", version = "1.1.20" }
solana-logger = { path = "../logger", version = "1.1.20" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.20" }
solana-metrics = { path = "../metrics", version = "1.1.20" }
solana-measure = { path = "../measure", version = "1.1.20" }
solana-net-utils = { path = "../net-utils", version = "1.1.20" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.20" }
solana-perf = { path = "../perf", version = "1.1.20" }
solana-runtime = { path = "../runtime", version = "1.1.20" }
solana-sdk = { path = "../sdk", version = "1.1.20" }
solana-stake-program = { path = "../programs/stake", version = "1.1.20" }
solana-storage-program = { path = "../programs/storage", version = "1.1.20" }
solana-streamer = { path = "../streamer", version = "1.1.20" }
solana-version = { path = "../version", version = "1.1.20" }
solana-vote-program = { path = "../programs/vote", version = "1.1.20" }
solana-vote-signer = { path = "../vote-signer", version = "1.1.20" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.20" }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.9" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.20" }
trees = "0.2.1"
[dev-dependencies]


@@ -3,10 +3,11 @@
extern crate test;
use rand::{thread_rng, Rng};
use solana_core::broadcast_stage::broadcast_metrics::TransmitShredsStats;
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
use solana_ledger::shred::Shred;
use solana_ledger::shred::{Shred, NONCE_SHRED_PAYLOAD_SIZE};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::sync::RwLock;
@@ -26,7 +27,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
const NUM_SHREDS: usize = 32;
let shreds = vec![Shred::new_empty_data_shred(); NUM_SHREDS];
let shreds = vec![Shred::new_empty_data_shred(NONCE_SHRED_PAYLOAD_SIZE); NUM_SHREDS];
let mut stakes = HashMap::new();
const NUM_PEERS: usize = 200;
for _ in 0..NUM_PEERS {
@@ -48,7 +49,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
&peers_and_stakes,
&peers,
&last_datapoint,
&mut 0,
&mut TransmitShredsStats::default(),
)
.unwrap();
});


@@ -5,7 +5,7 @@ extern crate test;
use solana_ledger::entry::{create_ticks, Entry};
use solana_ledger::shred::{
max_entries_per_n_shred, max_ticks_per_n_shreds, Shred, Shredder, RECOMMENDED_FEC_RATE,
SIZE_OF_DATA_SHRED_PAYLOAD,
SIZE_OF_NONCE_DATA_SHRED_PAYLOAD,
};
use solana_perf::test_tx;
use solana_sdk::hash::Hash;
@@ -29,10 +29,11 @@ fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Ent
#[bench]
fn bench_shredder_ticks(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
// ~1Mb
let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
let num_ticks =
max_ticks_per_n_shreds(1, Some(SIZE_OF_NONCE_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
bencher.iter(|| {
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
@@ -43,10 +44,14 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
#[bench]
fn bench_shredder_large_entries(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
let txs_per_entry = 128;
let num_entries = max_entries_per_n_shred(&make_test_entry(txs_per_entry), num_shreds as u64);
let num_entries = max_entries_per_n_shred(
&make_test_entry(txs_per_entry),
num_shreds as u64,
Some(shred_size),
);
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
// 1Mb
bencher.iter(|| {
@@ -58,10 +63,10 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
#[bench]
fn bench_deshredder(bencher: &mut Bencher) {
let kp = Arc::new(Keypair::new());
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
// ~10Mb
let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp, 0, 0).unwrap();
let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
@@ -73,7 +78,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
#[bench]
fn bench_deserialize_hdr(bencher: &mut Bencher) {
let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
let data = vec![0; SIZE_OF_NONCE_DATA_SHRED_PAYLOAD];
let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);


@@ -51,7 +51,7 @@ type PacketsAndOffsets = (Packets, Vec<usize>);
pub type UnprocessedPackets = Vec<PacketsAndOffsets>;
/// Transaction forwarding
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 4;
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 1;
// Fixed thread size seems to be fastest on GCP setup
pub const NUM_THREADS: u32 = 4;
@@ -292,7 +292,7 @@ impl BankingStage {
enable_forwarding: bool,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
) {
) -> BufferedPacketsDecision {
let (leader_at_slot_offset, poh_has_bank, would_be_leader) = {
let poh = poh_recorder.lock().unwrap();
(
@@ -349,6 +349,7 @@ impl BankingStage {
}
_ => (),
}
decision
}
pub fn process_loop(
@@ -365,8 +366,8 @@ impl BankingStage {
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut buffered_packets = vec![];
loop {
if !buffered_packets.is_empty() {
Self::process_buffered_packets(
while !buffered_packets.is_empty() {
let decision = Self::process_buffered_packets(
&my_pubkey,
&socket,
poh_recorder,
@@ -376,6 +377,11 @@ impl BankingStage {
batch_limit,
transaction_status_sender.clone(),
);
if decision == BufferedPacketsDecision::Hold {
// If we are waiting on a new bank,
// check the receiver for more transactions or for an exit signal
break;
}
}
let recv_timeout = if !buffered_packets.is_empty() {
@@ -543,6 +549,7 @@ impl BankingStage {
send_transaction_status_batch(
bank.clone(),
batch.transactions(),
batch.iteration_order_vec(),
transaction_statuses,
TransactionBalancesSet::new(pre_balances, post_balances),
sender,


@@ -35,7 +35,7 @@ use std::{
};
mod broadcast_fake_shreds_run;
pub(crate) mod broadcast_metrics;
pub mod broadcast_metrics;
pub(crate) mod broadcast_utils;
mod fail_entry_verification_broadcast_run;
mod standard_broadcast_run;
@@ -374,13 +374,14 @@ pub fn broadcast_shreds(
peers_and_stakes: &[(u64, usize)],
peers: &[ContactInfo],
last_datapoint_submit: &Arc<AtomicU64>,
send_mmsg_total: &mut u64,
transmit_stats: &mut TransmitShredsStats,
) -> Result<()> {
let broadcast_len = peers_and_stakes.len();
if broadcast_len == 0 {
update_peer_stats(1, 1, last_datapoint_submit);
return Ok(());
}
let mut shred_select = Measure::start("shred_select");
let packets: Vec<_> = shreds
.iter()
.map(|shred| {
@@ -389,6 +390,8 @@ pub fn broadcast_shreds(
(&shred.payload, &peers[broadcast_index].tvu)
})
.collect();
shred_select.stop();
transmit_stats.shred_select += shred_select.as_us();
let mut sent = 0;
let mut send_mmsg_time = Measure::start("send_mmsg");
@@ -401,7 +404,7 @@ pub fn broadcast_shreds(
}
}
send_mmsg_time.stop();
*send_mmsg_total += send_mmsg_time.as_us();
transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();
let num_live_peers = num_live_peers(&peers);
update_peer_stats(
@@ -463,7 +466,7 @@ pub mod test {
Vec<TransmitShreds>,
Vec<TransmitShreds>,
) {
let num_entries = max_ticks_per_n_shreds(num);
let num_entries = max_ticks_per_n_shreds(num, None);
let (data_shreds, _) = make_slot_entries(slot, 0, num_entries);
let keypair = Arc::new(Keypair::new());
let shredder = Shredder::new(slot, 0, RECOMMENDED_FEC_RATE, keypair, 0, 0)


@@ -29,11 +29,12 @@ impl ProcessShredsStats {
}
#[derive(Default, Clone)]
pub(crate) struct TransmitShredsStats {
pub(crate) transmit_elapsed: u64,
pub(crate) send_mmsg_elapsed: u64,
pub(crate) get_peers_elapsed: u64,
pub(crate) num_shreds: usize,
pub struct TransmitShredsStats {
pub transmit_elapsed: u64,
pub send_mmsg_elapsed: u64,
pub get_peers_elapsed: u64,
pub shred_select: u64,
pub num_shreds: usize,
}
impl BroadcastStats for TransmitShredsStats {
@@ -42,6 +43,7 @@ impl BroadcastStats for TransmitShredsStats {
self.send_mmsg_elapsed += new_stats.send_mmsg_elapsed;
self.get_peers_elapsed += new_stats.get_peers_elapsed;
self.num_shreds += new_stats.num_shreds;
self.shred_select += new_stats.shred_select;
}
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
datapoint_info!(
@@ -58,6 +60,7 @@ impl BroadcastStats for TransmitShredsStats {
("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
("num_shreds", self.num_shreds as i64, i64),
("shred_select", self.shred_select as i64, i64),
);
}
}
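The new shred_select counter is fed from broadcast_shreds via the usual solana_measure idiom. In isolation (a sketch, not the crate's exact code):

```rust
use solana_measure::measure::Measure;

// Sketch: time a section and fold the elapsed microseconds into the stats.
fn timed_shred_select(transmit_stats: &mut TransmitShredsStats) {
    let mut shred_select = Measure::start("shred_select");
    // ... pick a destination peer for each shred here ...
    shred_select.stop();
    transmit_stats.shred_select += shred_select.as_us();
}
```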
@@ -176,15 +179,16 @@ mod test {
}
#[test]
fn test_update() {
fn test_update_broadcast() {
let start = Instant::now();
let mut slot_broadcast_stats = SlotBroadcastStats::default();
slot_broadcast_stats.update(
&TransmitShredsStats {
transmit_elapsed: 1,
get_peers_elapsed: 1,
send_mmsg_elapsed: 1,
num_shreds: 1,
get_peers_elapsed: 2,
send_mmsg_elapsed: 3,
shred_select: 4,
num_shreds: 5,
},
&Some(BroadcastShredBatchInfo {
slot: 0,
@@ -198,16 +202,18 @@ mod test {
assert_eq!(slot_0_stats.num_batches, 1);
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);
slot_broadcast_stats.update(
&TransmitShredsStats {
transmit_elapsed: 1,
get_peers_elapsed: 1,
send_mmsg_elapsed: 1,
num_shreds: 1,
transmit_elapsed: 7,
get_peers_elapsed: 8,
send_mmsg_elapsed: 9,
shred_select: 10,
num_shreds: 11,
},
&None,
);
@@ -217,9 +223,10 @@ mod test {
assert_eq!(slot_0_stats.num_batches, 1);
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);
// If another batch is given, then total number of batches == num_expected_batches == 2,
// so the batch should be purged from the HashMap
@@ -228,6 +235,7 @@ mod test {
transmit_elapsed: 1,
get_peers_elapsed: 1,
send_mmsg_elapsed: 1,
shred_select: 1,
num_shreds: 1,
},
&Some(BroadcastShredBatchInfo {


@@ -1,19 +1,12 @@
use super::*;
use solana_ledger::shred::Shredder;
use solana_ledger::shred::{Shredder, RECOMMENDED_FEC_RATE};
use solana_sdk::hash::Hash;
use solana_sdk::signature::Keypair;
use std::{thread::sleep, time::Duration};
pub const NUM_BAD_SLOTS: u64 = 10;
pub const SLOT_TO_RESOLVE: u64 = 32;
#[derive(Clone)]
pub(super) struct FailEntryVerificationBroadcastRun {
shred_version: u16,
keypair: Arc<Keypair>,
good_shreds: Vec<Shred>,
current_slot: Slot,
next_shred_index: u32,
}
impl FailEntryVerificationBroadcastRun {
@@ -21,9 +14,6 @@ impl FailEntryVerificationBroadcastRun {
Self {
shred_version,
keypair,
good_shreds: vec![],
current_slot: 0,
next_shred_index: 0,
}
}
}
@@ -41,90 +31,44 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
let bank = receive_results.bank.clone();
let last_tick_height = receive_results.last_tick_height;
if bank.slot() != self.current_slot {
self.next_shred_index = 0;
self.current_slot = bank.slot();
}
// 2) If we're past SLOT_TO_RESOLVE, insert the correct shreds so validators can repair
// and make progress
if bank.slot() > SLOT_TO_RESOLVE && !self.good_shreds.is_empty() {
info!("Resolving bad shreds");
let mut shreds = vec![];
std::mem::swap(&mut shreds, &mut self.good_shreds);
blockstore_sender.send((Arc::new(shreds), None))?;
}
// 3) Convert entries to shreds + generate coding shreds. Set a garbage PoH on the last entry
// 2) Convert entries to shreds + generate coding shreds. Set a garbage PoH on the last entry
// in the slot to make verification fail on validators
let last_entries = {
if last_tick_height == bank.max_tick_height() && bank.slot() < NUM_BAD_SLOTS {
let good_last_entry = receive_results.entries.pop().unwrap();
let mut bad_last_entry = good_last_entry.clone();
bad_last_entry.hash = Hash::default();
Some((good_last_entry, bad_last_entry))
} else {
None
}
};
if last_tick_height == bank.max_tick_height() {
let mut last_entry = receive_results.entries.last_mut().unwrap();
last_entry.hash = Hash::default();
}
let next_shred_index = blockstore
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
.unwrap_or(0) as u32;
let shredder = Shredder::new(
bank.slot(),
bank.parent().unwrap().slot(),
0.0,
RECOMMENDED_FEC_RATE,
self.keypair.clone(),
(bank.tick_height() % bank.ticks_per_slot()) as u8,
self.shred_version,
)
.expect("Expected to create a new shredder");
let (data_shreds, _, _) = shredder.entries_to_shreds(
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(
&receive_results.entries,
last_tick_height == bank.max_tick_height() && last_entries.is_none(),
self.next_shred_index,
last_tick_height == bank.max_tick_height(),
next_shred_index,
);
self.next_shred_index += data_shreds.len() as u32;
let last_shreds = last_entries.map(|(good_last_entry, bad_last_entry)| {
let (good_last_data_shred, _, _) =
shredder.entries_to_shreds(&[good_last_entry], true, self.next_shred_index);
let (bad_last_data_shred, _, _) =
// Don't mark the last shred as last so that validators won't know that
// they've gotten all the shreds, and will continue trying to repair
shredder.entries_to_shreds(&[bad_last_entry], false, self.next_shred_index);
self.next_shred_index += 1;
(good_last_data_shred, bad_last_data_shred)
});
let data_shreds = Arc::new(data_shreds);
blockstore_sender.send((data_shreds.clone(), None))?;
// 4) Start broadcast step
// 3) Start broadcast step
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
let stakes = stakes.map(Arc::new);
socket_sender.send(((stakes.clone(), data_shreds), None))?;
if let Some((good_last_data_shred, bad_last_data_shred)) = last_shreds {
// Stash away the good shred so we can rewrite them later
self.good_shreds.extend(good_last_data_shred.clone());
let good_last_data_shred = Arc::new(good_last_data_shred);
let bad_last_data_shred = Arc::new(bad_last_data_shred);
// Store the good shred so that blockstore will signal ClusterSlots
// that the slot is complete
blockstore_sender.send((good_last_data_shred, None))?;
loop {
// Wait for slot to be complete
if blockstore.is_full(bank.slot()) {
break;
}
sleep(Duration::from_millis(10));
}
// Store the bad shred so we serve bad repairs to validators catching up
blockstore_sender.send((bad_last_data_shred.clone(), None))?;
// Send bad shreds to rest of network
socket_sender.send(((stakes, bad_last_data_shred), None))?;
}
socket_sender.send(((stakes, Arc::new(coding_shreds)), None))?;
Ok(())
}
fn transmit(
@@ -137,14 +81,13 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
// Broadcast data
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
let mut send_mmsg_total = 0;
broadcast_shreds(
sock,
&shreds,
&peers_and_stakes,
&peers,
&Arc::new(AtomicU64::new(0)),
&mut send_mmsg_total,
&mut TransmitShredsStats::default(),
)?;
Ok(())

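This rewrite turns `FailEntryVerificationBroadcastRun` into a stateful fault injector: it tracks `next_shred_index` itself, ships a final entry whose PoH hash has been zeroed so verification fails, withholds the real last-shred marker so victims keep trying to repair, and stashes the good shreds until the slot is past `SLOT_TO_RESOLVE`. A rough sketch of the corrupt-and-stash core, with hypothetical names (the real code operates on `Entry`/`Shred` types from solana-ledger and resolves via the blockstore sender):

// Hypothetical types for illustration only.
#[derive(Clone, Debug)]
struct Entry {
    hash: [u8; 32], // stand-in for the PoH hash
}

#[derive(Default)]
struct FaultInjector {
    good_entries: Vec<Entry>, // stashed so the slot can be repaired later
}

impl FaultInjector {
    fn corrupt_last(&mut self, entries: &mut Vec<Entry>) {
        if let Some(good_last) = entries.pop() {
            let mut bad_last = good_last.clone();
            bad_last.hash = [0u8; 32]; // garbage PoH hash: verification fails
            self.good_entries.push(good_last); // keep the good copy
            entries.push(bad_last); // broadcast the bad copy
        }
    }
}

fn main() {
    let mut injector = FaultInjector::default();
    let mut entries = vec![Entry { hash: [1; 32] }, Entry { hash: [2; 32] }];
    injector.corrupt_last(&mut entries);
    assert_eq!(entries.last().unwrap().hash, [0u8; 32]);
    assert_eq!(injector.good_entries[0].hash, [2; 32]);
}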
View File

@@ -9,6 +9,7 @@ use solana_ledger::{
};
use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
use std::collections::HashMap;
use std::sync::RwLock;
use std::time::Duration;
#[derive(Clone)]
@@ -23,6 +24,14 @@ pub struct StandardBroadcastRun {
shred_version: u16,
last_datapoint_submit: Arc<AtomicU64>,
num_batches: usize,
broadcast_peer_cache: Arc<RwLock<BroadcastPeerCache>>,
last_peer_update: Arc<AtomicU64>,
}
#[derive(Default)]
struct BroadcastPeerCache {
peers: Vec<ContactInfo>,
peers_and_stakes: Vec<(u64, usize)>,
}
impl StandardBroadcastRun {
@@ -38,6 +47,8 @@ impl StandardBroadcastRun {
shred_version,
last_datapoint_submit: Arc::new(AtomicU64::new(0)),
num_batches: 0,
broadcast_peer_cache: Arc::new(RwLock::new(BroadcastPeerCache::default())),
last_peer_update: Arc::new(AtomicU64::new(0)),
}
}
@@ -293,33 +304,46 @@ impl StandardBroadcastRun {
shreds: Arc<Vec<Shred>>,
broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
) -> Result<()> {
const BROADCAST_PEER_UPDATE_INTERVAL_MS: u64 = 1000;
trace!("Broadcasting {:?} shreds", shreds.len());
// Get the list of peers to broadcast to
let get_peers_start = Instant::now();
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
let get_peers_elapsed = get_peers_start.elapsed();
let mut get_peers_time = Measure::start("broadcast::get_peers");
let now = timestamp();
let last = self.last_peer_update.load(Ordering::Relaxed);
if now - last > BROADCAST_PEER_UPDATE_INTERVAL_MS
&& self
.last_peer_update
.compare_and_swap(now, last, Ordering::Relaxed)
== last
{
let mut w_broadcast_peer_cache = self.broadcast_peer_cache.write().unwrap();
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
w_broadcast_peer_cache.peers = peers;
w_broadcast_peer_cache.peers_and_stakes = peers_and_stakes;
}
get_peers_time.stop();
let r_broadcast_peer_cache = self.broadcast_peer_cache.read().unwrap();
let mut transmit_stats = TransmitShredsStats::default();
// Broadcast the shreds
let transmit_start = Instant::now();
let mut send_mmsg_total = 0;
let mut transmit_time = Measure::start("broadcast_shreds");
broadcast_shreds(
sock,
&shreds,
&peers_and_stakes,
&peers,
&r_broadcast_peer_cache.peers_and_stakes,
&r_broadcast_peer_cache.peers,
&self.last_datapoint_submit,
&mut send_mmsg_total,
&mut transmit_stats,
)?;
let transmit_elapsed = transmit_start.elapsed();
let new_transmit_shreds_stats = TransmitShredsStats {
transmit_elapsed: duration_as_us(&transmit_elapsed),
get_peers_elapsed: duration_as_us(&get_peers_elapsed),
send_mmsg_elapsed: send_mmsg_total,
num_shreds: shreds.len(),
};
drop(r_broadcast_peer_cache);
transmit_time.stop();
transmit_stats.transmit_elapsed = transmit_time.as_us();
transmit_stats.get_peers_elapsed = get_peers_time.as_us();
transmit_stats.num_shreds = shreds.len();
// Process metrics
self.update_transmit_metrics(&new_transmit_shreds_stats, &broadcast_shred_batch_info);
self.update_transmit_metrics(&transmit_stats, &broadcast_shred_batch_info);
Ok(())
}
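The transmit path above now refreshes its broadcast peer list at most once per `BROADCAST_PEER_UPDATE_INTERVAL_MS` instead of calling `get_broadcast_peers` on every batch, guarding the refresh with an `AtomicU64` CAS so a single thread recomputes while the rest read the cached `RwLock` copy. One subtlety: `AtomicU64::compare_and_swap(current, new, ordering)` compares against its first argument, and the hunk passes `(now, last)`, which appears inverted and would keep the stored timestamp from ever advancing. A std-only sketch of the intended pattern with the documented argument order (`compare_and_swap` is deprecated in newer Rust in favor of `compare_exchange`, but it matches the code above):

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::RwLock;
use std::time::{SystemTime, UNIX_EPOCH};

const UPDATE_INTERVAL_MS: u64 = 1_000;

fn timestamp_ms() -> u64 {
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64
}

#[derive(Default)]
struct PeerCache {
    peers: Vec<String>, // stand-in for (peers, peers_and_stakes)
}

struct Broadcaster {
    cache: RwLock<PeerCache>,
    last_update: AtomicU64,
}

impl Broadcaster {
    #[allow(deprecated)] // compare_and_swap mirrors the diff; use compare_exchange today
    fn maybe_refresh(&self, recompute: impl Fn() -> Vec<String>) {
        let now = timestamp_ms();
        let last = self.last_update.load(Ordering::Relaxed);
        // compare_and_swap(current, new) returns the previous value, so
        // exactly one caller per interval wins and pays for the refresh.
        if now.saturating_sub(last) > UPDATE_INTERVAL_MS
            && self.last_update.compare_and_swap(last, now, Ordering::Relaxed) == last
        {
            self.cache.write().unwrap().peers = recompute();
        }
    }
}

fn main() {
    let b = Broadcaster {
        cache: RwLock::new(PeerCache::default()),
        last_update: AtomicU64::new(0),
    };
    b.maybe_refresh(|| vec!["peer-a".into(), "peer-b".into()]);
    assert_eq!(b.cache.read().unwrap().peers.len(), 2);
    // Within the interval the closure is never invoked again.
    b.maybe_refresh(|| panic!("refreshed too soon"));
}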
@@ -430,7 +454,7 @@ mod test {
));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut genesis_config = create_genesis_config(10_000).genesis_config;
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot, None) + 1;
let bank0 = Arc::new(Bank::new(&genesis_config));
(
blockstore,
@@ -539,7 +563,11 @@ mod test {
// Interrupting the slot should cause the unfinished_slot and stats to reset
let num_shreds = 1;
assert!(num_shreds < num_shreds_per_slot);
let ticks1 = create_ticks(max_ticks_per_n_shreds(num_shreds), 0, genesis_config.hash());
let ticks1 = create_ticks(
max_ticks_per_n_shreds(num_shreds, None),
0,
genesis_config.hash(),
);
let receive_results = ReceiveResults {
entries: ticks1.clone(),
time_elapsed: Duration::new(2, 0),

File diff suppressed because it is too large


View File

@@ -1,8 +1,10 @@
use crate::{
cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
consensus::VOTE_THRESHOLD_SIZE,
crds_value::CrdsValueLabel,
poh_recorder::PohRecorder,
result::{Error, Result},
rpc_subscriptions::RpcSubscriptions,
sigverify,
verified_vote_packets::VerifiedVotePackets,
};
@@ -14,7 +16,10 @@ use log::*;
use solana_ledger::bank_forks::BankForks;
use solana_metrics::inc_new_counter_debug;
use solana_perf::packet::{self, Packets};
use solana_runtime::{bank::Bank, epoch_stakes::EpochAuthorizedVoters};
use solana_runtime::{
bank::Bank,
epoch_stakes::{EpochAuthorizedVoters, EpochStakes},
};
use solana_sdk::{
clock::{Epoch, Slot},
epoch_schedule::EpochSchedule,
@@ -43,6 +48,7 @@ pub type VerifiedVoteTransactionsReceiver = CrossbeamReceiver<Vec<Transaction>>;
pub struct SlotVoteTracker {
voted: HashSet<Arc<Pubkey>>,
updates: Option<Vec<Arc<Pubkey>>>,
total_stake: u64,
}
impl SlotVoteTracker {
@@ -203,6 +209,7 @@ impl ClusterInfoVoteListener {
poh_recorder: &Arc<Mutex<PohRecorder>>,
vote_tracker: Arc<VoteTracker>,
bank_forks: Arc<RwLock<BankForks>>,
subscriptions: Arc<RpcSubscriptions>,
) -> Self {
let exit_ = exit.clone();
@@ -244,6 +251,7 @@ impl ClusterInfoVoteListener {
verified_vote_transactions_receiver,
vote_tracker,
&bank_forks,
subscriptions,
);
})
.unwrap();
@@ -372,6 +380,7 @@ impl ClusterInfoVoteListener {
vote_txs_receiver: VerifiedVoteTransactionsReceiver,
vote_tracker: Arc<VoteTracker>,
bank_forks: &RwLock<BankForks>,
subscriptions: Arc<RpcSubscriptions>,
) -> Result<()> {
loop {
if exit.load(Ordering::Relaxed) {
@@ -380,10 +389,15 @@ impl ClusterInfoVoteListener {
let root_bank = bank_forks.read().unwrap().root_bank().clone();
vote_tracker.process_new_root_bank(&root_bank);
let epoch_stakes = root_bank.epoch_stakes(root_bank.epoch());
if let Err(e) =
Self::get_and_process_votes(&vote_txs_receiver, &vote_tracker, root_bank.slot())
{
if let Err(e) = Self::get_and_process_votes(
&vote_txs_receiver,
&vote_tracker,
root_bank.slot(),
subscriptions.clone(),
epoch_stakes,
) {
match e {
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
return Ok(());
@@ -397,21 +411,51 @@ impl ClusterInfoVoteListener {
}
}
#[cfg(test)]
pub fn get_and_process_votes_for_tests(
vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
vote_tracker: &Arc<VoteTracker>,
last_root: Slot,
subscriptions: Arc<RpcSubscriptions>,
) -> Result<()> {
Self::get_and_process_votes(
vote_txs_receiver,
vote_tracker,
last_root,
subscriptions,
None,
)
}
fn get_and_process_votes(
vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
vote_tracker: &Arc<VoteTracker>,
last_root: Slot,
subscriptions: Arc<RpcSubscriptions>,
epoch_stakes: Option<&EpochStakes>,
) -> Result<()> {
let timer = Duration::from_millis(200);
let mut vote_txs = vote_txs_receiver.recv_timeout(timer)?;
while let Ok(new_txs) = vote_txs_receiver.try_recv() {
vote_txs.extend(new_txs);
}
Self::process_votes(vote_tracker, vote_txs, last_root);
Self::process_votes(
vote_tracker,
vote_txs,
last_root,
subscriptions,
epoch_stakes,
);
Ok(())
}
fn process_votes(vote_tracker: &VoteTracker, vote_txs: Vec<Transaction>, root: Slot) {
fn process_votes(
vote_tracker: &VoteTracker,
vote_txs: Vec<Transaction>,
root: Slot,
subscriptions: Arc<RpcSubscriptions>,
epoch_stakes: Option<&EpochStakes>,
) {
let mut diff: HashMap<Slot, HashSet<Arc<Pubkey>>> = HashMap::new();
{
let all_slot_trackers = &vote_tracker.slot_vote_trackers;
@@ -463,7 +507,7 @@ impl ClusterInfoVoteListener {
continue;
}
for slot in vote.slots {
for &slot in vote.slots.iter() {
if slot <= root {
continue;
}
@@ -488,6 +532,8 @@ impl ClusterInfoVoteListener {
.or_default()
.insert(unduplicated_pubkey.unwrap());
}
subscriptions.notify_vote(&vote);
}
}
}
@@ -504,15 +550,35 @@ impl ClusterInfoVoteListener {
if w_slot_tracker.updates.is_none() {
w_slot_tracker.updates = Some(vec![]);
}
for pk in slot_diff {
w_slot_tracker.voted.insert(pk.clone());
w_slot_tracker.updates.as_mut().unwrap().push(pk);
let mut current_stake = 0;
for pubkey in slot_diff {
Self::sum_stake(&mut current_stake, epoch_stakes, &pubkey);
w_slot_tracker.voted.insert(pubkey.clone());
w_slot_tracker.updates.as_mut().unwrap().push(pubkey);
}
Self::notify_for_stake_change(
current_stake,
w_slot_tracker.total_stake,
&subscriptions,
epoch_stakes,
slot,
);
w_slot_tracker.total_stake += current_stake;
} else {
let voted: HashSet<_> = slot_diff.into_iter().collect();
let mut total_stake = 0;
let voted: HashSet<_> = slot_diff
.into_iter()
.map(|pubkey| {
Self::sum_stake(&mut total_stake, epoch_stakes, &pubkey);
pubkey
})
.collect();
Self::notify_for_stake_change(total_stake, 0, &subscriptions, epoch_stakes, slot);
let new_slot_tracker = SlotVoteTracker {
voted: voted.clone(),
updates: Some(voted.into_iter().collect()),
total_stake,
};
vote_tracker
.slot_vote_trackers
@@ -522,11 +588,38 @@ impl ClusterInfoVoteListener {
}
}
}
fn notify_for_stake_change(
current_stake: u64,
previous_stake: u64,
subscriptions: &Arc<RpcSubscriptions>,
epoch_stakes: Option<&EpochStakes>,
slot: Slot,
) {
if let Some(stakes) = epoch_stakes {
let supermajority_stake = (stakes.total_stake() as f64 * VOTE_THRESHOLD_SIZE) as u64;
if previous_stake < supermajority_stake
&& (previous_stake + current_stake) > supermajority_stake
{
subscriptions.notify_gossip_subscribers(slot);
}
}
}
fn sum_stake(sum: &mut u64, epoch_stakes: Option<&EpochStakes>, pubkey: &Pubkey) {
if let Some(stakes) = epoch_stakes {
if let Some(vote_account) = stakes.stakes().vote_accounts().get(pubkey) {
*sum += vote_account.0;
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::commitment::BlockCommitmentCache;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_perf::packet;
use solana_runtime::{
bank::Bank,
@@ -623,7 +716,7 @@ mod tests {
#[test]
fn test_update_new_root() {
let (vote_tracker, bank, _) = setup();
let (vote_tracker, bank, _, _) = setup();
// Check outdated slots are purged with new root
let new_voter = Arc::new(Pubkey::new_rand());
@@ -664,7 +757,7 @@ mod tests {
#[test]
fn test_update_new_leader_schedule_epoch() {
let (vote_tracker, bank, _) = setup();
let (vote_tracker, bank, _, _) = setup();
// Check outdated slots are purged with new root
let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot());
@@ -706,7 +799,7 @@ mod tests {
#[test]
fn test_process_votes() {
// Create some voters at genesis
let (vote_tracker, _, validator_voting_keypairs) = setup();
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
let (votes_sender, votes_receiver) = unbounded();
let vote_slots = vec![1, 2];
@@ -725,7 +818,14 @@ mod tests {
});
// Check that all the votes were registered for each validator correctly
ClusterInfoVoteListener::get_and_process_votes(&votes_receiver, &vote_tracker, 0).unwrap();
ClusterInfoVoteListener::get_and_process_votes(
&votes_receiver,
&vote_tracker,
0,
subscriptions,
None,
)
.unwrap();
for vote_slot in vote_slots {
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
let r_slot_vote_tracker = slot_vote_tracker.read().unwrap();
@@ -744,7 +844,7 @@ mod tests {
#[test]
fn test_process_votes2() {
// Create some voters at genesis
let (vote_tracker, _, validator_voting_keypairs) = setup();
let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup();
// Send some votes to process
let (votes_sender, votes_receiver) = unbounded();
@@ -769,7 +869,14 @@ mod tests {
}
// Check that all the votes were registered for each validator correctly
ClusterInfoVoteListener::get_and_process_votes(&votes_receiver, &vote_tracker, 0).unwrap();
ClusterInfoVoteListener::get_and_process_votes(
&votes_receiver,
&vote_tracker,
0,
subscriptions,
None,
)
.unwrap();
for (i, keyset) in validator_voting_keypairs.chunks(2).enumerate() {
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(i as u64 + 1).unwrap();
let r_slot_vote_tracker = &slot_vote_tracker.read().unwrap();
@@ -788,7 +895,7 @@ mod tests {
#[test]
fn test_get_voters_by_epoch() {
// Create some voters at genesis
let (vote_tracker, bank, validator_voting_keypairs) = setup();
let (vote_tracker, bank, validator_voting_keypairs, _) = setup();
let last_known_epoch = bank.get_leader_schedule_epoch(bank.slot());
let last_known_slot = bank
.epoch_schedule()
@@ -859,11 +966,23 @@ mod tests {
100,
);
let bank = Bank::new(&genesis_config);
let exit = Arc::new(AtomicBool::new(false));
let bank_forks = BankForks::new(0, bank);
let bank = bank_forks.get(0).unwrap().clone();
let vote_tracker = VoteTracker::new(&bank);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(bank_forks)),
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore.clone(),
))),
));
// Send a vote to process, should add a reference to the pubkey for that voter
// in the tracker
let validator0_keypairs = &validator_voting_keypairs[0];
let vote_tracker = VoteTracker::new(&bank);
let vote_tx = vec![vote_transaction::new_vote_transaction(
// Must vote > root to be processed
vec![bank.slot() + 1],
@@ -874,7 +993,13 @@ mod tests {
&validator0_keypairs.vote_keypair,
)];
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_tx, 0);
ClusterInfoVoteListener::process_votes(
&vote_tracker,
vote_tx,
0,
subscriptions.clone(),
None,
);
let ref_count = Arc::strong_count(
&vote_tracker
.keys
@@ -924,7 +1049,7 @@ mod tests {
})
.collect();
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0);
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None);
let ref_count = Arc::strong_count(
&vote_tracker
@@ -938,7 +1063,12 @@ mod tests {
assert_eq!(ref_count, current_ref_count);
}
fn setup() -> (Arc<VoteTracker>, Arc<Bank>, Vec<ValidatorVoteKeypairs>) {
fn setup() -> (
Arc<VoteTracker>,
Arc<Bank>,
Vec<ValidatorVoteKeypairs>,
Arc<RpcSubscriptions>,
) {
let validator_voting_keypairs: Vec<_> = (0..10)
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
.collect();
@@ -950,6 +1080,18 @@ mod tests {
);
let bank = Bank::new(&genesis_config);
let vote_tracker = VoteTracker::new(&bank);
let exit = Arc::new(AtomicBool::new(false));
let bank_forks = BankForks::new(0, bank);
let bank = bank_forks.get(0).unwrap().clone();
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(bank_forks)),
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore.clone(),
))),
));
// Integrity Checks
let current_epoch = bank.epoch();
@@ -976,8 +1118,9 @@ mod tests {
assert_eq!(*vote_tracker.current_epoch.read().unwrap(), current_epoch);
(
Arc::new(vote_tracker),
Arc::new(bank),
bank,
validator_voting_keypairs,
subscriptions,
)
}

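The listener changes above exist to drive a new RPC gossip notification: each `SlotVoteTracker` now accumulates `total_stake`, and `notify_for_stake_change` fires only on the update that pushes observed vote stake across the supermajority line, never before and never again after. A standalone sketch of that crossing check (assuming `VOTE_THRESHOLD_SIZE` is 2/3 of total stake, as in the consensus module):

// Assumed threshold value; hedged, not restated in this diff.
const VOTE_THRESHOLD_SIZE: f64 = 2.0 / 3.0;

fn crossed_supermajority(previous_stake: u64, added_stake: u64, total_stake: u64) -> bool {
    let supermajority = (total_stake as f64 * VOTE_THRESHOLD_SIZE) as u64;
    // Mirrors notify_for_stake_change: strict `>` on the sum, so landing
    // exactly on the threshold does not notify.
    previous_stake < supermajority && previous_stake + added_stake > supermajority
}

fn main() {
    let total = 300; // supermajority = 199 after f64 truncation
    assert!(!crossed_supermajority(150, 40, total)); // 190: still below
    assert!(crossed_supermajority(190, 20, total)); // 190 -> 210: crosses
    assert!(!crossed_supermajority(210, 10, total)); // already above
    assert!(!crossed_supermajority(195, 4, total)); // lands exactly on 199
}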
View File

@@ -47,7 +47,29 @@ impl ClusterSlots {
self.keys.write().unwrap().insert(pubkey.clone());
}
let from = self.keys.read().unwrap().get(&pubkey).unwrap().clone();
self.insert_node_id(*slot, from);
let balance = self
.validator_stakes
.read()
.unwrap()
.get(&from)
.map(|v| v.total_stake)
.unwrap_or(0);
let mut slot_pubkeys = self.cluster_slots.read().unwrap().get(slot).cloned();
if slot_pubkeys.is_none() {
let new_slot_pubkeys = Arc::new(RwLock::new(HashMap::default()));
self.cluster_slots
.write()
.unwrap()
.insert(*slot, new_slot_pubkeys.clone());
slot_pubkeys = Some(new_slot_pubkeys);
}
slot_pubkeys
.unwrap()
.write()
.unwrap()
.insert(from.clone(), balance);
}
}
self.cluster_slots.write().unwrap().retain(|x, _| *x > root);
@@ -57,7 +79,6 @@ impl ClusterSlots {
.retain(|x| Arc::strong_count(x) > 1);
*self.since.write().unwrap() = since;
}
pub fn collect(&self, id: &Pubkey) -> HashSet<Slot> {
self.cluster_slots
.read()
@@ -69,30 +90,6 @@ impl ClusterSlots {
.collect()
}
pub fn insert_node_id(&self, slot: Slot, node_id: Arc<Pubkey>) {
let balance = self
.validator_stakes
.read()
.unwrap()
.get(&node_id)
.map(|v| v.total_stake)
.unwrap_or(0);
let mut slot_pubkeys = self.cluster_slots.read().unwrap().get(&slot).cloned();
if slot_pubkeys.is_none() {
let new_slot_pubkeys = Arc::new(RwLock::new(HashMap::default()));
self.cluster_slots
.write()
.unwrap()
.insert(slot, new_slot_pubkeys.clone());
slot_pubkeys = Some(new_slot_pubkeys);
}
slot_pubkeys
.unwrap()
.write()
.unwrap()
.insert(node_id, balance);
}
fn update_peers(&self, cluster_info: &ClusterInfo, bank_forks: &RwLock<BankForks>) {
let root_bank = bank_forks.read().unwrap().root_bank().clone();
let root_epoch = root_bank.epoch();
@@ -140,23 +137,6 @@ impl ClusterSlots {
.collect()
}
pub fn compute_weights_exclude_noncomplete(
&self,
slot: Slot,
repair_peers: &[ContactInfo],
) -> Vec<(u64, usize)> {
let slot_peers = self.lookup(slot);
repair_peers
.iter()
.enumerate()
.filter_map(|(i, x)| {
slot_peers
.as_ref()
.and_then(|v| v.read().unwrap().get(&x.id).map(|stake| (*stake + 1, i)))
})
.collect()
}
pub fn generate_repairs_for_missing_slots(
&self,
self_id: &Pubkey,
@@ -294,43 +274,6 @@ mod tests {
);
}
#[test]
fn test_best_completed_slot_peer() {
let cs = ClusterSlots::default();
let mut contact_infos = vec![ContactInfo::default(); 2];
for ci in contact_infos.iter_mut() {
ci.id = Pubkey::new_rand();
}
let slot = 9;
// None of these validators have completed slot 9, so should
// return nothing
assert!(cs
.compute_weights_exclude_noncomplete(slot, &contact_infos)
.is_empty());
// Give second validator max stake
let validator_stakes: HashMap<_, _> = vec![(
*Arc::new(contact_infos[1].id),
NodeVoteAccounts {
total_stake: std::u64::MAX / 2,
vote_accounts: vec![Pubkey::default()],
},
)]
.into_iter()
.collect();
*cs.validator_stakes.write().unwrap() = Arc::new(validator_stakes);
// Mark the first validator as completed slot 9, should pick that validator,
// even though it only has default stake, while the other validator has
// max stake
cs.insert_node_id(slot, Arc::new(contact_infos[0].id));
assert_eq!(
cs.compute_weights_exclude_noncomplete(slot, &contact_infos),
vec![(1, 0)]
);
}
#[test]
fn test_update_new_staked_slot() {
let cs = ClusterSlots::default();

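In the cluster_slots diff above, `insert_node_id` is inlined into the update loop and the now-unused `compute_weights_exclude_noncomplete` helper and its test are dropped. The inlined code is a read-mostly nested-map insert: take the outer write lock only when the slot entry is missing, then record the node's stake under the per-slot inner lock. A compilable sketch with stand-in types:

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

type Slot = u64;
type NodeId = [u8; 32]; // stand-in for Pubkey

struct ClusterSlots {
    cluster_slots: RwLock<HashMap<Slot, Arc<RwLock<HashMap<NodeId, u64>>>>>,
}

impl ClusterSlots {
    // Outer write lock only on the miss path; the common case needs
    // just a read lock plus the per-slot inner lock.
    fn insert(&self, slot: Slot, node: NodeId, stake: u64) {
        let mut slot_pubkeys = self.cluster_slots.read().unwrap().get(&slot).cloned();
        if slot_pubkeys.is_none() {
            let new_map = Arc::new(RwLock::new(HashMap::new()));
            self.cluster_slots.write().unwrap().insert(slot, new_map.clone());
            slot_pubkeys = Some(new_map);
        }
        slot_pubkeys.unwrap().write().unwrap().insert(node, stake);
    }
}

fn main() {
    let cs = ClusterSlots { cluster_slots: RwLock::new(HashMap::new()) };
    cs.insert(9, [1u8; 32], 42);
    let outer = cs.cluster_slots.read().unwrap();
    assert_eq!(outer[&9].read().unwrap()[&[1u8; 32]], 42);
}

A `HashMap::entry(...).or_insert_with(...)` under one write lock would close the small miss-path race two concurrent inserters can hit; the sketch keeps the diff's read-then-write shape.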
View File

@@ -1,7 +1,7 @@
use crate::consensus::VOTE_THRESHOLD_SIZE;
use crate::{consensus::VOTE_THRESHOLD_SIZE, rpc_subscriptions::RpcSubscriptions};
use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_info;
use solana_metrics::datapoint_info;
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
@@ -14,6 +14,14 @@ use std::{
time::Duration,
};
#[derive(Default)]
pub struct CacheSlotInfo {
pub current_slot: Slot,
pub node_root: Slot,
pub largest_confirmed_root: Slot,
pub highest_confirmed_slot: Slot,
}
pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
@@ -53,6 +61,7 @@ pub struct BlockCommitmentCache {
bank: Arc<Bank>,
blockstore: Arc<Blockstore>,
root: Slot,
highest_confirmed_slot: Slot,
}
impl std::fmt::Debug for BlockCommitmentCache {
@@ -77,6 +86,7 @@ impl BlockCommitmentCache {
bank: Arc<Bank>,
blockstore: Arc<Blockstore>,
root: Slot,
highest_confirmed_slot: Slot,
) -> Self {
Self {
block_commitment,
@@ -85,6 +95,7 @@ impl BlockCommitmentCache {
bank,
blockstore,
root,
highest_confirmed_slot,
}
}
@@ -96,6 +107,7 @@ impl BlockCommitmentCache {
bank: Arc::new(Bank::default()),
blockstore,
root: Slot::default(),
highest_confirmed_slot: Slot::default(),
}
}
@@ -123,6 +135,26 @@ impl BlockCommitmentCache {
self.root
}
pub fn highest_confirmed_slot(&self) -> Slot {
self.highest_confirmed_slot
}
fn highest_slot_with_confirmation_count(&self, confirmation_count: usize) -> Slot {
assert!(confirmation_count > 0 && confirmation_count <= MAX_LOCKOUT_HISTORY);
for slot in (self.root()..self.slot()).rev() {
if let Some(count) = self.get_confirmation_count(slot) {
if count >= confirmation_count {
return slot;
}
}
}
self.root
}
fn calculate_highest_confirmed_slot(&self) -> Slot {
self.highest_slot_with_confirmation_count(1)
}
pub fn get_confirmation_count(&self, slot: Slot) -> Option<usize> {
self.get_lockout_count(slot, VOTE_THRESHOLD_SIZE)
}
@@ -159,11 +191,30 @@ impl BlockCommitmentCache {
largest_confirmed_root: Slot::default(),
bank: Arc::new(Bank::default()),
root: Slot::default(),
highest_confirmed_slot: Slot::default(),
}
}
#[cfg(test)]
pub(crate) fn set_get_largest_confirmed_root(&mut self, root: Slot) {
pub fn new_for_tests_with_blockstore_bank(
blockstore: Arc<Blockstore>,
bank: Arc<Bank>,
root: Slot,
) -> Self {
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
block_commitment.insert(0, BlockCommitment::default());
Self {
block_commitment,
blockstore,
total_stake: 42,
largest_confirmed_root: root,
bank,
root,
highest_confirmed_slot: root,
}
}
pub(crate) fn set_largest_confirmed_root(&mut self, root: Slot) {
self.largest_confirmed_root = root;
}
}
@@ -204,6 +255,7 @@ impl AggregateCommitmentService {
pub fn new(
exit: &Arc<AtomicBool>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
subscriptions: Arc<RpcSubscriptions>,
) -> (Sender<CommitmentAggregationData>, Self) {
let (sender, receiver): (
Sender<CommitmentAggregationData>,
@@ -221,7 +273,7 @@ impl AggregateCommitmentService {
}
if let Err(RecvTimeoutError::Disconnected) =
Self::run(&receiver, &block_commitment_cache, &exit_)
Self::run(&receiver, &block_commitment_cache, &subscriptions, &exit_)
{
break;
}
@@ -234,6 +286,7 @@ impl AggregateCommitmentService {
fn run(
receiver: &Receiver<CommitmentAggregationData>,
block_commitment_cache: &RwLock<BlockCommitmentCache>,
subscriptions: &Arc<RpcSubscriptions>,
exit: &Arc<AtomicBool>,
) -> Result<(), RecvTimeoutError> {
loop {
@@ -266,16 +319,30 @@ impl AggregateCommitmentService {
aggregation_data.bank,
block_commitment_cache.read().unwrap().blockstore.clone(),
aggregation_data.root,
aggregation_data.root,
);
new_block_commitment.highest_confirmed_slot =
new_block_commitment.calculate_highest_confirmed_slot();
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
aggregate_commitment_time.stop();
inc_new_counter_info!(
"aggregate-commitment-ms",
aggregate_commitment_time.as_ms() as usize
datapoint_info!(
"block-commitment-cache",
(
"aggregate-commitment-ms",
aggregate_commitment_time.as_ms() as i64,
i64
)
);
subscriptions.notify_subscribers(CacheSlotInfo {
current_slot: w_block_commitment_cache.slot(),
node_root: w_block_commitment_cache.root(),
largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
});
}
}
@@ -365,7 +432,7 @@ mod tests {
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::{genesis_config::GenesisConfig, pubkey::Pubkey};
use solana_stake_program::stake_state;
use solana_vote_program::vote_state::{self, VoteStateVersions};
@@ -402,7 +469,7 @@ mod tests {
block_commitment.entry(1).or_insert(cache1.clone());
block_commitment.entry(2).or_insert(cache2.clone());
let block_commitment_cache =
BlockCommitmentCache::new(block_commitment, 0, 50, bank, blockstore, 0);
BlockCommitmentCache::new(block_commitment, 0, 50, bank, blockstore, 0, 0);
assert_eq!(block_commitment_cache.get_confirmation_count(0), Some(2));
assert_eq!(block_commitment_cache.get_confirmation_count(1), Some(1));
@@ -436,6 +503,7 @@ mod tests {
bank,
blockstore,
0,
0,
);
assert!(block_commitment_cache.is_confirmed_rooted(0));
@@ -459,6 +527,114 @@ mod tests {
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 1);
}
#[test]
fn test_highest_confirmed_slot() {
let bank = Arc::new(Bank::new(&GenesisConfig::default()));
let bank_slot_5 = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 5));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let total_stake = 50;
// Build cache with confirmation_count 2 given total_stake
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 5);
cache0.increase_confirmation_stake(2, 40);
// Build cache with confirmation_count 1 given total_stake
let mut cache1 = BlockCommitment::default();
cache1.increase_confirmation_stake(1, 40);
cache1.increase_confirmation_stake(2, 5);
// Build cache with confirmation_count 0 given total_stake
let mut cache2 = BlockCommitment::default();
cache2.increase_confirmation_stake(1, 20);
cache2.increase_confirmation_stake(2, 5);
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache0.clone()); // Slot 1, conf 2
block_commitment.entry(2).or_insert(cache1.clone()); // Slot 2, conf 1
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 2);
// Build map with multiple slots at conf 1
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache1.clone()); // Slot 1, conf 1
block_commitment.entry(2).or_insert(cache1.clone()); // Slot 2, conf 1
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 2);
// Build map with slot gaps
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache1.clone()); // Slot 1, conf 1
block_commitment.entry(3).or_insert(cache1.clone()); // Slot 3, conf 1
block_commitment.entry(5).or_insert(cache2.clone()); // Slot 5, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 3);
// Build map with no conf 1 slots, but one higher
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache0.clone()); // Slot 1, conf 2
block_commitment.entry(2).or_insert(cache2.clone()); // Slot 2, conf 0
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 1);
// Build map with no conf 1 or higher slots
let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache2.clone()); // Slot 1, conf 0
block_commitment.entry(2).or_insert(cache2.clone()); // Slot 2, conf 0
block_commitment.entry(3).or_insert(cache2.clone()); // Slot 3, conf 0
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
0,
total_stake,
bank_slot_5.clone(),
blockstore.clone(),
0,
0,
);
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 0);
}
#[test]
fn test_aggregate_commitment_for_vote_account_1() {
let ancestors = vec![3, 4, 5, 7, 9, 11];

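`highest_confirmed_slot` is computed by scanning from the tip back toward the root and returning the first slot whose confirmation count meets the threshold, falling back to the root itself. A self-contained sketch mirroring the new test's "slot gaps" case (the map stands in for the cache's `get_confirmation_count` accessor):

use std::collections::HashMap;

type Slot = u64;

// Walk from the newest slot back toward the root; the first slot whose
// confirmation count meets the threshold wins, else fall back to root.
fn highest_slot_with_confirmation_count(
    confirmation_counts: &HashMap<Slot, usize>,
    root: Slot,
    tip: Slot,
    required: usize,
) -> Slot {
    assert!(required > 0);
    for slot in (root..tip).rev() {
        if let Some(&count) = confirmation_counts.get(&slot) {
            if count >= required {
                return slot;
            }
        }
    }
    root
}

fn main() {
    // Mirrors the "slot gaps" test above: conf 1 at slots 1 and 3,
    // conf 0 at slot 5, bank at slot 5, root 0 => answer is 3.
    let counts: HashMap<Slot, usize> = [(1, 1), (3, 1), (5, 0)].into_iter().collect();
    assert_eq!(highest_slot_with_confirmation_count(&counts, 0, 5, 1), 3);
}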
View File

@@ -36,6 +36,7 @@ use std::collections::HashMap;
pub struct Crds {
/// Stores the map of labels and values
pub table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
pub num_inserts: usize,
}
#[derive(PartialEq, Debug)]
@@ -84,6 +85,7 @@ impl Default for Crds {
fn default() -> Self {
Crds {
table: IndexMap::new(),
num_inserts: 0,
}
}
}
@@ -93,6 +95,24 @@ impl Crds {
pub fn new_versioned(&self, local_timestamp: u64, value: CrdsValue) -> VersionedCrdsValue {
VersionedCrdsValue::new(local_timestamp, value)
}
pub fn would_insert(
&self,
value: CrdsValue,
local_timestamp: u64,
) -> Option<VersionedCrdsValue> {
let new_value = self.new_versioned(local_timestamp, value);
let label = new_value.value.label();
let would_insert = self
.table
.get(&label)
.map(|current| new_value > *current)
.unwrap_or(true);
if would_insert {
Some(new_value)
} else {
None
}
}
/// insert the new value, returns the old value if insert succeeds
pub fn insert_versioned(
&mut self,
@@ -107,6 +127,7 @@ impl Crds {
.unwrap_or(true);
if do_insert {
let old = self.table.insert(label, new_value);
self.num_inserts += 1;
Ok(old)
} else {
trace!("INSERT FAILED data: {} new.wallclock: {}", label, wallclock,);

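`would_insert` is the read-only half of a check/apply split: it builds the `VersionedCrdsValue` and reports whether it would supersede the current entry, without the mutable borrow that `insert_versioned` needs. That lets the pull path classify an entire response batch before touching the table's write path. A minimal sketch of the pattern:

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq, PartialOrd)]
struct Versioned {
    version: u64, // the real table orders by wallclock and value hash
}

struct Table {
    table: HashMap<String, Versioned>,
}

impl Table {
    // Read-only half of the split: report what *would* be inserted
    // without needing a mutable borrow of the table.
    fn would_insert(&self, label: &str, new: Versioned) -> Option<Versioned> {
        let newer = self.table.get(label).map(|cur| new > *cur).unwrap_or(true);
        if newer { Some(new) } else { None }
    }
}

fn main() {
    let mut t = Table { table: HashMap::new() };
    t.table.insert("node".to_string(), Versioned { version: 3 });
    assert!(t.would_insert("node", Versioned { version: 4 }).is_some());
    assert!(t.would_insert("node", Versioned { version: 2 }).is_none());
}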
View File

@@ -6,7 +6,7 @@
use crate::{
crds::{Crds, VersionedCrdsValue},
crds_gossip_error::CrdsGossipError,
crds_gossip_pull::{CrdsFilter, CrdsGossipPull},
crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats},
crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE},
crds_value::{CrdsValue, CrdsValueLabel},
};
@@ -76,17 +76,10 @@ impl CrdsGossip {
stakes: &HashMap<Pubkey, u64>,
) -> HashMap<Pubkey, HashSet<Pubkey>> {
let id = &self.id;
let crds = &self.crds;
let push = &mut self.push;
let versioned = labels
.into_iter()
.filter_map(|label| crds.lookup_versioned(&label));
let mut prune_map: HashMap<Pubkey, HashSet<_>> = HashMap::new();
for val in versioned {
let origin = val.value.pubkey();
let hash = val.value_hash;
let peers = push.prune_received_cache(id, &origin, hash, stakes);
for origin in labels.iter().map(|k| k.pubkey()) {
let peers = push.prune_received_cache(id, &origin, stakes);
for from in peers {
prune_map.entry(from).or_default().insert(origin);
}
@@ -113,7 +106,7 @@ impl CrdsGossip {
return Err(CrdsGossipError::PruneMessageTimeout);
}
if self.id == *destination {
self.push.process_prune_msg(peer, origin);
self.push.process_prune_msg(&self.id, peer, origin);
Ok(())
} else {
Err(CrdsGossipError::BadPruneDestination)
@@ -158,24 +151,47 @@ impl CrdsGossip {
self.pull.mark_pull_request_creation_time(from, now)
}
/// process a pull request and create a response
pub fn process_pull_requests(
&mut self,
filters: Vec<(CrdsValue, CrdsFilter)>,
now: u64,
) -> Vec<Vec<CrdsValue>> {
pub fn process_pull_requests(&mut self, filters: Vec<(CrdsValue, CrdsFilter)>, now: u64) {
self.pull
.process_pull_requests(&mut self.crds, filters, now)
.process_pull_requests(&mut self.crds, filters, now);
}
/// process a pull response
pub fn process_pull_response(
&mut self,
from: &Pubkey,
pub fn generate_pull_responses(
&self,
filters: &[(CrdsValue, CrdsFilter)],
) -> Vec<Vec<CrdsValue>> {
self.pull.generate_pull_responses(&self.crds, filters)
}
pub fn filter_pull_responses(
&self,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
process_pull_stats: &mut ProcessPullStats,
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
self.pull
.process_pull_response(&mut self.crds, from, timeouts, response, now)
.filter_pull_responses(&self.crds, timeouts, response, now, process_pull_stats)
}
/// process a pull response
pub fn process_pull_responses(
&mut self,
from: &Pubkey,
responses: Vec<VersionedCrdsValue>,
responses_expired_timeout: Vec<VersionedCrdsValue>,
now: u64,
process_pull_stats: &mut ProcessPullStats,
) {
let success = self.pull.process_pull_responses(
&mut self.crds,
from,
responses,
responses_expired_timeout,
now,
process_pull_stats,
);
self.push.push_pull_responses(success, now);
}
pub fn make_timeouts_test(&self) -> HashMap<Pubkey, u64> {

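The pull flow is reordered into explicit phases: `generate_pull_responses` reads the table, `process_pull_requests` then applies the callers' own values, and responses are filtered (`filter_pull_responses`) before a single mutating `process_pull_responses`. One consequence, visible in the updated tests further down, is that a requester's caller value can no longer leak into the response generated for that same request. A toy mock of the ordering:

// Toy mock; the real CRDS table is an IndexMap of signed values.
struct Gossip {
    crds: Vec<u64>,
}

impl Gossip {
    // Phase 1: responses come from the table as it was before the
    // requests were applied (shared borrow only).
    fn generate_pull_responses(&self, wanted: impl Fn(&u64) -> bool) -> Vec<u64> {
        self.crds.iter().copied().filter(|v| wanted(v)).collect()
    }
    // Phase 2: only now are the callers' own values inserted.
    fn process_pull_requests(&mut self, callers: Vec<u64>) {
        self.crds.extend(callers);
    }
}

fn main() {
    let mut g = Gossip { crds: vec![1, 2, 3] };
    let responses = g.generate_pull_responses(|v| *v > 1);
    g.process_pull_requests(vec![99]);
    assert_eq!(responses, vec![2, 3]); // the caller's 99 never leaks back
    assert!(g.crds.contains(&99));
}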
View File

@@ -2,7 +2,6 @@
pub enum CrdsGossipError {
NoPeers,
PushMessageTimeout,
PushMessageAlreadyReceived,
PushMessageOldVersion,
BadPruneDestination,
PruneMessageTimeout,

View File

@@ -10,7 +10,7 @@
//! of false positives.
use crate::contact_info::ContactInfo;
use crate::crds::Crds;
use crate::crds::{Crds, VersionedCrdsValue};
use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
@@ -20,8 +20,8 @@ use solana_runtime::bloom::Bloom;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use std::cmp;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::collections::{HashMap, HashSet};
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
// The maximum age of a value received over pull responses
@@ -118,6 +118,14 @@ impl CrdsFilter {
}
}
#[derive(Default)]
pub struct ProcessPullStats {
pub success: usize,
pub failed_insert: usize,
pub failed_timeout: usize,
pub timeout_count: usize,
}
#[derive(Clone)]
pub struct CrdsGossipPull {
/// timestamp of last request
@@ -126,6 +134,7 @@ pub struct CrdsGossipPull {
purged_values: VecDeque<(Hash, u64)>,
pub crds_timeout: u64,
pub msg_timeout: u64,
pub num_pulls: usize,
}
impl Default for CrdsGossipPull {
@@ -135,6 +144,7 @@ impl Default for CrdsGossipPull {
pull_request_time: HashMap::new(),
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
num_pulls: 0,
}
}
}
@@ -204,14 +214,13 @@ impl CrdsGossipPull {
self.purged_values.push_back((hash, timestamp))
}
/// process a pull request and create a response
/// process a pull request
pub fn process_pull_requests(
&mut self,
crds: &mut Crds,
requests: Vec<(CrdsValue, CrdsFilter)>,
now: u64,
) -> Vec<Vec<CrdsValue>> {
let rv = self.filter_crds_values(crds, &requests);
) {
requests.into_iter().for_each(|(caller, _)| {
let key = caller.label().pubkey();
let old = crds.insert(caller, now);
@@ -221,19 +230,33 @@ impl CrdsGossipPull {
}
crds.update_record_timestamp(&key, now);
});
rv
}
/// process a pull response
pub fn process_pull_response(
&mut self,
crds: &mut Crds,
from: &Pubkey,
/// Create gossip responses to pull requests
pub fn generate_pull_responses(
&self,
crds: &Crds,
requests: &[(CrdsValue, CrdsFilter)],
) -> Vec<Vec<CrdsValue>> {
self.filter_crds_values(crds, requests)
}
// Checks if responses should be inserted and
// returns those responses converted to VersionedCrdsValue
// Separated in two vecs as:
// .0 => responses that update the owner timestamp
// .1 => responses that do not update the owner timestamp
pub fn filter_pull_responses(
&self,
crds: &Crds,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
responses: Vec<CrdsValue>,
now: u64,
) -> usize {
let mut failed = 0;
for r in response {
stats: &mut ProcessPullStats,
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
let mut versioned = vec![];
let mut versioned_expired_timestamp = vec![];
for r in responses {
let owner = r.label().pubkey();
// Check if the crds value is older than the msg_timeout
if now
@@ -252,11 +275,8 @@ impl CrdsGossipPull {
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|| now + timeout < r.wallclock()
{
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
stats.timeout_count += 1;
stats.failed_timeout += 1;
continue;
}
}
@@ -264,32 +284,69 @@ impl CrdsGossipPull {
// Before discarding this value, check if a ContactInfo for the owner
// exists in the table. If it doesn't, that implies that this value can be discarded
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
stats.timeout_count += 1;
stats.failed_timeout += 1;
continue;
} else {
// Silently insert this old value without bumping record timestamps
failed += crds.insert(r, now).is_err() as usize;
match crds.would_insert(r, now) {
Some(resp) => versioned_expired_timestamp.push(resp),
None => stats.failed_insert += 1,
}
continue;
}
}
}
}
let old = crds.insert(r, now);
failed += old.is_err() as usize;
match crds.would_insert(r, now) {
Some(resp) => versioned.push(resp),
None => stats.failed_insert += 1,
}
}
(versioned, versioned_expired_timestamp)
}
/// process a vec of pull responses
pub fn process_pull_responses(
&mut self,
crds: &mut Crds,
from: &Pubkey,
responses: Vec<VersionedCrdsValue>,
responses_expired_timeout: Vec<VersionedCrdsValue>,
now: u64,
stats: &mut ProcessPullStats,
) -> Vec<(CrdsValueLabel, Hash, u64)> {
let mut success = vec![];
let mut owners = HashSet::new();
for r in responses_expired_timeout {
stats.failed_insert += crds.insert_versioned(r).is_err() as usize;
}
for r in responses {
let owner = r.value.label().pubkey();
let label = r.value.label();
let wc = r.value.wallclock();
let hash = r.value_hash;
let old = crds.insert_versioned(r);
if old.is_err() {
stats.failed_insert += 1;
} else {
stats.success += 1;
self.num_pulls += 1;
success.push((label, hash, wc));
}
old.ok().map(|opt| {
crds.update_record_timestamp(&owner, now);
owners.insert(owner);
opt.map(|val| {
self.purged_values
.push_back((val.value_hash, val.local_timestamp))
})
});
}
crds.update_record_timestamp(from, now);
failed
owners.insert(*from);
for owner in owners {
crds.update_record_timestamp(&owner, now);
}
success
}
// build a set of filters of the current crds table
// num_filters - used to increase the likelihood of a value in crds being added to some filter
@@ -379,6 +436,34 @@ impl CrdsGossipPull {
.count();
self.purged_values.drain(..cnt);
}
/// For legacy tests
#[cfg(test)]
pub fn process_pull_response(
&mut self,
crds: &mut Crds,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> (usize, usize, usize) {
let mut stats = ProcessPullStats::default();
let (versioned, versioned_expired_timeout) =
self.filter_pull_responses(crds, timeouts, response, now, &mut stats);
self.process_pull_responses(
crds,
from,
versioned,
versioned_expired_timeout,
now,
&mut stats,
);
(
stats.failed_timeout + stats.failed_insert,
stats.timeout_count,
stats.success,
)
}
}
#[cfg(test)]
mod test {
@@ -578,8 +663,9 @@ mod test {
let mut dest_crds = Crds::default();
let mut dest = CrdsGossipPull::default();
let (_, filters, caller) = req.unwrap();
let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.process_pull_requests(&mut dest_crds, filters, 1);
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.generate_pull_responses(&dest_crds, &filters);
dest.process_pull_requests(&mut dest_crds, filters, 1);
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
assert!(dest_crds.lookup(&caller.label()).is_some());
assert_eq!(
@@ -648,8 +734,9 @@ mod test {
PACKET_DATA_SIZE,
);
let (_, filters, caller) = req.unwrap();
let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.process_pull_requests(&mut dest_crds, filters, 0);
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters);
dest.process_pull_requests(&mut dest_crds, filters, 0);
// if there is a false positive this is empty
// prob should be around 0.1 per iteration
if rsp.is_empty() {
@@ -660,13 +747,15 @@ mod test {
continue;
}
assert_eq!(rsp.len(), 1);
let failed = node.process_pull_response(
&mut node_crds,
&node_pubkey,
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
rsp.pop().unwrap(),
1,
);
let failed = node
.process_pull_response(
&mut node_crds,
&node_pubkey,
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
rsp.pop().unwrap(),
1,
)
.0;
assert_eq!(failed, 0);
assert_eq!(
node_crds
@@ -827,7 +916,8 @@ mod test {
&timeouts,
vec![peer_entry.clone()],
1,
),
)
.0,
0
);
@@ -843,7 +933,8 @@ mod test {
&timeouts,
vec![peer_entry.clone(), unstaked_peer_entry],
node.msg_timeout + 100,
),
)
.0,
2
);
@@ -856,7 +947,8 @@ mod test {
&timeouts,
vec![peer_entry.clone()],
node.msg_timeout + 1,
),
)
.0,
0
);
@@ -872,7 +964,8 @@ mod test {
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
)
.0,
0
);
@@ -885,7 +978,8 @@ mod test {
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
)
.0,
1
);
}

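`filter_pull_responses` classifies each response into one of three buckets: fresh values, expired values whose owner already has a `ContactInfo` in the table (inserted quietly, without bumping record timestamps), and expired values from unknown owners (dropped and counted in `ProcessPullStats`). A rough sketch of that decision, simplified from the code above (the real function also special-cases `ContactInfo` values themselves):

enum Disposition {
    Fresh,         // within timeout: normal insert path
    InsertQuietly, // expired, but the owner's ContactInfo is known
    Drop,          // expired, owner unknown: count as failed_timeout
}

fn classify(wallclock: u64, now: u64, timeout: u64, owner_has_contact_info: bool) -> Disposition {
    // Expired if too old *or* implausibly far in the future.
    let expired = now > wallclock.saturating_add(timeout) || now + timeout < wallclock;
    if !expired {
        Disposition::Fresh
    } else if owner_has_contact_info {
        Disposition::InsertQuietly // kept, record timestamps not bumped
    } else {
        Disposition::Drop
    }
}

fn main() {
    assert!(matches!(classify(100, 90, 50, false), Disposition::Fresh));
    assert!(matches!(classify(10, 200, 50, true), Disposition::InsertQuietly));
    assert!(matches!(classify(10, 200, 50, false), Disposition::Drop));
}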
View File

@@ -35,6 +35,7 @@ pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;
#[derive(Clone)]
pub struct CrdsGossipPush {
@@ -44,12 +45,18 @@ pub struct CrdsGossipPush {
active_set: IndexMap<Pubkey, Bloom<Pubkey>>,
/// push message queue
push_messages: HashMap<CrdsValueLabel, Hash>,
/// cache that tracks which validators a message was received from
received_cache: HashMap<Hash, (u64, HashSet<Pubkey>)>,
/// Cache that tracks which validators a message was received from
/// bool indicates it has been pruned.
/// This cache represents a lagging view of which validators
/// currently have this node in their `active_set`
received_cache: HashMap<Pubkey, HashMap<Pubkey, (bool, u64)>>,
pub num_active: usize,
pub push_fanout: usize,
pub msg_timeout: u64,
pub prune_timeout: u64,
pub num_total: usize,
pub num_old: usize,
pub num_pushes: usize,
}
impl Default for CrdsGossipPush {
@@ -64,6 +71,9 @@ impl Default for CrdsGossipPush {
push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
msg_timeout: CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
prune_timeout: CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS,
num_total: 0,
num_old: 0,
num_pushes: 0,
}
}
}
@@ -81,18 +91,21 @@ impl CrdsGossipPush {
&mut self,
self_pubkey: &Pubkey,
origin: &Pubkey,
hash: Hash,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<Pubkey> {
let origin_stake = stakes.get(origin).unwrap_or(&0);
let self_stake = stakes.get(self_pubkey).unwrap_or(&0);
let cache = self.received_cache.get(&hash);
let cache = self.received_cache.get(origin);
if cache.is_none() {
return Vec::new();
}
let peers = cache.unwrap();
let peers = &cache.unwrap().1;
let peer_stake_total: u64 = peers.iter().map(|p| stakes.get(p).unwrap_or(&0)).sum();
let peer_stake_total: u64 = peers
.iter()
.filter(|v| !(v.1).0)
.map(|v| stakes.get(v.0).unwrap_or(&0))
.sum();
let prune_stake_threshold = Self::prune_stake_threshold(*self_stake, *origin_stake);
if peer_stake_total < prune_stake_threshold {
return Vec::new();
@@ -100,7 +113,8 @@ impl CrdsGossipPush {
let staked_peers: Vec<(Pubkey, u64)> = peers
.iter()
.filter_map(|p| stakes.get(p).map(|s| (*p, *s)))
.filter(|v| !(v.1).0)
.filter_map(|p| stakes.get(p.0).map(|s| (*p.0, *s)))
.filter(|(_, s)| *s > 0)
.collect();
@@ -117,16 +131,27 @@ impl CrdsGossipPush {
let (next_peer, next_stake) = staked_peers[next];
keep.insert(next_peer);
peer_stake_sum += next_stake;
if peer_stake_sum >= prune_stake_threshold {
if peer_stake_sum >= prune_stake_threshold
&& keep.len() >= CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES
{
break;
}
}
peers
.iter()
let pruned_peers: Vec<Pubkey> = peers
.keys()
.filter(|p| !keep.contains(p))
.cloned()
.collect()
.collect();
pruned_peers.iter().for_each(|p| {
self.received_cache
.get_mut(origin)
.unwrap()
.get_mut(p)
.unwrap()
.0 = true;
});
pruned_peers
}
/// process a push message to the network
@@ -137,6 +162,7 @@ impl CrdsGossipPush {
value: CrdsValue,
now: u64,
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
self.num_total += 1;
if now
> value
.wallclock()
@@ -149,21 +175,32 @@ impl CrdsGossipPush {
return Err(CrdsGossipError::PushMessageTimeout);
}
let label = value.label();
let origin = label.pubkey();
let new_value = crds.new_versioned(now, value);
let value_hash = new_value.value_hash;
if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
received_set.insert(from.clone());
return Err(CrdsGossipError::PushMessageAlreadyReceived);
}
let received_set = self
.received_cache
.entry(origin)
.or_insert_with(HashMap::new);
received_set.entry(*from).or_insert((false, 0)).1 = now;
let old = crds.insert_versioned(new_value);
if old.is_err() {
self.num_old += 1;
return Err(CrdsGossipError::PushMessageOldVersion);
}
let mut received_set = HashSet::new();
received_set.insert(from.clone());
self.push_messages.insert(label, value_hash);
self.received_cache.insert(value_hash, (now, received_set));
Ok(old.ok().and_then(|opt| opt))
Ok(old.unwrap())
}
/// push pull responses
pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) {
for (label, value_hash, wc) in values {
if now > wc.checked_add(self.msg_timeout).unwrap_or_else(|| 0) {
continue;
}
self.push_messages.insert(label, value_hash);
}
}
/// New push message to broadcast to peers.
@@ -172,18 +209,10 @@ impl CrdsGossipPush {
/// The list of push messages is created such that all the randomly selected peers have not
/// pruned the source addresses.
pub fn new_push_messages(&mut self, crds: &Crds, now: u64) -> HashMap<Pubkey, Vec<CrdsValue>> {
let max = self.active_set.len();
let mut nodes: Vec<_> = (0..max).collect();
nodes.shuffle(&mut rand::thread_rng());
let peers: Vec<Pubkey> = nodes
.into_iter()
.filter_map(|n| self.active_set.get_index(n))
.take(self.push_fanout)
.map(|n| *n.0)
.collect();
let mut total_bytes: usize = 0;
let mut values = vec![];
let mut push_messages: HashMap<Pubkey, Vec<CrdsValue>> = HashMap::new();
trace!("new_push_messages {}", self.push_messages.len());
for (label, hash) in &self.push_messages {
let res = crds.lookup_versioned(label);
if res.is_none() {
@@ -203,21 +232,37 @@ impl CrdsGossipPush {
}
values.push(value.clone());
}
trace!(
"new_push_messages {} {}",
values.len(),
self.active_set.len()
);
for v in values {
for p in peers.iter() {
let filter = self.active_set.get_mut(p);
if filter.is_some() && !filter.unwrap().contains(&v.label().pubkey()) {
push_messages.entry(*p).or_default().push(v.clone());
//use a consistent index for the same origin so
//the active set learns the MST for that origin
let start = v.label().pubkey().as_ref()[0] as usize;
let max = self.push_fanout.min(self.active_set.len());
for i in start..(start + max) {
let ix = i % self.active_set.len();
if let Some((p, filter)) = self.active_set.get_index(ix) {
if !filter.contains(&v.label().pubkey()) {
trace!("new_push_messages insert {} {:?}", *p, v);
push_messages.entry(*p).or_default().push(v.clone());
self.num_pushes += 1;
}
}
self.push_messages.remove(&v.label());
}
self.push_messages.remove(&v.label());
}
push_messages
}
/// add the `from` to the peer's filter of nodes
pub fn process_prune_msg(&mut self, peer: &Pubkey, origins: &[Pubkey]) {
pub fn process_prune_msg(&mut self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
for origin in origins {
if origin == self_pubkey {
continue;
}
if let Some(p) = self.active_set.get_mut(peer) {
p.add(origin)
}
@@ -339,15 +384,11 @@ impl CrdsGossipPush {
/// purge received push message cache
pub fn purge_old_received_cache(&mut self, min_time: u64) {
let old_msgs: Vec<Hash> = self
.received_cache
.iter()
.filter_map(|(k, (rcvd_time, _))| if *rcvd_time < min_time { Some(k) } else { None })
.cloned()
.collect();
for k in old_msgs {
self.received_cache.remove(&k);
}
self.received_cache
.iter_mut()
.for_each(|v| v.1.retain(|_, v| v.1 > min_time));
self.received_cache.retain(|_, v| !v.is_empty());
}
}
@@ -371,7 +412,6 @@ mod test {
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&origin, 0,
)));
let label = value.label();
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
let mut low_staked_set = HashSet::new();
low_staked_peers.for_each(|p| {
@@ -380,11 +420,7 @@ mod test {
stakes.insert(p, 1);
});
let versioned = crds
.lookup_versioned(&label)
.expect("versioned value should exist");
let hash = versioned.value_hash;
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
assert!(
pruned.is_empty(),
"should not prune if min threshold has not been reached"
@@ -395,7 +431,7 @@ mod test {
stakes.insert(high_staked_peer, high_stake);
let _ = push.process_push_message(&mut crds, &high_staked_peer, value.clone(), 0);
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
assert!(
pruned.len() < low_staked_set.len() + 1,
"should not prune all peers"
@@ -409,7 +445,7 @@ mod test {
}
#[test]
fn test_process_push() {
fn test_process_push_one() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@@ -426,8 +462,8 @@ mod test {
// push it again
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
Err(CrdsGossipError::PushMessageOldVersion)
);
}
#[test]
@@ -690,6 +726,7 @@ mod test {
#[test]
fn test_process_prune() {
let mut crds = Crds::default();
let self_id = Pubkey::new_rand();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
@@ -707,7 +744,11 @@ mod test {
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
Ok(None)
);
push.process_prune_msg(&peer.label().pubkey(), &[new_msg.label().pubkey()]);
push.process_prune_msg(
&self_id,
&peer.label().pubkey(),
&[new_msg.label().pubkey()],
);
assert_eq!(push.new_push_messages(&crds, 0), expected);
}
#[test]
@@ -749,9 +790,9 @@ mod test {
assert_eq!(crds.lookup(&label), Some(&value));
// push it again
assert_eq!(
assert_matches!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
Err(CrdsGossipError::PushMessageOldVersion)
);
// purge the old pushed

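`new_push_messages` stops sampling a random fanout per call and instead derives a start index from the origin's pubkey, so every node pushes a given origin's messages to the same slice of its active set; as the comment in the hunk says, that stability is what lets peers converge on a spanning tree (MST) per origin. A sketch of the index selection:

// Derive the slice of the active set from the origin itself, not from
// a fresh shuffle, so repeated pushes for one origin take stable paths.
fn fanout_targets(origin_first_byte: u8, active_set_len: usize, push_fanout: usize) -> Vec<usize> {
    let start = origin_first_byte as usize;
    let max = push_fanout.min(active_set_len);
    (start..start + max).map(|i| i % active_set_len).collect()
}

fn main() {
    // Same origin -> same targets, whenever the push happens.
    assert_eq!(fanout_targets(7, 10, 3), vec![7, 8, 9]);
    assert_eq!(fanout_targets(9, 10, 3), vec![9, 0, 1]); // wraps around
    assert!(fanout_targets(7, 0, 3).is_empty()); // empty active set is safe
}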
View File

@@ -75,6 +75,7 @@ pub enum CrdsData {
SnapshotHashes(SnapshotHash),
AccountsHashes(SnapshotHash),
EpochSlots(EpochSlotsIndex, EpochSlots),
Version(Version),
}
impl Sanitize for CrdsData {
@@ -101,6 +102,7 @@ impl Sanitize for CrdsData {
}
val.sanitize()
}
CrdsData::Version(version) => version.sanitize(),
}
}
}
@@ -206,6 +208,33 @@ impl Vote {
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Version {
pub from: Pubkey,
pub wallclock: u64,
pub version: solana_version::Version,
}
impl Sanitize for Version {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
self.from.sanitize()?;
self.version.sanitize()
}
}
impl Version {
pub fn new(from: Pubkey) -> Self {
Self {
from,
wallclock: timestamp(),
version: solana_version::Version::default(),
}
}
}
/// Type of the replicated value
/// These are labels for values in a record that is associated with `Pubkey`
#[derive(PartialEq, Hash, Eq, Clone, Debug)]
@@ -216,6 +245,7 @@ pub enum CrdsValueLabel {
SnapshotHashes(Pubkey),
EpochSlots(EpochSlotsIndex, Pubkey),
AccountsHashes(Pubkey),
Version(Pubkey),
}
impl fmt::Display for CrdsValueLabel {
@@ -227,6 +257,7 @@ impl fmt::Display for CrdsValueLabel {
CrdsValueLabel::SnapshotHashes(_) => write!(f, "SnapshotHash({})", self.pubkey()),
CrdsValueLabel::EpochSlots(ix, _) => write!(f, "EpochSlots({}, {})", ix, self.pubkey()),
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
CrdsValueLabel::Version(_) => write!(f, "Version({})", self.pubkey()),
}
}
}
@@ -240,6 +271,7 @@ impl CrdsValueLabel {
CrdsValueLabel::SnapshotHashes(p) => *p,
CrdsValueLabel::EpochSlots(_, p) => *p,
CrdsValueLabel::AccountsHashes(p) => *p,
CrdsValueLabel::Version(p) => *p,
}
}
}
@@ -257,7 +289,7 @@ impl CrdsValue {
value.sign(keypair);
value
}
/// Totally unsecure unverfiable wallclock of the node that generated this message
/// Totally unsecure unverifiable wallclock of the node that generated this message
/// Latest wallclock is always picked.
/// This is used to time out push messages.
pub fn wallclock(&self) -> u64 {
@@ -268,6 +300,7 @@ impl CrdsValue {
CrdsData::SnapshotHashes(hash) => hash.wallclock,
CrdsData::AccountsHashes(hash) => hash.wallclock,
CrdsData::EpochSlots(_, p) => p.wallclock,
CrdsData::Version(version) => version.wallclock,
}
}
pub fn pubkey(&self) -> Pubkey {
@@ -278,6 +311,7 @@ impl CrdsValue {
CrdsData::SnapshotHashes(hash) => hash.from,
CrdsData::AccountsHashes(hash) => hash.from,
CrdsData::EpochSlots(_, p) => p.from,
CrdsData::Version(version) => version.from,
}
}
pub fn label(&self) -> CrdsValueLabel {
@@ -288,6 +322,7 @@ impl CrdsValue {
CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()),
CrdsData::AccountsHashes(_) => CrdsValueLabel::AccountsHashes(self.pubkey()),
CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
CrdsData::Version(_) => CrdsValueLabel::Version(self.pubkey()),
}
}
pub fn contact_info(&self) -> Option<&ContactInfo> {
@@ -338,6 +373,13 @@ impl CrdsValue {
}
}
pub fn version(&self) -> Option<&Version> {
match &self.data {
CrdsData::Version(version) => Some(version),
_ => None,
}
}
/// Return all the possible labels for a record identified by Pubkey.
pub fn record_labels(key: &Pubkey) -> Vec<CrdsValueLabel> {
let mut labels = vec![
@@ -345,6 +387,7 @@ impl CrdsValue {
CrdsValueLabel::LowestSlot(*key),
CrdsValueLabel::SnapshotHashes(*key),
CrdsValueLabel::AccountsHashes(*key),
CrdsValueLabel::Version(*key),
];
labels.extend((0..MAX_VOTES).map(|ix| CrdsValueLabel::Vote(ix, *key)));
labels.extend((0..MAX_EPOCH_SLOTS).map(|ix| CrdsValueLabel::EpochSlots(ix, *key)));
@@ -395,7 +438,7 @@ mod test {
#[test]
fn test_labels() {
let mut hits = [false; 4 + MAX_VOTES as usize + MAX_EPOCH_SLOTS as usize];
let mut hits = [false; 5 + MAX_VOTES as usize + MAX_EPOCH_SLOTS as usize];
// this method should cover all the possible labels
for v in &CrdsValue::record_labels(&Pubkey::default()) {
match v {
@@ -403,9 +446,10 @@ mod test {
CrdsValueLabel::LowestSlot(_) => hits[1] = true,
CrdsValueLabel::SnapshotHashes(_) => hits[2] = true,
CrdsValueLabel::AccountsHashes(_) => hits[3] = true,
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 4] = true,
CrdsValueLabel::Version(_) => hits[4] = true,
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 5] = true,
CrdsValueLabel::EpochSlots(ix, _) => {
hits[*ix as usize + MAX_VOTES as usize + 4] = true
hits[*ix as usize + MAX_VOTES as usize + 5] = true
}
}
}


@@ -75,6 +75,7 @@ pub fn discover_cluster(
None,
None,
None,
0,
)
}
@@ -85,9 +86,11 @@ pub fn discover(
find_node_by_pubkey: Option<Pubkey>,
find_node_by_gossip_addr: Option<&SocketAddr>,
my_gossip_addr: Option<&SocketAddr>,
my_shred_version: u16,
) -> std::io::Result<(Vec<ContactInfo>, Vec<ContactInfo>)> {
let exit = Arc::new(AtomicBool::new(false));
let (gossip_service, ip_echo, spy_ref) = make_gossip_node(entrypoint, &exit, my_gossip_addr);
let (gossip_service, ip_echo, spy_ref) =
make_gossip_node(entrypoint, &exit, my_gossip_addr, my_shred_version);
let id = spy_ref.id();
info!("Entrypoint: {:?}", entrypoint);
@@ -245,12 +248,13 @@ fn make_gossip_node(
entrypoint: Option<&SocketAddr>,
exit: &Arc<AtomicBool>,
gossip_addr: Option<&SocketAddr>,
shred_version: u16,
) -> (GossipService, Option<TcpListener>, Arc<ClusterInfo>) {
let keypair = Arc::new(Keypair::new());
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr)
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr, shred_version)
} else {
ClusterInfo::spy_node(&keypair.pubkey())
ClusterInfo::spy_node(&keypair.pubkey(), shred_version)
};
let cluster_info = ClusterInfo::new(node, keypair);
if let Some(entrypoint) = entrypoint {
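A hedged usage sketch of the widened `discover` signature; the leading parameters (entrypoint, node count, timeout) are assumed from context since the hunk above elides them, and `0` for `my_shred_version` matches the "accept anything" default that `discover_cluster` now passes:

// Sketch: spy on a cluster without filtering on shred version.
let (all_peers, _archivers) = discover(
    Some(&entrypoint_addr), // gossip entrypoint to contact
    Some(1),                // assumed: number of nodes to wait for
    Some(30),               // assumed: timeout in seconds
    None,                   // find_node_by_pubkey
    None,                   // find_node_by_gossip_addr
    None,                   // my_gossip_addr: bind an ephemeral spy node
    0,                      // my_shred_version: 0 matches any cluster
)?;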


@@ -3,8 +3,7 @@
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use solana_sdk::clock::{Slot, DEFAULT_TICKS_PER_SLOT, TICKS_PER_DAY};
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
@@ -30,9 +29,12 @@ pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
// Delay between purges to cooperate with other blockstore users
pub const DEFAULT_DELAY_BETWEEN_PURGES: Duration = Duration::from_millis(500);
// Compacting at a slower interval than purging helps keep IOPS down.
// Once a day should be ample
const DEFAULT_COMPACTION_SLOT_INTERVAL: u64 = TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;
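As a sanity check on that constant, a back-of-envelope sketch assuming the stock v1.1 clock values (DEFAULT_TICKS_PER_SECOND = 160, DEFAULT_TICKS_PER_SLOT = 64):

// TICKS_PER_DAY = 160 ticks/sec * 86_400 sec = 13_824_000 ticks
// interval      = 13_824_000 / 64 ticks/slot = 216_000 slots
// 216_000 slots * 400 ms/slot ~= 86_400 sec, i.e. one compaction per day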
pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
@@ -51,6 +53,8 @@ impl LedgerCleanupService {
);
let exit = exit.clone();
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
let t_cleanup = Builder::new()
.name("solana-ledger-cleanup".to_string())
.spawn(move || loop {
@@ -63,6 +67,9 @@ impl LedgerCleanupService {
max_ledger_slots,
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
Some(DEFAULT_DELAY_BETWEEN_PURGES),
&mut last_compaction_slot,
DEFAULT_COMPACTION_SLOT_INTERVAL,
) {
match e {
RecvTimeoutError::Disconnected => break,
@@ -78,8 +85,8 @@ impl LedgerCleanupService {
blockstore: &Arc<Blockstore>,
root: Slot,
max_ledger_shreds: u64,
) -> (u64, Slot, Slot) {
let mut shreds = Vec::new();
) -> (bool, Slot, Slot, u64) {
let mut total_slots = Vec::new();
let mut iterate_time = Measure::start("iterate_time");
let mut total_shreds = 0;
let mut first_slot = 0;
@@ -90,33 +97,43 @@ impl LedgerCleanupService {
}
// Not exact since non-full slots will have holes
total_shreds += meta.received;
shreds.push((slot, meta.received));
total_slots.push((slot, meta.received));
if slot > root {
break;
}
}
iterate_time.stop();
info!(
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
max_ledger_shreds,
shreds.len(),
"first_slot={} total_slots={} total_shreds={} max_ledger_shreds={}, {}",
first_slot,
total_slots.len(),
total_shreds,
max_ledger_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
return (0, 0, 0);
return (false, 0, 0, total_shreds);
}
let mut cur_shreds = 0;
let mut lowest_slot_to_clean = shreds[0].0;
for (slot, num_shreds) in shreds.iter().rev() {
cur_shreds += *num_shreds as u64;
if cur_shreds > max_ledger_shreds {
lowest_slot_to_clean = *slot;
let mut num_shreds_to_clean = 0;
let mut lowest_cleanup_slot = total_slots[0].0;
for (slot, num_shreds) in total_slots.iter().rev() {
num_shreds_to_clean += *num_shreds as u64;
if num_shreds_to_clean > max_ledger_shreds {
lowest_cleanup_slot = *slot;
break;
}
}
(cur_shreds, lowest_slot_to_clean, first_slot)
(true, first_slot, lowest_cleanup_slot, total_shreds)
}
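The scan above walks the slot list newest-to-oldest, accumulating shred counts until the budget is exceeded; everything at or below the returned slot becomes purgeable. The same idea as a self-contained sketch (input is non-empty here, as the early return above guarantees):

// Sketch: oldest slot to purge so the retained tail holds at most `max` shreds.
fn cutoff(slots: &[(u64, u64)], max: u64) -> u64 {
    let mut kept = 0;
    for &(slot, count) in slots.iter().rev() {
        kept += count;
        if kept > max {
            return slot; // purge this slot and everything older
        }
    }
    slots[0].0 // budget never exceeded: nothing older than the first slot
}
assert_eq!(cutoff(&[(1, 10), (2, 10), (3, 10)], 15), 2);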
fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
Ok(root)
}
fn cleanup_ledger(
@@ -125,68 +142,103 @@ impl LedgerCleanupService {
max_ledger_shreds: u64,
last_purge_slot: &mut u64,
purge_interval: u64,
delay_between_purges: Option<Duration>,
last_compaction_slot: &mut u64,
compaction_interval: u64,
) -> Result<(), RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
let root = Self::receive_new_roots(new_root_receiver)?;
if root - *last_purge_slot <= purge_interval {
return Ok(());
}
if root - *last_purge_slot > purge_interval {
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
);
*last_purge_slot = root;
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: last_root={}, last_purge_slot={}, purge_interval={}, last_compaction_slot={}, disk_utilization={:?}",
root, last_purge_slot, purge_interval, last_compaction_slot, disk_utilization_pre
);
*last_purge_slot = root;
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) =
Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);
if num_shreds_to_clean > 0 {
debug!(
"cleaning up to: {} shreds: {} first: {}",
lowest_slot_to_clean, num_shreds_to_clean, first_slot
);
loop {
let current_lowest =
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
slot_update_time.stop();
let mut clean_time = Measure::start("ledger_clean");
blockstore.purge_slots(first_slot, Some(current_lowest));
clean_time.stop();
debug!(
"ledger purge {} -> {}: {} {}",
first_slot, current_lowest, slot_update_time, clean_time
);
first_slot += DEFAULT_PURGE_BATCH_SIZE;
if current_lowest == lowest_slot_to_clean {
break;
}
thread::sleep(Duration::from_millis(500));
}
if slots_to_clean {
let mut compact_first_slot = std::u64::MAX;
if lowest_cleanup_slot.saturating_sub(*last_compaction_slot) > compaction_interval {
compact_first_slot = *last_compaction_slot;
*last_compaction_slot = lowest_cleanup_slot;
}
let disk_utilization_post = blockstore.storage_size();
let purge_complete = Arc::new(AtomicBool::new(false));
let blockstore = blockstore.clone();
let purge_complete1 = purge_complete.clone();
let _t_purge = Builder::new()
.name("solana-ledger-purge".to_string())
.spawn(move || {
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
slot_update_time.stop();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
info!(
"purging data from slots {} to {}",
purge_first_slot, lowest_cleanup_slot
);
let mut purge_time = Measure::start("purge_slots_with_delay");
blockstore.purge_slots_with_delay(
purge_first_slot,
lowest_cleanup_slot,
delay_between_purges,
);
purge_time.stop();
info!("{}", purge_time);
if compact_first_slot < lowest_cleanup_slot {
info!(
"compacting data from slots {} to {}",
compact_first_slot, lowest_cleanup_slot
);
if let Err(err) =
blockstore.compact_storage(compact_first_slot, lowest_cleanup_slot)
{
// This error is not fatal, but it may indicate an internal inconsistency worth investigating.
error!(
"Error: {:?}; Couldn't compact storage from {:?} to {:?}",
err, compact_first_slot, lowest_cleanup_slot
);
}
}
purge_complete1.store(true, Ordering::Relaxed);
})
.unwrap();
// Keep pulling roots off `new_root_receiver` while purging to avoid channel buildup
while !purge_complete.load(Ordering::Relaxed) {
if let Err(err) = Self::receive_new_roots(new_root_receiver) {
debug!("receive_new_roots: {}", err);
}
thread::sleep(Duration::from_secs(1));
}
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post, total_shreds);
Ok(())
}
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
fn report_disk_metrics(
pre: BlockstoreResult<u64>,
post: BlockstoreResult<u64>,
total_shreds: u64,
) {
if let (Ok(pre), Ok(post)) = (pre, post) {
datapoint_debug!(
datapoint_info!(
"ledger_disk_utilization",
("disk_utilization_pre", pre as i64, i64),
("disk_utilization_post", post as i64, i64),
("disk_utilization_delta", (pre as i64 - post as i64), i64)
("disk_utilization_delta", (pre as i64 - post as i64), i64),
("total_shreds", total_shreds, i64),
);
}
}
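The purge now runs on a helper thread so the service thread can keep draining root notifications. The coordination pattern, reduced to a sketch (assumes an mpsc `receiver` of root slots and the usual `std::sync`/`std::thread`/`std::time` imports):

// Sketch: run blocking work off-thread, flag completion, keep the channel drained.
let done = Arc::new(AtomicBool::new(false));
let done_flag = done.clone();
let worker = std::thread::spawn(move || {
    // ... long-running purge/compaction goes here ...
    done_flag.store(true, Ordering::Relaxed);
});
while !done.load(Ordering::Relaxed) {
    while let Ok(_stale_root) = receiver.try_recv() {} // avoid channel buildup
    std::thread::sleep(Duration::from_secs(1));
}
worker.join().unwrap();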
@@ -214,9 +266,19 @@ mod tests {
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
.unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
5,
&mut last_purge_slot,
10,
None,
&mut last_compaction_slot,
10,
)
.unwrap();
//check that 0-40 don't exist
blockstore
@@ -246,6 +308,7 @@ mod tests {
info!("{}", first_insert);
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
let mut slot = initial_slots;
let mut num_slots = 6;
for _ in 0..5 {
@@ -269,6 +332,9 @@ mod tests {
initial_slots,
&mut last_purge_slot,
10,
None,
&mut last_compaction_slot,
10,
)
.unwrap();
time.stop();
@@ -304,12 +370,16 @@ mod tests {
// send signal to cleanup slots
let (sender, receiver) = channel();
sender.send(n).unwrap();
let mut next_purge_batch = 0;
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
max_ledger_shreds,
&mut next_purge_batch,
&mut last_purge_slot,
10,
None,
&mut last_compaction_slot,
10,
)
.unwrap();


@@ -30,19 +30,24 @@ pub mod gen_keys;
pub mod gossip_service;
pub mod ledger_cleanup_service;
pub mod local_vote_signer_service;
pub mod non_circulating_supply;
pub mod poh_recorder;
pub mod poh_service;
pub mod progress_map;
pub mod repair_response;
pub mod repair_service;
pub mod replay_stage;
mod result;
pub mod retransmit_stage;
pub mod rewards_recorder_service;
pub mod rpc;
pub mod rpc_error;
pub mod rpc_health;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;
pub mod rpc_subscriptions;
pub mod send_transaction_service;
pub mod serve_repair;
pub mod serve_repair_service;
pub mod sigverify;


@@ -0,0 +1,195 @@
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
use solana_stake_program::stake_state::StakeState;
use std::{collections::HashSet, sync::Arc};
pub struct NonCirculatingSupply {
pub lamports: u64,
pub accounts: Vec<Pubkey>,
}
pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSupply {
debug!("Updating Bank supply, epoch: {}", bank.epoch());
let mut non_circulating_accounts_set: HashSet<Pubkey> = HashSet::new();
for key in non_circulating_accounts() {
non_circulating_accounts_set.insert(key);
}
let withdraw_authority_list = withdraw_authority();
let clock = bank.clock();
let stake_accounts = bank.get_program_accounts(Some(&solana_stake_program::id()));
for (pubkey, account) in stake_accounts.iter() {
let stake_account = StakeState::from(&account).unwrap_or_default();
match stake_account {
StakeState::Initialized(meta) => {
if meta.lockup.is_in_force(&clock, &HashSet::default())
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
{
non_circulating_accounts_set.insert(*pubkey);
}
}
StakeState::Stake(meta, _stake) => {
if meta.lockup.is_in_force(&clock, &HashSet::default())
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
{
non_circulating_accounts_set.insert(*pubkey);
}
}
_ => {}
}
}
let lamports = non_circulating_accounts_set
.iter()
.fold(0, |acc, pubkey| acc + bank.get_balance(&pubkey));
NonCirculatingSupply {
lamports,
accounts: non_circulating_accounts_set.into_iter().collect(),
}
}
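A hedged sketch of how a caller, e.g. a supply-reporting RPC handler, might combine this with bank capitalization (the derivation is illustrative, not the actual RPC code):

// Sketch: derive circulating supply from total capitalization.
let non_circulating = calculate_non_circulating_supply(&bank);
let total = bank.capitalization();
let circulating = total - non_circulating.lamports;
println!(
    "total={} circulating={} non_circulating={} across {} accounts",
    total,
    circulating,
    non_circulating.lamports,
    non_circulating.accounts.len()
);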
// Mainnet-beta accounts that should be considered non-circulating
solana_sdk::pubkeys!(
non_circulating_accounts,
[
"9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA",
"GK2zqSsXLA2rwVZk347RYhh6jJpRsCA69FjLW93ZGi3B",
"CWeRmXme7LmbaUWTZWFLt6FMnpzLCHaQLuR2TdgFn4Lq",
"HCV5dGFJXRrJ3jhDYA4DCeb9TEDTwGGYXtT3wHksu2Zr",
"14FUT96s9swbmH7ZjpDvfEDywnAYy9zaNhv4xvezySGu",
"HbZ5FfmKWNHC7uwk6TF1hVi6TCs7dtYfdjEcuPGgzFAg",
"C7C8odR8oashR5Feyrq2tJKaXL18id1dSj2zbkDGL2C2",
"Eyr9P5XsjK2NUKNCnfu39eqpGoiLFgVAv1LSQgMZCwiQ",
"DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ",
"CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S",
"7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2",
"GdnSyH3YtwcxFvQrVVJMm1JhTS4QVX7MFsX56uJLUfiZ",
"Mc5XB47H3DKJHym5RLa9mPzWv5snERsF3KNv5AauXK8",
"7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri",
"AG3m2bAibcY8raMt4oXEGqRHwX4FWKPPJVjZxn1LySDX",
"5XdtyEDREHJXXW1CTtCsVjJRjBapAwK78ZquzvnNVRrV",
"6yKHERk8rsbmJxvMpPuwPs1ct3hRiP7xaJF2tvnGU6nK",
"CHmdL15akDcJgBkY6BP3hzs98Dqr6wbdDC5p8odvtSbq",
"FR84wZQy3Y3j2gWz6pgETUiUoJtreMEuWfbg6573UCj9",
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
"3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
"GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
]
);
// Withdraw authority for autostaked accounts on mainnet-beta
solana_sdk::pubkeys!(
withdraw_authority,
[
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
"3FFaheyqtyAXZSYxDzsr5CVKvJuvZD1WE1VEsBtDbRqB",
"FdGYQdiRky8NZzN9wZtczTBcWLYYRXrJ3LMDhqDPn5rM",
]
);
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::{
account::Account, epoch_schedule::EpochSchedule, genesis_config::GenesisConfig,
};
use solana_stake_program::stake_state::{Authorized, Lockup, Meta, StakeState};
use std::{collections::BTreeMap, sync::Arc};
fn new_from_parent(parent: &Arc<Bank>) -> Bank {
Bank::new_from_parent(parent, &Pubkey::default(), parent.slot() + 1)
}
#[test]
fn test_calculate_non_circulating_supply() {
let mut accounts: BTreeMap<Pubkey, Account> = BTreeMap::new();
let balance = 10;
let num_genesis_accounts = 10;
for _ in 0..num_genesis_accounts {
accounts.insert(
Pubkey::new_rand(),
Account::new(balance, 0, &Pubkey::default()),
);
}
let non_circulating_accounts = non_circulating_accounts();
let num_non_circulating_accounts = non_circulating_accounts.len() as u64;
for key in non_circulating_accounts.clone() {
accounts.insert(key, Account::new(balance, 0, &Pubkey::default()));
}
let num_stake_accounts = 3;
for _ in 0..num_stake_accounts {
let pubkey = Pubkey::new_rand();
let meta = Meta {
authorized: Authorized::auto(&pubkey),
lockup: Lockup {
epoch: 1,
..Lockup::default()
},
..Meta::default()
};
let stake_account = Account::new_data_with_space(
balance,
&StakeState::Initialized(meta),
std::mem::size_of::<StakeState>(),
&solana_stake_program::id(),
)
.unwrap();
accounts.insert(pubkey, stake_account);
}
let slots_per_epoch = 32;
let genesis_config = GenesisConfig {
accounts,
epoch_schedule: EpochSchedule::new(slots_per_epoch),
..GenesisConfig::default()
};
let mut bank = Arc::new(Bank::new(&genesis_config));
assert_eq!(
bank.capitalization(),
(num_genesis_accounts + num_non_circulating_accounts + num_stake_accounts) * balance
);
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
(num_non_circulating_accounts + num_stake_accounts) * balance
);
assert_eq!(
non_circulating_supply.accounts.len(),
num_non_circulating_accounts as usize + num_stake_accounts as usize
);
bank = Arc::new(new_from_parent(&bank));
let new_balance = 11;
for key in non_circulating_accounts {
bank.store_account(&key, &Account::new(new_balance, 0, &Pubkey::default()));
}
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
(num_non_circulating_accounts * new_balance) + (num_stake_accounts * balance)
);
assert_eq!(
non_circulating_supply.accounts.len(),
num_non_circulating_accounts as usize + num_stake_accounts as usize
);
// Advance bank an epoch, which should unlock stakes
for _ in 0..slots_per_epoch {
bank = Arc::new(new_from_parent(&bank));
}
assert_eq!(bank.epoch(), 1);
let non_circulating_supply = calculate_non_circulating_supply(&bank);
assert_eq!(
non_circulating_supply.lamports,
num_non_circulating_accounts * new_balance
);
assert_eq!(
non_circulating_supply.accounts.len(),
num_non_circulating_accounts as usize
);
}
}

core/src/repair_response.rs (new file, 129 lines)

@@ -0,0 +1,129 @@
use solana_ledger::{
blockstore::Blockstore,
shred::{Nonce, Shred, SIZE_OF_NONCE},
};
use solana_perf::packet::limited_deserialize;
use solana_sdk::{clock::Slot, packet::Packet};
use std::{io, net::SocketAddr};
pub fn repair_response_packet(
blockstore: &Blockstore,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
nonce: Option<Nonce>,
) -> Option<Packet> {
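// A nonce must be present exactly when the slot is past the nonce-unlock
// boundary; any mismatched request/response pairing is dropped here.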
if Shred::is_nonce_unlocked(slot) && nonce.is_none()
|| !Shred::is_nonce_unlocked(slot) && nonce.is_some()
{
return None;
}
let shred = blockstore
.get_data_shred(slot, shred_index)
.expect("Blockstore could not get data shred");
shred.map(|shred| repair_response_packet_from_shred(slot, shred, dest, nonce))
}
pub fn repair_response_packet_from_shred(
slot: Slot,
shred: Vec<u8>,
dest: &SocketAddr,
nonce: Option<Nonce>,
) -> Packet {
let size_of_nonce = {
if Shred::is_nonce_unlocked(slot) {
assert!(nonce.is_some());
SIZE_OF_NONCE
} else {
assert!(nonce.is_none());
0
}
};
let mut packet = Packet::default();
packet.meta.size = shred.len() + size_of_nonce;
packet.meta.set_addr(dest);
packet.data[..shred.len()].copy_from_slice(&shred);
let mut wr = io::Cursor::new(&mut packet.data[shred.len()..]);
if let Some(nonce) = nonce {
bincode::serialize_into(&mut wr, &nonce).expect("Buffer not large enough to fit nonce");
}
packet
}
pub fn nonce(buf: &[u8]) -> Option<Nonce> {
if buf.len() < SIZE_OF_NONCE {
None
} else {
limited_deserialize(&buf[buf.len() - SIZE_OF_NONCE..]).ok()
}
}
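A small usage sketch; it assumes `Nonce` is a fixed-width integer so the value always occupies the trailing `SIZE_OF_NONCE` bytes of the datagram, and `packet` stands in for a received repair response:

// Sketch: recover the nonce a responder appended to a repair packet.
let buf = &packet.data[..packet.meta.size];
match nonce(buf) {
    Some(n) => println!("repair response carried nonce {}", n),
    None => println!("packet too short to carry a nonce"),
}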
#[cfg(test)]
mod test {
use super::*;
use solana_ledger::{
shred::{Shred, Shredder, UNLOCK_NONCE_SLOT},
sigverify_shreds::verify_shred_cpu,
};
use solana_sdk::signature::{Keypair, Signer};
use std::{
collections::HashMap,
net::{IpAddr, Ipv4Addr},
};
fn run_test_sigverify_shred_cpu_repair(slot: Slot) {
solana_logger::setup();
let mut shred = Shred::new_from_data(
slot,
0xc0de,
0xdead,
Some(&[1, 2, 3, 4]),
true,
true,
0,
0,
0xc0de,
);
assert_eq!(shred.slot(), slot);
let keypair = Keypair::new();
Shredder::sign_shred(&keypair, &mut shred);
trace!("signature {}", shred.common_header.signature);
let nonce = if Shred::is_nonce_unlocked(slot) {
Some(9)
} else {
None
};
let mut packet = repair_response_packet_from_shred(
slot,
shred.payload,
&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080),
nonce,
);
packet.meta.repair = true;
let leader_slots = [(slot, keypair.pubkey().to_bytes())]
.iter()
.cloned()
.collect();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, Some(1));
let wrong_keypair = Keypair::new();
let leader_slots = [(slot, wrong_keypair.pubkey().to_bytes())]
.iter()
.cloned()
.collect();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, Some(0));
let leader_slots = HashMap::new();
let rv = verify_shred_cpu(&packet, &leader_slots);
assert_eq!(rv, None);
}
#[test]
fn test_sigverify_shred_cpu_repair() {
run_test_sigverify_shred_cpu_repair(UNLOCK_NONCE_SLOT);
run_test_sigverify_shred_cpu_repair(UNLOCK_NONCE_SLOT + 1);
}
}


@@ -3,21 +3,17 @@
use crate::{
cluster_info::ClusterInfo,
cluster_slots::ClusterSlots,
consensus::VOTE_THRESHOLD_SIZE,
result::Result,
serve_repair::{RepairType, ServeRepair},
};
use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender};
use solana_ledger::{
bank_forks::BankForks,
blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta},
};
use solana_runtime::bank::Bank;
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp};
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
collections::HashMap,
iter::Iterator,
net::SocketAddr,
net::UdpSocket,
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
@@ -26,9 +22,6 @@ use std::{
time::{Duration, Instant},
};
pub type DuplicateSlotsResetSender = CrossbeamSender<Slot>;
pub type DuplicateSlotsResetReceiver = CrossbeamReceiver<Slot>;
#[derive(Default)]
pub struct RepairStatsGroup {
pub count: u64,
@@ -52,8 +45,6 @@ pub struct RepairStats {
}
pub const MAX_REPAIR_LENGTH: usize = 512;
pub const MAX_REPAIR_PER_DUPLICATE: usize = 20;
pub const MAX_DUPLICATE_WAIT_MS: usize = 10_000;
pub const REPAIR_MS: u64 = 100;
pub const MAX_ORPHANS: usize = 5;
@@ -63,7 +54,6 @@ pub enum RepairStrategy {
bank_forks: Arc<RwLock<BankForks>>,
completed_slots_receiver: CompletedSlotsReceiver,
epoch_schedule: EpochSchedule,
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
},
}
@@ -81,12 +71,6 @@ impl Default for RepairSlotRange {
}
}
#[derive(Default, Clone)]
pub struct DuplicateSlotRepairStatus {
start: u64,
repair_addr: Option<SocketAddr>,
}
pub struct RepairService {
t_repair: JoinHandle<()>,
}
@@ -107,7 +91,7 @@ impl RepairService {
&blockstore,
&exit,
&repair_socket,
&cluster_info,
cluster_info,
repair_strategy,
&cluster_slots,
)
@@ -121,25 +105,23 @@ impl RepairService {
blockstore: &Blockstore,
exit: &AtomicBool,
repair_socket: &UdpSocket,
cluster_info: &Arc<ClusterInfo>,
cluster_info: Arc<ClusterInfo>,
repair_strategy: RepairStrategy,
cluster_slots: &Arc<ClusterSlots>,
) {
let serve_repair = ServeRepair::new(cluster_info.clone());
let id = cluster_info.id();
if let RepairStrategy::RepairAll { .. } = repair_strategy {
Self::initialize_lowest_slot(id, blockstore, cluster_info);
Self::initialize_lowest_slot(id, blockstore, &cluster_info);
}
let mut repair_stats = RepairStats::default();
let mut last_stats = Instant::now();
let mut duplicate_slot_repair_statuses = HashMap::new();
if let RepairStrategy::RepairAll {
ref completed_slots_receiver,
..
} = repair_strategy
{
Self::initialize_epoch_slots(blockstore, cluster_info, completed_slots_receiver);
Self::initialize_epoch_slots(blockstore, &cluster_info, completed_slots_receiver);
}
loop {
if exit.load(Ordering::Relaxed) {
@@ -160,73 +142,34 @@ impl RepairService {
RepairStrategy::RepairAll {
ref completed_slots_receiver,
ref bank_forks,
ref duplicate_slots_reset_sender,
..
} => {
let root_bank = bank_forks.read().unwrap().root_bank().clone();
let new_root = root_bank.slot();
let new_root = blockstore.last_root();
let lowest_slot = blockstore.lowest_slot();
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
Self::update_completed_slots(completed_slots_receiver, &cluster_info);
cluster_slots.update(new_root, cluster_info, bank_forks);
let new_duplicate_slots = Self::find_new_duplicate_slots(
&duplicate_slot_repair_statuses,
blockstore,
cluster_slots,
&root_bank,
);
Self::process_new_duplicate_slots(
&new_duplicate_slots,
&mut duplicate_slot_repair_statuses,
cluster_slots,
&root_bank,
blockstore,
&serve_repair,
&duplicate_slots_reset_sender,
);
Self::generate_and_send_duplicate_repairs(
&mut duplicate_slot_repair_statuses,
cluster_slots,
blockstore,
&serve_repair,
&mut repair_stats,
&repair_socket,
);
Self::generate_repairs(
blockstore,
root_bank.slot(),
MAX_REPAIR_LENGTH,
&duplicate_slot_repair_statuses,
)
cluster_slots.update(new_root, &cluster_info, bank_forks);
Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH)
}
}
};
if let Ok(repairs) = repairs {
let mut cache = HashMap::new();
let reqs: Vec<((SocketAddr, Vec<u8>), RepairType)> = repairs
.into_iter()
.filter_map(|repair_request| {
serve_repair
.repair_request(
&cluster_slots,
&repair_request,
&mut cache,
&mut repair_stats,
)
.map(|result| (result, repair_request))
.ok()
})
.collect();
for ((to, req), _) in reqs {
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
info!("{} repair req send_to({}) error {:?}", id, to, e);
0
});
}
repairs.into_iter().for_each(|repair_request| {
if let Ok((to, req)) = serve_repair.repair_request(
&cluster_slots,
repair_request,
&mut cache,
&mut repair_stats,
) {
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
info!("{} repair req send_to({}) error {:?}", id, to, e);
0
});
}
});
}
if last_stats.elapsed().as_secs() > 1 {
let repair_total = repair_stats.shred.count
+ repair_stats.highest_shred.count
@@ -286,216 +229,19 @@ impl RepairService {
blockstore: &Blockstore,
root: Slot,
max_repairs: usize,
duplicate_slot_repair_statuses: &HashMap<Slot, DuplicateSlotRepairStatus>,
) -> Result<Vec<RepairType>> {
// Slot height and shred indexes for shreds we want to repair
let mut repairs: Vec<RepairType> = vec![];
Self::generate_repairs_for_fork(
blockstore,
&mut repairs,
max_repairs,
root,
duplicate_slot_repair_statuses,
);
Self::generate_repairs_for_fork(blockstore, &mut repairs, max_repairs, root);
// TODO: Incorporate gossip to determine priorities for repair?
// Try to resolve orphans in blockstore
let orphans = blockstore.orphans_iterator(root + 1).unwrap();
Self::generate_repairs_for_orphans(orphans, &mut repairs);
Ok(repairs)
}
fn generate_duplicate_repairs_for_slot(
blockstore: &Blockstore,
slot: Slot,
) -> Option<Vec<RepairType>> {
if let Some(slot_meta) = blockstore.meta(slot).unwrap() {
if slot_meta.is_full() {
// If the slot is full, no further need to repair this slot
None
} else {
Some(Self::generate_repairs_for_slot(
blockstore,
slot,
&slot_meta,
MAX_REPAIR_PER_DUPLICATE,
))
}
} else {
error!("Slot meta for duplicate slot does not exist, cannot generate repairs");
// Filter out this slot from the set of duplicates to be repaired as
// the SlotMeta has to exist for duplicates to be generated
None
}
}
fn generate_and_send_duplicate_repairs(
duplicate_slot_repair_statuses: &mut HashMap<Slot, DuplicateSlotRepairStatus>,
cluster_slots: &ClusterSlots,
blockstore: &Blockstore,
serve_repair: &ServeRepair,
repair_stats: &mut RepairStats,
repair_socket: &UdpSocket,
) {
duplicate_slot_repair_statuses.retain(|slot, status| {
Self::update_duplicate_slot_repair_addr(*slot, status, cluster_slots, serve_repair);
if let Some(repair_addr) = status.repair_addr {
let repairs = Self::generate_duplicate_repairs_for_slot(&blockstore, *slot);
if let Some(repairs) = repairs {
for repair_type in repairs {
if let Err(e) = Self::serialize_and_send_request(
&repair_type,
repair_socket,
&repair_addr,
serve_repair,
repair_stats,
) {
info!("repair req send_to({}) error {:?}", repair_addr, e);
}
}
true
} else {
false
}
} else {
true
}
})
}
fn serialize_and_send_request(
repair_type: &RepairType,
repair_socket: &UdpSocket,
to: &SocketAddr,
serve_repair: &ServeRepair,
repair_stats: &mut RepairStats,
) -> Result<()> {
let req = serve_repair.map_repair_request(&repair_type, repair_stats)?;
repair_socket.send_to(&req, to)?;
Ok(())
}
fn update_duplicate_slot_repair_addr(
slot: Slot,
status: &mut DuplicateSlotRepairStatus,
cluster_slots: &ClusterSlots,
serve_repair: &ServeRepair,
) {
let now = timestamp();
if status.repair_addr.is_none()
|| now.saturating_sub(status.start) >= MAX_DUPLICATE_WAIT_MS as u64
{
let repair_addr =
serve_repair.repair_request_duplicate_compute_best_peer(slot, cluster_slots);
status.repair_addr = repair_addr.ok();
status.start = timestamp();
}
}
fn process_new_duplicate_slots(
new_duplicate_slots: &[Slot],
duplicate_slot_repair_statuses: &mut HashMap<Slot, DuplicateSlotRepairStatus>,
cluster_slots: &ClusterSlots,
root_bank: &Bank,
blockstore: &Blockstore,
serve_repair: &ServeRepair,
duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
) {
for slot in new_duplicate_slots {
warn!(
"Cluster completed slot: {}, dumping our current version and repairing",
slot
);
// Clear the slot signatures from status cache for this slot
root_bank.clear_slot_signatures(*slot);
// Clear the accounts for this slot
root_bank.remove_unrooted_slot(*slot);
// Clear the slot-related data in blockstore. This will:
// 1) Clear old shreds allowing new ones to be inserted
// 2) Clear the "dead" flag allowing ReplayStage to start replaying
// this slot
blockstore.clear_unconfirmed_slot(*slot);
// Signal ReplayStage to clear its progress map so that a different
// version of this slot can be replayed
let _ = duplicate_slots_reset_sender.send(*slot);
// Mark this slot as special repair, try to download from single
// validator to avoid corruption
let repair_addr = serve_repair
.repair_request_duplicate_compute_best_peer(*slot, cluster_slots)
.ok();
let new_duplicate_slot_repair_status = DuplicateSlotRepairStatus {
start: timestamp(),
repair_addr,
};
duplicate_slot_repair_statuses.insert(*slot, new_duplicate_slot_repair_status);
}
}
fn find_new_duplicate_slots(
duplicate_slot_repair_statuses: &HashMap<Slot, DuplicateSlotRepairStatus>,
blockstore: &Blockstore,
cluster_slots: &ClusterSlots,
root_bank: &Bank,
) -> Vec<Slot> {
let dead_slots_iter = blockstore
.dead_slots_iterator(root_bank.slot() + 1)
.expect("Couldn't get dead slots iterator from blockstore");
dead_slots_iter
.filter_map(|dead_slot| {
if let Some(status) = duplicate_slot_repair_statuses.get(&dead_slot) {
// Newly repaired version of this slot has been marked dead again,
// time to purge again
warn!(
"Repaired version of slot {} most recently (but maybe not entirely)
from {:?} has failed again",
dead_slot, status.repair_addr
);
}
cluster_slots
.lookup(dead_slot)
.and_then(|completed_dead_slot_pubkeys| {
let epoch = root_bank.get_epoch_and_slot_index(dead_slot).0;
if let Some(epoch_stakes) = root_bank.epoch_stakes(epoch) {
let total_stake = epoch_stakes.total_stake();
let node_id_to_vote_accounts = epoch_stakes.node_id_to_vote_accounts();
let total_completed_slot_stake: u64 = completed_dead_slot_pubkeys
.read()
.unwrap()
.iter()
.map(|(node_key, _)| {
node_id_to_vote_accounts
.get(node_key)
.map(|v| v.total_stake)
.unwrap_or(0)
})
.sum();
if total_completed_slot_stake as f64 / total_stake as f64
> VOTE_THRESHOLD_SIZE
{
Some(dead_slot)
} else {
None
}
} else {
error!(
"Dead slot {} is too far ahead of root bank {}",
dead_slot,
root_bank.slot()
);
None
}
})
})
.collect()
}
fn generate_repairs_for_slot(
blockstore: &Blockstore,
slot: Slot,
@@ -533,15 +279,10 @@ impl RepairService {
repairs: &mut Vec<RepairType>,
max_repairs: usize,
slot: Slot,
duplicate_slot_repair_statuses: &HashMap<Slot, DuplicateSlotRepairStatus>,
) {
let mut pending_slots = vec![slot];
while repairs.len() < max_repairs && !pending_slots.is_empty() {
let slot = pending_slots.pop().unwrap();
if duplicate_slot_repair_statuses.contains_key(&slot) {
// These are repaired through a different path
continue;
}
if let Some(slot_meta) = blockstore.meta(slot).unwrap() {
let new_repairs = Self::generate_repairs_for_slot(
blockstore,
@@ -620,15 +361,11 @@ impl RepairService {
mod test {
use super::*;
use crate::cluster_info::Node;
use crossbeam_channel::unbounded;
use solana_ledger::blockstore::{
make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
};
use solana_ledger::shred::max_ticks_per_n_shreds;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_runtime::genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs};
use solana_sdk::signature::Signer;
use solana_vote_program::vote_transaction;
#[test]
pub fn test_repair_orphan() {
@@ -642,7 +379,7 @@ mod test {
shreds.extend(shreds2);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!(
RepairService::generate_repairs(&blockstore, 0, 2, &HashMap::new()).unwrap(),
RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
vec![RepairType::HighestShred(0, 0), RepairType::Orphan(2)]
);
}
@@ -664,7 +401,7 @@ mod test {
// Check that repair tries to patch the empty slot
assert_eq!(
RepairService::generate_repairs(&blockstore, 0, 2, &HashMap::new()).unwrap(),
RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
vec![RepairType::HighestShred(0, 0)]
);
}
@@ -710,19 +447,12 @@ mod test {
.collect();
assert_eq!(
RepairService::generate_repairs(&blockstore, 0, std::usize::MAX, &HashMap::new())
.unwrap(),
RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(),
expected
);
assert_eq!(
RepairService::generate_repairs(
&blockstore,
0,
expected.len() - 2,
&HashMap::new()
)
.unwrap()[..],
RepairService::generate_repairs(&blockstore, 0, expected.len() - 2).unwrap()[..],
expected[0..expected.len() - 2]
);
}
@@ -751,8 +481,7 @@ mod test {
vec![RepairType::HighestShred(0, num_shreds_per_slot - 1)];
assert_eq!(
RepairService::generate_repairs(&blockstore, 0, std::usize::MAX, &HashMap::new())
.unwrap(),
RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(),
expected
);
}
@@ -766,7 +495,7 @@ mod test {
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let slots: Vec<u64> = vec![1, 3, 5, 7, 8];
let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
let num_entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
for (mut slot_shreds, _) in shreds.into_iter() {
@@ -797,7 +526,7 @@ mod test {
RepairService::generate_repairs_in_range(
&blockstore,
std::usize::MAX,
&repair_slot_range,
&repair_slot_range
)
.unwrap(),
expected
@@ -842,7 +571,7 @@ mod test {
RepairService::generate_repairs_in_range(
&blockstore,
std::usize::MAX,
&repair_slot_range,
&repair_slot_range
)
.unwrap(),
expected
@@ -863,290 +592,4 @@ mod test {
.unwrap();
assert_eq!(lowest.lowest, 5);
}
#[test]
pub fn test_generate_duplicate_repairs_for_slot() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let dead_slot = 9;
// SlotMeta doesn't exist, should make no repairs
assert!(
RepairService::generate_duplicate_repairs_for_slot(&blockstore, dead_slot,).is_none()
);
// Insert some shreds to create a SlotMeta, should make repairs
let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
let (mut shreds, _) = make_slot_entries(dead_slot, dead_slot - 1, num_entries_per_slot);
blockstore
.insert_shreds(shreds[..shreds.len() - 1].to_vec(), None, false)
.unwrap();
assert!(
RepairService::generate_duplicate_repairs_for_slot(&blockstore, dead_slot,).is_some()
);
// SlotMeta is full, should make no repairs
blockstore
.insert_shreds(vec![shreds.pop().unwrap()], None, false)
.unwrap();
assert!(
RepairService::generate_duplicate_repairs_for_slot(&blockstore, dead_slot,).is_none()
);
}
#[test]
pub fn test_generate_and_send_duplicate_repairs() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let cluster_slots = ClusterSlots::default();
let serve_repair = ServeRepair::new_with_invalid_keypair(Node::new_localhost().info);
let mut duplicate_slot_repair_statuses = HashMap::new();
let dead_slot = 9;
let receive_socket = &UdpSocket::bind("0.0.0.0:0").unwrap();
let duplicate_status = DuplicateSlotRepairStatus {
start: std::u64::MAX,
repair_addr: None,
};
// Insert some shreds to create a SlotMeta,
let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
let (mut shreds, _) = make_slot_entries(dead_slot, dead_slot - 1, num_entries_per_slot);
blockstore
.insert_shreds(shreds[..shreds.len() - 1].to_vec(), None, false)
.unwrap();
duplicate_slot_repair_statuses.insert(dead_slot, duplicate_status.clone());
// There is no repair_addr, so should not get filtered because the timeout
// `std::u64::MAX` has not expired
RepairService::generate_and_send_duplicate_repairs(
&mut duplicate_slot_repair_statuses,
&cluster_slots,
&blockstore,
&serve_repair,
&mut RepairStats::default(),
&UdpSocket::bind("0.0.0.0:0").unwrap(),
);
assert!(duplicate_slot_repair_statuses
.get(&dead_slot)
.unwrap()
.repair_addr
.is_none());
assert!(duplicate_slot_repair_statuses.get(&dead_slot).is_some());
// Give the slot a repair address
duplicate_slot_repair_statuses
.get_mut(&dead_slot)
.unwrap()
.repair_addr = Some(receive_socket.local_addr().unwrap());
// Slot is not yet full, should not get filtered from `duplicate_slot_repair_statuses`
RepairService::generate_and_send_duplicate_repairs(
&mut duplicate_slot_repair_statuses,
&cluster_slots,
&blockstore,
&serve_repair,
&mut RepairStats::default(),
&UdpSocket::bind("0.0.0.0:0").unwrap(),
);
assert_eq!(duplicate_slot_repair_statuses.len(), 1);
assert!(duplicate_slot_repair_statuses.get(&dead_slot).is_some());
// Insert rest of shreds. Slot is full, should get filtered from
// `duplicate_slot_repair_statuses`
blockstore
.insert_shreds(vec![shreds.pop().unwrap()], None, false)
.unwrap();
RepairService::generate_and_send_duplicate_repairs(
&mut duplicate_slot_repair_statuses,
&cluster_slots,
&blockstore,
&serve_repair,
&mut RepairStats::default(),
&UdpSocket::bind("0.0.0.0:0").unwrap(),
);
assert!(duplicate_slot_repair_statuses.is_empty());
}
#[test]
pub fn test_update_duplicate_slot_repair_addr() {
let dummy_addr = Some(UdpSocket::bind("0.0.0.0:0").unwrap().local_addr().unwrap());
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
Node::new_localhost().info,
));
let serve_repair = ServeRepair::new(cluster_info.clone());
let valid_repair_peer = Node::new_localhost().info;
// Signal that this peer has completed the dead slot, and is thus
// a valid target for repair
let dead_slot = 9;
let cluster_slots = ClusterSlots::default();
cluster_slots.insert_node_id(dead_slot, Arc::new(valid_repair_peer.id));
cluster_info.insert_info(valid_repair_peer);
// Not enough time has passed, should not update the
// address
let mut duplicate_status = DuplicateSlotRepairStatus {
start: std::u64::MAX,
repair_addr: dummy_addr,
};
RepairService::update_duplicate_slot_repair_addr(
dead_slot,
&mut duplicate_status,
&cluster_slots,
&serve_repair,
);
assert_eq!(duplicate_status.repair_addr, dummy_addr);
// If the repair address is None, should try to update
let mut duplicate_status = DuplicateSlotRepairStatus {
start: std::u64::MAX,
repair_addr: None,
};
RepairService::update_duplicate_slot_repair_addr(
dead_slot,
&mut duplicate_status,
&cluster_slots,
&serve_repair,
);
assert!(duplicate_status.repair_addr.is_some());
// If sufficient time has passed, should try to update
let mut duplicate_status = DuplicateSlotRepairStatus {
start: timestamp() - MAX_DUPLICATE_WAIT_MS as u64,
repair_addr: dummy_addr,
};
RepairService::update_duplicate_slot_repair_addr(
dead_slot,
&mut duplicate_status,
&cluster_slots,
&serve_repair,
);
assert_ne!(duplicate_status.repair_addr, dummy_addr);
}
#[test]
pub fn test_process_new_duplicate_slots() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let cluster_slots = ClusterSlots::default();
let serve_repair = ServeRepair::new_with_invalid_keypair(Node::new_localhost().info);
let mut duplicate_slot_repair_statuses = HashMap::new();
let duplicate_slot = 9;
// Fill blockstore for dead slot
blockstore.set_dead_slot(duplicate_slot).unwrap();
assert!(blockstore.is_dead(duplicate_slot));
let (shreds, _) = make_slot_entries(duplicate_slot, 0, 1);
blockstore.insert_shreds(shreds, None, false).unwrap();
let keypairs = ValidatorVoteKeypairs::new_rand();
let (reset_sender, reset_receiver) = unbounded();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = genesis_utils::create_genesis_config_with_vote_accounts(
1_000_000_000,
&[&keypairs],
10000,
);
let bank0 = Arc::new(Bank::new(&genesis_config));
let bank9 = Bank::new_from_parent(&bank0, &Pubkey::default(), duplicate_slot);
let old_balance = bank9.get_balance(&keypairs.node_keypair.pubkey());
bank9
.transfer(10_000, &mint_keypair, &keypairs.node_keypair.pubkey())
.unwrap();
let vote_tx = vote_transaction::new_vote_transaction(
vec![0],
bank0.hash(),
bank0.last_blockhash(),
&keypairs.node_keypair,
&keypairs.vote_keypair,
&keypairs.vote_keypair,
);
bank9.process_transaction(&vote_tx).unwrap();
assert!(bank9.get_signature_status(&vote_tx.signatures[0]).is_some());
RepairService::process_new_duplicate_slots(
&[duplicate_slot],
&mut duplicate_slot_repair_statuses,
&cluster_slots,
&bank9,
&blockstore,
&serve_repair,
&reset_sender,
);
// Blockstore should have been cleared
assert!(!blockstore.is_dead(duplicate_slot));
// Should not be able to find signature for slot 9 for the tx
assert!(bank9.get_signature_status(&vote_tx.signatures[0]).is_none());
// Getting balance should return the old balance (accounts were cleared)
assert_eq!(
bank9.get_balance(&keypairs.node_keypair.pubkey()),
old_balance
);
// Should add the duplicate slot to the tracker
assert!(duplicate_slot_repair_statuses
.get(&duplicate_slot)
.is_some());
// A signal should be sent to clear ReplayStage
assert!(reset_receiver.try_recv().is_ok());
}
#[test]
pub fn test_find_new_duplicate_slots() {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let cluster_slots = ClusterSlots::default();
let duplicate_slot_repair_statuses = HashMap::new();
let keypairs = ValidatorVoteKeypairs::new_rand();
let only_node_id = Arc::new(keypairs.node_keypair.pubkey());
let GenesisConfigInfo { genesis_config, .. } =
genesis_utils::create_genesis_config_with_vote_accounts(
1_000_000_000,
&[keypairs],
100,
);
let bank0 = Bank::new(&genesis_config);
// Empty blockstore should have no duplicates
assert!(RepairService::find_new_duplicate_slots(
&duplicate_slot_repair_statuses,
&blockstore,
&cluster_slots,
&bank0,
)
.is_empty());
// Insert a dead slot, but is not confirmed by network so should not
// be marked as duplicate
let dead_slot = 9;
blockstore.set_dead_slot(dead_slot).unwrap();
assert!(RepairService::find_new_duplicate_slots(
&duplicate_slot_repair_statuses,
&blockstore,
&cluster_slots,
&bank0,
)
.is_empty());
// If supermajority confirms the slot, then dead slot should be
// marked as a duplicate that needs to be repaired
cluster_slots.insert_node_id(dead_slot, only_node_id);
assert_eq!(
RepairService::find_new_duplicate_slots(
&duplicate_slot_repair_statuses,
&blockstore,
&cluster_slots,
&bank0,
),
vec![dead_slot]
);
}
}


@@ -9,7 +9,6 @@ use crate::{
consensus::{StakeLockout, Tower},
poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
progress_map::{ForkProgress, ForkStats, ProgressMap, PropagatedStats},
repair_service::DuplicateSlotsResetReceiver,
result::Result,
rewards_recorder_service::RewardsRecorderSender,
rpc_subscriptions::RpcSubscriptions,
@@ -109,7 +108,7 @@ pub struct ReplayStage {
}
impl ReplayStage {
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
#[allow(clippy::new_ret_no_self)]
pub fn new(
config: ReplayStageConfig,
blockstore: Arc<Blockstore>,
@@ -120,7 +119,6 @@ impl ReplayStage {
vote_tracker: Arc<VoteTracker>,
cluster_slots: Arc<ClusterSlots>,
retransmit_slots_sender: RetransmitSlotsSender,
duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver,
) -> (Self, Receiver<Vec<Arc<Bank>>>) {
let ReplayStageConfig {
my_pubkey,
@@ -141,8 +139,11 @@ impl ReplayStage {
let mut tower = Tower::new(&my_pubkey, &vote_account, &bank_forks.read().unwrap());
// Start the replay stage loop
let (lockouts_sender, commitment_service) =
AggregateCommitmentService::new(&exit, block_commitment_cache.clone());
let (lockouts_sender, commitment_service) = AggregateCommitmentService::new(
&exit,
block_commitment_cache.clone(),
subscriptions.clone(),
);
#[allow(clippy::cognitive_complexity)]
let t_replay = Builder::new()
@@ -214,22 +215,14 @@ impl ReplayStage {
&mut progress,
transaction_status_sender.clone(),
&verify_recyclers,
&subscriptions,
);
Self::report_memory(&allocated, "replay_active_banks", start);
let ancestors = Arc::new(bank_forks.read().unwrap().ancestors());
let descendants = bank_forks.read().unwrap().descendants();
let descendants = HashMap::new();
let forks_root = bank_forks.read().unwrap().root();
let start = allocated.get();
// Reset any duplicate slots that have been confirmed
// by the network in anticipation of the confirmed version of
// the slot
Self::reset_duplicate_slots(
&duplicate_slots_reset_receiver,
&descendants,
&mut progress,
&bank_forks,
);
let mut frozen_banks: Vec<_> = bank_forks
.read()
.unwrap()
@@ -308,10 +301,6 @@ impl ReplayStage {
// Vote on a fork
if let Some(ref vote_bank) = vote_bank {
subscriptions.notify_subscribers(
block_commitment_cache.read().unwrap().slot(),
&bank_forks,
);
if let Some(votable_leader) =
leader_schedule_cache.slot_leader_at(vote_bank.slot(), Some(vote_bank))
{
@@ -473,61 +462,6 @@ impl ReplayStage {
);
}
fn reset_duplicate_slots(
duplicate_slots_reset_receiver: &DuplicateSlotsResetReceiver,
descendants: &HashMap<Slot, HashSet<Slot>>,
progress: &mut ProgressMap,
bank_forks: &RwLock<BankForks>,
) {
for duplicate_slot in duplicate_slots_reset_receiver.try_iter() {
Self::purge_unconfirmed_duplicate_slot(
duplicate_slot,
descendants,
progress,
bank_forks,
);
}
}
fn purge_unconfirmed_duplicate_slot(
duplicate_slot: Slot,
descendants: &HashMap<Slot, HashSet<Slot>>,
progress: &mut ProgressMap,
bank_forks: &RwLock<BankForks>,
) {
error!("purging slot {}", duplicate_slot);
let slot_descendants = descendants.get(&duplicate_slot);
if slot_descendants.is_none() {
// Root has already moved past this slot, no need to purge it
return;
}
for d in slot_descendants
.unwrap()
.iter()
.chain(std::iter::once(&duplicate_slot))
{
// Clear the progress map of these forks
let _ = progress.remove(d);
// Clear the duplicate banks from BankForks
{
let mut w_bank_forks = bank_forks.write().unwrap();
// Purging should have already been taken care of by logic
// in repair_service, so make sure drop implementation doesn't
// run
w_bank_forks
.get(*d)
.expect("Bank in descendants map must exist in BankForks")
.skip_drop
.store(true, Ordering::Relaxed);
w_bank_forks
.remove(*d)
.expect("Bank in descendants map must exist in BankForks");
}
}
}
fn log_leader_change(
my_pubkey: &Pubkey,
bank_slot: Slot,
@@ -812,7 +746,7 @@ impl ReplayStage {
trace!("latest root send failed: {:?}", e);
}
});
info!("new root {}", new_root);
trace!("new root {}", new_root);
if let Err(e) = root_bank_sender.send(rooted_banks) {
trace!("root_bank_sender failed: {:?}", e);
return Err(e.into());
@@ -825,7 +759,6 @@ impl ReplayStage {
progress.get_fork_stats(bank.slot()).unwrap().total_staked,
lockouts_sender,
);
Self::push_vote(
cluster_info,
bank,
@@ -905,6 +838,7 @@ impl ReplayStage {
let blockhash = bank.last_blockhash();
vote_tx.partial_sign(&[node_keypair.as_ref()], blockhash);
vote_tx.partial_sign(&[authorized_voter_keypair.as_ref()], blockhash);
let _ = cluster_info.send_vote(&vote_tx);
cluster_info.push_vote(tower_index, vote_tx);
}
@@ -963,6 +897,7 @@ impl ReplayStage {
progress: &mut ProgressMap,
transaction_status_sender: Option<TransactionStatusSender>,
verify_recyclers: &VerifyRecyclers,
subscriptions: &Arc<RpcSubscriptions>,
) -> bool {
let mut did_complete_bank = false;
let mut tx_count = 0;
@@ -1030,6 +965,7 @@ impl ReplayStage {
did_complete_bank = true;
info!("bank frozen: {}", bank.slot());
bank.freeze();
subscriptions.notify_frozen(bank.slot());
} else {
trace!(
"bank {} not completed tick_height: {}, max_tick_height: {}",
@@ -2060,12 +1996,6 @@ pub(crate) mod tests {
);
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore.clone(),
))),
));
let mut bank_forks = BankForks::new(0, bank0);
// Insert a non-root bank so that the propagation logic will update this
@@ -2089,7 +2019,14 @@ pub(crate) mod tests {
assert!(progress.get_propagated_stats(1).unwrap().is_leader_slot);
bank1.freeze();
bank_forks.insert(bank1);
let bank_forks = RwLock::new(bank_forks);
let bank_forks = Arc::new(RwLock::new(bank_forks));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
blockstore.clone(),
))),
));
// Insert shreds for slot NUM_CONSECUTIVE_LEADER_SLOTS,
// chaining to slot 1
@@ -2430,6 +2367,7 @@ pub(crate) mod tests {
ShredCommonHeader::default(),
data_header,
CodingShredHeader::default(),
PACKET_DATA_SIZE,
);
bincode::serialize_into(
&mut shred.payload[SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER..],
@@ -2508,13 +2446,6 @@ pub(crate) mod tests {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let (lockouts_sender, _) = AggregateCommitmentService::new(
&Arc::new(AtomicBool::new(false)),
block_commitment_cache.clone(),
);
let leader_pubkey = Pubkey::new_rand();
let leader_lamports = 3;
@@ -2535,6 +2466,18 @@ pub(crate) mod tests {
vec![0],
)));
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
block_commitment_cache.clone(),
));
let (lockouts_sender, _) =
AggregateCommitmentService::new(&exit, block_commitment_cache.clone(), subscriptions);
assert!(block_commitment_cache
.read()
.unwrap()
@@ -3607,65 +3550,4 @@ pub(crate) mod tests {
&progress_map,
));
}
#[test]
fn test_purge_unconfirmed_duplicate_slot() {
let (bank_forks, mut progress) = setup_forks();
let descendants = bank_forks.read().unwrap().descendants();
// Purging slot 5 should purge only slots 5 and its descendant 6
ReplayStage::purge_unconfirmed_duplicate_slot(5, &descendants, &mut progress, &bank_forks);
for i in 5..=6 {
assert!(bank_forks.read().unwrap().get(i).is_none());
assert!(progress.get(&i).is_none());
}
for i in 0..=4 {
assert!(bank_forks.read().unwrap().get(i).is_some());
assert!(progress.get(&i).is_some());
}
// Purging slot 4 should purge only slot 4
let descendants = bank_forks.read().unwrap().descendants();
ReplayStage::purge_unconfirmed_duplicate_slot(4, &descendants, &mut progress, &bank_forks);
for i in 4..=6 {
assert!(bank_forks.read().unwrap().get(i).is_none());
assert!(progress.get(&i).is_none());
}
for i in 0..=3 {
assert!(bank_forks.read().unwrap().get(i).is_some());
assert!(progress.get(&i).is_some());
}
// Purging slot 1 should purge both forks 2 and 3
let descendants = bank_forks.read().unwrap().descendants();
ReplayStage::purge_unconfirmed_duplicate_slot(1, &descendants, &mut progress, &bank_forks);
for i in 1..=6 {
assert!(bank_forks.read().unwrap().get(i).is_none());
assert!(progress.get(&i).is_none());
}
assert!(bank_forks.read().unwrap().get(0).is_some());
assert!(progress.get(&0).is_some());
}
fn setup_forks() -> (RwLock<BankForks>, ProgressMap) {
/*
    Build fork structure:
         slot 0
           |
         slot 1
         /    \
    slot 2    |
      |     slot 3
    slot 4    |
            slot 5
              |
            slot 6
*/
let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5) / (tr(6)))));
let mut vote_simulator = VoteSimulator::new(1);
vote_simulator.fill_bank_forks(forks, &HashMap::new());
(vote_simulator.bank_forks, vote_simulator.progress)
}
}


@@ -3,12 +3,12 @@
use crate::{
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
cluster_slots::ClusterSlots,
repair_service::DuplicateSlotsResetSender,
contact_info::ContactInfo,
repair_service::RepairStrategy,
result::{Error, Result},
window_service::{should_retransmit_and_persist, WindowService},
};
use crossbeam_channel::Receiver;
use crossbeam_channel::Receiver as CrossbeamReceiver;
use solana_ledger::{
bank_forks::BankForks,
blockstore::{Blockstore, CompletedSlotsReceiver},
@@ -18,12 +18,16 @@ use solana_ledger::{
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_error;
use solana_perf::packet::Packets;
use solana_sdk::clock::{Epoch, Slot};
use solana_sdk::epoch_schedule::EpochSchedule;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use solana_streamer::streamer::PacketReceiver;
use std::{
cmp,
collections::{BTreeMap, HashMap},
net::UdpSocket,
sync::atomic::{AtomicBool, Ordering},
sync::atomic::{AtomicBool, AtomicU64, Ordering},
sync::mpsc::channel,
sync::mpsc::RecvTimeoutError,
sync::Mutex,
@@ -36,6 +40,141 @@ use std::{
// it doesn't pull up too much work.
const MAX_PACKET_BATCH_SIZE: usize = 100;
#[derive(Default)]
struct RetransmitStats {
total_packets: AtomicU64,
total_batches: AtomicU64,
total_time: AtomicU64,
epoch_fetch: AtomicU64,
epoch_cache_update: AtomicU64,
repair_total: AtomicU64,
discard_total: AtomicU64,
retransmit_total: AtomicU64,
last_ts: AtomicU64,
compute_turbine_peers_total: AtomicU64,
packets_by_slot: Mutex<BTreeMap<Slot, usize>>,
packets_by_source: Mutex<BTreeMap<String, usize>>,
}
#[allow(clippy::too_many_arguments)]
fn update_retransmit_stats(
stats: &Arc<RetransmitStats>,
total_time: u64,
total_packets: usize,
retransmit_total: u64,
discard_total: u64,
repair_total: u64,
compute_turbine_peers_total: u64,
peers_len: usize,
packets_by_slot: HashMap<Slot, usize>,
packets_by_source: HashMap<String, usize>,
epoch_fetch: u64,
epoch_cache_update: u64,
) {
stats.total_time.fetch_add(total_time, Ordering::Relaxed);
stats
.total_packets
.fetch_add(total_packets as u64, Ordering::Relaxed);
stats
.retransmit_total
.fetch_add(retransmit_total, Ordering::Relaxed);
stats
.repair_total
.fetch_add(repair_total, Ordering::Relaxed);
stats
.discard_total
.fetch_add(discard_total, Ordering::Relaxed);
stats
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers_total, Ordering::Relaxed);
stats.total_batches.fetch_add(1, Ordering::Relaxed);
stats.epoch_fetch.fetch_add(epoch_fetch, Ordering::Relaxed);
stats
.epoch_cache_update
.fetch_add(epoch_cache_update, Ordering::Relaxed);
{
let mut stats_packets_by_slot = stats.packets_by_slot.lock().unwrap();
for (slot, count) in packets_by_slot {
*stats_packets_by_slot.entry(slot).or_insert(0) += count;
}
}
{
let mut stats_packets_by_source = stats.packets_by_source.lock().unwrap();
for (source, count) in packets_by_source {
*stats_packets_by_source.entry(source).or_insert(0) += count;
}
}
let now = timestamp();
let last = stats.last_ts.load(Ordering::Relaxed);
if now - last > 2000 && stats.last_ts.compare_and_swap(last, now, Ordering::Relaxed) == last {
datapoint_info!("retransmit-num_nodes", ("count", peers_len, i64));
datapoint_info!(
"retransmit-stage",
(
"total_time",
stats.total_time.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"epoch_fetch",
stats.epoch_fetch.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"epoch_cache_update",
stats.epoch_cache_update.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_batches",
stats.total_batches.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_packets",
stats.total_packets.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"retransmit_total",
stats.retransmit_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"compute_turbine",
stats.compute_turbine_peers_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"repair_total",
stats.repair_total.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"discard_total",
stats.discard_total.swap(0, Ordering::Relaxed) as i64,
i64
),
);
let mut packets_by_slot = stats.packets_by_slot.lock().unwrap();
info!("retransmit: packets_by_slot: {:#?}", packets_by_slot);
packets_by_slot.clear();
drop(packets_by_slot);
let mut packets_by_source = stats.packets_by_source.lock().unwrap();
info!("retransmit: packets_by_source: {:#?}", packets_by_source);
packets_by_source.clear();
}
}
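The flush above is throttled by a single atomic timestamp: every worker accumulates into the counters, but only the thread that wins the compare_and_swap on `last_ts` submits the datapoint and zeroes the counters with `swap(0, ..)`. A minimal standalone sketch of that gate (illustrative names, not part of the crate; `saturating_sub` is a small extra hardening not in the original):

use std::sync::atomic::{AtomicU64, Ordering};

/// Returns true for at most one caller per `interval_ms`, mirroring the
/// `last_ts` check in `update_retransmit_stats` above.
fn should_flush(last_ts: &AtomicU64, now_ms: u64, interval_ms: u64) -> bool {
    let last = last_ts.load(Ordering::Relaxed);
    now_ms.saturating_sub(last) > interval_ms
        && last_ts.compare_and_swap(last, now_ms, Ordering::Relaxed) == last
}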
#[derive(Default)]
struct EpochStakesCache {
epoch: Epoch,
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
peers: Vec<ContactInfo>,
stakes_and_index: Vec<(u64, usize)>,
}
fn retransmit(
bank_forks: &Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
@@ -43,6 +182,9 @@ fn retransmit(
r: &Arc<Mutex<PacketReceiver>>,
sock: &UdpSocket,
id: u32,
stats: &Arc<RetransmitStats>,
epoch_stakes_cache: &Arc<RwLock<EpochStakesCache>>,
last_peer_update: &Arc<AtomicU64>,
) -> Result<()> {
let timer = Duration::new(1, 0);
let r_lock = r.lock().unwrap();
@@ -59,17 +201,49 @@ fn retransmit(
}
drop(r_lock);
let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
let r_bank = bank_forks.read().unwrap().working_bank();
let bank_epoch = r_bank.get_leader_schedule_epoch(r_bank.slot());
epoch_fetch.stop();
let mut epoch_cache_update = Measure::start("retransmit_epoch_cache_update");
let mut r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
if r_epoch_stakes_cache.epoch != bank_epoch {
drop(r_epoch_stakes_cache);
let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
if w_epoch_stakes_cache.epoch != bank_epoch {
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
let stakes = stakes.map(Arc::new);
w_epoch_stakes_cache.stakes = stakes;
w_epoch_stakes_cache.epoch = bank_epoch;
}
drop(w_epoch_stakes_cache);
r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
}
let now = timestamp();
let last = last_peer_update.load(Ordering::Relaxed);
if now - last > 1000 && last_peer_update.compare_and_swap(last, now, Ordering::Relaxed) == last
{
drop(r_epoch_stakes_cache);
let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
let (peers, stakes_and_index) =
cluster_info.sorted_retransmit_peers_and_stakes(w_epoch_stakes_cache.stakes.clone());
w_epoch_stakes_cache.peers = peers;
w_epoch_stakes_cache.stakes_and_index = stakes_and_index;
drop(w_epoch_stakes_cache);
r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
}
let mut peers_len = 0;
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
let stakes = stakes.map(Arc::new);
let (peers, stakes_and_index) = cluster_info.sorted_retransmit_peers_and_stakes(stakes);
epoch_cache_update.stop();
let my_id = cluster_info.id();
let mut discard_total = 0;
let mut repair_total = 0;
let mut retransmit_total = 0;
let mut compute_turbine_peers_total = 0;
let mut packets_by_slot: HashMap<Slot, usize> = HashMap::new();
let mut packets_by_source: HashMap<String, usize> = HashMap::new();
for mut packets in packet_v {
for packet in packets.packets.iter_mut() {
// skip discarded packets and repair packets
@@ -87,8 +261,8 @@ fn retransmit(
let mut compute_turbine_peers = Measure::start("turbine_start");
let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
&my_id,
&peers,
&stakes_and_index,
&r_epoch_stakes_cache.peers,
&r_epoch_stakes_cache.stakes_and_index,
packet.meta.seed,
);
peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len());
@@ -101,10 +275,21 @@ fn retransmit(
let (neighbors, children) =
compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, indexes);
let neighbors: Vec<_> = neighbors.into_iter().map(|index| &peers[index]).collect();
let children: Vec<_> = children.into_iter().map(|index| &peers[index]).collect();
let neighbors: Vec<_> = neighbors
.into_iter()
.map(|index| &r_epoch_stakes_cache.peers[index])
.collect();
let children: Vec<_> = children
.into_iter()
.map(|index| &r_epoch_stakes_cache.peers[index])
.collect();
compute_turbine_peers.stop();
compute_turbine_peers_total += compute_turbine_peers.as_ms();
compute_turbine_peers_total += compute_turbine_peers.as_us();
*packets_by_slot.entry(packet.meta.slot).or_insert(0) += 1;
*packets_by_source
.entry(packet.meta.addr().to_string())
.or_insert(0) += 1;
let leader =
leader_schedule_cache.slot_leader_at(packet.meta.slot, Some(r_bank.as_ref()));
@@ -116,7 +301,7 @@ fn retransmit(
ClusterInfo::retransmit_to(&children, packet, leader, sock, true)?;
}
retransmit_time.stop();
retransmit_total += retransmit_time.as_ms();
retransmit_total += retransmit_time.as_us();
}
}
timer_start.stop();
@@ -127,16 +312,21 @@ fn retransmit(
retransmit_total,
id,
);
datapoint_debug!("cluster_info-num_nodes", ("count", peers_len, i64));
datapoint_debug!(
"retransmit-stage",
("total_time", timer_start.as_ms() as i64, i64),
("total_packets", total_packets as i64, i64),
("retransmit_total", retransmit_total as i64, i64),
("compute_turbine", compute_turbine_peers_total as i64, i64),
("repair_total", i64::from(repair_total), i64),
("discard_total", i64::from(discard_total), i64),
update_retransmit_stats(
stats,
timer_start.as_us(),
total_packets,
retransmit_total,
discard_total,
repair_total,
compute_turbine_peers_total,
peers_len,
packets_by_slot,
packets_by_source,
epoch_fetch.as_us(),
epoch_cache_update.as_us(),
);
Ok(())
}
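The epoch-stakes refresh in `retransmit` is a double-checked lock: a cheap read-lock test first, then a write-lock upgrade that re-checks the epoch so only the first thread to observe staleness rebuilds the cache. A generic sketch of the same pattern, under hypothetical types:

use std::sync::RwLock;

struct Cached<T> {
    epoch: u64,
    value: Option<T>,
}

fn get_or_refresh<T: Clone>(
    cache: &RwLock<Cached<T>>,
    current_epoch: u64,
    rebuild: impl FnOnce() -> T,
) -> Option<T> {
    {
        // Fast path: most calls only ever take the read lock.
        let r = cache.read().unwrap();
        if r.epoch == current_epoch {
            return r.value.clone();
        }
    } // read guard dropped here, before the write-lock upgrade
    let mut w = cache.write().unwrap();
    // Re-check: another thread may have refreshed while we waited.
    if w.epoch != current_epoch {
        w.value = Some(rebuild());
        w.epoch = current_epoch;
    }
    w.value.clone()
}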
@@ -155,6 +345,7 @@ pub fn retransmitter(
cluster_info: Arc<ClusterInfo>,
r: Arc<Mutex<PacketReceiver>>,
) -> Vec<JoinHandle<()>> {
let stats = Arc::new(RetransmitStats::default());
(0..sockets.len())
.map(|s| {
let sockets = sockets.clone();
@@ -162,6 +353,9 @@ pub fn retransmitter(
let leader_schedule_cache = leader_schedule_cache.clone();
let r = r.clone();
let cluster_info = cluster_info.clone();
let stats = stats.clone();
let epoch_stakes_cache = Arc::new(RwLock::new(EpochStakesCache::default()));
let last_peer_update = Arc::new(AtomicU64::new(0));
Builder::new()
.name("solana-retransmitter".to_string())
@@ -175,6 +369,9 @@ pub fn retransmitter(
&r,
&sockets[s],
s as u32,
&stats,
&epoch_stakes_cache,
&last_peer_update,
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
@@ -207,14 +404,13 @@ impl RetransmitStage {
cluster_info: &Arc<ClusterInfo>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
verified_receiver: Receiver<Vec<Packets>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
exit: &Arc<AtomicBool>,
completed_slots_receiver: CompletedSlotsReceiver,
epoch_schedule: EpochSchedule,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
cluster_slots: Arc<ClusterSlots>,
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();
@@ -231,7 +427,6 @@ impl RetransmitStage {
bank_forks,
completed_slots_receiver,
epoch_schedule,
duplicate_slots_reset_sender,
};
let leader_schedule_cache = leader_schedule_cache.clone();
let window_service = WindowService::new(

File diff suppressed because it is too large

core/src/rpc_error.rs Normal file

@@ -0,0 +1,54 @@
use jsonrpc_core::{Error, ErrorCode};
use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;
pub enum RpcCustomError {
NonexistentClusterRoot {
cluster_root: Slot,
node_root: Slot,
},
BlockCleanedUp {
slot: Slot,
first_available_block: Slot,
},
SendTransactionPreflightFailure {
message: String,
},
}
impl From<RpcCustomError> for Error {
fn from(e: RpcCustomError) -> Self {
match e {
RpcCustomError::NonexistentClusterRoot {
cluster_root,
node_root,
} => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_0),
message: format!(
"Cluster largest_confirmed_root {} does not exist on node. Node root: {}",
cluster_root, node_root,
),
data: None,
},
RpcCustomError::BlockCleanedUp {
slot,
first_available_block,
} => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_1),
message: format!(
"Block {} cleaned up, does not exist on node. First available block: {}",
slot, first_available_block,
),
data: None,
},
RpcCustomError::SendTransactionPreflightFailure { message } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_2),
message,
data: None,
},
}
}
}
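With the `From` impl in place, a handler can bubble one of these failures into a JSON-RPC response with a plain `.into()`. A hedged usage sketch, assuming it lives alongside the definitions above (`check_block_available` is an illustrative helper, not part of the file):

fn check_block_available(slot: Slot, first_available_block: Slot) -> jsonrpc_core::Result<()> {
    if slot < first_available_block {
        // Maps to ServerError(-32001) with the message formatted above.
        return Err(RpcCustomError::BlockCleanedUp {
            slot,
            first_available_block,
        }
        .into());
    }
    Ok(())
}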

core/src/rpc_health.rs Normal file

@@ -0,0 +1,118 @@
use crate::cluster_info::ClusterInfo;
use solana_sdk::pubkey::Pubkey;
use std::{
collections::HashSet,
sync::atomic::{AtomicBool, Ordering},
sync::Arc,
};
#[derive(PartialEq, Clone, Copy)]
pub enum RpcHealthStatus {
Ok,
Behind, // Validator is behind its trusted validators
}
pub struct RpcHealth {
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
health_check_slot_distance: u64,
override_health_check: Arc<AtomicBool>,
#[cfg(test)]
stub_health_status: std::sync::RwLock<Option<RpcHealthStatus>>,
}
impl RpcHealth {
pub fn new(
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
health_check_slot_distance: u64,
override_health_check: Arc<AtomicBool>,
) -> Self {
Self {
cluster_info,
trusted_validators,
health_check_slot_distance,
override_health_check,
#[cfg(test)]
stub_health_status: std::sync::RwLock::new(None),
}
}
pub fn check(&self) -> RpcHealthStatus {
#[cfg(test)]
{
if let Some(stub_health_status) = *self.stub_health_status.read().unwrap() {
return stub_health_status;
}
}
if self.override_health_check.load(Ordering::Relaxed) {
RpcHealthStatus::Ok
} else if let Some(trusted_validators) = &self.trusted_validators {
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
(
self.cluster_info
.get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| {
hashes
.iter()
.max_by(|a, b| a.0.cmp(&b.0))
.map(|slot_hash| slot_hash.0)
})
.flatten()
.unwrap_or(0),
trusted_validators
.iter()
.map(|trusted_validator| {
self.cluster_info
.get_accounts_hash_for_node(&trusted_validator, |hashes| {
hashes
.iter()
.max_by(|a, b| a.0.cmp(&b.0))
.map(|slot_hash| slot_hash.0)
})
.flatten()
.unwrap_or(0)
})
.max()
.unwrap_or(0),
)
};
// This validator is considered healthy if its latest account hash slot is within
// `health_check_slot_distance` of the latest trusted validator's account hash slot
if latest_account_hash_slot > 0
&& latest_trusted_validator_account_hash_slot > 0
&& latest_account_hash_slot
> latest_trusted_validator_account_hash_slot
.saturating_sub(self.health_check_slot_distance)
{
RpcHealthStatus::Ok
} else {
warn!(
"health check: me={}, latest trusted_validator={}",
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
);
RpcHealthStatus::Behind
}
} else {
// No trusted validator point of reference available, so this validator is healthy
// because it's running
RpcHealthStatus::Ok
}
}
#[cfg(test)]
pub(crate) fn stub() -> Arc<Self> {
Arc::new(Self::new(
Arc::new(ClusterInfo::default()),
None,
42,
Arc::new(AtomicBool::new(false)),
))
}
#[cfg(test)]
pub(crate) fn stub_set_health_status(&self, stub_health_status: Option<RpcHealthStatus>) {
*self.stub_health_status.write().unwrap() = stub_health_status;
}
}
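As a worked example of the distance check: with `health_check_slot_distance = 150`, a node whose latest account-hash slot is 900 against a best trusted slot of 1000 reports `Ok` (900 > 1000 - 150), while a node at 849 reports `Behind`. A construction sketch, reusing the same defaults as the test stub above:

use std::sync::{atomic::AtomicBool, Arc};

let health = RpcHealth::new(
    Arc::new(ClusterInfo::default()),
    None, // no trusted validators: check() always returns Ok
    150,  // health_check_slot_distance
    Arc::new(AtomicBool::new(false)),
);
assert!(health.check() == RpcHealthStatus::Ok);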

core/src/rpc_pubsub.rs

@@ -1,6 +1,6 @@
//! The `pubsub` module implements a threaded subscription service for client RPC requests
use crate::rpc_subscriptions::{Confirmations, RpcSubscriptions, SlotInfo};
use crate::rpc_subscriptions::{RpcSubscriptions, RpcVote, SlotInfo};
use jsonrpc_core::{Error, ErrorCode, Result};
use jsonrpc_derive::rpc;
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
@@ -8,8 +8,12 @@ use solana_client::rpc_response::{
Response as RpcResponse, RpcAccount, RpcKeyedAccount, RpcSignatureResult,
};
#[cfg(test)]
use solana_ledger::blockstore::Blockstore;
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_sdk::{
clock::Slot, commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Signature,
};
#[cfg(test)]
use std::sync::RwLock;
use std::{
str::FromStr,
sync::{atomic, Arc},
@@ -35,7 +39,7 @@ pub trait RpcSolPubSub {
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
);
// Unsubscribe from account notification subscription.
@@ -59,7 +63,7 @@ pub trait RpcSolPubSub {
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
);
// Unsubscribe from account notification subscription.
@@ -83,7 +87,7 @@ pub trait RpcSolPubSub {
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
);
// Unsubscribe from signature notification subscription.
@@ -110,6 +114,18 @@ pub trait RpcSolPubSub {
)]
fn slot_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
// Get notification when vote is encountered
#[pubsub(subscription = "voteNotification", subscribe, name = "voteSubscribe")]
fn vote_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<RpcVote>);
// Unsubscribe from vote notification subscription.
#[pubsub(
subscription = "voteNotification",
unsubscribe,
name = "voteUnsubscribe"
)]
fn vote_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
// Get notification when a new root is set
#[pubsub(subscription = "rootNotification", subscribe, name = "rootSubscribe")]
fn root_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<Slot>);
@@ -135,9 +151,14 @@ impl RpcSolPubSubImpl {
}
#[cfg(test)]
fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
fn default_with_blockstore_bank_forks(
blockstore: Arc<Blockstore>,
bank_forks: Arc<RwLock<BankForks>>,
) -> Self {
let uid = Arc::new(atomic::AtomicUsize::default());
let subscriptions = Arc::new(RpcSubscriptions::default_with_blockstore(blockstore));
let subscriptions = Arc::new(RpcSubscriptions::default_with_blockstore_bank_forks(
blockstore, bank_forks,
));
Self { uid, subscriptions }
}
}
@@ -158,19 +179,15 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
) {
match param::<Pubkey>(&pubkey_str, "pubkey") {
Ok(pubkey) => {
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("account_subscribe: account={:?} id={:?}", pubkey, sub_id);
self.subscriptions.add_account_subscription(
pubkey,
confirmations,
sub_id,
subscriber,
)
self.subscriptions
.add_account_subscription(pubkey, commitment, sub_id, subscriber)
}
Err(e) => subscriber.reject(e).unwrap(),
}
@@ -198,19 +215,15 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
) {
match param::<Pubkey>(&pubkey_str, "pubkey") {
Ok(pubkey) => {
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("program_subscribe: account={:?} id={:?}", pubkey, sub_id);
self.subscriptions.add_program_subscription(
pubkey,
confirmations,
sub_id,
subscriber,
)
self.subscriptions
.add_program_subscription(pubkey, commitment, sub_id, subscriber)
}
Err(e) => subscriber.reject(e).unwrap(),
}
@@ -238,7 +251,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
confirmations: Option<Confirmations>,
commitment: Option<CommitmentConfig>,
) {
info!("signature_subscribe");
match param::<Signature>(&signature_str, "signature") {
@@ -249,12 +262,8 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
"signature_subscribe: signature={:?} id={:?}",
signature, sub_id
);
self.subscriptions.add_signature_subscription(
signature,
confirmations,
sub_id,
subscriber,
);
self.subscriptions
.add_signature_subscription(signature, commitment, sub_id, subscriber);
}
Err(e) => subscriber.reject(e).unwrap(),
}
@@ -298,6 +307,27 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
}
}
fn vote_subscribe(&self, _meta: Self::Metadata, subscriber: Subscriber<RpcVote>) {
info!("vote_subscribe");
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("vote_subscribe: id={:?}", sub_id);
self.subscriptions.add_vote_subscription(sub_id, subscriber);
}
fn vote_unsubscribe(&self, _meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> {
info!("vote_unsubscribe");
if self.subscriptions.remove_vote_subscription(&id) {
Ok(true)
} else {
Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Subscription id does not exist".into(),
data: None,
})
}
}
fn root_subscribe(&self, _meta: Self::Metadata, subscriber: Subscriber<Slot>) {
info!("root_subscribe");
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
@@ -324,9 +354,11 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
mod tests {
use super::*;
use crate::{
commitment::{BlockCommitment, BlockCommitmentCache},
cluster_info_vote_listener::{ClusterInfoVoteListener, VoteTracker},
commitment::{BlockCommitmentCache, CacheSlotInfo},
rpc_subscriptions::tests::robust_poll_or_panic,
};
use crossbeam_channel::unbounded;
use jsonrpc_core::{futures::sync::mpsc, Response};
use jsonrpc_pubsub::{PubSubHandler, Session};
use serial_test_derive::serial;
@@ -336,15 +368,19 @@ mod tests {
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_runtime::bank::Bank;
use solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
};
use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_program, system_transaction,
transaction::{self, Transaction},
};
use solana_vote_program::vote_transaction;
use std::{
collections::HashMap,
sync::{atomic::AtomicBool, RwLock},
thread::sleep,
time::Duration,
@@ -354,14 +390,17 @@ mod tests {
bank_forks: &Arc<RwLock<BankForks>>,
tx: &Transaction,
subscriptions: &RpcSubscriptions,
current_slot: Slot,
) -> transaction::Result<()> {
bank_forks
.write()
.unwrap()
.get(0)
.get(current_slot)
.unwrap()
.process_transaction(tx)?;
subscriptions.notify_subscribers(0, &bank_forks);
let mut cache_slot_info = CacheSlotInfo::default();
cache_slot_info.current_slot = current_slot;
subscriptions.notify_subscribers(cache_slot_info);
Ok(())
}
@@ -387,6 +426,7 @@ mod tests {
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
bank_forks.clone(),
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
@@ -401,7 +441,7 @@ mod tests {
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("signatureNotification");
rpc.signature_subscribe(session, subscriber, tx.signatures[0].to_string(), None);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 0).unwrap();
// Test signature confirmation notification
let (response, _) = robust_poll_or_panic(receiver);
@@ -430,15 +470,15 @@ mod tests {
} = create_genesis_config(10_000);
let bob_pubkey = Pubkey::new_rand();
let bank = Bank::new(&genesis_config);
let arc_bank = Arc::new(bank);
let blockhash = arc_bank.last_blockhash();
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let session = create_session();
let mut io = PubSubHandler::default();
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
io.extend_with(rpc.to_delegate());
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
@@ -493,14 +533,22 @@ mod tests {
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
bank_forks.clone(),
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
BlockCommitmentCache::new_for_tests_with_blockstore_bank(
blockstore,
bank_forks.read().unwrap().get(1).unwrap().clone(),
1,
),
)),
)),
uid: Arc::new(atomic::AtomicUsize::default()),
@@ -511,11 +559,11 @@ mod tests {
session,
subscriber,
contract_state.pubkey().to_string(),
None,
Some(CommitmentConfig::recent()),
);
let tx = system_transaction::transfer(&alice, &contract_funds.pubkey(), 51, blockhash);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
let ixs = budget_instruction::when_signed(
&contract_funds.pubkey(),
@@ -530,14 +578,14 @@ mod tests {
ixs,
blockhash,
);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
sleep(Duration::from_millis(200));
// Test signature confirmation notification #1
let expected_data = bank_forks
.read()
.unwrap()
.get(0)
.get(1)
.unwrap()
.get_account(&contract_state.pubkey())
.unwrap()
@@ -547,7 +595,7 @@ mod tests {
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 0 },
"context": { "slot": 1 },
"value": {
"owner": budget_program_id.to_string(),
"lamports": 51,
@@ -564,7 +612,7 @@ mod tests {
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
let tx = system_transaction::transfer(&alice, &witness.pubkey(), 1, blockhash);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
sleep(Duration::from_millis(200));
let ix = budget_instruction::apply_signature(
&witness.pubkey(),
@@ -572,14 +620,14 @@ mod tests {
&bob_pubkey,
);
let tx = Transaction::new_signed_instructions(&[&witness], vec![ix], blockhash);
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions, 1).unwrap();
sleep(Duration::from_millis(200));
assert_eq!(
bank_forks
.read()
.unwrap()
.get(0)
.get(1)
.unwrap()
.get_account(&contract_state.pubkey()),
None
@@ -593,9 +641,12 @@ mod tests {
let session = create_session();
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, Bank::new(&genesis_config))));
let mut io = PubSubHandler::default();
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
let rpc =
RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks.clone());
io.extend_with(rpc.to_delegate());
@@ -630,7 +681,7 @@ mod tests {
#[test]
#[should_panic]
fn test_account_confirmations_not_fulfilled() {
fn test_account_commitment_not_fulfilled() {
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
@@ -638,15 +689,19 @@ mod tests {
} = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bank_forks = Arc::new(RwLock::new(BankForks::new(1, bank)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bob = Keypair::new();
let mut rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore.clone());
let mut rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(
blockstore.clone(),
bank_forks.clone(),
);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(
&exit,
bank_forks.clone(),
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),
@@ -654,24 +709,30 @@ mod tests {
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
rpc.account_subscribe(
session,
subscriber,
bob.pubkey().to_string(),
Some(CommitmentConfig::root()),
);
let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash);
bank_forks
.write()
.unwrap()
.get(0)
.get(1)
.unwrap()
.process_transaction(&tx)
.unwrap();
rpc.subscriptions.notify_subscribers(0, &bank_forks);
rpc.subscriptions
.notify_subscribers(CacheSlotInfo::default());
// allow 200ms for notification thread to wake
std::thread::sleep(Duration::from_millis(200));
let _panic = robust_poll_or_panic(receiver);
}
#[test]
fn test_account_confirmations() {
fn test_account_commitment() {
let GenesisConfigInfo {
genesis_config,
mint_keypair: alice,
@@ -680,75 +741,59 @@ mod tests {
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let bob = Keypair::new();
let mut rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore.clone());
let mut rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(
blockstore.clone(),
bank_forks.clone(),
);
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore.clone()),
));
let subscriptions = RpcSubscriptions::new(&exit, block_commitment_cache.clone());
let subscriptions =
RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache.clone());
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
rpc.account_subscribe(
session,
subscriber,
bob.pubkey().to_string(),
Some(CommitmentConfig::root()),
);
let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash);
bank_forks
.write()
.unwrap()
.get(0)
.get(1)
.unwrap()
.process_transaction(&tx)
.unwrap();
rpc.subscriptions.notify_subscribers(0, &bank_forks);
let mut cache_slot_info = CacheSlotInfo::default();
cache_slot_info.current_slot = 1;
rpc.subscriptions.notify_subscribers(cache_slot_info);
let bank0 = bank_forks.read().unwrap()[0].clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bank1 = bank_forks.read().unwrap()[1].clone();
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
let mut new_block_commitment = BlockCommitmentCache::new(
block_commitment,
0,
10,
bank1.clone(),
blockstore.clone(),
0,
);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
drop(w_block_commitment_cache);
rpc.subscriptions.notify_subscribers(1, &bank_forks);
let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2);
bank_forks.write().unwrap().insert(bank2);
let bank2 = bank_forks.read().unwrap()[2].clone();
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(2, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
let mut new_block_commitment =
BlockCommitmentCache::new(block_commitment, 0, 10, bank2, blockstore.clone(), 0);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
drop(w_block_commitment_cache);
rpc.subscriptions.notify_subscribers(2, &bank_forks);
let cache_slot_info = CacheSlotInfo {
current_slot: 2,
node_root: 1,
largest_confirmed_root: 1,
highest_confirmed_slot: 1,
};
rpc.subscriptions.notify_subscribers(cache_slot_info);
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 0 },
"context": { "slot": 1 },
"value": {
"owner": system_program::id().to_string(),
"lamports": 100,
@@ -769,7 +814,10 @@ mod tests {
fn test_slot_subscribe() {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
rpc.slot_subscribe(session, subscriber);
@@ -796,7 +844,10 @@ mod tests {
fn test_slot_unsubscribe() {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
rpc.slot_subscribe(session, subscriber);
@@ -825,4 +876,97 @@ mod tests {
.slot_unsubscribe(Some(session), SubscriptionId::Number(0))
.is_ok());
}
#[test]
#[serial]
fn test_vote_subscribe() {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore.clone()),
));
let validator_voting_keypairs: Vec<_> = (0..10)
.map(|_| ValidatorVoteKeypairs::new(Keypair::new(), Keypair::new(), Keypair::new()))
.collect();
let GenesisConfigInfo { genesis_config, .. } =
create_genesis_config_with_vote_accounts(10_000, &validator_voting_keypairs, 100);
let exit = Arc::new(AtomicBool::new(false));
let bank = Bank::new(&genesis_config);
let bank_forks = BankForks::new(0, bank);
let bank = bank_forks.get(0).unwrap().clone();
let bank_forks = Arc::new(RwLock::new(bank_forks));
// Setup RPC
let mut rpc =
RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks.clone());
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("voteNotification");
// Setup Subscriptions
let subscriptions =
RpcSubscriptions::new(&exit, bank_forks.clone(), block_commitment_cache.clone());
rpc.subscriptions = Arc::new(subscriptions);
rpc.vote_subscribe(session, subscriber);
// Create some voters at genesis
let vote_tracker = VoteTracker::new(&bank);
let (votes_sender, votes_receiver) = unbounded();
let (vote_tracker, validator_voting_keypairs) =
(Arc::new(vote_tracker), validator_voting_keypairs);
let vote_slots = vec![1, 2];
validator_voting_keypairs.iter().for_each(|keypairs| {
let node_keypair = &keypairs.node_keypair;
let vote_keypair = &keypairs.vote_keypair;
let vote_tx = vote_transaction::new_vote_transaction(
vote_slots.clone(),
Hash::default(),
Hash::default(),
node_keypair,
vote_keypair,
vote_keypair,
);
votes_sender.send(vec![vote_tx]).unwrap();
});
// Process votes and check they were notified.
ClusterInfoVoteListener::get_and_process_votes_for_tests(
&votes_receiver,
&vote_tracker,
0,
rpc.subscriptions.clone(),
)
.unwrap();
let (response, _) = robust_poll_or_panic(receiver);
assert_eq!(
response,
r#"{"jsonrpc":"2.0","method":"voteNotification","params":{"result":{"hash":"11111111111111111111111111111111","slots":[1,2],"timestamp":null},"subscription":0}}"#
);
}
#[test]
#[serial]
fn test_vote_unsubscribe() {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
let session = create_session();
let (subscriber, _id_receiver, _) = Subscriber::new_test("voteNotification");
rpc.vote_subscribe(session, subscriber);
let session = create_session();
assert!(rpc
.vote_unsubscribe(Some(session), SubscriptionId::Number(42))
.is_err());
let session = create_session();
assert!(rpc
.vote_unsubscribe(Some(session), SubscriptionId::Number(0))
.is_ok());
}
}

core/src/pubsub_service.rs

@@ -73,7 +73,13 @@ impl PubSubService {
mod tests {
use super::*;
use crate::commitment::BlockCommitmentCache;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_ledger::{
bank_forks::BankForks,
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
};
use solana_runtime::bank::Bank;
use std::{
net::{IpAddr, Ipv4Addr},
sync::RwLock,
@@ -85,8 +91,12 @@ mod tests {
let exit = Arc::new(AtomicBool::new(false));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks,
Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
)),

core/src/rpc_service.rs

@@ -1,8 +1,9 @@
//! The `rpc_service` module implements the Solana JSON RPC service.
use crate::{
cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*,
storage_stage::StorageState, validator::ValidatorExit,
cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*, rpc_health::*,
send_transaction_service::SendTransactionService, storage_stage::StorageState,
validator::ValidatorExit,
};
use jsonrpc_core::MetaIoHandler;
use jsonrpc_http_server::{
@@ -15,25 +16,22 @@ use solana_ledger::{
blockstore::Blockstore,
snapshot_utils,
};
use solana_sdk::{hash::Hash, pubkey::Pubkey};
use solana_sdk::{hash::Hash, native_token::lamports_to_sol, pubkey::Pubkey};
use std::{
collections::HashSet,
net::SocketAddr,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, Ordering},
sync::{mpsc::channel, Arc, RwLock},
thread::{self, Builder, JoinHandle},
};
use tokio::prelude::Future;
// If trusted validators are specified, consider this validator healthy if its latest account hash
// is no further behind than this distance from the latest trusted validator account hash
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;
pub struct JsonRpcService {
thread_hdl: JoinHandle<()>,
#[cfg(test)]
pub request_processor: Arc<RwLock<JsonRpcRequestProcessor>>, // Used only by test_rpc_new()...
pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()...
close_handle: Option<CloseHandle>,
}
@@ -42,24 +40,24 @@ struct RpcRequestMiddleware {
ledger_path: PathBuf,
snapshot_archive_path_regex: Regex,
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
bank_forks: Arc<RwLock<BankForks>>,
health: Arc<RpcHealth>,
}
impl RpcRequestMiddleware {
pub fn new(
ledger_path: PathBuf,
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<ClusterInfo>,
trusted_validators: Option<HashSet<Pubkey>>,
bank_forks: Arc<RwLock<BankForks>>,
health: Arc<RpcHealth>,
) -> Self {
Self {
ledger_path,
snapshot_archive_path_regex: Regex::new(r"/snapshot-\d+-[[:alnum:]]+\.tar\.bz2$")
.unwrap(),
snapshot_config,
cluster_info,
trusted_validators,
bank_forks,
health,
}
}
@@ -85,7 +83,7 @@ impl RpcRequestMiddleware {
.unwrap()
}
fn is_get_path(&self, path: &str) -> bool {
fn is_file_get_path(&self, path: &str) -> bool {
match path {
"/genesis.tar.bz2" => true,
_ => {
@@ -98,7 +96,7 @@ impl RpcRequestMiddleware {
}
}
fn get(&self, path: &str) -> RequestMiddlewareAction {
fn process_file_get(&self, path: &str) -> RequestMiddlewareAction {
let stem = path.split_at(1).1; // Drop leading '/' from path
let filename = {
match path {
@@ -130,58 +128,10 @@ impl RpcRequestMiddleware {
}
fn health_check(&self) -> &'static str {
let response = if let Some(trusted_validators) = &self.trusted_validators {
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
(
self.cluster_info
.get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| {
hashes
.iter()
.max_by(|a, b| a.0.cmp(&b.0))
.map(|slot_hash| slot_hash.0)
})
.flatten()
.unwrap_or(0),
trusted_validators
.iter()
.map(|trusted_validator| {
self.cluster_info
.get_accounts_hash_for_node(&trusted_validator, |hashes| {
hashes
.iter()
.max_by(|a, b| a.0.cmp(&b.0))
.map(|slot_hash| slot_hash.0)
})
.flatten()
.unwrap_or(0)
})
.max()
.unwrap_or(0),
)
};
// This validator is considered healthy if its latest account hash slot is within
// `HEALTH_CHECK_SLOT_DISTANCE` of the latest trusted validator's account hash slot
if latest_account_hash_slot > 0
&& latest_trusted_validator_account_hash_slot > 0
&& latest_account_hash_slot
> latest_trusted_validator_account_hash_slot
.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
{
"ok"
} else {
warn!(
"health check: me={}, latest trusted_validator={}",
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
);
"behind"
}
} else {
// No trusted validator point of reference available, so this validator is healthy
// because it's running
"ok"
let response = match self.health.check() {
RpcHealthStatus::Ok => "ok",
RpcHealthStatus::Behind => "behind",
};
info!("health check: {}", response);
response
}
@@ -217,8 +167,19 @@ impl RequestMiddleware for RpcRequestMiddleware {
};
}
}
if self.is_get_path(request.uri().path()) {
self.get(request.uri().path())
if let Some(result) = process_rest(&self.bank_forks, request.uri().path()) {
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
response: Box::new(jsonrpc_core::futures::future::ok(
hyper::Response::builder()
.status(hyper::StatusCode::OK)
.body(hyper::Body::from(result))
.unwrap(),
)),
}
} else if self.is_file_get_path(request.uri().path()) {
self.process_file_get(request.uri().path())
} else if request.uri().path() == "/health" {
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
@@ -238,6 +199,29 @@ impl RequestMiddleware for RpcRequestMiddleware {
}
}
fn process_rest(bank_forks: &Arc<RwLock<BankForks>>, path: &str) -> Option<String> {
match path {
"/v0/circulating-supply" => {
let r_bank_forks = bank_forks.read().unwrap();
let bank = r_bank_forks.root_bank();
let total_supply = bank.capitalization();
let non_circulating_supply =
crate::non_circulating_supply::calculate_non_circulating_supply(&bank).lamports;
Some(format!(
"{}",
lamports_to_sol(total_supply - non_circulating_supply)
))
}
"/v0/total-supply" => {
let r_bank_forks = bank_forks.read().unwrap();
let bank = r_bank_forks.root_bank();
let total_supply = bank.capitalization();
Some(format!("{}", lamports_to_sol(total_supply)))
}
_ => None,
}
}
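Both REST paths render lamports as a SOL-denominated decimal string; they differ only in whether the non-circulating portion is subtracted first. A worked sketch of the arithmetic with illustrative values (chosen to match the `test_process_rest_api` assertion below, where capitalization is 10_127 lamports and nothing is non-circulating):

use solana_sdk::native_token::lamports_to_sol;

fn render_supply(total_supply: u64, non_circulating: u64) -> String {
    format!("{}", lamports_to_sol(total_supply - non_circulating))
}

assert_eq!(render_supply(10_127, 0), "0.000010127"); // 10_127 / 10^9 SOL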
impl JsonRpcService {
#[allow(clippy::too_many_arguments)]
pub fn new(
@@ -253,17 +237,37 @@ impl JsonRpcService {
storage_state: StorageState,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
trusted_validators: Option<HashSet<Pubkey>>,
override_health_check: Arc<AtomicBool>,
) -> Self {
info!("rpc bound to {:?}", rpc_addr);
info!("rpc configuration: {:?}", config);
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
let health = Arc::new(RpcHealth::new(
cluster_info.clone(),
trusted_validators,
config.health_check_slot_distance,
override_health_check,
));
let exit_send_transaction_service = Arc::new(AtomicBool::new(false));
let send_transaction_service = Arc::new(SendTransactionService::new(
&cluster_info,
&bank_forks,
&exit_send_transaction_service,
));
let request_processor = JsonRpcRequestProcessor::new(
config,
bank_forks,
bank_forks.clone(),
block_commitment_cache,
blockstore,
storage_state,
validator_exit.clone(),
)));
health.clone(),
cluster_info,
genesis_hash,
send_transaction_service,
);
#[cfg(test)]
let test_request_processor = request_processor.clone();
@@ -281,16 +285,12 @@ impl JsonRpcService {
let request_middleware = RpcRequestMiddleware::new(
ledger_path,
snapshot_config,
cluster_info.clone(),
trusted_validators,
bank_forks.clone(),
health.clone(),
);
let server = ServerBuilder::with_meta_extractor(
io,
move |_req: &hyper::Request<hyper::Body>| Meta {
request_processor: request_processor.clone(),
cluster_info: cluster_info.clone(),
genesis_hash,
},
move |_req: &hyper::Request<hyper::Body>| request_processor.clone(),
)
.threads(num_cpus::get())
.cors(DomainsValidation::AllowOnly(vec![
@@ -313,6 +313,7 @@ impl JsonRpcService {
let server = server.unwrap();
close_handle_sender.send(server.close_handle()).unwrap();
server.wait();
exit_send_transaction_service.store(true, Ordering::Relaxed);
})
.unwrap();
@@ -346,7 +347,6 @@ impl JsonRpcService {
mod tests {
use super::*;
use crate::{
contact_info::ContactInfo,
crds_value::{CrdsData, CrdsValue, SnapshotHash},
rpc::tests::create_validator_exit,
};
@@ -356,8 +356,7 @@ mod tests {
};
use solana_runtime::bank::Bank;
use solana_sdk::signature::Signer;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::atomic::AtomicBool;
use std::net::{IpAddr, Ipv4Addr};
#[test]
fn test_rpc_new() {
@@ -369,7 +368,7 @@ mod tests {
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let bank = Bank::new(&genesis_config);
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let cluster_info = Arc::new(ClusterInfo::default());
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let rpc_addr = SocketAddr::new(
ip_addr,
@@ -394,6 +393,7 @@ mod tests {
StorageState::default(),
validator_exit,
None,
Arc::new(AtomicBool::new(false)),
);
let thread = rpc_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
@@ -402,8 +402,6 @@ mod tests {
10_000,
rpc_service
.request_processor
.read()
.unwrap()
.get_balance(Ok(mint_keypair.pubkey()), None)
.unwrap()
.value
@@ -412,11 +410,36 @@ mod tests {
rpc_service.join().unwrap();
}
#[test]
fn test_is_get_path() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
fn create_bank_forks() -> Arc<RwLock<BankForks>> {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)))
}
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
#[test]
fn test_process_rest_api() {
let bank_forks = create_bank_forks();
assert_eq!(None, process_rest(&bank_forks, "not-a-supported-rest-api"));
assert_eq!(
Some("0.000010127".to_string()),
process_rest(&bank_forks, "/v0/circulating-supply")
);
assert_eq!(
Some("0.000010127".to_string()),
process_rest(&bank_forks, "/v0/total-supply")
);
}
#[test]
fn test_is_file_get_path() {
let bank_forks = create_bank_forks();
let rrm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
bank_forks.clone(),
RpcHealth::stub(),
);
let rrm_with_snapshot_config = RpcRequestMiddleware::new(
PathBuf::from("/"),
Some(SnapshotConfig {
@@ -424,49 +447,57 @@ mod tests {
snapshot_package_output_path: PathBuf::from("/"),
snapshot_path: PathBuf::from("/"),
}),
cluster_info,
None,
bank_forks,
RpcHealth::stub(),
);
assert!(rrm.is_get_path("/genesis.tar.bz2"));
assert!(!rrm.is_get_path("genesis.tar.bz2"));
assert!(rrm.is_file_get_path("/genesis.tar.bz2"));
assert!(!rrm.is_file_get_path("genesis.tar.bz2"));
assert!(!rrm.is_get_path("/snapshot.tar.bz2")); // This is a redirect
assert!(!rrm.is_file_get_path("/snapshot.tar.bz2")); // This is a redirect
assert!(
!rrm.is_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2")
);
assert!(rrm_with_snapshot_config
.is_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"));
assert!(!rrm.is_file_get_path(
"/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
));
assert!(rrm_with_snapshot_config.is_file_get_path(
"/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
));
assert!(!rrm.is_get_path(
assert!(!rrm.is_file_get_path(
"/snapshot-notaslotnumber-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
));
assert!(!rrm.is_get_path("/"));
assert!(!rrm.is_get_path(".."));
assert!(!rrm.is_get_path("🎣"));
assert!(!rrm.is_file_get_path("/"));
assert!(!rrm.is_file_get_path(".."));
assert!(!rrm.is_file_get_path("🎣"));
}
#[test]
fn test_health_check_with_no_trusted_validators() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
let rm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
create_bank_forks(),
RpcHealth::stub(),
);
assert_eq!(rm.health_check(), "ok");
}
#[test]
fn test_health_check_with_trusted_validators() {
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let cluster_info = Arc::new(ClusterInfo::default());
let health_check_slot_distance = 123;
let override_health_check = Arc::new(AtomicBool::new(false));
let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];
let rm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
let health = Arc::new(RpcHealth::new(
cluster_info.clone(),
Some(trusted_validators.clone().into_iter().collect()),
);
health_check_slot_distance,
override_health_check.clone(),
));
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, create_bank_forks(), health);
// No account hashes for this node or any trusted validators == "behind"
assert_eq!(rm.health_check(), "behind");
@@ -474,6 +505,9 @@ mod tests {
// No account hashes for any trusted validators == "behind"
cluster_info.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]);
assert_eq!(rm.health_check(), "behind");
override_health_check.store(true, Ordering::Relaxed);
assert_eq!(rm.health_check(), "ok");
override_health_check.store(false, Ordering::Relaxed);
// This node is ahead of the trusted validators == "ok"
cluster_info
@@ -504,7 +538,7 @@ mod tests {
.insert(
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
trusted_validators[1].clone(),
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE - 1, Hash::default())],
vec![(1000 + health_check_slot_distance - 1, Hash::default())],
))),
1,
)
@@ -520,7 +554,7 @@ mod tests {
.insert(
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
trusted_validators[2].clone(),
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE, Hash::default())],
vec![(1000 + health_check_slot_distance, Hash::default())],
))),
1,
)

File diff suppressed because it is too large

core/src/send_transaction_service.rs Normal file

@@ -0,0 +1,377 @@
use crate::cluster_info::ClusterInfo;
use solana_ledger::bank_forks::BankForks;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::bank::Bank;
use solana_sdk::{clock::Slot, signature::Signature};
use std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, Mutex, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
/// Maximum size of the transaction queue
const MAX_TRANSACTION_QUEUE_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day
pub struct SendTransactionService {
thread: JoinHandle<()>,
sender: Mutex<Sender<TransactionInfo>>,
send_socket: UdpSocket,
tpu_address: SocketAddr,
}
struct TransactionInfo {
signature: Signature,
wire_transaction: Vec<u8>,
last_valid_slot: Slot,
}
#[derive(Default, Debug, PartialEq)]
struct ProcessTransactionsResult {
rooted: u64,
expired: u64,
retried: u64,
failed: u64,
retained: u64,
}
impl SendTransactionService {
pub fn new(
cluster_info: &Arc<ClusterInfo>,
bank_forks: &Arc<RwLock<BankForks>>,
exit: &Arc<AtomicBool>,
) -> Self {
let (sender, receiver) = channel::<TransactionInfo>();
let tpu_address = cluster_info.my_contact_info().tpu;
let thread = Self::retry_thread(receiver, bank_forks.clone(), tpu_address, exit.clone());
Self {
thread,
sender: Mutex::new(sender),
send_socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
tpu_address,
}
}
fn retry_thread(
receiver: Receiver<TransactionInfo>,
bank_forks: Arc<RwLock<BankForks>>,
tpu_address: SocketAddr,
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
let mut last_status_check = Instant::now();
let mut transactions = HashMap::new();
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
Builder::new()
.name("send-tx-svc".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
if let Ok(transaction_info) = receiver.recv_timeout(Duration::from_secs(1)) {
if transactions.len() < MAX_TRANSACTION_QUEUE_SIZE {
transactions.insert(transaction_info.signature, transaction_info);
} else {
datapoint_warn!("send_transaction_service-queue-overflow");
}
}
if Instant::now().duration_since(last_status_check).as_secs() >= 5 {
if !transactions.is_empty() {
datapoint_info!(
"send_transaction_service-queue-size",
("len", transactions.len(), i64)
);
let bank_forks = bank_forks.read().unwrap();
let root_bank = bank_forks.root_bank();
let working_bank = bank_forks.working_bank();
let _result = Self::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
}
last_status_check = Instant::now();
}
})
.unwrap()
}
fn process_transactions(
working_bank: &Arc<Bank>,
root_bank: &Arc<Bank>,
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
transactions: &mut HashMap<Signature, TransactionInfo>,
) -> ProcessTransactionsResult {
let mut result = ProcessTransactionsResult::default();
transactions.retain(|signature, transaction_info| {
if root_bank.has_signature(signature) {
info!("Transaction is rooted: {}", signature);
result.rooted += 1;
inc_new_counter_info!("send_transaction_service-rooted", 1);
false
} else if transaction_info.last_valid_slot < root_bank.slot() {
info!("Dropping expired transaction: {}", signature);
result.expired += 1;
inc_new_counter_info!("send_transaction_service-expired", 1);
false
} else {
match working_bank.get_signature_status_slot(signature) {
None => {
// Transaction is unknown to the working bank; it might have been
// dropped or landed in another fork. Re-send it
info!("Retrying transaction: {}", signature);
result.retried += 1;
inc_new_counter_info!("send_transaction_service-retry", 1);
Self::send_transaction(
&send_socket,
&tpu_address,
&transaction_info.wire_transaction,
);
true
}
Some((_slot, status)) => {
if status.is_err() {
info!("Dropping failed transaction: {}", signature);
result.failed += 1;
inc_new_counter_info!("send_transaction_service-failed", 1);
false
} else {
result.retained += 1;
true
}
}
}
}
});
result
}
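The retain closure is effectively a five-way classifier, checked in order: rooted and expired entries are dropped, unknown signatures are re-sent and kept, failed ones are dropped, and still-pending ones are kept. A tiny sketch of which outcomes survive in the queue (illustrative enum, not part of the file):

#[derive(Debug)]
enum Disposition {
    Rooted,
    Expired,
    Retried,
    Failed,
    Retained,
}

// Only re-sent (Retried) and still-pending (Retained) entries stay queued.
fn keep(disposition: &Disposition) -> bool {
    matches!(disposition, Disposition::Retried | Disposition::Retained)
}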
fn send_transaction(
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
wire_transaction: &[u8],
) {
if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
}
}
pub fn send(&self, signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) {
inc_new_counter_info!("send_transaction_service-enqueue", 1, 1);
Self::send_transaction(&self.send_socket, &self.tpu_address, &wire_transaction);
self.sender
.lock()
.unwrap()
.send(TransactionInfo {
signature,
wire_transaction,
last_valid_slot,
})
.unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err));
}
pub fn join(self) -> thread::Result<()> {
self.thread.join()
}
}
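From the RPC side the service is fire-and-forget: `send()` pushes the wire bytes to the TPU address immediately and enqueues the signature for the retry thread, which re-evaluates the queue against the root and working banks every five seconds. A usage sketch, with `cluster_info`, `bank_forks`, `tx`, and `last_valid_slot` assumed in scope:

let exit = Arc::new(AtomicBool::new(false));
let service = SendTransactionService::new(&cluster_info, &bank_forks, &exit);
let wire_transaction = bincode::serialize(&tx).unwrap();
service.send(tx.signatures[0], wire_transaction, last_valid_slot);
// ...on shutdown:
exit.store(true, Ordering::Relaxed);
service.join().unwrap();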
#[cfg(test)]
mod test {
use super::*;
use crate::rpc::tests::new_bank_forks;
use solana_sdk::{pubkey::Pubkey, signature::Signer};
#[test]
fn service_exit() {
let cluster_info = Arc::new(ClusterInfo::default());
let bank_forks = new_bank_forks().0;
let exit = Arc::new(AtomicBool::new(false));
let send_transaction_service =
SendTransactionService::new(&cluster_info, &bank_forks, &exit);
exit.store(true, Ordering::Relaxed);
send_transaction_service.join().unwrap();
}
#[test]
fn process_transactions() {
solana_logger::setup();
let (bank_forks, mint_keypair, _voting_keypair) = new_bank_forks();
let cluster_info = ClusterInfo::default();
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let tpu_address = cluster_info.my_contact_info().tpu;
let root_bank = Arc::new(Bank::new_from_parent(
&bank_forks.read().unwrap().working_bank(),
&Pubkey::default(),
1,
));
let rooted_signature = root_bank
.transfer(1, &mint_keypair, &mint_keypair.pubkey())
.unwrap();
let working_bank = Arc::new(Bank::new_from_parent(&root_bank, &Pubkey::default(), 2));
let non_rooted_signature = working_bank
.transfer(2, &mint_keypair, &mint_keypair.pubkey())
.unwrap();
let failed_signature = {
let blockhash = working_bank.last_blockhash();
let transaction = solana_sdk::system_transaction::transfer(
&mint_keypair,
&Pubkey::default(),
1,
blockhash,
);
let signature = transaction.signatures[0];
working_bank.process_transaction(&transaction).unwrap_err();
signature
};
let mut transactions = HashMap::new();
info!("Expired transactions are dropped..");
transactions.insert(
Signature::default(),
TransactionInfo {
signature: Signature::default(),
wire_transaction: vec![],
last_valid_slot: root_bank.slot() - 1,
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
expired: 1,
..ProcessTransactionsResult::default()
}
);
info!("Rooted transactions are dropped...");
transactions.insert(
rooted_signature,
TransactionInfo {
signature: rooted_signature,
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
rooted: 1,
..ProcessTransactionsResult::default()
}
);
info!("Failed transactions are dropped...");
transactions.insert(
failed_signature,
TransactionInfo {
signature: failed_signature,
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
failed: 1,
..ProcessTransactionsResult::default()
}
);
info!("Non-rooted transactions are kept...");
transactions.insert(
non_rooted_signature,
TransactionInfo {
signature: non_rooted_signature,
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert_eq!(transactions.len(), 1);
assert_eq!(
result,
ProcessTransactionsResult {
retained: 1,
..ProcessTransactionsResult::default()
}
);
transactions.clear();
info!("Unknown transactions are retried...");
transactions.insert(
Signature::default(),
TransactionInfo {
signature: Signature::default(),
wire_transaction: vec![],
last_valid_slot: working_bank.slot(),
},
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
);
assert_eq!(transactions.len(), 1);
assert_eq!(
result,
ProcessTransactionsResult {
retried: 1,
..ProcessTransactionsResult::default()
}
);
}
}

core/src/serve_repair.rs

@@ -2,16 +2,20 @@ use crate::{
cluster_info::{ClusterInfo, ClusterInfoError},
cluster_slots::ClusterSlots,
contact_info::ContactInfo,
repair_response,
repair_service::RepairStats,
result::{Error, Result},
weighted_shuffle::weighted_best,
};
use bincode::serialize;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::{
blockstore::Blockstore,
shred::{Nonce, Shred},
};
use solana_measure::measure::Measure;
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug};
use solana_perf::packet::{limited_deserialize, Packet, Packets, PacketsRecycler};
use solana_perf::packet::{limited_deserialize, Packets, PacketsRecycler};
use solana_sdk::{
clock::Slot,
pubkey::Pubkey,
@@ -30,6 +34,7 @@ use std::{
/// the number of slots to respond with when responding to `Orphan` requests
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
pub const DEFAULT_NONCE: u32 = 42;
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
pub enum RepairType {
@@ -65,6 +70,9 @@ enum RepairProtocol {
WindowIndex(ContactInfo, u64, u64),
HighestWindowIndex(ContactInfo, u64, u64),
Orphan(ContactInfo, u64),
WindowIndexWithNonce(ContactInfo, u64, u64, Nonce),
HighestWindowIndexWithNonce(ContactInfo, u64, u64, Nonce),
OrphanWithNonce(ContactInfo, u64, Nonce),
}
#[derive(Clone)]
@@ -107,6 +115,9 @@ impl ServeRepair {
RepairProtocol::WindowIndex(ref from, _, _) => from,
RepairProtocol::HighestWindowIndex(ref from, _, _) => from,
RepairProtocol::Orphan(ref from, _) => from,
RepairProtocol::WindowIndexWithNonce(ref from, _, _, _) => from,
RepairProtocol::HighestWindowIndexWithNonce(ref from, _, _, _) => from,
RepairProtocol::OrphanWithNonce(ref from, _, _) => from,
}
}
@@ -141,6 +152,7 @@ impl ServeRepair {
&me.read().unwrap().my_info,
*slot,
*shred_index,
None,
),
"WindowIndex",
)
@@ -155,6 +167,7 @@ impl ServeRepair {
blockstore,
*slot,
*highest_index,
None,
),
"HighestWindowIndex",
)
@@ -168,10 +181,55 @@ impl ServeRepair {
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
None,
),
"Orphan",
)
}
RepairProtocol::WindowIndexWithNonce(_, slot, shred_index, nonce) => {
stats.window_index += 1;
(
Self::run_window_request(
recycler,
from,
&from_addr,
blockstore,
&me.read().unwrap().my_info,
*slot,
*shred_index,
Some(*nonce),
),
"WindowIndexWithNonce",
)
}
RepairProtocol::HighestWindowIndexWithNonce(_, slot, highest_index, nonce) => {
stats.highest_window_index += 1;
(
Self::run_highest_window_request(
recycler,
&from_addr,
blockstore,
*slot,
*highest_index,
Some(*nonce),
),
"HighestWindowIndexWithNonce",
)
}
RepairProtocol::OrphanWithNonce(_, slot, nonce) => {
stats.orphan += 1;
(
Self::run_orphan(
recycler,
&from_addr,
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
Some(*nonce),
),
"OrphanWithNonce",
)
}
}
};
@@ -331,20 +389,47 @@ impl ServeRepair {
});
}
fn window_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
let req = RepairProtocol::WindowIndex(self.my_info.clone(), slot, shred_index);
fn window_index_request_bytes(
&self,
slot: Slot,
shred_index: u64,
nonce: Option<Nonce>,
) -> Result<Vec<u8>> {
let req = if let Some(nonce) = nonce {
RepairProtocol::WindowIndexWithNonce(self.my_info.clone(), slot, shred_index, nonce)
} else {
RepairProtocol::WindowIndex(self.my_info.clone(), slot, shred_index)
};
let out = serialize(&req)?;
Ok(out)
}
fn window_highest_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
let req = RepairProtocol::HighestWindowIndex(self.my_info.clone(), slot, shred_index);
fn window_highest_index_request_bytes(
&self,
slot: Slot,
shred_index: u64,
nonce: Option<Nonce>,
) -> Result<Vec<u8>> {
let req = if let Some(nonce) = nonce {
RepairProtocol::HighestWindowIndexWithNonce(
self.my_info.clone(),
slot,
shred_index,
nonce,
)
} else {
RepairProtocol::HighestWindowIndex(self.my_info.clone(), slot, shred_index)
};
let out = serialize(&req)?;
Ok(out)
}
fn orphan_bytes(&self, slot: Slot) -> Result<Vec<u8>> {
let req = RepairProtocol::Orphan(self.my_info.clone(), slot);
fn orphan_bytes(&self, slot: Slot, nonce: Option<Nonce>) -> Result<Vec<u8>> {
let req = if let Some(nonce) = nonce {
RepairProtocol::OrphanWithNonce(self.my_info.clone(), slot, nonce)
} else {
RepairProtocol::Orphan(self.my_info.clone(), slot)
};
let out = serialize(&req)?;
Ok(out)
}
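The new `*WithNonce` variants are appended after the existing ones, so the bincode variant tags of the legacy requests are unchanged and older peers keep decoding them. A toy round trip illustrating the idea (a stand-in enum, not the real `RepairProtocol`; assumes the `serde` and `bincode` crates, which this file already uses):

```rust
use serde::{Deserialize, Serialize};

type Nonce = u32;

// Stand-in for RepairProtocol: appending variants preserves the wire tags
// bincode assigns to the pre-existing ones.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum Request {
    Orphan(u64),                 // legacy
    OrphanWithNonce(u64, Nonce), // new, appended after the legacy variants
}

fn main() {
    let legacy = bincode::serialize(&Request::Orphan(7)).unwrap();
    let nonced = bincode::serialize(&Request::OrphanWithNonce(7, 42)).unwrap();
    assert_eq!(
        bincode::deserialize::<Request>(&legacy).unwrap(),
        Request::Orphan(7)
    );
    assert!(nonced.len() > legacy.len()); // the nonce adds trailing bytes
}
```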
@@ -352,58 +437,55 @@ impl ServeRepair {
pub fn repair_request(
&self,
cluster_slots: &ClusterSlots,
repair_request: &RepairType,
repair_request: RepairType,
cache: &mut RepairCache,
repair_stats: &mut RepairStats,
) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication and has the desired slot, as indicated
// by a valid tvu port location
if cache.get(&repair_request.slot()).is_none() {
let repair_peers: Vec<_> = self.cluster_info.repair_peers(repair_request.slot());
let slot = repair_request.slot();
if cache.get(&slot).is_none() {
let repair_peers: Vec<_> = self.cluster_info.repair_peers(slot);
if repair_peers.is_empty() {
return Err(ClusterInfoError::NoPeers.into());
}
let weights = cluster_slots.compute_weights(repair_request.slot(), &repair_peers);
cache.insert(repair_request.slot(), (repair_peers, weights));
let weights = cluster_slots.compute_weights(slot, &repair_peers);
cache.insert(slot, (repair_peers, weights));
}
let (repair_peers, weights) = cache.get(&repair_request.slot()).unwrap();
let (repair_peers, weights) = cache.get(&slot).unwrap();
let n = weighted_best(&weights, Pubkey::new_rand().to_bytes());
let addr = repair_peers[n].serve_repair; // send the request to the peer's serve_repair port
let out = self.map_repair_request(repair_request, repair_stats)?;
let nonce = if Shred::is_nonce_unlocked(slot) {
Some(DEFAULT_NONCE)
} else {
None
};
let out = self.map_repair_request(&repair_request, repair_stats, nonce)?;
Ok((addr, out))
}
pub fn repair_request_duplicate_compute_best_peer(
&self,
slot: Slot,
cluster_slots: &ClusterSlots,
) -> Result<SocketAddr> {
let repair_peers: Vec<_> = self.cluster_info.repair_peers(slot);
if repair_peers.is_empty() {
return Err(ClusterInfoError::NoPeers.into());
}
let weights = cluster_slots.compute_weights_exclude_noncomplete(slot, &repair_peers);
let n = weighted_best(&weights, Pubkey::new_rand().to_bytes());
Ok(repair_peers[n].serve_repair)
}
pub fn map_repair_request(
&self,
repair_request: &RepairType,
repair_stats: &mut RepairStats,
nonce: Option<Nonce>,
) -> Result<Vec<u8>> {
let slot = repair_request.slot();
if Shred::is_nonce_unlocked(slot) {
assert!(nonce.is_some());
}
match repair_request {
RepairType::Shred(slot, shred_index) => {
repair_stats.shred.update(*slot);
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
Ok(self.window_index_request_bytes(*slot, *shred_index, nonce)?)
}
RepairType::HighestShred(slot, shred_index) => {
repair_stats.highest_shred.update(*slot);
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
Ok(self.window_highest_index_request_bytes(*slot, *shred_index, nonce)?)
}
RepairType::Orphan(slot) => {
repair_stats.orphan.update(*slot);
Ok(self.orphan_bytes(*slot)?)
Ok(self.orphan_bytes(*slot, nonce)?)
}
}
}
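`repair_request` only attaches a nonce once the slot has crossed the activation boundary, and `map_repair_request` asserts that invariant. A sketch of the gate, with an illustrative boundary constant (the real `Shred::is_nonce_unlocked` and `UNLOCK_NONCE_SLOT` live in the ledger crate):

```rust
type Slot = u64;
type Nonce = u32;

const DEFAULT_NONCE: Nonce = 42;
const UNLOCK_NONCE_SLOT: Slot = 1_000_000; // illustrative value, not the real constant

// Mirrors the gating in repair_request: slots at or below the boundary get
// the legacy (nonce-less) request; later slots get Some(DEFAULT_NONCE).
fn nonce_for(slot: Slot) -> Option<Nonce> {
    if slot > UNLOCK_NONCE_SLOT {
        Some(DEFAULT_NONCE)
    } else {
        None
    }
}
```

This matches the tests below, where a request at exactly `UNLOCK_NONCE_SLOT` still takes the nonce-less path.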
@@ -416,12 +498,19 @@ impl ServeRepair {
me: &ContactInfo,
slot: Slot,
shred_index: u64,
nonce: Option<Nonce>,
) -> Option<Packets> {
if let Some(blockstore) = blockstore {
// Try to find the requested index in one of the slots
let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
let packet = repair_response::repair_response_packet(
blockstore,
slot,
shred_index,
from_addr,
nonce,
);
if let Ok(Some(packet)) = packet {
if let Some(packet) = packet {
inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
return Some(Packets::new_with_recycler_data(
recycler,
@@ -449,15 +538,20 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
highest_index: u64,
nonce: Option<Nonce>,
) -> Option<Packets> {
let blockstore = blockstore?;
// Try to find the requested index in one of the slots
let meta = blockstore.meta(slot).ok()??;
if meta.received > highest_index {
// meta.received must be at least 1 by this point
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
.ok()??;
let packet = repair_response::repair_response_packet(
blockstore,
slot,
meta.received - 1,
from_addr,
nonce,
)?;
return Some(Packets::new_with_recycler_data(
recycler,
"run_highest_window_request",
@@ -473,6 +567,7 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
mut slot: Slot,
max_responses: usize,
nonce: Option<Nonce>,
) -> Option<Packets> {
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blockstore) = blockstore {
@@ -481,10 +576,22 @@ impl ServeRepair {
if meta.received == 0 {
break;
}
let packet =
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
if let Ok(Some(packet)) = packet {
let nonce = if Shred::is_nonce_unlocked(slot) {
nonce
} else {
None
};
let packet = repair_response::repair_response_packet(
blockstore,
slot,
meta.received - 1,
from_addr,
nonce,
);
if let Some(packet) = packet {
res.packets.push(packet);
} else {
break;
}
if meta.is_parent_set() && res.packets.len() <= max_responses {
slot = meta.parent_slot;
@@ -498,41 +605,31 @@ impl ServeRepair {
}
Some(res)
}
fn get_data_shred_as_packet(
blockstore: &Arc<Blockstore>,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
) -> Result<Option<Packet>> {
let data = blockstore.get_data_shred(slot, shred_index)?;
Ok(data.map(|data| {
let mut packet = Packet::default();
packet.meta.size = data.len();
packet.meta.set_addr(dest);
packet.data.copy_from_slice(&data);
packet
}))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::result::Error;
use crate::{repair_response, result::Error};
use solana_ledger::get_tmp_ledger_path;
use solana_ledger::{
blockstore::make_many_slot_entries,
blockstore_processor::fill_blockstore_slot_with_ticks,
shred::{
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
NONCE_SHRED_PAYLOAD_SIZE, UNLOCK_NONCE_SLOT,
},
};
use solana_sdk::{hash::Hash, pubkey::Pubkey, timing::timestamp};
/// test window requests respond with the right shred, and do not overrun
#[test]
fn run_highest_window_request() {
fn test_run_highest_window_request() {
run_highest_window_request(UNLOCK_NONCE_SLOT + 3, 3, Some(9));
run_highest_window_request(UNLOCK_NONCE_SLOT, 3, None);
}
/// test run_highest_window_request responds with the right shred, and does not overrun
fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Option<Nonce>) {
let recycler = PacketsRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
@@ -544,41 +641,51 @@ mod tests {
Some(&blockstore),
0,
0,
nonce,
);
assert!(rv.is_none());
let _ = fill_blockstore_slot_with_ticks(
&blockstore,
max_ticks_per_n_shreds(1) + 1,
2,
1,
max_ticks_per_n_shreds(1, None) + 1,
slot,
slot - num_slots + 1,
Hash::default(),
);
let index = 1;
let rv = ServeRepair::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
2,
1,
);
slot,
index,
nonce,
)
.expect("packets");
let rv: Vec<Shred> = rv
.expect("packets")
.packets
.into_iter()
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.filter_map(|b| {
if nonce.is_some() {
assert_eq!(repair_response::nonce(&b.data[..]), nonce);
}
Shred::new_from_serialized_shred(b.data.to_vec()).ok()
})
.collect();
assert!(!rv.is_empty());
let index = blockstore.meta(2).unwrap().unwrap().received - 1;
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
assert_eq!(rv[0].index(), index as u32);
assert_eq!(rv[0].slot(), 2);
assert_eq!(rv[0].slot(), slot);
let rv = ServeRepair::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
2,
slot,
index + 1,
nonce,
);
assert!(rv.is_none());
}
@@ -586,9 +693,14 @@ mod tests {
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
/// test window requests respond with the right shred, and do not overrun
#[test]
fn run_window_request() {
fn test_run_window_request() {
run_window_request(UNLOCK_NONCE_SLOT + 1, Some(9));
run_window_request(UNLOCK_NONCE_SLOT - 3, None);
}
/// test window requests respond with the right shred, and do not overrun
fn run_window_request(slot: Slot, nonce: Option<Nonce>) {
let recycler = PacketsRecycler::default();
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
@@ -615,12 +727,13 @@ mod tests {
&socketaddr_any!(),
Some(&blockstore),
&me,
slot,
0,
0,
nonce,
);
assert!(rv.is_none());
let mut common_header = ShredCommonHeader::default();
common_header.slot = 2;
common_header.slot = slot;
common_header.index = 1;
let mut data_header = DataShredHeader::default();
data_header.parent_offset = 1;
@@ -628,30 +741,37 @@ mod tests {
common_header,
data_header,
CodingShredHeader::default(),
NONCE_SHRED_PAYLOAD_SIZE,
);
blockstore
.insert_shreds(vec![shred_info], None, false)
.expect("Expect successful ledger write");
let index = 1;
let rv = ServeRepair::run_window_request(
&recycler,
&me,
&socketaddr_any!(),
Some(&blockstore),
&me,
2,
1,
);
assert!(!rv.is_none());
slot,
index,
nonce,
)
.expect("packets");
let rv: Vec<Shred> = rv
.expect("packets")
.packets
.into_iter()
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.filter_map(|b| {
if nonce.is_some() {
assert_eq!(repair_response::nonce(&b.data[..]), nonce);
}
Shred::new_from_serialized_shred(b.data.to_vec()).ok()
})
.collect();
assert_eq!(rv[0].index(), 1);
assert_eq!(rv[0].slot(), 2);
assert_eq!(rv[0].slot(), slot);
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
@@ -665,7 +785,7 @@ mod tests {
let serve_repair = ServeRepair::new(cluster_info.clone());
let rv = serve_repair.repair_request(
&cluster_slots,
&RepairType::Shred(0, 0),
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
);
@@ -691,7 +811,7 @@ mod tests {
let rv = serve_repair
.repair_request(
&cluster_slots,
&RepairType::Shred(0, 0),
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
)
@@ -723,7 +843,7 @@ mod tests {
let rv = serve_repair
.repair_request(
&cluster_slots,
&RepairType::Shred(0, 0),
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
)
@@ -739,52 +859,94 @@ mod tests {
}
#[test]
fn run_orphan() {
fn test_run_orphan() {
run_orphan(UNLOCK_NONCE_SLOT + 1, 3, Some(9));
// Test where some of the responses will be for slots <= UNLOCK_NONCE_SLOT,
// and some will be for slots > UNLOCK_NONCE_SLOT. Should not panic.
run_orphan(UNLOCK_NONCE_SLOT, 3, None);
run_orphan(UNLOCK_NONCE_SLOT, 3, Some(9));
// Giving no nonce after UNLOCK_NONCE_SLOT should return empty
run_orphan(UNLOCK_NONCE_SLOT + 1, 3, None);
}
fn run_orphan(slot: Slot, num_slots: u64, nonce: Option<Nonce>) {
solana_logger::setup();
let recycler = PacketsRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
let rv = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot,
0,
nonce,
);
assert!(rv.is_none());
// Create slots 1, 2, 3 with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(1, 3, 5);
// Create slots [slot, slot + num_slots) with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(slot, num_slots, 5);
blockstore
.insert_shreds(shreds, None, false)
.expect("Expect successful ledger write");
// We don't have slot 4, so we don't know how to service this request
let rv =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
// We don't have slot `slot + num_slots`, so we don't know how to service this request
let rv = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot + num_slots,
5,
nonce,
);
assert!(rv.is_none());
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
// for this request
let rv: Vec<_> =
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
// For an orphan request for `slot + num_slots - 1`, we should return the highest shreds
// from slots in the range [slot, slot + num_slots - 1]
let rv = ServeRepair::run_orphan(
&recycler,
&socketaddr_any!(),
Some(&blockstore),
slot + num_slots - 1,
5,
nonce,
);
if Shred::is_nonce_unlocked(slot + num_slots - 1) && nonce.is_none() {
// If a nonce is expected but not provided, there should be no
// response
assert!(rv.is_none());
} else {
// Verify responses
let rv: Vec<_> = rv
.expect("run_orphan packets")
.packets
.iter()
.map(|b| b.clone())
.collect();
let expected: Vec<_> = (1..=3)
.rev()
.map(|slot| {
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
ServeRepair::get_data_shred_as_packet(
&blockstore,
slot,
index,
&socketaddr_any!(),
)
.unwrap()
.unwrap()
})
.collect();
assert_eq!(rv, expected)
let expected: Vec<_> = (slot..slot + num_slots)
.rev()
.filter_map(|slot| {
let nonce = if Shred::is_nonce_unlocked(slot) {
nonce
} else {
None
};
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
repair_response::repair_response_packet(
&blockstore,
slot,
index,
&socketaddr_any!(),
nonce,
)
})
.collect();
assert_eq!(rv, expected);
}
}
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
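The orphan tests above rely on `run_orphan` walking parent links from the requested slot downward, emitting each slot's highest shred until it runs out of parents or hits the response cap. A toy model of that walk (hypothetical maps standing in for blockstore metadata):

```rust
use std::collections::HashMap;

// parents: slot -> parent_slot; highest: slot -> highest received shred index.
fn orphan_walk(
    parents: &HashMap<u64, u64>,
    highest: &HashMap<u64, u64>,
    mut slot: u64,
    max_responses: usize,
) -> Vec<(u64, u64)> {
    let mut responses = Vec::new();
    while let Some(&index) = highest.get(&slot) {
        responses.push((slot, index));
        match parents.get(&slot) {
            Some(&parent) if responses.len() <= max_responses => slot = parent,
            _ => break, // no known parent, or response cap reached
        }
    }
    responses
}
```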


@@ -22,12 +22,23 @@ use std::time::Instant;
pub type ShredsReceived = HashMap<Slot, BitVec<u64>>;
#[derive(Default)]
struct ShredFetchStats {
index_overrun: usize,
shred_count: usize,
index_bad_deserialize: usize,
index_out_of_bounds: usize,
slot_bad_deserialize: usize,
duplicate_shred: usize,
slot_out_of_range: usize,
}
pub struct ShredFetchStage {
thread_hdls: Vec<JoinHandle<()>>,
}
impl ShredFetchStage {
fn get_slot_index(p: &Packet, index_overrun: &mut usize) -> Option<(u64, u32)> {
fn get_slot_index(p: &Packet, stats: &mut ShredFetchStats) -> Option<(u64, u32)> {
let index_start = OFFSET_OF_SHRED_INDEX;
let index_end = index_start + SIZE_OF_SHRED_INDEX;
let slot_start = OFFSET_OF_SHRED_SLOT;
@@ -38,11 +49,17 @@ impl ShredFetchStage {
if index < MAX_DATA_SHREDS_PER_SLOT as u32 && slot_end <= p.meta.size {
if let Ok(slot) = limited_deserialize::<Slot>(&p.data[slot_start..slot_end]) {
return Some((slot, index));
} else {
stats.slot_bad_deserialize += 1;
}
} else {
stats.index_out_of_bounds += 1;
}
} else {
stats.index_bad_deserialize += 1;
}
} else {
*index_overrun += 1;
stats.index_overrun += 1;
}
None
}
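`get_slot_index` peeks at fixed offsets in the raw packet rather than deserializing a whole shred, bumping a distinct counter for each way the peek can fail. A sketch of such a bounds-checked peek, assuming a little-endian `u32` at a caller-supplied offset (the real `OFFSET_OF_*`/`SIZE_OF_*` constants come from the shred module):

```rust
use std::convert::TryInto;

// Bounds-checked fixed-offset read; returns None (letting the caller count
// the failure in its stats) instead of panicking on short packets.
fn peek_u32_le(data: &[u8], offset: usize) -> Option<u32> {
    let end = offset.checked_add(4)?;
    let bytes: [u8; 4] = data.get(offset..end)?.try_into().ok()?;
    Some(u32::from_le_bytes(bytes))
}
```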
@@ -50,7 +67,7 @@ impl ShredFetchStage {
fn process_packet<F>(
p: &mut Packet,
shreds_received: &mut ShredsReceived,
index_overrun: &mut usize,
stats: &mut ShredFetchStats,
last_root: Slot,
last_slot: Slot,
slots_per_epoch: u64,
@@ -59,7 +76,7 @@ impl ShredFetchStage {
F: Fn(&mut Packet),
{
p.meta.discard = true;
if let Some((slot, index)) = Self::get_slot_index(p, index_overrun) {
if let Some((slot, index)) = Self::get_slot_index(p, stats) {
// Seems reasonable to limit shreds to 2 epochs away
if slot > last_root && slot < (last_slot + 2 * slots_per_epoch) {
// Shred filter
@@ -70,7 +87,11 @@ impl ShredFetchStage {
p.meta.discard = false;
modify(p);
slot_received.set(index.into(), true);
} else {
stats.duplicate_shred += 1;
}
} else {
stats.slot_out_of_range += 1;
}
}
}
@@ -80,6 +101,7 @@ impl ShredFetchStage {
recvr: PacketReceiver,
sendr: PacketSender,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
modify: F,
) where
F: Fn(&mut Packet),
@@ -92,6 +114,9 @@ impl ShredFetchStage {
let mut last_slot = std::u64::MAX;
let mut slots_per_epoch = 0;
let mut last_stats = Instant::now();
let mut stats = ShredFetchStats::default();
while let Some(mut p) = recvr.iter().next() {
if last_cleared.elapsed().as_millis() > 200 {
shreds_received.clear();
@@ -105,22 +130,32 @@ impl ShredFetchStage {
slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch());
}
}
let mut index_overrun = 0;
let mut shred_count = 0;
stats.shred_count += p.packets.len();
p.packets.iter_mut().for_each(|mut packet| {
shred_count += 1;
Self::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
&modify,
);
});
inc_new_counter_warn!("shred_fetch_stage-shred_index_overrun", index_overrun);
inc_new_counter_info!("shred_fetch_stage-shred_count", shred_count);
if last_stats.elapsed().as_millis() > 1000 {
datapoint_info!(
name,
("index_overrun", stats.index_overrun, i64),
("shred_count", stats.shred_count, i64),
("slot_bad_deserialize", stats.slot_bad_deserialize, i64),
("index_bad_deserialize", stats.index_bad_deserialize, i64),
("index_out_of_bounds", stats.index_out_of_bounds, i64),
("slot_out_of_range", stats.slot_out_of_range, i64),
("duplicate_shred", stats.duplicate_shred, i64),
);
stats = ShredFetchStats::default();
last_stats = Instant::now();
}
if sendr.send(p).is_err() {
break;
}
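The per-packet counters replace the old `inc_new_counter_*` calls with a single datapoint flushed about once a second, after which the struct is zeroed so each datapoint covers a disjoint window. The cadence in isolation (`println!` standing in for `datapoint_info!`):

```rust
use std::time::{Duration, Instant};

#[derive(Default)]
struct Stats {
    shred_count: usize,
    index_overrun: usize,
}

// Emit one datapoint per ~1s window, then reset the counters so windows
// never overlap.
fn maybe_report(stats: &mut Stats, last_stats: &mut Instant) {
    if last_stats.elapsed() > Duration::from_millis(1000) {
        println!("shreds={} overruns={}", stats.shred_count, stats.index_overrun);
        *stats = Stats::default();
        *last_stats = Instant::now();
    }
}
```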
@@ -133,6 +168,7 @@ impl ShredFetchStage {
sender: PacketSender,
recycler: Recycler<PinnedVec<Packet>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
name: &'static str,
modify: F,
) -> (Vec<JoinHandle<()>>, JoinHandle<()>)
where
@@ -154,7 +190,7 @@ impl ShredFetchStage {
let modifier_hdl = Builder::new()
.name("solana-tvu-fetch-stage-packet-modifier".to_string())
.spawn(move || Self::modify_packets(packet_receiver, sender, bank_forks, modify))
.spawn(move || Self::modify_packets(packet_receiver, sender, bank_forks, name, modify))
.unwrap();
(streamers, modifier_hdl)
}
@@ -185,6 +221,7 @@ impl ShredFetchStage {
sender.clone(),
recycler.clone(),
bank_forks.clone(),
"shred_fetch_tvu_forwards",
|p| p.meta.forward = true,
);
@@ -194,6 +231,7 @@ impl ShredFetchStage {
sender.clone(),
recycler.clone(),
bank_forks,
"shred_fetch_repair",
|p| p.meta.repair = true,
);
@@ -225,7 +263,7 @@ mod tests {
solana_logger::setup();
let mut shreds_received = ShredsReceived::default();
let mut packet = Packet::default();
let mut index_overrun = 0;
let mut stats = ShredFetchStats::default();
let last_root = 0;
let last_slot = 100;
let slots_per_epoch = 10;
@@ -233,13 +271,13 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
&|_p| {},
);
assert_eq!(index_overrun, 1);
assert_eq!(stats.index_overrun, 1);
assert!(packet.meta.discard);
let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
shred.copy_to_packet(&mut packet);
@@ -248,7 +286,7 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
3,
last_slot,
slots_per_epoch,
@@ -260,7 +298,7 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
@@ -272,7 +310,7 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
@@ -287,7 +325,7 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
@@ -301,7 +339,7 @@ mod tests {
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
&mut stats,
last_root,
last_slot,
slots_per_epoch,
@@ -315,10 +353,10 @@ mod tests {
let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
let mut packet = Packet::default();
shred.copy_to_packet(&mut packet);
let mut index_overrun = 0;
let mut stats = ShredFetchStats::default();
assert_eq!(
Some((1, 3)),
ShredFetchStage::get_slot_index(&packet, &mut index_overrun)
ShredFetchStage::get_slot_index(&packet, &mut stats)
);
}
}


@@ -8,6 +8,7 @@ use crate::{
cluster_info_vote_listener::{ClusterInfoVoteListener, VoteTracker},
fetch_stage::FetchStage,
poh_recorder::{PohRecorder, WorkingBankEntry},
rpc_subscriptions::RpcSubscriptions,
sigverify::TransactionSigVerifier,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
};
@@ -44,6 +45,7 @@ impl Tpu {
tpu_forwards_sockets: Vec<UdpSocket>,
broadcast_sockets: Vec<UdpSocket>,
sigverify_disabled: bool,
subscriptions: &Arc<RpcSubscriptions>,
transaction_status_sender: Option<TransactionStatusSender>,
blockstore: &Arc<Blockstore>,
broadcast_type: &BroadcastStageType,
@@ -79,6 +81,7 @@ impl Tpu {
&poh_recorder,
vote_tracker,
bank_forks,
subscriptions.clone(),
);
let banking_stage = BankingStage::new(


@@ -3,6 +3,7 @@ use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionSta
use solana_runtime::{
bank::{Bank, HashAgeKind},
nonce_utils,
transaction_utils::OrderedIterator,
};
use solana_transaction_status::TransactionStatusMeta;
use std::{
@@ -50,25 +51,39 @@ impl TransactionStatusService {
let TransactionStatusBatch {
bank,
transactions,
iteration_order,
statuses,
balances,
} = write_transaction_status_receiver.recv_timeout(Duration::from_secs(1))?;
let slot = bank.slot();
for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in transactions
.iter()
.zip(statuses)
.zip(balances.pre_balances)
.zip(balances.post_balances)
for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in
OrderedIterator::new(&transactions, iteration_order.as_deref())
.zip(statuses)
.zip(balances.pre_balances)
.zip(balances.post_balances)
{
if Bank::can_commit(&status) && !transaction.signatures.is_empty() {
let fee_calculator = match hash_age_kind {
let (fee_calculator, hash_kind) = match hash_age_kind.clone() {
Some(HashAgeKind::DurableNonce(_, account)) => {
nonce_utils::fee_calculator_of(&account)
info!("nonce_account: {:?}", account);
(nonce_utils::fee_calculator_of(&account), "durable_nonce")
}
_ => bank.get_fee_calculator(&transaction.message().recent_blockhash),
_ => (
bank.get_fee_calculator(&transaction.message().recent_blockhash),
"recent_blockhash",
),
};
if fee_calculator.is_none() {
error!(
"{:?} {:?} fee_calculator: {:?}",
transaction.signatures[0],
hash_kind,
fee_calculator.is_some()
);
info!("{:?}", status);
}
.expect("FeeCalculator must exist");
let fee_calculator = fee_calculator.expect("FeeCalculator must exist");
let fee = fee_calculator.calculate_fee(transaction.message());
let (writable_keys, readonly_keys) =
transaction.message.get_account_keys_by_lock_type();
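`OrderedIterator` lets the status writer visit transactions in the order they were actually processed (when `iteration_order` is present) while still zipping positionally with `statuses` and the balance vectors. A minimal sketch of the idea (a hypothetical free function, not the real `transaction_utils` API):

```rust
// Yield items in the order given by `order` when present, otherwise in
// natural slice order.
fn ordered<'a, T>(
    items: &'a [T],
    order: Option<&'a [usize]>,
) -> Box<dyn Iterator<Item = &'a T> + 'a> {
    match order {
        Some(order) => Box::new(order.iter().map(move |&i| &items[i])),
        None => Box::new(items.iter()),
    }
}

fn main() {
    let txs = ["a", "b", "c"];
    let reordered: Vec<_> = ordered(&txs, Some(&[2, 0, 1])).collect();
    assert_eq!(reordered, [&"c", &"a", &"b"]);
}
```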


@@ -143,7 +143,6 @@ impl Tvu {
};
let cluster_slots = Arc::new(ClusterSlots::default());
let (duplicate_slots_reset_sender, duplicate_slots_reset_receiver) = unbounded();
let retransmit_stage = RetransmitStage::new(
bank_forks.clone(),
leader_schedule_cache,
@@ -158,7 +157,6 @@ impl Tvu {
cfg,
tvu_config.shred_version,
cluster_slots.clone(),
duplicate_slots_reset_sender,
);
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();
@@ -198,7 +196,6 @@ impl Tvu {
vote_tracker,
cluster_slots,
retransmit_slots_sender,
duplicate_slots_reset_receiver,
);
let ledger_cleanup_service = tvu_config.max_ledger_slots.map(|max_ledger_slots| {
@@ -262,6 +259,7 @@ pub mod tests {
use solana_runtime::bank::Bank;
use std::sync::atomic::Ordering;
#[ignore]
#[test]
#[serial]
fn test_tvu_exit() {
@@ -295,11 +293,12 @@ pub mod tests {
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let (retransmit_slots_sender, _retransmit_slots_receiver) = unbounded();
let bank_forks = Arc::new(RwLock::new(bank_forks));
let tvu = Tvu::new(
&vote_keypair.pubkey(),
vec![Arc::new(vote_keypair)],
&storage_keypair,
&Arc::new(RwLock::new(bank_forks)),
&bank_forks,
&cref1,
{
Sockets {
@@ -312,7 +311,11 @@ pub mod tests {
blockstore,
&StorageState::default(),
l_receiver,
&Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone())),
&Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
block_commitment_cache.clone(),
)),
&poh_recorder,
&leader_schedule_cache,
&exit,


@@ -30,7 +30,7 @@ use solana_ledger::{
blockstore::{Blockstore, CompletedSlotsReceiver},
blockstore_processor::{self, BankForksInfo},
create_new_tmp_ledger,
hardened_unpack::open_genesis_config,
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache,
};
@@ -81,6 +81,7 @@ pub struct ValidatorConfig {
pub accounts_hash_fault_injection_slots: u64, // 0 = no fault injection
pub frozen_accounts: Vec<Pubkey>,
pub no_rocksdb_compaction: bool,
pub max_genesis_archive_unpacked_size: u64,
}
impl Default for ValidatorConfig {
@@ -107,6 +108,7 @@ impl Default for ValidatorConfig {
accounts_hash_fault_injection_slots: 0,
frozen_accounts: vec![],
no_rocksdb_compaction: false,
max_genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
}
}
}
@@ -241,8 +243,13 @@ impl Validator {
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone()));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
block_commitment_cache.clone(),
));
let rpc_override_health_check = Arc::new(AtomicBool::new(false));
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
@@ -265,6 +272,7 @@ impl Validator {
storage_state.clone(),
validator_exit.clone(),
config.trusted_validators.clone(),
rpc_override_health_check.clone(),
),
PubSubService::new(
&subscriptions,
@@ -314,6 +322,13 @@ impl Validator {
);
if config.dev_halt_at_slot.is_some() {
// Simulate a confirmed root to avoid RPC errors with CommitmentConfig::max() and
// to ensure RPC endpoints like getConfirmedBlock, which require a confirmed root, work
block_commitment_cache
.write()
.unwrap()
.set_largest_confirmed_root(bank_forks.read().unwrap().root());
// Park with the RPC service running, ready for inspection!
warn!("Validator halted");
std::thread::park();
@@ -377,7 +392,7 @@ impl Validator {
(None, None)
};
wait_for_supermajority(config, &bank, &cluster_info);
wait_for_supermajority(config, &bank, &cluster_info, rpc_override_health_check);
let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
assert_eq!(
@@ -459,6 +474,7 @@ impl Validator {
node.sockets.tpu_forwards,
node.sockets.broadcast,
config.dev_sigverify_disabled,
&subscriptions,
transaction_status_sender,
&blockstore,
&config.broadcast_stage_type,
@@ -567,7 +583,8 @@ fn new_banks_from_blockstore(
LeaderScheduleCache,
Option<(Slot, Hash)>,
) {
let genesis_config = open_genesis_config(blockstore_path);
let genesis_config =
open_genesis_config(blockstore_path, config.max_genesis_archive_unpacked_size);
// This needs to be limited otherwise the state in the VoteAccount data
// grows too large
@@ -631,7 +648,12 @@ fn new_banks_from_blockstore(
)
}
fn wait_for_supermajority(config: &ValidatorConfig, bank: &Bank, cluster_info: &ClusterInfo) {
fn wait_for_supermajority(
config: &ValidatorConfig,
bank: &Bank,
cluster_info: &ClusterInfo,
rpc_override_health_check: Arc<AtomicBool>,
) {
if config.wait_for_supermajority != Some(bank.slot()) {
return;
}
@@ -646,8 +668,13 @@ fn wait_for_supermajority(config: &ValidatorConfig, bank: &Bank, cluster_info: &
if gossip_stake_percent >= 80 {
break;
}
// The normal RPC health checks don't apply as the node is waiting, so feign health to
// prevent load balancers from removing the node from their list of candidates during a
// manual restart.
rpc_override_health_check.store(true, Ordering::Relaxed);
sleep(Duration::new(1, 0));
}
rpc_override_health_check.store(false, Ordering::Relaxed);
}
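While the node idles in `wait_for_supermajority`, its RPC health handler would otherwise report it unhealthy and load balancers would drop it; the shared `AtomicBool` forces a healthy answer for the duration. A sketch of how such an override might be consulted (hypothetical handler shape, not the actual RPC health code):

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

// The override short-circuits the normal checks while the validator is
// deliberately parked waiting for supermajority.
fn health_response(override_flag: &Arc<AtomicBool>, checks_pass: bool) -> &'static str {
    if override_flag.load(Ordering::Relaxed) || checks_pass {
        "ok"
    } else {
        "behind"
    }
}
```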
pub struct TestValidator {


@@ -4,8 +4,10 @@
use crate::{
cluster_info::ClusterInfo,
cluster_slots::ClusterSlots,
repair_response,
repair_service::{RepairService, RepairStrategy},
result::{Error, Result},
serve_repair::DEFAULT_NONCE,
};
use crossbeam_channel::{
unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Sender as CrossbeamSender,
@@ -13,24 +15,25 @@ use crossbeam_channel::{
use rayon::iter::IntoParallelRefMutIterator;
use rayon::iter::ParallelIterator;
use rayon::ThreadPool;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blockstore::{
self, Blockstore, BlockstoreInsertionMetrics, MAX_DATA_SHREDS_PER_SLOT,
use solana_ledger::{
bank_forks::BankForks,
blockstore::{self, Blockstore, BlockstoreInsertionMetrics, MAX_DATA_SHREDS_PER_SLOT},
leader_schedule_cache::LeaderScheduleCache,
shred::{Nonce, Shred},
};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
use solana_perf::packet::Packets;
use solana_rayon_threadlimit::get_thread_count;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::duration_as_ms;
use solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms};
use solana_streamer::streamer::PacketSender;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::{Duration, Instant};
use std::{
net::{SocketAddr, UdpSocket},
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
if shred.is_data() {
@@ -107,8 +110,15 @@ fn run_check_duplicate(
Ok(())
}
fn verify_repair(_shred: &Shred, repair_info: &Option<RepairMeta>) -> bool {
repair_info
.as_ref()
.map(|repair_info| repair_info.nonce == DEFAULT_NONCE)
.unwrap_or(true)
}
fn run_insert<F>(
shred_receiver: &CrossbeamReceiver<Vec<Shred>>,
shred_receiver: &CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
handle_duplicate: F,
@@ -118,12 +128,16 @@ where
F: Fn(Shred) -> (),
{
let timer = Duration::from_millis(200);
let mut shreds = shred_receiver.recv_timeout(timer)?;
while let Ok(mut more_shreds) = shred_receiver.try_recv() {
shreds.append(&mut more_shreds)
let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?;
while let Ok((more_shreds, more_repair_infos)) = shred_receiver.try_recv() {
shreds.extend(more_shreds);
repair_infos.extend(more_repair_infos);
}
assert_eq!(shreds.len(), repair_infos.len());
// Drop shreds whose repair nonce failed verification; `i` tracks the
// position because `retain` doesn't expose an index.
let mut i = 0;
shreds.retain(|shred| (verify_repair(&shred, &repair_infos[i]), i += 1).0);
blockstore.insert_shreds_handle_duplicate(
shreds,
Some(leader_schedule_cache),
@@ -136,7 +150,7 @@ where
fn recv_window<F>(
blockstore: &Arc<Blockstore>,
insert_shred_sender: &CrossbeamSender<Vec<Shred>>,
insert_shred_sender: &CrossbeamSender<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
my_pubkey: &Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
retransmit: &PacketSender,
@@ -160,7 +174,7 @@ where
inc_new_counter_debug!("streamer-recv_window-recv", total_packets);
let last_root = blockstore.last_root();
let shreds: Vec<_> = thread_pool.install(|| {
let (shreds, repair_infos): (Vec<_>, Vec<_>) = thread_pool.install(|| {
packets
.par_iter_mut()
.flat_map(|packets| {
@@ -169,34 +183,59 @@ where
.iter_mut()
.filter_map(|packet| {
if packet.meta.discard {
inc_new_counter_debug!("streamer-recv_window-invalid_signature", 1);
inc_new_counter_debug!(
"streamer-recv_window-invalid_or_unnecessary_packet",
1
);
None
} else if let Ok(shred) =
Shred::new_from_serialized_shred(packet.data.to_vec())
{
if shred_filter(&shred, last_root) {
// Mark slot as dead if the current shred is on the boundary
// of max shreds per slot. However, let the current shred
// get retransmitted. It'll allow peer nodes to see this shred
// and trigger them to mark the slot as dead.
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
let _ = blockstore.set_dead_slot(shred.slot());
} else {
// shred fetch stage should be sending packets
// with sufficiently large buffers. Needed to ensure
// call to `new_from_serialized_shred` is safe.
assert_eq!(packet.data.len(), PACKET_DATA_SIZE);
let serialized_shred = packet.data.to_vec();
if let Ok(shred) = Shred::new_from_serialized_shred(serialized_shred) {
let repair_info = {
if packet.meta.repair && Shred::is_nonce_unlocked(shred.slot())
{
if let Some(nonce) = repair_response::nonce(&packet.data) {
let repair_info = RepairMeta {
_from_addr: packet.meta.addr(),
nonce,
};
Some(repair_info)
} else {
// If the nonce can't be parsed, discard the packet
return None;
}
} else {
None
}
};
if shred_filter(&shred, last_root) {
// Mark slot as dead if the current shred is on the boundary
// of max shreds per slot. However, let the current shred
// get retransmitted. It'll allow peer nodes to see this shred
// and trigger them to mark the slot as dead.
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
let _ = blockstore.set_dead_slot(shred.slot());
}
packet.meta.slot = shred.slot();
packet.meta.seed = shred.seed();
Some((shred, repair_info))
} else {
packet.meta.discard = true;
None
}
packet.meta.slot = shred.slot();
packet.meta.seed = shred.seed();
Some(shred)
} else {
packet.meta.discard = true;
None
}
} else {
packet.meta.discard = true;
None
}
})
.collect::<Vec<_>>()
})
.collect()
.unzip()
});
trace!("{:?} shreds from packets", shreds.len());
@@ -210,7 +249,7 @@ where
}
}
insert_shred_sender.send(shreds)?;
insert_shred_sender.send((shreds, repair_infos))?;
trace!(
"Elapsed processing time in recv_window(): {}",
@@ -220,6 +259,11 @@ where
Ok(())
}
struct RepairMeta {
_from_addr: SocketAddr,
nonce: Nonce,
}
// Implement a destructor for the window_service thread to signal it exited
// even on panics
struct Finalizer {
@@ -343,7 +387,7 @@ impl WindowService {
exit: &Arc<AtomicBool>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
insert_receiver: CrossbeamReceiver<Vec<Shred>>,
insert_receiver: CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
duplicate_sender: CrossbeamSender<Shred>,
) -> JoinHandle<()> {
let exit = exit.clone();
@@ -393,7 +437,7 @@ impl WindowService {
id: Pubkey,
exit: &Arc<AtomicBool>,
blockstore: &Arc<Blockstore>,
insert_sender: CrossbeamSender<Vec<Shred>>,
insert_sender: CrossbeamSender<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
shred_filter: F,
bank_forks: Option<Arc<RwLock<BankForks>>>,
@@ -495,13 +539,12 @@ mod test {
cluster_info::ClusterInfo, contact_info::ContactInfo, repair_service::RepairSlotRange,
};
use rand::thread_rng;
use solana_ledger::shred::DataShredHeader;
use solana_ledger::{
blockstore::{make_many_slot_entries, Blockstore},
entry::{create_ticks, Entry},
genesis_utils::create_genesis_config_with_leader,
get_tmp_ledger_path,
shred::Shredder,
shred::{DataShredHeader, Shredder, NONCE_SHRED_PAYLOAD_SIZE},
};
use solana_perf::packet::Packet;
use solana_sdk::{
@@ -573,8 +616,12 @@ mod test {
// If it's a coding shred, test that slot >= root
let (common, coding) = Shredder::new_coding_shred_header(5, 5, 5, 6, 6, 0, 0);
let mut coding_shred =
Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
let mut coding_shred = Shred::new_empty_from_header(
common,
DataShredHeader::default(),
coding,
NONCE_SHRED_PAYLOAD_SIZE,
);
Shredder::sign_shred(&leader_keypair, &mut coding_shred);
assert_eq!(
should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 0, 0),

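`verify_repair` only trusts a repair packet whose trailing nonce matches the one this node sent (`DEFAULT_NONCE` here), which stops third parties from spoofing repair responses. The exact wire layout lives in `repair_response` (not shown in this diff); a round-trip sketch assuming the nonce is a little-endian `u32` appended after the shred payload:

```rust
use std::convert::TryInto;

type Nonce = u32;

// Assumed layout: [shred bytes][4-byte little-endian nonce].
fn append_nonce(payload: &mut Vec<u8>, nonce: Nonce) {
    payload.extend_from_slice(&nonce.to_le_bytes());
}

fn read_nonce(packet_data: &[u8]) -> Option<Nonce> {
    let start = packet_data.len().checked_sub(4)?;
    let bytes: [u8; 4] = packet_data[start..].try_into().ok()?;
    Some(Nonce::from_le_bytes(bytes))
}

fn main() {
    let mut payload = vec![0u8; 8];
    append_nonce(&mut payload, 42);
    assert_eq!(read_nonce(&payload), Some(42));
}
```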

@@ -19,6 +19,7 @@ mod tests {
};
use solana_sdk::{
clock::Slot,
genesis_config::GenesisConfig,
hash::hashv,
pubkey::Pubkey,
signature::{Keypair, Signer},
@@ -90,6 +91,7 @@ mod tests {
snapshot_package_output_path,
&(old_last_bank.slot(), old_last_bank.get_accounts_hash()),
),
&GenesisConfig::default(),
)
.unwrap();
