Compare commits

..

227 Commits

Author SHA1 Message Date
Dan Albert
b2a5cf57ad Remove validator-info publish from net scripts 2020-04-22 18:06:46 -06:00
mergify[bot]
33c51ee75d Add getLowestNonpurgedBlock rpc; use blockstore api in getConfirmedBlocks (#9656) (#9663)
automerge
2020-04-22 15:17:51 -07:00
mergify[bot]
d8aa107fae Extend snapshot interval in multinode demo (#9657) (#9660)
automerge
2020-04-22 13:45:01 -07:00
mergify[bot]
82e02d0734 Relax setting withdraw authority during lockup (#9644) (#9645)
automerge
2020-04-21 22:35:27 -07:00
mergify[bot]
dae59bb3e1 Flag test_tvu_exit as serial to hopefully reduce CI flakiness (bp #9509) (#9636)
automerge
2020-04-21 17:16:05 -07:00
mergify[bot]
e0e7fc8e52 Wait for supermajority of cluster to have rooted a transaction to consider it finalized (#9618) (#9626)
automerge
2020-04-21 01:09:26 -07:00
mergify[bot]
9abc84c583 Move streamer test to integration test (#9050) (#9624)
automerge
2020-04-21 00:50:46 -07:00
Stephen Akridge
db6540772c Check distance for timestamp 2020-04-20 11:36:17 -07:00
mergify[bot]
840ebfe625 Error for invalid shred. (#9588) (#9596)
automerge
2020-04-19 22:42:48 -07:00
Michael Vines
953282dbf4 Budget for gossip traffic (#9550) (#9583)
automerge
2020-04-19 09:43:44 -07:00
mergify[bot]
788d1199ac Fix local-cluster test - archiver should wait for itself + 1 validator (#9577) (#9584)
automerge
2020-04-19 01:36:14 -07:00
Michael Vines
f6a8f718a8 Make rpc_subscriptions.rs tests serial (#9556)
automerge

(cherry picked from commit b58338b066)
2020-04-17 11:40:31 -07:00
sakridge
f225bf6f01 Make rpc tests serial (#9537)
(cherry picked from commit e655cba5bd)
2020-04-16 22:17:41 -07:00
Michael Vines
b5f03a380b Only build x86_64-unknown-linux-gnu on docs.rs 2020-04-16 19:07:43 -07:00
Michael Vines
140c75f613 Don't upload tarballs to buildkite to speed up build 2020-04-16 13:55:15 -07:00
Stephen Akridge
6a59f67fdc Write wallet key to explicit file
(cherry picked from commit 93669ab1fc)
2020-04-16 13:41:14 -07:00
Michael Vines
5943747001 Bump version to 1.0.19 2020-04-16 12:19:48 -07:00
mergify[bot]
f26f18d29d Don't unwrap on session new (#9531)
automerge
2020-04-16 10:05:41 -07:00
mergify[bot]
9b58d72f52 Rpc: Speed up getBlockTime (#9510) (#9513)
automerge
2020-04-16 00:19:50 -07:00
Michael Vines
0c885d6a04 Default to RUST_BACKTRACE=1 for more informative validator logs
(cherry picked from commit 4ac15e68cf)
2020-04-15 22:46:24 -07:00
Michael Vines
70b51c0a5a Pacify shellcheck 2020-04-15 17:53:23 -07:00
Michael Vines
560660ae11 Always run shellcheck 2020-04-15 17:53:03 -07:00
Michael Vines
5436855b67 Update build-cli-usage.sh 2020-04-15 17:49:15 -07:00
Michael Vines
7351e5ed2c Use $rust_stable
(cherry picked from commit d567799d43)

# Conflicts:
#	docs/build-cli-usage.sh
2020-04-15 17:49:15 -07:00
mergify[bot]
7ee993fadf RPC: Add health check URI (bp #9499) (#9504)
automerge
2020-04-15 12:31:08 -07:00
sakridge
9bf459e82f Fix race in multi_bind_in_range (#9493)
(cherry picked from commit ee72714c08)
2020-04-14 17:59:15 -07:00
sakridge
545090ff17 limit test jobs to 16 to prevent OOM (#9500)
(cherry picked from commit 2b2b2cac1f)
2020-04-14 17:52:05 -07:00
Michael Vines
bd8074507f Bump version to v1.0.18 2020-04-14 09:58:33 -07:00
Ryo Onodera
cfc7b22c4c Use type alias 2020-04-13 21:12:44 -07:00
Ryo Onodera
5f1c637508 Conditionally change max_age 2020-04-13 21:12:44 -07:00
Ryo Onodera
d888e0a6d7 Use same max_age regardless of leader/not-leader 2020-04-13 21:12:44 -07:00
Michael Vines
10e808e1bd Fail coverage faster in CI 2020-04-13 21:09:26 -07:00
Michael Vines
d9b03ca38b Assume json_rpc_url can be upgraded to a websocket if no port is supplied
(cherry picked from commit bcfadd6085)
2020-04-13 20:32:26 -07:00
Michael Vines
608d75b348 Unfold coverage test failures
(cherry picked from commit d4ea1ec6ad)
2020-04-13 18:08:25 -07:00
Michael Vines
cee3cee4ef Reorder CI jobs to allow for more concurrent PRs
(cherry picked from commit ce027da236)
2020-04-13 13:00:39 -07:00
Dan Albert
09e51323f0 Update buildkite-tests.yml
(cherry picked from commit 92a5a51632)
2020-04-13 11:01:00 -07:00
Michael Vines
da6f702129 Bump version to 1.0.17 2020-04-11 19:32:42 -07:00
mergify[bot]
02a83e7c6e Allow lower shred count (#9410) (#9451)
automerge
2020-04-11 14:49:32 -07:00
sakridge
83263e1737 Calculate account refs fix (#9448) 2020-04-11 12:56:20 -07:00
mergify[bot]
1f7ac22b60 Don't subject authorizing a new stake authority to lockup (#9434) (#9441)
automerge
2020-04-10 17:25:15 -07:00
Michael Vines
747debae56 Cache downloads to speed up CI
(cherry picked from commit b4e00275b2)
2020-04-10 12:25:49 -07:00
mergify[bot]
00b4186469 Improve coverage.sh usability when used locally (#9054) (#9424)
automerge
2020-04-10 05:59:35 -07:00
mergify[bot]
b087dabf4f Rpc: Add getConfirmedSignaturesForAddress (#9407) (#9417)
automerge
2020-04-09 21:20:28 -07:00
mergify[bot]
e00eb0a069 Remove Trust Wallet Beta install instructions (#9396) (#9397)
automerge
2020-04-09 08:52:04 -07:00
mergify[bot]
d4e49ffd06 Rpc: Add getConfirmedTransaction (#9381) (#9392)
automerge
2020-04-09 01:00:34 -07:00
Michael Vines
0f34a190ea Bump version to 1.0.16 2020-04-09 00:05:16 -07:00
mergify[bot]
df2fb8f5b3 Add --no-wait arg to transfer (#9388) (#9390)
automerge
2020-04-08 23:49:40 -07:00
mergify[bot]
80d2a6046b Moar vm.max_map_count (#9389)
automerge
2020-04-08 23:18:30 -07:00
mergify[bot]
24273b826f Add blockstore address-to-signature index (#9367) (#9378)
automerge
2020-04-08 13:55:53 -07:00
mergify[bot]
68d2616e35 stake-monitor: Add 1 SOL grace, to allow for a compliant system account to fund a reasonable number of transactions. (bp #9359) (#9363)
automerge
2020-04-08 11:56:01 -07:00
mergify[bot]
f506d39339 Improve ledger-tool/accounts for easier debugging (#9370) (#9371)
automerge
2020-04-08 11:31:41 -07:00
Michael Vines
bc58c9ec7e Cache solana-perf.tgz to speed up CI (#9360)
automerge

(cherry picked from commit dc91698b3a)
2020-04-07 13:32:13 -07:00
Michael Vines
b57a52cd85 Bump version to 1.0.15 2020-04-07 09:36:47 -07:00
Michael Vines
8631be42ac Add support for monitoring system account balances (#9345)
automerge

(cherry picked from commit 03978ac5a5)
2020-04-06 22:57:47 -07:00
Tyera Eulberg
09367369ef Reinstate commitment param to support old clients (#9324) (#9329)
automerge
2020-04-06 12:21:19 -07:00
mergify[bot]
23b8c95cc4 Update getSignatureStatuses to return historical statuses (#9314) (#9321)
automerge
2020-04-06 04:24:19 -07:00
Tyera Eulberg
e61392b057 Rework TransactionStatus index in blockstore (#9281) (#9304) (#9312)
automerge

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
2020-04-05 12:52:15 -06:00
mergify[bot]
7deba20395 Deprecate confirmTransaction, getSignatureStatus, and getSignatureConfirmation (bp #9298) (#9308)
automerge
2020-04-05 02:41:10 -07:00
Michael Vines
274c097f84 RPC: add err field to TransactionStatus, alongside the now deprecated status field (#9296) (#9307)
automerge
2020-04-05 00:21:15 -07:00
Michael Vines
1c7cea1af4 Add stake-monitor 2020-04-03 20:37:28 -07:00
Michael Vines
4406496d2f Add log before opening database
(cherry picked from commit b557b3170e)
2020-04-03 15:10:57 -07:00
sakridge
a15fa4840c Fix repair dos (#9056) 2020-04-03 13:20:39 -07:00
Michael Vines
c9030660d6 Bump version to 1.0.14 2020-04-02 22:42:28 -07:00
Dan Albert
fdeda769d0 Backport wallet doc changes to v1.0 (#9268)
* Add ledger live screenshots and reduce duplicate instructions (#9258)

automerge

* Add instructions for Trust Wallet Beta for Android (#9261)

automerge
2020-04-02 15:37:26 -06:00
mergify[bot]
53edd26578 Add instructions for Trust Wallet Beta for Android (#9261) (#9264)
automerge
2020-04-02 12:25:32 -07:00
Dan Albert
2433cdd6d6 Set checks timeout to 20 minutes 2020-04-02 13:11:03 -06:00
Dan Albert
77b34c278e Do not trigger tests if only docs were modified (#9240) (#9257) 2020-04-02 10:44:51 -06:00
mergify[bot]
90e993fd9a Add epoch subcommand (#9249) (#9254)
automerge
2020-04-01 22:29:18 -07:00
Tyera Eulberg
251054d8c9 Backport confirmations fix. (#9252)
automerge
2020-04-01 19:42:04 -07:00
Tyera Eulberg
d4bb7cec69 Undo breaking rpc removal of getSignatureConfirmation (#9247) 2020-04-01 18:04:11 -06:00
mergify[bot]
a3f6b04345 Add fee-payer option to docs (bp #9230) (#9236)
automerge
2020-04-01 15:11:47 -07:00
Justin Starry
ba4a5053dd Undo getSignatureStatus breaking change, add getSignatureStatuses (#9231)
automerge
2020-04-01 12:21:56 -07:00
mergify[bot]
6b47a259c3 Add a support page for wallet docs (#9229) (#9234)
automerge
2020-04-01 11:22:50 -07:00
mergify[bot]
c9ec13cf1f Tune udp buffers and vmmap immediately (#9194) (#9216)
automerge
2020-04-01 00:52:50 -07:00
mergify[bot]
906a6ab837 Fix error with account hash list getting too big for gossip (#9197) (#9214)
automerge
2020-03-31 23:27:18 -07:00
mergify[bot]
d0e478a9f8 Fix panic (#9195) (#9208)
automerge
2020-03-31 22:03:46 -07:00
mergify[bot]
b560b64d33 Remove unnecessary security exception and add a new one (bp #9200) (#9205)
* Remove unnecessary exception and add a new one (#9200)

(cherry picked from commit 62e12e3af5)

# Conflicts:
#	ci/test-checks.sh

* Update test-checks.sh

Co-authored-by: sakridge <sakridge@gmail.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-31 19:50:04 -07:00
mergify[bot]
057af41716 Fix links (#9184) (#9187)
automerge
2020-03-31 09:57:23 -07:00
Michael Vines
a44b8abd48 Bump version to v1.0.13 2020-03-30 23:05:41 -07:00
mergify[bot]
8778ecaed5 Ledger cleanup fixes (#9131) (#9175)
automerge
2020-03-30 20:42:17 -07:00
Michael Vines
a02542ada3 Bump version to v1.0.12 2020-03-30 08:45:50 -07:00
Michael Vines
ea17c6883f Remove chatty 'setting snapshot root:' info log 2020-03-30 08:44:24 -07:00
Michael Vines
706306645b solana account now displays the account's rent epoch 2020-03-30 08:44:24 -07:00
sakridge
da9e930788 Calculate ref counts earlier to prevent bad clean (#9153) 2020-03-29 14:42:57 -07:00
mergify[bot]
8b8e066bbe Add RPC subscription api for rooted slots (#9118) (#9126)
automerge
2020-03-27 13:09:32 -07:00
mergify[bot]
3473350b62 fix links (#9125) (#9128)
automerge
2020-03-27 10:28:23 -07:00
mergify[bot]
59d7eb5216 Fix links in docs (#9119) (#9127)
automerge
2020-03-27 09:50:28 -07:00
mergify[bot]
55ba934137 Document transaction field in getConfirmedBlock responses (#9121) (#9124)
automerge
2020-03-27 09:45:30 -07:00
mergify[bot]
4c3dcb7f7e Exclude all executable accounts from rent collection (#9116) (#9120)
automerge
2020-03-27 09:27:07 -07:00
mergify[bot]
3a879db8af Fix broken gitbook links (#9107) (#9108)
automerge
2020-03-26 20:33:20 -07:00
mergify[bot]
d2107270ea Consolidate signature-status rpcs (bp #9069) (#9105)
automerge
2020-03-26 20:30:46 -07:00
mergify[bot]
007afe22d0 Add docs for app wallets (#9098) (#9103)
automerge
2020-03-26 18:26:23 -07:00
mergify[bot]
93a1d10e15 Revert setting the default toolchain (#9093) (#9097)
automerge
2020-03-26 15:59:26 -07:00
mergify[bot]
d57a7c8f21 Restructure wallet docs to prep for app wallet content (#9088) (#9095)
automerge
2020-03-26 14:39:32 -07:00
mergify[bot]
6db39829c8 Install xargo using CI dictated cargo version if available (#9068) (#9092)
automerge
2020-03-26 14:02:09 -07:00
mergify[bot]
ec76826493 Unflake rpc subscriptions test by reducing sub count (#9078) (#9082)
automerge
2020-03-25 22:56:52 -07:00
carllin
d4ddb6265b Convert Banks (#9033)
* Store and compute needed state in EpochStakes struct
Co-authored-by: Carl <carl@solana.com>
2020-03-25 20:43:48 -07:00
Michael Vines
7a8233d7ca Bump version to 1.0.11 2020-03-25 19:17:06 -07:00
mergify[bot]
95bc051129 Cargo update bumpalo (#9067) (#9077)
automerge
2020-03-25 18:51:38 -07:00
mergify[bot]
02bcf4f8e2 ledger-tool can now decode stake instructions (bp #9045) (#9076)
automerge
2020-03-25 18:02:31 -07:00
sakridge
4b0d4e9834 Remove accounts unwrap (#9063)
automerge
2020-03-25 10:59:52 -07:00
mergify[bot]
bf4cdc091a Fix xargo to version 0.3.19 to avoid unstable feature (#9065) (#9066)
automerge

(cherry picked from commit c558db2a48)

Co-authored-by: Justin Starry <justin@solana.com>
2020-03-25 08:48:22 -07:00
mergify[bot]
5f2cf2b44d Increase vmap count in sys-tuner (#8940) (#9064)
(cherry picked from commit a70008cc5c)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-03-25 08:06:36 -07:00
mergify[bot]
03aae5eb5f Ignore RUSTSEC-2020-0006 for the moment (#9057) (#9059)
automerge
2020-03-24 21:06:15 -07:00
mergify[bot]
5f1ce81fbc ledger tool now outputs transaction status information if available (#9024) (#9026)
automerge
2020-03-24 12:23:25 -07:00
mergify[bot]
fc582aa57c Refactor how pubsub subscriptions are added (#9042) (#9049)
automerge
2020-03-24 11:05:33 -07:00
mergify[bot]
13676e9614 Fix timeout for subscriptions test (#9043) (#9044)
automerge
2020-03-24 02:57:25 -07:00
Michael Vines
7636a0521f solana-install-init: --pubkey is no longer required on platforms without a default update manifest 2020-03-23 22:40:53 -07:00
Michael Vines
ad06354a18 Remove , 2020-03-23 22:11:56 -07:00
Michael Vines
953cb93e44 Bump version to 1.0.10 2020-03-23 21:58:45 -07:00
Dan Albert
38b2957a8e Remove unused default update manifest pubkeys (#9041)
automerge
2020-03-23 20:57:43 -07:00
Tyera Eulberg
9eb39df93f Backport: add slot to signature notification & respect confirmations param (#9036)
automerge
2020-03-23 18:32:05 -07:00
mergify[bot]
f34ce94347 Remove Ledger-specific analysis of hardware wallets (#9028) (#9030)
automerge
2020-03-23 14:48:58 -07:00
mergify[bot]
5c6411fc06 Fix link in gitbook (#9027) (#9029)
automerge
2020-03-23 14:48:41 -07:00
Michael Vines
3ab428693a Manual v1.0 backports (#9025)
automerge
2020-03-23 13:55:03 -07:00
Michael Vines
7ffaf2ad29 Manual v1.0 backports (#9015)
automerge
2020-03-22 22:44:55 -07:00
sakridge
a5e4a1f2d8 Drop storage lock (#8997) 2020-03-21 18:55:59 -07:00
mergify[bot]
6ae99848f4 Revert "Move Install Solana doc into the Command-line Guide (#8982)" (#8992) (#8993)
This reverts commit 5fa36bbab3.

(cherry picked from commit 1aab959d4e)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-03-21 14:52:53 -06:00
mergify[bot]
5342b3a53f Shred fetch comment and debug message tweak (#8980) (#8990)
automerge

(cherry picked from commit 909321928c)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-03-21 09:25:08 -07:00
sakridge
50a3c98d21 Shred filter (#8975) (#8987)
Thread bank_forks into shred fetch and filter by slot.
2020-03-20 11:33:19 -07:00
Michael Vines
ca2b7a2c5b Cargo.lock 2020-03-20 11:05:45 -07:00
mergify[bot]
e900623817 Update value names in docs (#8983) (#8985)
automerge
2020-03-20 09:36:55 -07:00
mergify[bot]
a46e953ebb Move Install Solana doc into the Command-line Guide (#8982) (#8984)
automerge
2020-03-20 09:24:56 -07:00
mergify[bot]
3bbc51a6ae Improve CLI usage messages (#8972) (#8977)
automerge
2020-03-19 21:52:37 -07:00
Michael Vines
7ae2464cf3 Fix windows build by removing sys-info (#8860) (#8973)
automerge
2020-03-19 18:14:01 -07:00
Michael Vines
58a36ce484 Bump version to 1.0.9 2020-03-19 17:10:35 -07:00
Dan Albert
61bd9e6a28 Fix windows binary build on v1.0 (#8968)
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-19 15:58:34 -07:00
Tyera Eulberg
495ab631d6 Some Cli polish (#8966) (#8970)
automerge
2020-03-19 14:11:52 -07:00
Tyera Eulberg
93906847f9 Cli: polish transaction progress bar (#8963) (#8967)
automerge
2020-03-19 12:54:50 -07:00
Dan Albert
70f4c4a974 Build windows binaries on v1.0 (#8965) 2020-03-19 11:06:17 -07:00
mergify[bot]
ad6078da2d CLI: Fix create-nonce-account with seed (#8929) (#8961)
automerge
2020-03-19 10:32:35 -07:00
Michael Vines
2a617f2d07 v1.0: Backport of #8939 (#8957)
automerge
2020-03-19 02:08:36 -07:00
mergify[bot]
6af3c6ecbc CLI: Add multi-session signing support (#8927) (#8953)
* SDK: Add `NullSigner` implementation

* SDK: Split `Transaction::verify()` to gain access to results

* CLI: Minor refactor of --sign_only result parsing

* CLI: Enable partial signing

Signers specified by pubkey, but without a matching --signer arg
supplied, fall back to a `NullSigner` when --sign-only is in effect.
This allows their pubkey to be used for TX construction as usual,
but leaves their `sign_message()` a NOP. As such, with --sign-only
in effect, signing and verification must be done separately, with
the latter's per-signature results considered

* CLI: Surface/report missing/bad signers to user

* CLI: Suppress --sign-only JSON output

* nits

* Docs for multi-session offline signing

(cherry picked from commit 98228c392e)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-03-18 23:39:44 -07:00
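
Illustrative note: the `NullSigner` behavior described above can be modeled with a short, self-contained Rust sketch. The trait and type names below are simplified stand-ins for illustration only, not the actual solana-sdk API.

#[derive(Clone, Copy, Debug, PartialEq)]
struct Pubkey([u8; 32]);

struct Signature([u8; 64]);

trait MessageSigner {
    fn pubkey(&self) -> Pubkey;
    fn sign_message(&self, message: &[u8]) -> Signature;
}

// Stand-in for a signer that was specified only by pubkey while --sign-only
// is in effect: it can supply its pubkey but has no key material.
struct NullSigner {
    pubkey: Pubkey,
}

impl MessageSigner for NullSigner {
    fn pubkey(&self) -> Pubkey {
        self.pubkey
    }
    // Signing is a NOP: return an all-zero placeholder signature.
    fn sign_message(&self, _message: &[u8]) -> Signature {
        Signature([0u8; 64])
    }
}

fn main() {
    let absent = NullSigner { pubkey: Pubkey([1u8; 32]) };
    let sig = absent.sign_message(b"offline transaction message");
    // The pubkey is usable for transaction construction as usual, but the
    // placeholder signature only becomes valid once the real signer signs
    // in a separate session, so verification must be reported per signature.
    assert!(sig.0.iter().all(|&b| b == 0));
    println!("constructed with pubkey {:?}", absent.pubkey());
}
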
mergify[bot]
a5938e5a21 Bump bv from 0.11.0 to 0.11.1 (bp #8952) (#8955)
automerge
2020-03-18 22:33:05 -07:00
mergify[bot]
8eaa8d8788 Add docs on wallets and generating keys (bp #8905) (#8946)
automerge
2020-03-18 19:15:34 -07:00
mergify[bot]
51940eec9b Delete broken link (#8950) (#8951)
automerge
2020-03-18 18:20:36 -07:00
mergify[bot]
d1e16b2bc4 Remove product string from device keypair URL (#8942) (#8945)
automerge
2020-03-18 14:11:41 -07:00
mergify[bot]
97051c87b4 Document account/signer requirements for vote instructions (#8941)
automerge
2020-03-18 12:20:02 -07:00
mergify[bot]
2521913654 Add counter for accounts hash verification. (#8928) (#8934)
automerge
2020-03-18 09:57:38 -07:00
mergify[bot]
803c87b4bd Add docs for --trusted-validator options (#8911) (#8933)
automerge
2020-03-18 08:37:15 -07:00
mergify[bot]
8ff9ee3a06 Cli: add spinner progress bar when waiting for transaction confirmation (#8916) (#8920)
automerge
2020-03-17 20:06:01 -07:00
mergify[bot]
25bf2c6062 Extend local-cluster CI timeout (#8921) (#8923)
automerge
2020-03-17 19:48:57 -07:00
mergify[bot]
fc80b77fc4 Increase buffer on low SOL fault to over a week (#8903) (#8904)
automerge
2020-03-17 10:32:28 -07:00
mergify[bot]
5a7707362c Sort device paths for select (#8896) (#8897)
automerge
2020-03-16 19:43:11 -07:00
mergify[bot]
0102ee3fa9 Hoist USB URL docs (#8894) (#8895)
automerge
2020-03-16 16:31:35 -07:00
mergify[bot]
d29cb19a73 Cli: enable flexible signer paths for pubkey args (#8892) (#8893)
automerge
2020-03-16 16:16:58 -07:00
sakridge
1cc66f0cd7 Add Accounts hash consistency halting (#8772) (#8889)
* Accounts hash consistency halting

* Add option to inject account hash faults for testing.

Enable option in local cluster test to see that node halts.
2020-03-16 14:29:44 -07:00
mergify[bot]
e2cfc513eb Add genesis token counter test to system test (#8824) (#8891)
automerge
2020-03-16 13:32:58 -07:00
mergify[bot]
8115a962e9 Cleanup CLI types (#8888) (#8890)
automerge
2020-03-16 12:28:22 -07:00
mergify[bot]
0c804e2ef2 Use types for CLI value names (#8878) (#8886)
automerge
2020-03-16 09:23:15 -07:00
mergify[bot]
cbe36a7a63 Lower error level of InvalidTickCount (bp #8880) (#8885)
automerge
2020-03-16 08:53:33 -07:00
mergify[bot]
174825fecf Fix faucet command in run.sh (#8883) (#8884)
automerge
2020-03-16 05:38:31 -07:00
Michael Vines
c7e80bebdc Bump version to 1.0.8 2020-03-15 21:50:06 -07:00
Michael Vines
57abc370fa Quietly re-introduce legacy --voting-keypair/--identity-keypair args for v1.0.6 compatibility
(cherry picked from commit 49706172f3)
2020-03-15 20:21:23 -07:00
mergify[bot]
42ab421a87 Rename leader to validator, drop _keypair/-keypair suffix (#8876) (#8877)
automerge
2020-03-15 14:14:32 -07:00
mergify[bot]
9ab27291f5 Validators now run a full gossip node while looking for a snapshot (#8872)
automerge
2020-03-15 10:29:13 -07:00
mergify[bot]
27e4e9cb8d Cli: Add resolve-signer subcommand (#8859) (#8870)
automerge
2020-03-14 22:14:24 -07:00
mergify[bot]
b0cf65dfc8 Refactor system tests dir structure (#8865) (#8869)
automerge
2020-03-14 19:38:19 -07:00
Dan Albert
bfc97c682c Apply s/faucet-keypair/faucet renaming to net scripts (#8868)
automerge
2020-03-14 18:14:49 -07:00
mergify[bot]
231c9b25d1 Rework validator vote account defaults to half voting fees (#8858)
automerge
2020-03-13 21:47:32 -07:00
Michael Vines
f536d805ed Rework cluster metrics dashboard to support the modern clusters
(cherry picked from commit 5f5824d78d)

# Conflicts:
#	system-test/automation_utils.sh
2020-03-13 20:18:06 -07:00
Michael Vines
cb6e7426a4 Drop :8899 port from http://devnet.solana.com references
(cherry picked from commit 9e0a26628b)
2020-03-13 20:14:02 -07:00
mergify[bot]
71f391ae04 Enable any signer in various cli subcommands (#8844) (#8856)
automerge
2020-03-13 18:11:05 -07:00
mergify[bot]
12bf34f059 Remove holding Poh lock (#8838) (#8850)
automerge
2020-03-13 16:56:22 -07:00
Grimes
a3a14d6b5b Docs: Use correct flag in keypair verification instructions (#8677)
automerge

(cherry picked from commit 542691c4e4)
2020-03-13 16:00:33 -07:00
Michael Vines
5e22db017a Surface the missing pubkey
(cherry picked from commit ce88602ced)
2020-03-13 16:00:01 -07:00
mergify[bot]
2cd09957b4 Cli: add subcommand to withdraw from vote account (#8550) (#8840)
automerge
2020-03-13 15:31:47 -07:00
mergify[bot]
d1ec6c0b8b Upgrade to Rust 1.42 (#8836) (#8839)
automerge
2020-03-13 14:29:18 -07:00
Michael Vines
7722723400 Bump version to 1.0.7 2020-03-13 08:10:09 -07:00
Trent Nelson
4a42cfc42a Cli error cleanup 1.0 (#8834)
* Don't use move semantics if not needed (#8793)

* SDK: Deboilerplate `TransportError` with thiserror

* Enable interchange between `TransportError` and `ClientError`

* SDK: Retval consistency between `Client` and `AsyncClient` traits

* Client: Introduce/use `Result` type

* Client: Remove unused `RpcResponseIn` type

* Client: Rename `RpcResponse` to more appropriate `RpcResult`

* Client: Death to `io::Result` return types

* Client: Struct-ify `ClientError`

* Client: Add optional `command` parameter to `ClientError`

* RpcClient: Stop abusing `io::Error` (low-fruit)

* ClientError: Use `thiserror`'s `Display` impl

* Extend `RpcError`'s utility

* RpcClient: Stop abusing `io::Error` (the rest)

* CLI: Shim `main()` so we can `Display` format errors

* claputils: format input validator errors with `Display`

They are intended to be displayed to users

* SDK: `thiserror` for hash and sig parse errors

* Keygen: Shim main to format errors with `Display`

* SDK: `thiserror` for `InstructionError`

* CLI: `thiserror` for `CliError`

* CLI: Format user messages with `Display`

* Client: Tweak `Display` for `ClientError`

* RpcClient: Improve messaging when TX cannot be confirmed

* fu death io res retval

* CLI/Keygen - fix shell return value on error

* Tweak `InstructionError` `Display` messages as per review

* Cleanup hackjob return code fix

* Embrace that which you hate most

* Too much...

Co-authored-by: Jack May <jack@solana.com>
2020-03-13 07:42:25 -06:00
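
Illustrative note: the thiserror-based cleanup listed above follows a standard pattern. The sketch below shows the general shape with hypothetical variant names; it assumes a thiserror dependency and is not the exact TransportError/ClientError definitions from this tree.

use thiserror::Error;

// Derive Display via thiserror instead of hand-written boilerplate;
// io::Error converts automatically through #[from].
#[derive(Debug, Error)]
pub enum TransportError {
    #[error("transport io error: {0}")]
    Io(#[from] std::io::Error),
    #[error("transaction failed: {0}")]
    TransactionError(String),
}

// A higher-level client error that interchanges with TransportError,
// again via #[from], so `?` converts between the two layers.
#[derive(Debug, Error)]
pub enum ClientError {
    #[error("transport error: {0}")]
    Transport(#[from] TransportError),
    #[error("rpc error: {0}")]
    Rpc(String),
}

fn send() -> Result<(), TransportError> {
    let io_err = std::io::Error::new(std::io::ErrorKind::Other, "connection reset");
    Err(TransportError::from(io_err))
}

fn submit() -> Result<(), ClientError> {
    send()?; // TransportError -> ClientError via the generated From impl
    Ok(())
}

fn main() {
    // User-facing messages come from Display, not Debug.
    if let Err(e) = submit() {
        eprintln!("error: {}", e);
    }
}
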
mergify[bot]
976d744b0d Enable conservative out-of-bound snapshot cleaning (#8811) (#8832)
automerge
2020-03-12 23:40:17 -07:00
mergify[bot]
62de02c8d4 Avoid early clean and bad snapshot by ref-counting (#8724) (#8831)
automerge
2020-03-12 23:09:45 -07:00
mergify[bot]
2741a5ce3b Move history out of intro (bp #8825) (#8826)
automerge
2020-03-12 18:21:56 -07:00
mergify[bot]
0c818acd90 Move intro out of README (#8735) (#8827)
automerge

(cherry picked from commit c0fd017906)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-03-12 18:49:51 -06:00
Michael Vines
4d04915302 Update source markdown in CI 2020-03-12 14:56:00 -07:00
Michael Vines
7576411c59 Don't tell users to install unreleased software versions 2020-03-12 14:55:46 -07:00
Michael Vines
ab2bed6e8f Add all of docs/src 2020-03-12 14:45:48 -07:00
mergify[bot]
f66a5ba579 Update keys (#8821) (#8822)
automerge
2020-03-12 14:15:32 -07:00
Dan Albert
9253d786f8 Revert over-zealous versioning text (#8819)
automerge
2020-03-12 09:49:26 -07:00
mergify[bot]
d1be1ee49e Fix malformed doc link (#8817) (#8818)
automerge
2020-03-12 09:46:26 -07:00
mergify[bot]
65833eacd8 Update keys (#8814) (#8816)
automerge
2020-03-12 09:39:31 -07:00
Dan Albert
921994e588 Restrict which nodes can run stable and coverage (#8807)
automerge
2020-03-11 19:18:10 -07:00
mergify[bot]
91dfd962e5 Update keys (#8800) (#8806)
automerge
2020-03-11 19:04:46 -07:00
Trent Nelson
18067dcb55 CLI: Plumb nonce-stored fees (#8750)
automerge

(cherry picked from commit 0422af2aae)
2020-03-11 13:09:40 -07:00
Greg Fitzgerald
7501e1b0f0 Update keys (#8791)
(cherry picked from commit a0d0d4c0e9)
2020-03-11 13:08:48 -07:00
mergify[bot]
1bd2c1de20 Notify when validator balance goes below 1 SOL (#8792)
automerge
2020-03-11 11:47:08 -07:00
mergify[bot]
a9ef20d43f Update keys (#8783) (#8784)
automerge
2020-03-10 21:39:34 -07:00
mergify[bot]
00a9d66fed Improve install messaging (#8477) (#8785)
automerge
2020-03-10 20:30:39 -07:00
mergify[bot]
7508ffe703 Add checkmark (#8781) (#8782)
automerge
2020-03-10 18:32:53 -07:00
Michael Vines
6c2368bfc9 Permit fee-payer/split-stake accounts to be the same when using --seed
(cherry picked from commit 775ce3a03f)
2020-03-10 16:16:26 -07:00
Michael Vines
6bcb797260 Revert to a computed websocket_url value when json_rpc_url is changed
(cherry picked from commit f655372b08)
2020-03-10 16:12:23 -07:00
mergify[bot]
329945e56f Print approved msg after Ledger interaction (#8771) (#8775)
automerge
2020-03-10 15:06:43 -07:00
mergify[bot]
1f80346d97 watchtower: Add --monitor-active-stake flag (bp #8765) (#8769)
automerge
2020-03-10 13:27:19 -07:00
mergify[bot]
38e661d7ba CLI Nonce account access dereplicode (#8743) (#8767)
automerge
2020-03-10 13:15:18 -07:00
mergify[bot]
592c4efd17 Configure the cluster right after installing it (#8761) (#8762)
automerge
2020-03-10 10:14:55 -07:00
mergify[bot]
1629745bd3 Move docs to imperative mood (bp #8643) (#8763)
automerge
2020-03-10 09:50:59 -07:00
mergify[bot]
02e078c22e Fix Gitbook's markdown rendering (#8759) (#8760)
automerge
2020-03-10 08:09:51 -07:00
Michael Vines
ea9e7c710a Bump version to 1.0.6 2020-03-09 22:38:57 -07:00
Michael Vines
ee5aa0c1a2 Support monitoring multiple validators 2020-03-09 21:07:32 -07:00
HM
55dee2901e watchtower: flag to suppress duplicate notifications (#8549)
* watchtower: send error message as notification

* watchtower: send all clear notification when ok again

* watchtower: add twilio sms notifications

* watchtower: flag to suppress duplicate notifications

* remove trailing space character

* changes as per suggestion on PR

* all changes together

* cargo fmt
2020-03-09 21:07:32 -07:00
Michael Vines
2b0824d18b watchtower now uses cli-config/
(cherry picked from commit 74e7da214a)
2020-03-09 20:40:18 -07:00
Michael Vines
b0709ea0ac Move cli-config default out of cli/ into cli-config/
(cherry picked from commit 756ba07b16)
2020-03-09 20:40:18 -07:00
Michael Vines
81b5499f7a Rename 'url' to 'json_rpc_url'
(cherry picked from commit 5c236fd06c)
2020-03-09 20:40:18 -07:00
Michael Vines
c96ce99705 Wait for 80% of the active stake instead of 75% 2020-03-09 20:31:44 -07:00
mergify[bot]
8dcd2d11e1 Docs: Fix missing CLI usage.md (#8745) (#8749)
automerge
2020-03-09 20:05:24 -07:00
mergify[bot]
777aae9059 Remove --derivation-path option (#8741) (#8747)
automerge
2020-03-09 19:17:14 -07:00
mergify[bot]
4dd1340236 Limit waiting-message to single- or last-chunk apdus (#8730) (#8733)
automerge
2020-03-09 15:44:17 -07:00
mergify[bot]
889b06e1d4 Allow passing of program_id to programs (bp #8639) (#8670)
automerge
2020-03-09 12:36:34 -07:00
mergify[bot]
f511296ee8 Fix account tests (#8615) (#8729)
automerge
2020-03-09 11:19:14 -07:00
mergify[bot]
c19eb717b4 Update rust-bpf to include matching cargo (#8598) (#8727)
automerge
2020-03-09 10:42:19 -07:00
mergify[bot]
dd54369e1b Cli: Fix create-with-seed (#8706) (#8723)
automerge
2020-03-09 00:16:29 -07:00
Michael Vines
bb563b4835 Permit --no-untrusted-rpc without any --trusted-validators 2020-03-08 22:34:53 -07:00
mergify[bot]
d061fadede Add purge function to ledger-tool (#8719) (#8720)
(cherry picked from commit de34187db0)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-03-08 21:38:38 -07:00
mergify[bot]
577cd2bd3a Remove unnecessary snapshot hash verification (#8711) (#8714)
(cherry picked from commit f992ee3140)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-07 14:41:45 -07:00
mergify[bot]
9d1c8657e2 Groom ledger-tool bounds output (#8710) (#8715)
(cherry picked from commit acb23e8ef0)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-07 14:15:50 -07:00
mergify[bot]
67216ac7f5 Split staker infos (#8683)
automerge
2020-03-06 23:44:06 -08:00
mergify[bot]
e63b24c39f Remove ask-seed-phrase arg from validator, archiver (#8697) (#8713)
automerge
2020-03-06 23:34:20 -08:00
Greg Fitzgerald
d963f7afb4 Set withdrawer keys (#8707)
automerge
2020-03-06 21:21:00 -08:00
mergify[bot]
a07bf4870a Fix Ledger docs (#8705) (#8708)
automerge
2020-03-06 21:03:15 -08:00
mergify[bot]
8422d4b3fb Disable setLogFilter RPC API by default (#8693) (#8699)
automerge
2020-03-06 18:16:04 -08:00
mergify[bot]
ff4731cce2 RPC: Add getFeeCalculatorForBlockhash method call (#8687) (#8698)
automerge
2020-03-06 17:25:33 -08:00
mergify[bot]
659aaafff6 Ledger: return specific error if ledger-app-solana is not running (#8684) (#8695)
automerge
2020-03-06 16:28:53 -08:00
mergify[bot]
175651c497 Add shred version support to net/ (#8689) (#8694)
automerge
2020-03-06 15:41:16 -08:00
mergify[bot]
085e773f27 Properly escape current version (#8686) (#8688)
(cherry picked from commit a78a339407)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-03-06 14:37:41 -07:00
Michael Vines
5d3140c040 Cargo.lock 2020-03-06 14:25:54 -07:00
Michael Vines
d8d7238920 Bump version to 1.0.5 2020-03-06 14:14:53 -07:00
mergify[bot]
418c3cd4cf Delete Archiver installation docs (#8665) (#8669)
automerge
2020-03-06 12:49:52 -08:00
322 changed files with 15370 additions and 6128 deletions


@@ -7,9 +7,6 @@
"GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
"INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
"INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
"INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]",
"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Egc2dMrHDU0NcZ71LwGv/V66shUhwYUE:04VoIb8CKy7KYhQ5W4cEW9SDKZltxWBL5Hob106lMBbUOD/yUvKYcG3Ep8JfTMwO3K8zowW5HpU/IdGoilX0XWLiJJ6t+p05WWK0TA16nOEtwrEG+UK8wm3sN+xCO20i4jDhpNpgg3FYFHT5rKTHW8+zaBTNUX/SFxkN67Lm+92IM28CXYE43SU1WV6H99hGFFVpTK5JVM3JuYU1ex/dHRE+xCzTr4MYUB/F+nGoNFW8HUDV/y0e1jxT9to3x0SmnytEEuk+5RUzFuEt9cKNFeNml3fOCi4qL+sfj/Y5pjH9xDiUxsvH/8NL35jbLP244aFHgWcp]",
"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_apple_darwin": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:NeOxSoWCvXB9AL4H6OK26l/7bmsKd/oz:Ijfoxtvk2CHlN1ZXHup3Gg/914kbbAkEGWJfvozA8UIe+aUzUObMyTrKkVOeNAH8Q8YH9tNzk7RRnrTcpnzeCCBLlWcVEeruMxHox3mPRzmSeDLxtbzCl9VePlRO3T7jg90K5hW+ZAkd5J/WJNzpAcmr93ts/of3MbvGHSujId/efCTzJEcP6JInnBb8Vrj7TlgKbzUlnqpq1+NjYPSXN3maKa9pKeo2JWxZlGBMoy6QWUUY5GbYEylw9smwh1LJcHZjlaZNMuOl4gNKtaSr38IXQkAXaRUJDPAmPras00YObKzXU8RkTrP4EoP/jx5LPR7f]",
"SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_pc_windows_msvc": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:7t+56twjW+jR7fpFNNeRFLPd7E4lbmyN:JuviDpkQrfVcNUGRGsa2e/UhvH6tTYyk1s4cHHE5xZH1NByL7Kpqx36VG/+o1AUGEeSQdsBnKgzYdMoFYbO8o50DoRPc86QIEVXCupD6J9avxLFtQgOWgJp+/mCdUVXlqXiFs/vQgS/L4psrcKdF6WHd77BeUr6ll8DjH+9m5FC9Rcai2pXno6VbPpunHQ0oUdYzhFR64+LiRacBaefQ9igZ+nSEWDLqbaZSyfm9viWkijoVFTq8gAgdXXEh7g0QdxVE5T6bPristJhT6jWBhWunPUCDNFFErWIsbRGctepl4pbCWqh2hNTw9btSgVfeY6uGCOsdy9E=]"
"INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]"
}
}


@@ -1,5 +1,6 @@
os:
- osx
- windows
language: rust
rust:


@@ -45,7 +45,7 @@ $ git pull --rebase upstream master
If there are no functional changes, PRs can be very large and that's no
problem. If, however, your changes are making meaningful changes or additions,
then about 1.0.4 lines of changes is about the most you should ask a Solana
then about 1000 lines of changes is about the most you should ask a Solana
maintainer to review.
### Should I send small PRs as I develop large, new components?

Cargo.lock (generated, 955 changed lines)

File diff suppressed because it is too large.


@@ -48,7 +48,9 @@ members = [
"sdk",
"sdk-c",
"scripts",
"stake-monitor",
"sys-tuner",
"transaction-status",
"upload-perf",
"net-utils",
"vote-signer",


@@ -9,20 +9,6 @@ Blockchain Rebuilt for Scale
Solana&trade; is a new blockchain architecture built from the ground up for scale. The architecture supports
up to 710 thousand transactions per second on a gigabit network.
Disclaimer
===
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
Introduction
===
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
Documentation
===
@@ -238,3 +224,8 @@ problem is solved by this code?" On the other hand, if a test does fail and you
better way to solve the same problem, a Pull Request with your solution would most certainly be
welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
send us that patch!
Disclaimer
===
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
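
Illustrative note: the README excerpt above describes checkpointing the ledger with a SHA-256 hash chain used as a verifiable delay function. A minimal sketch of such a chain follows; it assumes the sha2 crate and is only a toy illustration of the idea, not Solana's Proof of History implementation.

use sha2::{Digest, Sha256};

// Each step hashes the previous output, so producing N ticks requires N
// sequential hashes, while anyone holding the recorded hashes can re-verify
// the chain (or spot-check segments) afterwards.
fn hash_chain(seed: &[u8], ticks: u64) -> Vec<Vec<u8>> {
    let mut state = Sha256::digest(seed).to_vec();
    let mut chain = Vec::with_capacity(ticks as usize);
    for _ in 0..ticks {
        state = Sha256::digest(&state).to_vec();
        chain.push(state.clone());
    }
    chain
}

fn main() {
    let chain = hash_chain(b"genesis", 1_000);
    // Verification is just recomputation from the seed.
    let recomputed = hash_chain(b"genesis", 1_000);
    assert_eq!(chain.last(), recomputed.last());
    println!("final checkpoint: {:02x?}", chain.last().unwrap());
}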


@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "1.0.4"
version = "1.0.19"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,25 +15,28 @@ ed25519-dalek = "=1.0.0-pre.1"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-client = { path = "../client", version = "1.0.4" }
solana-storage-program = { path = "../programs/storage", version = "1.0.4" }
solana-client = { path = "../client", version = "1.0.19" }
solana-storage-program = { path = "../programs/storage", version = "1.0.19" }
thiserror = "1.0"
serde = "1.0.104"
serde_json = "1.0.46"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
solana-chacha = { path = "../chacha", version = "1.0.4" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.4" }
solana-ledger = { path = "../ledger", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-perf = { path = "../perf", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-core = { path = "../core", version = "1.0.4" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.4" }
solana-metrics = { path = "../metrics", version = "1.0.4" }
solana-net-utils = { path = "../net-utils", version = "1.0.19" }
solana-chacha = { path = "../chacha", version = "1.0.19" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.19" }
solana-ledger = { path = "../ledger", version = "1.0.19" }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-perf = { path = "../perf", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
solana-core = { path = "../core", version = "1.0.19" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.19" }
solana-metrics = { path = "../metrics", version = "1.0.19" }
[dev-dependencies]
hex = "0.4.0"
[lib]
name = "solana_archiver_lib"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -199,7 +199,7 @@ impl Archiver {
info!("Connecting to the cluster via {:?}", cluster_entrypoint);
let (nodes, _) =
match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 1) {
match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 2) {
Ok(nodes_and_archivers) => nodes_and_archivers,
Err(e) => {
//shutdown services before exiting
@@ -235,6 +235,7 @@ impl Archiver {
shred_forward_sockets,
repair_socket.clone(),
&shred_fetch_sender,
None,
&exit,
);
let (slot_sender, slot_receiver) = channel();
@@ -379,8 +380,7 @@ impl Archiver {
&archiver_keypair.pubkey(),
&storage_keypair.pubkey(),
);
let message =
Message::new_with_payer(vec![ix], Some(&archiver_keypair.pubkey()));
let message = Message::new_with_payer(&[ix], Some(&archiver_keypair.pubkey()));
if let Err(e) = client.send_message(&[archiver_keypair.as_ref()], message) {
error!("unable to redeem reward, tx failed: {:?}", e);
} else {
@@ -613,6 +613,7 @@ impl Archiver {
ErrorKind::Other,
"setup_mining_account: signature not found",
),
TransportError::Custom(e) => io::Error::new(ErrorKind::Other, e),
})?;
}
Ok(())
@@ -655,7 +656,7 @@ impl Archiver {
Signature::new(&meta.signature.as_ref()),
meta.blockhash,
);
let message = Message::new_with_payer(vec![instruction], Some(&archiver_keypair.pubkey()));
let message = Message::new_with_payer(&[instruction], Some(&archiver_keypair.pubkey()));
let mut transaction = Transaction::new(
&[archiver_keypair.as_ref(), storage_keypair.as_ref()],
message,


@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "1.0.4"
version = "1.0.19"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,15 +11,18 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
solana-chacha = { path = "../chacha", version = "1.0.4" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.4" }
solana-ledger = { path = "../ledger", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-perf = { path = "../perf", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-chacha = { path = "../chacha", version = "1.0.19" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.19" }
solana-ledger = { path = "../ledger", version = "1.0.19" }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-perf = { path = "../perf", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
[dev-dependencies]
hex = "0.4.0"
[lib]
name = "solana_archiver_utils"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.0.4"
version = "1.0.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,14 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.2"
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
solana-core = { path = "../core", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-metrics = { path = "../metrics", version = "1.0.4" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.4" }
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.19" }
solana-core = { path = "../core", version = "1.0.19" }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-metrics = { path = "../metrics", version = "1.0.19" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.19" }
solana-net-utils = { path = "../net-utils", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,21 +2,20 @@ use clap::{crate_description, crate_name, App, Arg};
use console::style;
use solana_archiver_lib::archiver::Archiver;
use solana_clap_utils::{
input_validators::is_keypair,
keypair::{
self, keypair_input, KeypairWithSource, ASK_SEED_PHRASE_ARG,
SKIP_SEED_PHRASE_VALIDATION_ARG,
},
input_parsers::keypair_of, input_validators::is_keypair_or_ask_keyword,
keypair::SKIP_SEED_PHRASE_VALIDATION_ARG,
};
use solana_core::{
cluster_info::{Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo,
};
use solana_sdk::{commitment_config::CommitmentConfig, signature::Signer};
use solana_sdk::{
commitment_config::CommitmentConfig,
signature::{Keypair, Signer},
};
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr},
path::PathBuf,
process::exit,
sync::Arc,
};
@@ -29,10 +28,10 @@ fn main() {
.arg(
Arg::with_name("identity_keypair")
.short("i")
.long("identity-keypair")
.long("identity")
.value_name("PATH")
.takes_value(true)
.validator(is_keypair)
.validator(is_keypair_or_ask_keyword)
.help("File containing an identity (keypair)"),
)
.arg(
@@ -60,48 +59,27 @@ fn main() {
.long("storage-keypair")
.value_name("PATH")
.takes_value(true)
.validator(is_keypair)
.validator(is_keypair_or_ask_keyword)
.help("File containing the storage account keypair"),
)
.arg(
Arg::with_name(ASK_SEED_PHRASE_ARG.name)
.long(ASK_SEED_PHRASE_ARG.long)
.value_name("KEYPAIR NAME")
.multiple(true)
.takes_value(true)
.possible_values(&["identity-keypair", "storage-keypair"])
.help(ASK_SEED_PHRASE_ARG.help),
)
.arg(
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
.long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
.requires(ASK_SEED_PHRASE_ARG.name)
.help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
)
.get_matches();
let ledger_path = PathBuf::from(matches.value_of("ledger").unwrap());
let identity_keypair = keypair_input(&matches, "identity_keypair")
.unwrap_or_else(|err| {
eprintln!("Identity keypair input failed: {}", err);
exit(1);
})
.keypair;
let KeypairWithSource {
keypair: storage_keypair,
source: storage_keypair_source,
} = keypair_input(&matches, "storage_keypair").unwrap_or_else(|err| {
eprintln!("Storage keypair input failed: {}", err);
exit(1);
});
if storage_keypair_source == keypair::Source::Generated {
let identity_keypair = keypair_of(&matches, "identity_keypair").unwrap_or_else(Keypair::new);
let storage_keypair = keypair_of(&matches, "storage_keypair").unwrap_or_else(|| {
clap::Error::with_description(
"The `storage-keypair` argument was not found",
clap::ErrorKind::ArgumentNotFound,
)
.exit();
}
});
let entrypoint_addr = matches
.value_of("entrypoint")


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.0.4"
version = "1.0.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,14 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "1.0.4" }
solana-ledger = { path = "../ledger", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-runtime = { path = "../runtime", version = "1.0.4" }
solana-measure = { path = "../measure", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-core = { path = "../core", version = "1.0.19" }
solana-ledger = { path = "../ledger", version = "1.0.19" }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-runtime = { path = "../runtime", version = "1.0.19" }
solana-measure = { path = "../measure", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
rand = "0.6.5"
crossbeam-channel = "0.3"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.0.4"
version = "1.0.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,17 +18,20 @@ rand = "0.6.5"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
solana-core = { path = "../core", version = "1.0.4" }
solana-genesis = { path = "../genesis", version = "1.0.4" }
solana-client = { path = "../client", version = "1.0.4" }
solana-faucet = { path = "../faucet", version = "1.0.4" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-metrics = { path = "../metrics", version = "1.0.4" }
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
solana-runtime = { path = "../runtime", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.19" }
solana-core = { path = "../core", version = "1.0.19" }
solana-genesis = { path = "../genesis", version = "1.0.19" }
solana-client = { path = "../client", version = "1.0.19" }
solana-faucet = { path = "../faucet", version = "1.0.19" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.19" }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-metrics = { path = "../metrics", version = "1.0.19" }
solana-net-utils = { path = "../net-utils", version = "1.0.19" }
solana-runtime = { path = "../runtime", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.0.4" }
solana-local-cluster = { path = "../local-cluster", version = "1.0.19" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,14 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.0.4"
version = "1.0.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
solana-core = { path = "../core", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.19" }
solana-core = { path = "../core", version = "1.0.19" }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-net-utils = { path = "../net-utils", version = "1.0.19" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.0.4"
version = "1.0.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,24 +14,27 @@ log = "0.4.8"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
solana-core = { path = "../core", version = "1.0.4" }
solana-genesis = { path = "../genesis", version = "1.0.4" }
solana-client = { path = "../client", version = "1.0.4" }
solana-faucet = { path = "../faucet", version = "1.0.4" }
solana-librapay = { path = "../programs/librapay", version = "1.0.4", optional = true }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-metrics = { path = "../metrics", version = "1.0.4" }
solana-measure = { path = "../measure", version = "1.0.4" }
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
solana-runtime = { path = "../runtime", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.4", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "1.0.19" }
solana-core = { path = "../core", version = "1.0.19" }
solana-genesis = { path = "../genesis", version = "1.0.19" }
solana-client = { path = "../client", version = "1.0.19" }
solana-faucet = { path = "../faucet", version = "1.0.19" }
solana-librapay = { path = "../programs/librapay", version = "1.0.19", optional = true }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-metrics = { path = "../metrics", version = "1.0.19" }
solana-measure = { path = "../measure", version = "1.0.19" }
solana-net-utils = { path = "../net-utils", version = "1.0.19" }
solana-runtime = { path = "../runtime", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.19", optional = true }
[dev-dependencies]
serial_test = "0.3.2"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.0.4" }
solana-local-cluster = { path = "../local-cluster", version = "1.0.19" }
[features]
move = ["solana-librapay", "solana-move-loader-program"]
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "1.0.4"
version = "1.0.19"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,15 +10,18 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.4" }
solana-chacha = { path = "../chacha", version = "1.0.4" }
solana-ledger = { path = "../ledger", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-perf = { path = "../perf", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.19" }
solana-chacha = { path = "../chacha", version = "1.0.19" }
solana-ledger = { path = "../ledger", version = "1.0.19" }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-perf = { path = "../perf", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
[dev-dependencies]
hex-literal = "0.2.1"
[lib]
name = "solana_chacha_cuda"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "1.0.4"
version = "1.0.19"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,3 +10,6 @@ edition = "2018"
[build-dependencies]
cc = "1.0.49"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "1.0.4"
version = "1.0.19"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,14 +12,17 @@ edition = "2018"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.4" }
solana-ledger = { path = "../ledger", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-perf = { path = "../perf", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.19" }
solana-ledger = { path = "../ledger", version = "1.0.19" }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-perf = { path = "../perf", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
[dev-dependencies]
hex-literal = "0.2.1"
[lib]
name = "solana_chacha"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

ci/_ (8 changed lines)

@@ -5,7 +5,13 @@
# |source| me
#
base_dir=$(realpath --strip "$(dirname "$0")/..")
_() {
echo "--- $*"
if [[ $(pwd) = $base_dir ]]; then
echo "--- $*"
else
echo "--- $* (wd: $(pwd))"
fi
"$@"
}


@@ -18,3 +18,6 @@ steps:
- command: "ci/publish-docs.sh"
timeout_in_minutes: 15
name: "publish docs"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
name: "move"
timeout_in_minutes: 20

ci/buildkite-tests.yml (new file, 26 lines)

@@ -0,0 +1,26 @@
# These steps are conditionally triggered by ci/buildkite.yml when files
# other than those in docs/ are modified
steps:
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage"
timeout_in_minutes: 30
- wait
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 60
artifact_paths: "log-*.txt"
- wait
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 40
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
- command: "ci/test-bench.sh"
name: "bench"
timeout_in_minutes: 30
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
name: "local-cluster"
timeout_in_minutes: 45
artifact_paths: "log-*.txt"


@@ -1,38 +1,25 @@
# Build steps that run on pushes and pull requests.
# If files other than those in docs/ were modified, this will be followed up by
# ci/buildkite-tests.yml
#
# Release tags use buildkite-release.yml instead
steps:
- command: "ci/shellcheck.sh"
name: "shellcheck"
timeout_in_minutes: 5
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
name: "checks"
timeout_in_minutes: 20
- command: "ci/shellcheck.sh"
name: "shellcheck"
timeout_in_minutes: 5
- wait
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 40
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
- command: "ci/test-bench.sh"
name: "bench"
timeout_in_minutes: 30
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 60
artifact_paths: "log-*.txt"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
name: "move"
timeout_in_minutes: 20
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
name: "local-cluster"
timeout_in_minutes: 30
artifact_paths: "log-*.txt"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage"
timeout_in_minutes: 30
- command: "ci/maybe-trigger-tests.sh"
name: "maybe-trigger-tests"
timeout_in_minutes: 2
- wait
- trigger: "solana-secondary"
branches: "!pull/*"
async: true


@@ -67,6 +67,7 @@ ARGS+=(
--env BUILDKITE_JOB_ID
--env CI
--env CI_BRANCH
--env CI_TAG
--env CI_BUILD_ID
--env CI_COMMIT
--env CI_JOB_ID


@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.41.1
FROM solanalabs/rust:1.42.0
ARG date
RUN set -x \


@@ -15,6 +15,8 @@ To update the pinned version:
1. Run `ci/docker-rust-nightly/build.sh` to rebuild the nightly image locally,
or potentially `ci/docker-rust-nightly/build.sh YYYY-MM-DD` if there's a
specific YYYY-MM-DD that is desired (default is today's build).
Check https://rust-lang.github.io/rustup-components-history/ for build
status
1. Update `ci/rust-version.sh` to reflect the new nightly `YYYY-MM-DD`
1. Run `SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-coverage.sh`
to confirm the new nightly image builds. Fix any issues as needed


@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.41.1
FROM rust:1.42.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0


@@ -178,7 +178,7 @@ startNodes() {
(
set -x
$solana_cli --keypair config/bootstrap-validator/identity-keypair.json \
$solana_cli --keypair config/bootstrap-validator/identity.json \
--url http://127.0.0.1:8899 genesis-hash
) | tee genesis-hash.log
maybeExpectedGenesisHash="--expected-genesis-hash $(tail -n1 genesis-hash.log)"

ci/maybe-trigger-tests.sh Executable file

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
annotate() {
${BUILDKITE:-false} && {
buildkite-agent annotate "$@"
}
}
# Skip if only the docs have been modified
ci/affects-files.sh \
\!^docs/ \
|| {
annotate --style info \
"Skipping all further tests as only docs/ files were modified"
exit 0
}
annotate --style info "Triggering tests"
buildkite-agent pipeline upload ci/buildkite-tests.yml


@@ -13,8 +13,8 @@ if [[ -n $CI_BRANCH ]]; then
. ci/rust-version.sh stable
ci/docker-run.sh "$rust_stable_docker_image" make -C docs
)
# make a local commit for the svgs
git add -A -f docs/src/.gitbook/assets/.
# make a local commit for the svgs and generated/updated markdown
git add -f docs/src
if ! git diff-index --quiet HEAD; then
git config user.email maintainers@solana.com
git config user.name "$me"


@@ -45,7 +45,7 @@ linux)
TARGET=x86_64-unknown-linux-gnu
;;
windows)
TARGET=x86_64-pc-windows-msvc
TARGET=x86_64-pc-windows-gnu
;;
*)
echo CI_OS_NAME unset
@@ -73,15 +73,6 @@ echo --- Creating release tarball
source ci/rust-version.sh stable
scripts/cargo-install-all.sh +"$rust_stable" --use-move solana-release
# Reduce the Windows archive size until
# https://github.com/appveyor/ci/issues/2997 is fixed
if [[ -n $APPVEYOR ]]; then
rm -f \
solana-release/bin/solana-validator.exe \
solana-release/bin/solana-bench-exchange.exe \
fi
tar cvf solana-release-$TARGET.tar solana-release
bzip2 solana-release-$TARGET.tar
cp solana-release/bin/solana-install-init solana-install-init-$TARGET
@@ -104,9 +95,8 @@ fi
source ci/upload-ci-artifact.sh
for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do
upload-ci-artifact "$file"
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
upload-ci-artifact "$file"
echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
continue
fi


@@ -16,13 +16,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.41.1
stable_version=1.42.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2020-02-27
nightly_version=2020-03-12
fi


@@ -22,7 +22,7 @@ _ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings
_ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ docs/build.sh


@@ -38,10 +38,15 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105
# Limit compiler jobs to reduce memory usage
# on machines with 1gb/thread of memory
NPROC=$(nproc)
NPROC=$((NPROC>16 ? 16 : NPROC))
echo "Executing $testName"
case $testName in
test-stable)
_ cargo +"$rust_stable" test --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
;;
test-stable-perf)


@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.0.4"
version = "1.0.19"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,11 +11,15 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"
chrono = "0.4"
[lib]
name = "solana_clap_utils"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,9 +1,10 @@
use crate::keypair::{
keypair_from_seed_phrase, signer_from_path, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path,
ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
};
use chrono::DateTime;
use clap::ArgMatches;
use solana_remote_wallet::remote_wallet::{DerivationPath, RemoteWalletManager};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
clock::UnixTimestamp,
native_token::sol_to_lamports,
@@ -111,18 +112,54 @@ pub fn signer_of(
}
}
pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option<u64> {
value_of(matches, name).map(sol_to_lamports)
pub fn pubkey_of_signer(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Option<Pubkey>, Box<dyn std::error::Error>> {
if let Some(location) = matches.value_of(name) {
Ok(Some(pubkey_from_path(
matches,
location,
name,
wallet_manager,
)?))
} else {
Ok(None)
}
}
pub fn derivation_of(matches: &ArgMatches<'_>, name: &str) -> Option<DerivationPath> {
matches.value_of(name).map(|derivation_str| {
let derivation_str = derivation_str.replace("'", "");
let mut parts = derivation_str.split('/');
let account = parts.next().map(|account| account.parse::<u32>().unwrap());
let change = parts.next().map(|change| change.parse::<u32>().unwrap());
DerivationPath { account, change }
})
pub fn pubkeys_of_multiple_signers(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Option<Vec<Pubkey>>, Box<dyn std::error::Error>> {
if let Some(pubkey_matches) = matches.values_of(name) {
let mut pubkeys: Vec<Pubkey> = vec![];
for signer in pubkey_matches {
pubkeys.push(pubkey_from_path(matches, signer, name, wallet_manager)?);
}
Ok(Some(pubkeys))
} else {
Ok(None)
}
}
pub fn resolve_signer(
matches: &ArgMatches<'_>,
name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn std::error::Error>> {
Ok(resolve_signer_from_path(
matches,
matches.value_of(name).unwrap(),
name,
wallet_manager,
)?)
}
pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option<u64> {
value_of(matches, name).map(sol_to_lamports)
}
#[cfg(test)]
@@ -299,40 +336,4 @@ mod tests {
.get_matches_from(vec!["test", "--single", "0.03"]);
assert_eq!(lamports_of_sol(&matches, "single"), Some(30000000));
}
#[test]
fn test_derivation_of() {
let matches = app()
.clone()
.get_matches_from(vec!["test", "--single", "2/3"]);
assert_eq!(
derivation_of(&matches, "single"),
Some(DerivationPath {
account: Some(2),
change: Some(3)
})
);
assert_eq!(derivation_of(&matches, "another"), None);
let matches = app()
.clone()
.get_matches_from(vec!["test", "--single", "2"]);
assert_eq!(
derivation_of(&matches, "single"),
Some(DerivationPath {
account: Some(2),
change: None
})
);
assert_eq!(derivation_of(&matches, "another"), None);
let matches = app()
.clone()
.get_matches_from(vec!["test", "--single", "2'/3'"]);
assert_eq!(
derivation_of(&matches, "single"),
Some(DerivationPath {
account: Some(2),
change: Some(3)
})
);
}
}


@@ -12,7 +12,7 @@ use std::str::FromStr;
pub fn is_pubkey(string: String) -> Result<(), String> {
match string.parse::<Pubkey>() {
Ok(_) => Ok(()),
Err(err) => Err(format!("{:?}", err)),
Err(err) => Err(format!("{}", err)),
}
}
@@ -20,7 +20,7 @@ pub fn is_pubkey(string: String) -> Result<(), String> {
pub fn is_hash(string: String) -> Result<(), String> {
match string.parse::<Hash>() {
Ok(_) => Ok(()),
Err(err) => Err(format!("{:?}", err)),
Err(err) => Err(format!("{}", err)),
}
}
@@ -28,7 +28,7 @@ pub fn is_hash(string: String) -> Result<(), String> {
pub fn is_keypair(string: String) -> Result<(), String> {
read_keypair_file(&string)
.map(|_| ())
.map_err(|err| format!("{:?}", err))
.map_err(|err| format!("{}", err))
}
// Return an error if a keypair file cannot be parsed
@@ -38,7 +38,7 @@ pub fn is_keypair_or_ask_keyword(string: String) -> Result<(), String> {
}
read_keypair_file(&string)
.map(|_| ())
.map_err(|err| format!("{:?}", err))
.map_err(|err| format!("{}", err))
}
// Return an error if string cannot be parsed as pubkey string or keypair file location
@@ -46,18 +46,27 @@ pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
is_pubkey(string.clone()).or_else(|_| is_keypair(string))
}
// Return an error if string cannot be parsed as pubkey or keypair file or keypair ask keyword
pub fn is_pubkey_or_keypair_or_ask_keyword(string: String) -> Result<(), String> {
is_pubkey(string.clone()).or_else(|_| is_keypair_or_ask_keyword(string))
}
pub fn is_valid_signer(string: String) -> Result<(), String> {
// Return an error if string cannot be parsed as a pubkey string, or a valid Signer that can
// produce a pubkey()
pub fn is_valid_pubkey(string: String) -> Result<(), String> {
match parse_keypair_path(&string) {
KeypairUrl::Filepath(path) => is_keypair(path),
_ => Ok(()),
}
}
// Return an error if string cannot be parsed as a valid Signer. This is an alias of
// `is_valid_pubkey`, and does accept pubkey strings, even though a Pubkey is not by itself
// sufficient to sign a transaction.
//
// In the current offline-signing implementation, a pubkey is the valid input for a signer field
// when paired with an offline `--signer` argument to provide a Presigner (pubkey + signature).
// Clap validators can't check multiple fields at once, so the verification that a `--signer` is
// also provided and correct happens in parsing, not in validation.
pub fn is_valid_signer(string: String) -> Result<(), String> {
is_valid_pubkey(string)
}
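// Illustrative sketch (not part of this diff): how these validators attach to clap 2.x
// arguments. The argument names here are hypothetical; real usages appear in the cli
// crate changes later in this comparison.
fn example_app<'a, 'b>() -> clap::App<'a, 'b> {
    use clap::{App, Arg};
    App::new("example")
        .arg(
            Arg::with_name("recipient")
                .index(1)
                .value_name("RECIPIENT_ADDRESS")
                .takes_value(true)
                .required(true)
                // Accepts a pubkey string, a keypair file, a usb:// wallet path, ASK, or stdin
                .validator(is_valid_pubkey),
        )
        .arg(
            Arg::with_name("fee_payer")
                .long("fee-payer")
                .value_name("KEYPAIR")
                .takes_value(true)
                // Also accepts a bare pubkey, which can only sign when paired with an
                // offline --signer PUBKEY=SIGNATURE argument (checked during parsing)
                .validator(is_valid_signer),
        )
}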
// Return an error if string cannot be parsed as pubkey=signature string
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
let mut signer = string.split('=');
@@ -73,10 +82,10 @@ pub fn is_pubkey_sig(string: String) -> Result<(), String> {
.ok_or_else(|| "Malformed signer string".to_string())?,
) {
Ok(_) => Ok(()),
Err(err) => Err(format!("{:?}", err)),
Err(err) => Err(format!("{}", err)),
}
}
Err(err) => Err(format!("{:?}", err)),
Err(err) => Err(format!("{}", err)),
}
}
@@ -90,20 +99,20 @@ pub fn is_url(string: String) -> Result<(), String> {
Err("no host provided".to_string())
}
}
Err(err) => Err(format!("{:?}", err)),
Err(err) => Err(format!("{}", err)),
}
}
pub fn is_slot(slot: String) -> Result<(), String> {
slot.parse::<Slot>()
.map(|_| ())
.map_err(|e| format!("{:?}", e))
.map_err(|e| format!("{}", e))
}
pub fn is_port(port: String) -> Result<(), String> {
port.parse::<u16>()
.map(|_| ())
.map_err(|e| format!("{:?}", e))
.map_err(|e| format!("{}", e))
}
pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
@@ -111,7 +120,7 @@ pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
.parse::<u8>()
.map_err(|e| {
format!(
"Unable to parse input percentage, provided: {}, err: {:?}",
"Unable to parse input percentage, provided: {}, err: {}",
percentage, e
)
})
@@ -141,7 +150,7 @@ pub fn is_amount(amount: String) -> Result<(), String> {
pub fn is_rfc3339_datetime(value: String) -> Result<(), String> {
DateTime::parse_from_rfc3339(&value)
.map(|_| ())
.map_err(|e| format!("{:?}", e))
.map_err(|e| format!("{}", e))
}
pub fn is_derivation(value: String) -> Result<(), String> {
@@ -152,7 +161,7 @@ pub fn is_derivation(value: String) -> Result<(), String> {
.parse::<u32>()
.map_err(|e| {
format!(
"Unable to parse derivation, provided: {}, err: {:?}",
"Unable to parse derivation, provided: {}, err: {}",
account, e
)
})
@@ -160,7 +169,7 @@ pub fn is_derivation(value: String) -> Result<(), String> {
if let Some(change) = parts.next() {
change.parse::<u32>().map_err(|e| {
format!(
"Unable to parse derivation, provided: {}, err: {:?}",
"Unable to parse derivation, provided: {}, err: {}",
change, e
)
})


@@ -1,10 +1,10 @@
use crate::{
input_parsers::{derivation_of, pubkeys_sigs_of},
offline::SIGNER_ARG,
input_parsers::pubkeys_sigs_of,
offline::{SIGNER_ARG, SIGN_ONLY_ARG},
ArgConstant,
};
use bip39::{Language, Mnemonic, Seed};
use clap::{values_t, ArgMatches, Error, ErrorKind};
use clap::ArgMatches;
use rpassword::prompt_password_stderr;
use solana_remote_wallet::{
remote_keypair::generate_remote_keypair,
@@ -14,7 +14,7 @@ use solana_sdk::{
pubkey::Pubkey,
signature::{
keypair_from_seed, keypair_from_seed_phrase_and_passphrase, read_keypair,
read_keypair_file, Keypair, Presigner, Signature, Signer,
read_keypair_file, Keypair, NullSigner, Presigner, Signature, Signer,
},
};
use std::{
@@ -75,7 +75,14 @@ pub fn signer_from_path(
false,
)?))
}
KeypairUrl::Filepath(path) => Ok(Box::new(read_keypair_file(&path)?)),
KeypairUrl::Filepath(path) => match read_keypair_file(&path) {
Err(e) => Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("could not find keypair file: {} error: {}", path, e),
)
.into()),
Ok(file) => Ok(Box::new(file)),
},
KeypairUrl::Stdin => {
let mut stdin = std::io::stdin();
Ok(Box::new(read_keypair(&mut stdin)?))
@@ -84,9 +91,9 @@ pub fn signer_from_path(
if let Some(wallet_manager) = wallet_manager {
Ok(Box::new(generate_remote_keypair(
path,
derivation_of(matches, "derivation_path"),
wallet_manager,
matches.is_present("confirm_key"),
keypair_name,
)?))
} else {
Err(RemoteWalletError::NoDeviceFound.into())
@@ -98,10 +105,12 @@ pub fn signer_from_path(
.and_then(|presigners| presigner_from_pubkey_sigs(&pubkey, presigners));
if let Some(presigner) = presigner {
Ok(Box::new(presigner))
} else if matches.is_present(SIGN_ONLY_ARG.name) {
Ok(Box::new(NullSigner::new(&pubkey)))
} else {
Err(Error::with_description(
"Missing signature for supplied pubkey",
ErrorKind::MissingRequiredArgument,
Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("missing signature for supplied pubkey: {}", pubkey),
)
.into())
}
@@ -109,39 +118,72 @@ pub fn signer_from_path(
}
}
pub fn pubkey_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Pubkey, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Pubkey(pubkey) => Ok(pubkey),
_ => Ok(signer_from_path(matches, path, keypair_name, wallet_manager)?.pubkey()),
}
}
pub fn resolve_signer_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Ask => {
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
// This method validates the seed phrase, but returns `None` because there is no path
// on disk or to a device
keypair_from_seed_phrase(keypair_name, skip_validation, false).map(|_| None)
}
KeypairUrl::Filepath(path) => match read_keypair_file(&path) {
Err(e) => Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!("could not find keypair file: {} error: {}", path, e),
)
.into()),
Ok(_) => Ok(Some(path.to_string())),
},
KeypairUrl::Stdin => {
let mut stdin = std::io::stdin();
// This method validates the keypair from stdin, but returns `None` because there is no
// path on disk or to a device
read_keypair(&mut stdin).map(|_| None)
}
KeypairUrl::Usb(path) => {
if let Some(wallet_manager) = wallet_manager {
let path = generate_remote_keypair(
path,
wallet_manager,
matches.is_present("confirm_key"),
keypair_name,
)
.map(|keypair| keypair.path)?;
Ok(Some(path))
} else {
Err(RemoteWalletError::NoDeviceFound.into())
}
}
_ => Ok(Some(path.to_string())),
}
}
// Keyword used to indicate that the user should be asked for a keypair seed phrase
pub const ASK_KEYWORD: &str = "ASK";
pub const ASK_SEED_PHRASE_ARG: ArgConstant<'static> = ArgConstant {
long: "ask-seed-phrase",
name: "ask_seed_phrase",
help: "Recover a keypair using a seed phrase and optional passphrase",
};
pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {
long: "skip-seed-phrase-validation",
name: "skip_seed_phrase_validation",
help: "Skip validation of seed phrases. Use this if your phrase does not use the BIP39 official English word list",
};
#[derive(Debug, PartialEq)]
pub enum Source {
Generated,
Path,
SeedPhrase,
}
pub struct KeypairWithSource {
pub keypair: Keypair,
pub source: Source,
}
impl KeypairWithSource {
fn new(keypair: Keypair, source: Source) -> Self {
Self { keypair, source }
}
}
/// Prompts the user for a passphrase and then asks for confirmation to check for mistakes
pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>> {
let passphrase = prompt_password_stderr(&prompt)?;
@@ -195,47 +237,6 @@ pub fn keypair_from_seed_phrase(
Ok(keypair)
}
/// Checks CLI arguments to determine whether a keypair should be:
/// - inputted securely via stdin,
/// - read in from a file,
/// - or newly generated
pub fn keypair_input(
matches: &clap::ArgMatches,
keypair_name: &str,
) -> Result<KeypairWithSource, Box<dyn error::Error>> {
let ask_seed_phrase_matches =
values_t!(matches.values_of(ASK_SEED_PHRASE_ARG.name), String).unwrap_or_default();
let keypair_match_name = keypair_name.replace('-', "_");
if ask_seed_phrase_matches
.iter()
.any(|s| s.as_str() == keypair_name)
{
if matches.value_of(keypair_match_name).is_some() {
clap::Error::with_description(
&format!(
"`--{} {}` cannot be used with `{} <PATH>`",
ASK_SEED_PHRASE_ARG.long, keypair_name, keypair_name
),
clap::ErrorKind::ArgumentConflict,
)
.exit();
}
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
keypair_from_seed_phrase(keypair_name, skip_validation, true)
.map(|keypair| KeypairWithSource::new(keypair, Source::SeedPhrase))
} else if let Some(keypair_file) = matches.value_of(keypair_match_name) {
if keypair_file.starts_with("usb://") {
Ok(KeypairWithSource::new(Keypair::new(), Source::Path))
} else {
read_keypair_file(keypair_file)
.map(|keypair| KeypairWithSource::new(keypair, Source::Path))
}
} else {
Ok(KeypairWithSource::new(Keypair::new(), Source::Generated))
}
}
fn sanitize_seed_phrase(seed_phrase: &str) -> String {
seed_phrase
.split_whitespace()
@@ -246,14 +247,6 @@ fn sanitize_seed_phrase(seed_phrase: &str) -> String {
#[cfg(test)]
mod tests {
use super::*;
use clap::ArgMatches;
#[test]
fn test_keypair_input() {
let arg_matches = ArgMatches::default();
let KeypairWithSource { source, .. } = keypair_input(&arg_matches, "").unwrap();
assert_eq!(source, Source::Generated);
}
#[test]
fn test_sanitize_seed_phrase() {


@@ -1,3 +1,5 @@
use thiserror::Error;
#[macro_export]
macro_rules! version {
() => {
@@ -23,6 +25,23 @@ pub struct ArgConstant<'a> {
pub help: &'a str,
}
/// Error type for forwarding Errors out of `main()` of a `clap` app
/// and still using the `Display` formatter
#[derive(Error)]
#[error("{0}")]
pub struct DisplayError(Box<dyn std::error::Error>);
impl DisplayError {
pub fn new_as_boxed(inner: Box<dyn std::error::Error>) -> Box<Self> {
DisplayError(inner).into()
}
}
impl std::fmt::Debug for DisplayError {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "{}", self.0)
}
}
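// Illustrative sketch (not part of this diff), mirroring how cli/src/main.rs below maps
// its top-level error: wrapping in DisplayError makes a failure print through the Display
// formatter ("{0}") instead of the default Debug dump when returned from main().
fn display_error_example() -> Result<(), Box<DisplayError>> {
    let fallible = || -> Result<(), Box<dyn std::error::Error>> {
        Err("something went wrong".into())
    };
    // new_as_boxed wraps any boxed error so that Debug output defers to Display
    fallible().map_err(DisplayError::new_as_boxed)
}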
pub mod input_parsers;
pub mod input_validators;
pub mod keypair;


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.4"
version = "1.0.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,3 +14,7 @@ lazy_static = "1.4.0"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
url = "2.1.1"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -1,10 +1,7 @@
// Wallet settings that can be configured for long-term use
use serde_derive::{Deserialize, Serialize};
use std::{
fs::{create_dir_all, File},
io::{self, Write},
path::Path,
};
use std::io;
use url::Url;
lazy_static! {
pub static ref CONFIG_FILE: Option<String> = {
@@ -15,39 +12,86 @@ lazy_static! {
};
}
#[derive(Serialize, Deserialize, Default, Debug, PartialEq)]
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct Config {
pub url: String,
pub json_rpc_url: String,
pub websocket_url: String,
pub keypair_path: String,
}
impl Config {
pub fn new(url: &str, websocket_url: &str, keypair_path: &str) -> Self {
impl Default for Config {
fn default() -> Self {
let keypair_path = {
let mut keypair_path = dirs::home_dir().expect("home directory");
keypair_path.extend(&[".config", "solana", "id.json"]);
keypair_path.to_str().unwrap().to_string()
};
let json_rpc_url = "http://127.0.0.1:8899".to_string();
// Empty websocket_url string indicates the client should
// `Config::compute_websocket_url(&json_rpc_url)`
let websocket_url = "".to_string();
Self {
url: url.to_string(),
websocket_url: websocket_url.to_string(),
keypair_path: keypair_path.to_string(),
json_rpc_url,
websocket_url,
keypair_path,
}
}
}
impl Config {
pub fn load(config_file: &str) -> Result<Self, io::Error> {
let file = File::open(config_file.to_string())?;
let config = serde_yaml::from_reader(file)
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
Ok(config)
crate::load_config_file(config_file)
}
pub fn save(&self, config_file: &str) -> Result<(), io::Error> {
let serialized = serde_yaml::to_string(self)
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
crate::save_config_file(self, config_file)
}
if let Some(outdir) = Path::new(&config_file).parent() {
create_dir_all(outdir)?;
pub fn compute_websocket_url(json_rpc_url: &str) -> String {
let json_rpc_url: Option<Url> = json_rpc_url.parse().ok();
if json_rpc_url.is_none() {
return "".to_string();
}
let mut file = File::create(config_file)?;
file.write_all(&serialized.into_bytes())?;
Ok(())
let json_rpc_url = json_rpc_url.unwrap();
let is_secure = json_rpc_url.scheme().to_ascii_lowercase() == "https";
let mut ws_url = json_rpc_url.clone();
ws_url
.set_scheme(if is_secure { "wss" } else { "ws" })
.expect("unable to set scheme");
if let Some(port) = json_rpc_url.port() {
ws_url.set_port(Some(port + 1)).expect("unable to set port");
}
ws_url.to_string()
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn compute_websocket_url() {
assert_eq!(
Config::compute_websocket_url(&"http://devnet.solana.com"),
"ws://devnet.solana.com/".to_string()
);
assert_eq!(
Config::compute_websocket_url(&"https://devnet.solana.com"),
"wss://devnet.solana.com/".to_string()
);
assert_eq!(
Config::compute_websocket_url(&"http://example.com:8899"),
"ws://example.com:8900/".to_string()
);
assert_eq!(
Config::compute_websocket_url(&"https://example.com:1234"),
"wss://example.com:1235/".to_string()
);
assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
}
}


@@ -1,4 +1,39 @@
#[macro_use]
extern crate lazy_static;
pub mod config;
mod config;
pub use config::{Config, CONFIG_FILE};
use std::{
fs::{create_dir_all, File},
io::{self, Write},
path::Path,
};
pub fn load_config_file<T, P>(config_file: P) -> Result<T, io::Error>
where
T: serde::de::DeserializeOwned,
P: AsRef<Path>,
{
let file = File::open(config_file)?;
let config = serde_yaml::from_reader(file)
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
Ok(config)
}
pub fn save_config_file<T, P>(config: &T, config_file: P) -> Result<(), io::Error>
where
T: serde::ser::Serialize,
P: AsRef<Path>,
{
let serialized = serde_yaml::to_string(config)
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
if let Some(outdir) = config_file.as_ref().parent() {
create_dir_all(outdir)?;
}
let mut file = File::create(config_file)?;
file.write_all(&serialized.into_bytes())?;
Ok(())
}
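// Illustrative sketch (not part of this diff): round-tripping a Config with the generic
// helpers above. The path is hypothetical; an empty websocket_url is stored as-is so that
// clients can derive it later via Config::compute_websocket_url(&json_rpc_url).
fn config_round_trip_example() -> Result<(), io::Error> {
    let mut config = Config::default();
    config.json_rpc_url = "http://127.0.0.1:8899".to_string();
    save_config_file(&config, "/tmp/solana-cli-config-example.yml")?;
    let reloaded: Config = load_config_file("/tmp/solana-cli-config-example.yml")?;
    assert_eq!(reloaded, config);
    Ok(())
}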


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.4"
version = "1.0.19"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,29 +26,33 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.46"
solana-budget-program = { path = "../programs/budget", version = "1.0.4" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
solana-cli-config = { path = "../cli-config", version = "1.0.4" }
solana-client = { path = "../client", version = "1.0.4" }
solana-config-program = { path = "../programs/config", version = "1.0.4" }
solana-faucet = { path = "../faucet", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.4" }
solana-runtime = { path = "../runtime", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-stake-program = { path = "../programs/stake", version = "1.0.4" }
solana-storage-program = { path = "../programs/storage", version = "1.0.4" }
solana-vote-program = { path = "../programs/vote", version = "1.0.4" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.4" }
solana-budget-program = { path = "../programs/budget", version = "1.0.19" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.19" }
solana-cli-config = { path = "../cli-config", version = "1.0.19" }
solana-client = { path = "../client", version = "1.0.19" }
solana-config-program = { path = "../programs/config", version = "1.0.19" }
solana-faucet = { path = "../faucet", version = "1.0.19" }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-net-utils = { path = "../net-utils", version = "1.0.19" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.19" }
solana-runtime = { path = "../runtime", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
solana-stake-program = { path = "../programs/stake", version = "1.0.19" }
solana-storage-program = { path = "../programs/storage", version = "1.0.19" }
solana-vote-program = { path = "../programs/vote", version = "1.0.19" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.19" }
titlecase = "1.1.0"
thiserror = "1.0.11"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.0.4" }
solana-budget-program = { path = "../programs/budget", version = "1.0.4" }
solana-core = { path = "../core", version = "1.0.19" }
solana-budget-program = { path = "../programs/budget", version = "1.0.19" }
tempfile = "3.1.0"
[[bin]]
name = "solana"
path = "src/main.rs"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

File diff suppressed because it is too large


@@ -55,8 +55,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
Arg::with_name("node_pubkey")
.index(1)
.takes_value(true)
.value_name("PUBKEY")
.validator(is_pubkey_or_keypair)
.value_name("VALIDATOR_PUBKEY")
.validator(is_valid_pubkey)
.required(true)
.help("Identity pubkey of the validator"),
)
@@ -117,6 +117,17 @@ impl ClusterQuerySubCommands for App<'_, '_> {
),
),
)
.subcommand(
SubCommand::with_name("epoch").about("Get current epoch")
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Return epoch at maximum-lockout commitment level",
),
),
)
.subcommand(
SubCommand::with_name("transaction-count").about("Get current transaction count")
.alias("get-transaction-count")
@@ -208,10 +219,10 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.arg(
Arg::with_name("vote_account_pubkeys")
.index(1)
.value_name("VOTE ACCOUNT PUBKEYS")
.value_name("VOTE_ACCOUNT_PUBKEYS")
.takes_value(true)
.multiple(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("Only show stake accounts delegated to the provided vote accounts"),
)
.arg(
@@ -243,8 +254,11 @@ impl ClusterQuerySubCommands for App<'_, '_> {
}
}
pub fn parse_catchup(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let node_pubkey = pubkey_of(matches, "node_pubkey").unwrap();
pub fn parse_catchup(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let node_pubkey = pubkey_of_signer(matches, "node_pubkey", wallet_manager)?.unwrap();
let node_json_rpc_url = value_t!(matches, "node_json_rpc_url", String).ok();
Ok(CliCommandInfo {
command: CliCommand::Catchup {
@@ -322,6 +336,18 @@ pub fn parse_get_slot(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliErr
})
}
pub fn parse_get_epoch(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
Ok(CliCommandInfo {
command: CliCommand::GetEpoch { commitment_config },
signers: vec![],
})
}
pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
@@ -334,9 +360,13 @@ pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliComman
})
}
pub fn parse_show_stakes(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
pub fn parse_show_stakes(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let use_lamports_unit = matches.is_present("lamports");
let vote_account_pubkeys = pubkeys_of(matches, "vote_account_pubkeys");
let vote_account_pubkeys =
pubkeys_of_multiple_signers(matches, "vote_account_pubkeys", wallet_manager)?;
Ok(CliCommandInfo {
command: CliCommand::ShowStakes {
@@ -568,6 +598,14 @@ pub fn process_get_slot(
Ok(slot.to_string())
}
pub fn process_get_epoch(
rpc_client: &RpcClient,
commitment_config: CommitmentConfig,
) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
Ok(epoch_info.epoch.to_string())
}
pub fn parse_show_block_production(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let epoch = value_t!(matches, "epoch", Epoch).ok();
let slot_limit = value_t!(matches, "slot_limit", u64).ok();
@@ -789,7 +827,7 @@ pub fn process_ping(
last_blockhash = recent_blockhash;
let ix = system_instruction::transfer(&config.signers[0].pubkey(), &to, lamports);
let message = Message::new(vec![ix]);
let message = Message::new(&[ix]);
let mut transaction = Transaction::new_unsigned(message);
transaction.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
@@ -972,7 +1010,7 @@ pub fn process_live_slots(url: &str) -> ProcessResult {
current = Some(new_info);
}
Err(err) => {
eprintln!("disconnected: {:?}", err);
eprintln!("disconnected: {}", err);
break;
}
}
@@ -1273,6 +1311,19 @@ mod tests {
}
);
let test_get_epoch = test_commands
.clone()
.get_matches_from(vec!["test", "epoch"]);
assert_eq!(
parse_command(&test_get_epoch, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::GetEpoch {
commitment_config: CommitmentConfig::recent(),
},
signers: vec![],
}
);
let test_transaction_count = test_commands
.clone()
.get_matches_from(vec!["test", "transaction-count"]);


@@ -1,6 +1,6 @@
use crate::cli::SettingType;
use console::style;
use solana_sdk::transaction::Transaction;
use solana_sdk::hash::Hash;
// Pretty print a "name value"
pub fn println_name_value(name: &str, value: &str) {
@@ -27,13 +27,25 @@ pub fn println_name_value_or(name: &str, value: &str, setting_type: SettingType)
);
}
pub fn println_signers(tx: &Transaction) {
pub fn println_signers(
blockhash: &Hash,
signers: &[String],
absent: &[String],
bad_sig: &[String],
) {
println!();
println!("Blockhash: {}", tx.message.recent_blockhash);
println!("Signers (Pubkey=Signature):");
tx.signatures
.iter()
.zip(tx.message.account_keys.clone())
.for_each(|(signature, pubkey)| println!(" {:?}={:?}", pubkey, signature));
println!("Blockhash: {}", blockhash);
if !signers.is_empty() {
println!("Signers (Pubkey=Signature):");
signers.iter().for_each(|signer| println!(" {}", signer))
}
if !absent.is_empty() {
println!("Absent Signers (Pubkey):");
absent.iter().for_each(|pubkey| println!(" {}", pubkey))
}
if !bad_sig.is_empty() {
println!("Bad Signatures (Pubkey):");
bad_sig.iter().for_each(|pubkey| println!(" {}", pubkey))
}
println!();
}


@@ -2,15 +2,14 @@ use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches
use console::style;
use solana_clap_utils::{
input_parsers::derivation_of,
input_validators::{is_derivation, is_url},
keypair::SKIP_SEED_PHRASE_VALIDATION_ARG,
input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, offline::SIGN_ONLY_ARG,
DisplayError,
};
use solana_cli::{
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliSigners},
display::{println_name_value, println_name_value_or},
};
use solana_cli_config::config::{Config, CONFIG_FILE};
use solana_cli_config::{Config, CONFIG_FILE};
use solana_remote_wallet::remote_wallet::{maybe_wallet_manager, RemoteWalletManager};
use std::{error, sync::Arc};
@@ -22,12 +21,12 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
let config = Config::load(config_file).unwrap_or_default();
let (url_setting_type, json_rpc_url) =
CliConfig::compute_json_rpc_url_setting("", &config.url);
CliConfig::compute_json_rpc_url_setting("", &config.json_rpc_url);
let (ws_setting_type, websocket_url) = CliConfig::compute_websocket_url_setting(
"",
&config.websocket_url,
"",
&config.url,
&config.json_rpc_url,
);
let (keypair_setting_type, keypair_path) =
CliConfig::compute_keypair_path_setting("", &config.keypair_path);
@@ -35,7 +34,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
if let Some(field) = subcommand_matches.value_of("specific_setting") {
let (field_name, value, setting_type) = match field {
"json_rpc_url" => ("RPC URL", json_rpc_url, url_setting_type),
"websocket_url" => ("WS URL", websocket_url, ws_setting_type),
"websocket_url" => ("WebSocket URL", websocket_url, ws_setting_type),
"keypair" => ("Key Path", keypair_path, keypair_setting_type),
_ => unreachable!(),
};
@@ -43,7 +42,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
} else {
println_name_value("Config File:", config_file);
println_name_value_or("RPC URL:", &json_rpc_url, url_setting_type);
println_name_value_or("WS URL:", &websocket_url, ws_setting_type);
println_name_value_or("WebSocket URL:", &websocket_url, ws_setting_type);
println_name_value_or("Keypair Path:", &keypair_path, keypair_setting_type);
}
} else {
@@ -58,7 +57,10 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
if let Some(config_file) = matches.value_of("config_file") {
let mut config = Config::load(config_file).unwrap_or_default();
if let Some(url) = subcommand_matches.value_of("json_rpc_url") {
config.url = url.to_string();
config.json_rpc_url = url.to_string();
// Revert to a computed `websocket_url` value when `json_rpc_url` is
// changed
config.websocket_url = "".to_string();
}
if let Some(url) = subcommand_matches.value_of("websocket_url") {
config.websocket_url = url.to_string();
@@ -69,19 +71,19 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
config.save(config_file)?;
let (url_setting_type, json_rpc_url) =
CliConfig::compute_json_rpc_url_setting("", &config.url);
CliConfig::compute_json_rpc_url_setting("", &config.json_rpc_url);
let (ws_setting_type, websocket_url) = CliConfig::compute_websocket_url_setting(
"",
&config.websocket_url,
"",
&config.url,
&config.json_rpc_url,
);
let (keypair_setting_type, keypair_path) =
CliConfig::compute_keypair_path_setting("", &config.keypair_path);
println_name_value("Config File:", config_file);
println_name_value_or("RPC URL:", &json_rpc_url, url_setting_type);
println_name_value_or("WS URL:", &websocket_url, ws_setting_type);
println_name_value_or("WebSocket URL:", &websocket_url, ws_setting_type);
println_name_value_or("Keypair Path:", &keypair_path, keypair_setting_type);
} else {
println!(
@@ -109,13 +111,13 @@ pub fn parse_args<'a>(
};
let (_, json_rpc_url) = CliConfig::compute_json_rpc_url_setting(
matches.value_of("json_rpc_url").unwrap_or(""),
&config.url,
&config.json_rpc_url,
);
let (_, websocket_url) = CliConfig::compute_websocket_url_setting(
matches.value_of("websocket_url").unwrap_or(""),
&config.websocket_url,
matches.value_of("json_rpc_url").unwrap_or(""),
&config.url,
&config.json_rpc_url,
);
let (_, default_signer_path) = CliConfig::compute_keypair_path_setting(
matches.value_of("keypair").unwrap_or(""),
@@ -132,7 +134,6 @@ pub fn parse_args<'a>(
websocket_url,
signers: vec![],
keypair_path: default_signer_path,
derivation_path: derivation_of(matches, "derivation_path"),
rpc_client: None,
verbose: matches.is_present("verbose"),
},
@@ -151,7 +152,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let arg = Arg::with_name("config_file")
.short("C")
.long("config")
.value_name("PATH")
.value_name("FILEPATH")
.takes_value(true)
.global(true)
.help("Configuration file to use");
@@ -184,26 +185,17 @@ fn main() -> Result<(), Box<dyn error::Error>> {
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.value_name("KEYPAIR")
.global(true)
.takes_value(true)
.help("/path/to/id.json or usb://remote/wallet/path"),
)
.arg(
Arg::with_name("derivation_path")
.long("derivation-path")
.value_name("ACCOUNT or ACCOUNT/CHANGE")
.global(true)
.takes_value(true)
.validator(is_derivation)
.help("Derivation path to use: m/44'/501'/ACCOUNT'/CHANGE'; default key is device base pubkey: m/44'/501'/0'")
.help("Filepath or URL to a keypair"),
)
.arg(
Arg::with_name("verbose")
.long("verbose")
.short("v")
.global(true)
.help("Show extra information header"),
.help("Show additional information"),
)
.arg(
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
@@ -241,13 +233,23 @@ fn main() -> Result<(), Box<dyn error::Error>> {
)
.get_matches();
do_main(&matches).map_err(|err| DisplayError::new_as_boxed(err).into())
}
fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
if parse_settings(&matches)? {
let wallet_manager = maybe_wallet_manager()?;
let (mut config, signers) = parse_args(&matches, wallet_manager)?;
config.signers = signers.iter().map(|s| s.as_ref()).collect();
let result = process_command(&config)?;
println!("{}", result);
}
let (_, submatches) = matches.subcommand();
let sign_only = submatches
.map(|m| m.is_present(SIGN_ONLY_ARG.name))
.unwrap_or(false);
if !sign_only {
println!("{}", result);
}
};
Ok(())
}


@@ -14,7 +14,11 @@ use solana_sdk::{
account_utils::StateMut,
hash::Hash,
message::Message,
nonce::{self, state::Versions, State},
nonce::{
self,
state::{Data, Versions},
State,
},
pubkey::Pubkey,
system_instruction::{
advance_nonce_account, authorize_nonce_account, create_address_with_seed,
@@ -25,14 +29,24 @@ use solana_sdk::{
transaction::Transaction,
};
use std::sync::Arc;
use thiserror::Error;
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Error, PartialEq)]
pub enum CliNonceError {
#[error("invalid account owner")]
InvalidAccountOwner,
#[error("invalid account data")]
InvalidAccountData,
#[error("unexpected account data size")]
UnexpectedDataSize,
#[error("query hash does not match stored hash")]
InvalidHash,
#[error("query authority does not match account authority")]
InvalidAuthority,
InvalidState,
#[error("invalid state for requested operation")]
InvalidStateForOperation,
#[error("client error: {0}")]
Client(String),
}
pub const NONCE_ARG: ArgConstant<'static> = ArgConstant {
@@ -60,7 +74,7 @@ pub fn nonce_arg<'a, 'b>() -> Arg<'a, 'b> {
.takes_value(true)
.value_name("PUBKEY")
.requires(BLOCKHASH_ARG.name)
.validator(is_pubkey)
.validator(is_valid_pubkey)
.help(NONCE_ARG.help)
}
@@ -68,7 +82,7 @@ pub fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.value_name("KEYPAIR or PUBKEY or REMOTE WALLET PATH")
.value_name("KEYPAIR")
.validator(is_valid_signer)
.help(NONCE_AUTHORITY_ARG.help)
}
@@ -79,30 +93,23 @@ impl NonceSubCommands for App<'_, '_> {
SubCommand::with_name("authorize-nonce-account")
.about("Assign account authority to a new entity")
.arg(
Arg::with_name("nonce_account_keypair")
Arg::with_name("nonce_account_pubkey")
.index(1)
.value_name("NONCE_ACCOUNT")
.value_name("NONCE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("Address of the nonce account"),
)
.arg(
Arg::with_name("new_authority")
.index(2)
.value_name("NEW_AUTHORITY_PUBKEY")
.value_name("AUTHORITY_PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("Account to be granted authority of the nonce account"),
)
.arg(
Arg::with_name("seed")
.long("seed")
.value_name("SEED STRING")
.takes_value(true)
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey")
)
.arg(nonce_authority_arg()),
)
.subcommand(
@@ -111,10 +118,10 @@ impl NonceSubCommands for App<'_, '_> {
.arg(
Arg::with_name("nonce_account_keypair")
.index(1)
.value_name("NONCE ACCOUNT")
.value_name("ACCOUNT_KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_signer)
.help("Keypair of the nonce account to fund"),
)
.arg(
@@ -130,9 +137,16 @@ impl NonceSubCommands for App<'_, '_> {
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.value_name("BASE58_PUBKEY")
.validator(is_pubkey_or_keypair)
.value_name("PUBKEY")
.validator(is_valid_pubkey)
.help("Assign noncing authority to another entity"),
)
.arg(
Arg::with_name("seed")
.long("seed")
.value_name("STRING")
.takes_value(true)
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey")
),
)
.subcommand(
@@ -142,10 +156,10 @@ impl NonceSubCommands for App<'_, '_> {
.arg(
Arg::with_name("nonce_account_pubkey")
.index(1)
.value_name("NONCE ACCOUNT")
.value_name("NONCE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("Address of the nonce account to display"),
),
)
@@ -153,12 +167,12 @@ impl NonceSubCommands for App<'_, '_> {
SubCommand::with_name("new-nonce")
.about("Generate a new nonce, rendering the existing nonce useless")
.arg(
Arg::with_name("nonce_account_keypair")
Arg::with_name("nonce_account_pubkey")
.index(1)
.value_name("NONCE ACCOUNT")
.value_name("NONCE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("Address of the nonce account"),
)
.arg(nonce_authority_arg()),
@@ -170,10 +184,10 @@ impl NonceSubCommands for App<'_, '_> {
.arg(
Arg::with_name("nonce_account_pubkey")
.index(1)
.value_name("NONCE ACCOUNT")
.value_name("NONCE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("Address of the nonce account to display"),
)
.arg(
@@ -185,24 +199,24 @@ impl NonceSubCommands for App<'_, '_> {
)
.subcommand(
SubCommand::with_name("withdraw-from-nonce-account")
.about("Withdraw lamports from the nonce account")
.about("Withdraw SOL from the nonce account")
.arg(
Arg::with_name("nonce_account_keypair")
Arg::with_name("nonce_account_pubkey")
.index(1)
.value_name("NONCE ACCOUNT")
.value_name("NONCE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_keypair_or_ask_keyword)
.help("Nonce account from to withdraw from"),
.validator(is_valid_pubkey)
.help("Nonce account to withdraw from"),
)
.arg(
Arg::with_name("destination_account_pubkey")
.index(2)
.value_name("DESTINATION ACCOUNT")
.value_name("RECIPIENT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("The account to which the lamports should be transferred"),
.validator(is_valid_pubkey)
.help("The account to which the SOL should be transferred"),
)
.arg(
Arg::with_name("amount")
@@ -218,13 +232,55 @@ impl NonceSubCommands for App<'_, '_> {
}
}
pub fn get_account(
rpc_client: &RpcClient,
nonce_pubkey: &Pubkey,
) -> Result<Account, CliNonceError> {
rpc_client
.get_account(nonce_pubkey)
.map_err(|e| CliNonceError::Client(format!("{}", e)))
.and_then(|a| match account_identity_ok(&a) {
Ok(()) => Ok(a),
Err(e) => Err(e),
})
}
pub fn account_identity_ok(account: &Account) -> Result<(), CliNonceError> {
if account.owner != system_program::id() {
Err(CliNonceError::InvalidAccountOwner)
} else if account.data.is_empty() {
Err(CliNonceError::UnexpectedDataSize)
} else {
Ok(())
}
}
pub fn state_from_account(account: &Account) -> Result<State, CliNonceError> {
account_identity_ok(account)?;
StateMut::<Versions>::state(account)
.map_err(|_| CliNonceError::InvalidAccountData)
.map(|v| v.convert_to_current())
}
pub fn data_from_account(account: &Account) -> Result<Data, CliNonceError> {
account_identity_ok(account)?;
state_from_account(account).and_then(|ref s| data_from_state(s).map(|d| d.clone()))
}
pub fn data_from_state(state: &State) -> Result<&Data, CliNonceError> {
match state {
State::Uninitialized => Err(CliNonceError::InvalidStateForOperation),
State::Initialized(data) => Ok(data),
}
}
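// Illustrative sketch (not part of this diff): composing the helpers above to read the
// stored blockhash and authority of a nonce account. The RPC URL is hypothetical.
fn print_nonce_data_example(nonce_pubkey: &Pubkey) -> Result<(), CliNonceError> {
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let account = get_account(&rpc_client, nonce_pubkey)?;
    let data = data_from_account(&account)?;
    println!("blockhash: {}", data.blockhash);
    println!("authority: {}", data.authority);
    Ok(())
}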
pub fn parse_authorize_nonce_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let new_authority = pubkey_of(matches, "new_authority").unwrap();
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let new_authority = pubkey_of_signer(matches, "new_authority", wallet_manager)?.unwrap();
let (nonce_authority, nonce_authority_pubkey) =
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
@@ -255,7 +311,7 @@ pub fn parse_nonce_create_account(
signer_of(matches, "nonce_account_keypair", wallet_manager)?;
let seed = matches.value_of("seed").map(|s| s.to_string());
let lamports = lamports_of_sol(matches, "amount").unwrap();
let nonce_authority = pubkey_of(matches, NONCE_AUTHORITY_ARG.name);
let nonce_authority = pubkey_of_signer(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
let payer_provided = None;
let signer_info = generate_unique_signers(
@@ -276,8 +332,12 @@ pub fn parse_nonce_create_account(
})
}
pub fn parse_get_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account_pubkey = pubkey_of(matches, "nonce_account_pubkey").unwrap();
pub fn parse_get_nonce(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account_pubkey =
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
Ok(CliCommandInfo {
command: CliCommand::GetNonce(nonce_account_pubkey),
@@ -290,7 +350,7 @@ pub fn parse_new_nonce(
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let (nonce_authority, nonce_authority_pubkey) =
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
@@ -311,8 +371,12 @@ pub fn parse_new_nonce(
})
}
pub fn parse_show_nonce_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account_pubkey = pubkey_of(matches, "nonce_account_pubkey").unwrap();
pub fn parse_show_nonce_account(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account_pubkey =
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let use_lamports_unit = matches.is_present("lamports");
Ok(CliCommandInfo {
@@ -329,8 +393,9 @@ pub fn parse_withdraw_from_nonce_account(
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let destination_account_pubkey =
pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap();
let lamports = lamports_of_sol(matches, "amount").unwrap();
let (nonce_authority, nonce_authority_pubkey) =
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
@@ -359,24 +424,18 @@ pub fn check_nonce_account(
nonce_account: &Account,
nonce_authority: &Pubkey,
nonce_hash: &Hash,
) -> Result<(), Box<CliError>> {
if nonce_account.owner != system_program::ID {
return Err(CliError::InvalidNonce(CliNonceError::InvalidAccountOwner).into());
}
let nonce_state = StateMut::<Versions>::state(nonce_account)
.map(|v| v.convert_to_current())
.map_err(|_| Box::new(CliError::InvalidNonce(CliNonceError::InvalidAccountData)))?;
match nonce_state {
) -> Result<(), CliError> {
match state_from_account(nonce_account)? {
State::Initialized(ref data) => {
if &data.blockhash != nonce_hash {
Err(CliError::InvalidNonce(CliNonceError::InvalidHash).into())
Err(CliNonceError::InvalidHash.into())
} else if nonce_authority != &data.authority {
Err(CliError::InvalidNonce(CliNonceError::InvalidAuthority).into())
Err(CliNonceError::InvalidAuthority.into())
} else {
Ok(())
}
}
State::Uninitialized => Err(CliError::InvalidNonce(CliNonceError::InvalidState).into()),
State::Uninitialized => Err(CliNonceError::InvalidStateForOperation.into()),
}
}
@@ -391,7 +450,7 @@ pub fn process_authorize_nonce_account(
let nonce_authority = config.signers[nonce_authority];
let ix = authorize_nonce_account(nonce_account, &nonce_authority.pubkey(), new_authority);
let message = Message::new_with_payer(vec![ix], Some(&config.signers[0].pubkey()));
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
@@ -401,7 +460,7 @@ pub fn process_authorize_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<NonceError>(result)
}
@@ -425,10 +484,8 @@ pub fn process_create_nonce_account(
(&nonce_account_address, "nonce_account".to_string()),
)?;
if let Ok(nonce_account) = rpc_client.get_account(&nonce_account_address) {
let err_msg = if nonce_account.owner == system_program::id()
&& StateMut::<Versions>::state(&nonce_account).is_ok()
{
if let Ok(nonce_account) = get_account(rpc_client, &nonce_account_address) {
let err_msg = if state_from_account(&nonce_account).is_ok() {
format!("Nonce account {} already exists", nonce_account_address)
} else {
format!(
@@ -470,7 +527,7 @@ pub fn process_create_nonce_account(
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let message = Message::new_with_payer(ixs, Some(&config.signers[0].pubkey()));
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
@@ -480,28 +537,14 @@ pub fn process_create_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
}
pub fn process_get_nonce(rpc_client: &RpcClient, nonce_account_pubkey: &Pubkey) -> ProcessResult {
let nonce_account = rpc_client.get_account(nonce_account_pubkey)?;
if nonce_account.owner != system_program::id() {
return Err(CliError::RpcRequestError(format!(
"{:?} is not a nonce account",
nonce_account_pubkey
))
.into());
}
let nonce_state = StateMut::<Versions>::state(&nonce_account).map(|v| v.convert_to_current());
match nonce_state {
Ok(State::Uninitialized) => Ok("Nonce account is uninitialized".to_string()),
Ok(State::Initialized(ref data)) => Ok(format!("{:?}", data.blockhash)),
Err(err) => Err(CliError::RpcRequestError(format!(
"Account data could not be deserialized to nonce state: {:?}",
err
))
.into()),
match get_account(rpc_client, nonce_account_pubkey).and_then(|ref a| state_from_account(a))? {
State::Uninitialized => Ok("Nonce account is uninitialized".to_string()),
State::Initialized(ref data) => Ok(format!("{:?}", data.blockhash)),
}
}
@@ -526,7 +569,7 @@ pub fn process_new_nonce(
let nonce_authority = config.signers[nonce_authority];
let ix = advance_nonce_account(&nonce_account, &nonce_authority.pubkey());
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let message = Message::new_with_payer(vec![ix], Some(&config.signers[0].pubkey()));
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
@@ -535,8 +578,8 @@ pub fn process_new_nonce(
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction(&mut tx, &[config.signers[0], nonce_authority]);
let result = rpc_client
.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0], nonce_authority]);
log_instruction_custom_error::<SystemError>(result)
}
@@ -545,14 +588,7 @@ pub fn process_show_nonce_account(
nonce_account_pubkey: &Pubkey,
use_lamports_unit: bool,
) -> ProcessResult {
let nonce_account = rpc_client.get_account(nonce_account_pubkey)?;
if nonce_account.owner != system_program::id() {
return Err(CliError::RpcRequestError(format!(
"{:?} is not a nonce account",
nonce_account_pubkey
))
.into());
}
let nonce_account = get_account(rpc_client, nonce_account_pubkey)?;
let print_account = |data: Option<&nonce::state::Data>| {
println!(
"Balance: {}",
@@ -583,15 +619,9 @@ pub fn process_show_nonce_account(
}
Ok("".to_string())
};
let nonce_state = StateMut::<Versions>::state(&nonce_account).map(|v| v.convert_to_current());
match nonce_state {
Ok(State::Uninitialized) => print_account(None),
Ok(State::Initialized(ref data)) => print_account(Some(data)),
Err(err) => Err(CliError::RpcRequestError(format!(
"Account data could not be deserialized to nonce state: {:?}",
err
))
.into()),
match state_from_account(&nonce_account)? {
State::Uninitialized => print_account(None),
State::Initialized(ref data) => print_account(Some(data)),
}
}
@@ -612,7 +642,7 @@ pub fn process_withdraw_from_nonce_account(
destination_account_pubkey,
lamports,
);
let message = Message::new_with_payer(vec![ix], Some(&config.signers[0].pubkey()));
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
@@ -621,7 +651,7 @@ pub fn process_withdraw_from_nonce_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<NonceError>(result)
}
@@ -848,31 +878,6 @@ mod tests {
}
);
let test_withdraw_from_nonce_account = test_commands.clone().get_matches_from(vec![
"test",
"withdraw-from-nonce-account",
&keypair_file,
&nonce_account_string,
"42",
]);
assert_eq!(
parse_command(
&test_withdraw_from_nonce_account,
&default_keypair_file,
None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
nonce_authority: 0,
destination_account_pubkey: nonce_account_pubkey,
lamports: 42000000000
},
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
}
);
// Test WithdrawFromNonceAccount Subcommand with authority
let test_withdraw_from_nonce_account = test_commands.clone().get_matches_from(vec![
"test",
@@ -920,17 +925,13 @@ mod tests {
let invalid_owner = Account::new_data(1, &data, &Pubkey::new(&[1u8; 32]));
assert_eq!(
check_nonce_account(&invalid_owner.unwrap(), &nonce_pubkey, &blockhash),
Err(Box::new(CliError::InvalidNonce(
CliNonceError::InvalidAccountOwner
))),
Err(CliNonceError::InvalidAccountOwner.into()),
);
let invalid_data = Account::new_data(1, &"invalid", &system_program::ID);
assert_eq!(
check_nonce_account(&invalid_data.unwrap(), &nonce_pubkey, &blockhash),
Err(Box::new(CliError::InvalidNonce(
CliNonceError::InvalidAccountData
))),
Err(CliNonceError::InvalidAccountData.into()),
);
let data = Versions::new_current(State::Initialized(nonce::state::Data {
@@ -941,7 +942,7 @@ mod tests {
let invalid_hash = Account::new_data(1, &data, &system_program::ID);
assert_eq!(
check_nonce_account(&invalid_hash.unwrap(), &nonce_pubkey, &blockhash),
Err(Box::new(CliError::InvalidNonce(CliNonceError::InvalidHash))),
Err(CliNonceError::InvalidHash.into()),
);
let data = Versions::new_current(State::Initialized(nonce::state::Data {
@@ -952,18 +953,84 @@ mod tests {
let invalid_authority = Account::new_data(1, &data, &system_program::ID);
assert_eq!(
check_nonce_account(&invalid_authority.unwrap(), &nonce_pubkey, &blockhash),
Err(Box::new(CliError::InvalidNonce(
CliNonceError::InvalidAuthority
))),
Err(CliNonceError::InvalidAuthority.into()),
);
let data = Versions::new_current(State::Uninitialized);
let invalid_state = Account::new_data(1, &data, &system_program::ID);
assert_eq!(
check_nonce_account(&invalid_state.unwrap(), &nonce_pubkey, &blockhash),
Err(Box::new(CliError::InvalidNonce(
CliNonceError::InvalidState
))),
Err(CliNonceError::InvalidStateForOperation.into()),
);
}
#[test]
fn test_account_identity_ok() {
let nonce_account = nonce::create_account(1).into_inner();
assert_eq!(account_identity_ok(&nonce_account), Ok(()));
let system_account = Account::new(1, 0, &system_program::id());
assert_eq!(
account_identity_ok(&system_account),
Err(CliNonceError::UnexpectedDataSize),
);
let other_program = Pubkey::new(&[1u8; 32]);
let other_account_no_data = Account::new(1, 0, &other_program);
assert_eq!(
account_identity_ok(&other_account_no_data),
Err(CliNonceError::InvalidAccountOwner),
);
}
#[test]
fn test_state_from_account() {
let mut nonce_account = nonce::create_account(1).into_inner();
assert_eq!(state_from_account(&nonce_account), Ok(State::Uninitialized));
let data = nonce::state::Data {
authority: Pubkey::new(&[1u8; 32]),
blockhash: Hash::new(&[42u8; 32]),
fee_calculator: FeeCalculator::new(42),
};
nonce_account
.set_state(&Versions::new_current(State::Initialized(data.clone())))
.unwrap();
assert_eq!(
state_from_account(&nonce_account),
Ok(State::Initialized(data))
);
let wrong_data_size_account = Account::new(1, 1, &system_program::id());
assert_eq!(
state_from_account(&wrong_data_size_account),
Err(CliNonceError::InvalidAccountData),
);
}
#[test]
fn test_data_from_helpers() {
let mut nonce_account = nonce::create_account(1).into_inner();
let state = state_from_account(&nonce_account).unwrap();
assert_eq!(
data_from_state(&state),
Err(CliNonceError::InvalidStateForOperation)
);
assert_eq!(
data_from_account(&nonce_account),
Err(CliNonceError::InvalidStateForOperation)
);
let data = nonce::state::Data {
authority: Pubkey::new(&[1u8; 32]),
blockhash: Hash::new(&[42u8; 32]),
fee_calculator: FeeCalculator::new(42),
};
nonce_account
.set_state(&Versions::new_current(State::Initialized(data.clone())))
.unwrap();
let state = state_from_account(&nonce_account).unwrap();
assert_eq!(data_from_state(&state), Ok(&data));
assert_eq!(data_from_account(&nonce_account), Ok(data));
}
}
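For reference, a minimal sketch of how the new helpers are meant to chain from a caller inside this module. The function name current_nonce_blockhash is illustrative and not part of the diff; it assumes get_account, data_from_account, CliNonceError, and the module's existing imports (RpcClient, Pubkey, Hash) are in scope as above.
// Illustrative only, not part of this change.
fn current_nonce_blockhash(
    rpc_client: &RpcClient,
    nonce_pubkey: &Pubkey,
) -> Result<Hash, CliNonceError> {
    // get_account is expected to reject accounts that are not nonce accounts
    // (see account_identity_ok above); data_from_account then rejects an
    // Uninitialized nonce with InvalidStateForOperation.
    let data = get_account(rpc_client, nonce_pubkey)
        .and_then(|ref account| data_from_account(account))?;
    Ok(data.blockhash)
}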


@@ -1,253 +0,0 @@
use clap::{App, Arg, ArgMatches};
use serde_json::Value;
use solana_clap_utils::{
input_parsers::value_of,
input_validators::{is_hash, is_pubkey_sig},
offline::{BLOCKHASH_ARG, SIGNER_ARG, SIGN_ONLY_ARG},
};
use solana_client::rpc_client::RpcClient;
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, signature::Signature};
use std::str::FromStr;
#[derive(Clone, Debug, PartialEq)]
pub enum BlockhashQuery {
None(Hash, FeeCalculator),
FeeCalculator(Hash),
All,
}
impl BlockhashQuery {
pub fn new(blockhash: Option<Hash>, sign_only: bool) -> Self {
match blockhash {
Some(hash) if sign_only => Self::None(hash, FeeCalculator::default()),
Some(hash) if !sign_only => Self::FeeCalculator(hash),
None if !sign_only => Self::All,
_ => panic!("Cannot resolve blockhash"),
}
}
pub fn new_from_matches(matches: &ArgMatches<'_>) -> Self {
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
BlockhashQuery::new(blockhash, sign_only)
}
pub fn get_blockhash_fee_calculator(
&self,
rpc_client: &RpcClient,
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
let (hash, fee_calc) = match self {
BlockhashQuery::None(hash, fee_calc) => (Some(hash), Some(fee_calc)),
BlockhashQuery::FeeCalculator(hash) => (Some(hash), None),
BlockhashQuery::All => (None, None),
};
if None == fee_calc {
let (cluster_hash, fee_calc) = rpc_client.get_recent_blockhash()?;
Ok((*hash.unwrap_or(&cluster_hash), fee_calc))
} else {
Ok((*hash.unwrap(), fee_calc.unwrap().clone()))
}
}
}
impl Default for BlockhashQuery {
fn default() -> Self {
BlockhashQuery::All
}
}
fn blockhash_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(BLOCKHASH_ARG.name)
.long(BLOCKHASH_ARG.long)
.takes_value(true)
.value_name("BLOCKHASH")
.validator(is_hash)
.help(BLOCKHASH_ARG.help)
}
fn sign_only_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGN_ONLY_ARG.name)
.long(SIGN_ONLY_ARG.long)
.takes_value(false)
.requires(BLOCKHASH_ARG.name)
.help(SIGN_ONLY_ARG.help)
}
fn signer_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGNER_ARG.name)
.long(SIGNER_ARG.long)
.takes_value(true)
.value_name("BASE58_PUBKEY=BASE58_SIG")
.validator(is_pubkey_sig)
.requires(BLOCKHASH_ARG.name)
.multiple(true)
.help(SIGNER_ARG.help)
}
pub trait OfflineArgs {
fn offline_args(self) -> Self;
}
impl OfflineArgs for App<'_, '_> {
fn offline_args(self) -> Self {
self.arg(blockhash_arg())
.arg(sign_only_arg())
.arg(signer_arg())
}
}
pub fn parse_sign_only_reply_string(reply: &str) -> (Hash, Vec<(Pubkey, Signature)>) {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
(blockhash, signers)
}
#[cfg(test)]
mod tests {
use super::*;
use clap::App;
use serde_json::{self, json, Value};
use solana_client::{
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext},
};
use solana_sdk::{fee_calculator::FeeCalculator, hash::hash};
use std::collections::HashMap;
#[test]
fn test_blockhashspec_new_ok() {
let blockhash = hash(&[1u8]);
assert_eq!(
BlockhashQuery::new(Some(blockhash), true),
BlockhashQuery::None(blockhash, FeeCalculator::default()),
);
assert_eq!(
BlockhashQuery::new(Some(blockhash), false),
BlockhashQuery::FeeCalculator(blockhash),
);
assert_eq!(BlockhashQuery::new(None, false), BlockhashQuery::All,);
}
#[test]
#[should_panic]
fn test_blockhashspec_new_fail() {
BlockhashQuery::new(None, true);
}
#[test]
fn test_blockhashspec_new_from_matches_ok() {
let test_commands = App::new("blockhashspec_test").offline_args();
let blockhash = hash(&[1u8]);
let blockhash_string = blockhash.to_string();
let matches = test_commands.clone().get_matches_from(vec![
"blockhashspec_test",
"--blockhash",
&blockhash_string,
"--sign-only",
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::None(blockhash, FeeCalculator::default()),
);
let matches = test_commands.clone().get_matches_from(vec![
"blockhashspec_test",
"--blockhash",
&blockhash_string,
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::FeeCalculator(blockhash),
);
let matches = test_commands
.clone()
.get_matches_from(vec!["blockhashspec_test"]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::All,
);
}
#[test]
#[should_panic]
fn test_blockhashspec_new_from_matches_fail() {
let test_commands = App::new("blockhashspec_test")
.arg(blockhash_arg())
// We can really only hit this case if the arg requirements
// are broken, so unset the requires() to recreate that condition
.arg(sign_only_arg().requires(""));
let matches = test_commands
.clone()
.get_matches_from(vec!["blockhashspec_test", "--sign-only"]);
BlockhashQuery::new_from_matches(&matches);
}
#[test]
fn test_blockhashspec_get_blockhash_fee_calc() {
let test_blockhash = hash(&[0u8]);
let rpc_blockhash = hash(&[1u8]);
let rpc_fee_calc = FeeCalculator::new(42);
let get_recent_blockhash_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!((
Value::String(rpc_blockhash.to_string()),
serde_json::to_value(rpc_fee_calc.clone()).unwrap()
)),
});
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::All
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(rpc_blockhash, rpc_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::FeeCalculator(test_blockhash)
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(test_blockhash, rpc_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::None(test_blockhash, FeeCalculator::default())
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(test_blockhash, FeeCalculator::default()),
);
let rpc_client = RpcClient::new_mock("fails".to_string());
assert!(BlockhashQuery::All
.get_blockhash_fee_calculator(&rpc_client)
.is_err());
}
}


@@ -0,0 +1,394 @@
use super::*;
#[derive(Debug, PartialEq)]
pub enum Source {
Cluster,
NonceAccount(Pubkey),
}
impl Source {
pub fn get_blockhash_and_fee_calculator(
&self,
rpc_client: &RpcClient,
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
match self {
Self::Cluster => {
let res = rpc_client.get_recent_blockhash()?;
Ok(res)
}
Self::NonceAccount(ref pubkey) => {
let data = nonce::get_account(rpc_client, pubkey)
.and_then(|ref a| nonce::data_from_account(a))?;
Ok((data.blockhash, data.fee_calculator))
}
}
}
pub fn get_fee_calculator(
&self,
rpc_client: &RpcClient,
blockhash: &Hash,
) -> Result<Option<FeeCalculator>, Box<dyn std::error::Error>> {
match self {
Self::Cluster => {
let res = rpc_client.get_fee_calculator_for_blockhash(blockhash)?;
Ok(res)
}
Self::NonceAccount(ref pubkey) => {
let res = nonce::get_account(rpc_client, pubkey)
.and_then(|ref a| nonce::data_from_account(a))
.and_then(|d| {
if d.blockhash == *blockhash {
Ok(Some(d.fee_calculator))
} else {
Ok(None)
}
})?;
Ok(res)
}
}
}
}
#[derive(Debug, PartialEq)]
pub enum BlockhashQuery {
None(Hash),
FeeCalculator(Source, Hash),
All(Source),
}
impl BlockhashQuery {
pub fn new(blockhash: Option<Hash>, sign_only: bool, nonce_account: Option<Pubkey>) -> Self {
let source = nonce_account
.map(Source::NonceAccount)
.unwrap_or(Source::Cluster);
match blockhash {
Some(hash) if sign_only => Self::None(hash),
Some(hash) if !sign_only => Self::FeeCalculator(source, hash),
None if !sign_only => Self::All(source),
_ => panic!("Cannot resolve blockhash"),
}
}
pub fn new_from_matches(matches: &ArgMatches<'_>) -> Self {
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let nonce_account = pubkey_of(matches, nonce::NONCE_ARG.name);
BlockhashQuery::new(blockhash, sign_only, nonce_account)
}
pub fn get_blockhash_and_fee_calculator(
&self,
rpc_client: &RpcClient,
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
match self {
BlockhashQuery::None(hash) => Ok((*hash, FeeCalculator::default())),
BlockhashQuery::FeeCalculator(source, hash) => {
let fee_calculator = source
.get_fee_calculator(rpc_client, hash)?
.ok_or(format!("Hash has expired {:?}", hash))?;
Ok((*hash, fee_calculator))
}
BlockhashQuery::All(source) => source.get_blockhash_and_fee_calculator(rpc_client),
}
}
}
impl Default for BlockhashQuery {
fn default() -> Self {
BlockhashQuery::All(Source::Cluster)
}
}
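Ahead of the tests, a minimal sketch of the intended call pattern from a command handler. The helper name resolve_blockhash is illustrative and not part of the diff; ArgMatches, RpcClient, Hash, and FeeCalculator are assumed to arrive through this module's use super::*.
// Illustrative only, not part of this change.
fn resolve_blockhash(
    matches: &ArgMatches<'_>,
    rpc_client: &RpcClient,
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
    // --blockhash H --sign-only  => (H, FeeCalculator::default()), no RPC call
    // --blockhash H [--nonce N]  => fee calculator looked up for H, from the
    //                               cluster or from nonce account N; errors if expired
    // no --blockhash             => getRecentBlockhash, or the blockhash and fee
    //                               calculator stored in nonce account N
    let query = BlockhashQuery::new_from_matches(matches);
    query.get_blockhash_and_fee_calculator(rpc_client)
}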
#[cfg(test)]
mod tests {
use super::*;
use crate::{nonce::nonce_arg, offline::blockhash_query::BlockhashQuery};
use clap::App;
use serde_json::{self, json, Value};
use solana_client::{
rpc_request::RpcRequest,
rpc_response::{Response, RpcAccount, RpcFeeCalculator, RpcResponseContext},
};
use solana_sdk::{
account::Account, fee_calculator::FeeCalculator, hash::hash, nonce, system_program,
};
use std::collections::HashMap;
#[test]
fn test_blockhash_query_new_ok() {
let blockhash = hash(&[1u8]);
let nonce_pubkey = Pubkey::new(&[1u8; 32]);
assert_eq!(
BlockhashQuery::new(Some(blockhash), true, None),
BlockhashQuery::None(blockhash),
);
assert_eq!(
BlockhashQuery::new(Some(blockhash), false, None),
BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
);
assert_eq!(
BlockhashQuery::new(None, false, None),
BlockhashQuery::All(blockhash_query::Source::Cluster)
);
assert_eq!(
BlockhashQuery::new(Some(blockhash), true, Some(nonce_pubkey)),
BlockhashQuery::None(blockhash),
);
assert_eq!(
BlockhashQuery::new(Some(blockhash), false, Some(nonce_pubkey)),
BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_pubkey),
blockhash
),
);
assert_eq!(
BlockhashQuery::new(None, false, Some(nonce_pubkey)),
BlockhashQuery::All(blockhash_query::Source::NonceAccount(nonce_pubkey)),
);
}
#[test]
#[should_panic]
fn test_blockhash_query_new_no_nonce_fail() {
BlockhashQuery::new(None, true, None);
}
#[test]
#[should_panic]
fn test_blockhash_query_new_nonce_fail() {
let nonce_pubkey = Pubkey::new(&[1u8; 32]);
BlockhashQuery::new(None, true, Some(nonce_pubkey));
}
#[test]
fn test_blockhash_query_new_from_matches_ok() {
let test_commands = App::new("blockhash_query_test")
.arg(nonce_arg())
.offline_args();
let blockhash = hash(&[1u8]);
let blockhash_string = blockhash.to_string();
let matches = test_commands.clone().get_matches_from(vec![
"blockhash_query_test",
"--blockhash",
&blockhash_string,
"--sign-only",
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::None(blockhash),
);
let matches = test_commands.clone().get_matches_from(vec![
"blockhash_query_test",
"--blockhash",
&blockhash_string,
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
);
let matches = test_commands
.clone()
.get_matches_from(vec!["blockhash_query_test"]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::All(blockhash_query::Source::Cluster),
);
let nonce_pubkey = Pubkey::new(&[1u8; 32]);
let nonce_string = nonce_pubkey.to_string();
let matches = test_commands.clone().get_matches_from(vec![
"blockhash_query_test",
"--blockhash",
&blockhash_string,
"--sign-only",
"--nonce",
&nonce_string,
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::None(blockhash),
);
let matches = test_commands.clone().get_matches_from(vec![
"blockhash_query_test",
"--blockhash",
&blockhash_string,
"--nonce",
&nonce_string,
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_pubkey),
blockhash
),
);
}
#[test]
#[should_panic]
fn test_blockhash_query_new_from_matches_without_nonce_fail() {
let test_commands = App::new("blockhash_query_test")
.arg(blockhash_arg())
// We can really only hit this case if the arg requirements
// are broken, so unset the requires() to recreate that condition
.arg(sign_only_arg().requires(""));
let matches = test_commands
.clone()
.get_matches_from(vec!["blockhash_query_test", "--sign-only"]);
BlockhashQuery::new_from_matches(&matches);
}
#[test]
#[should_panic]
fn test_blockhash_query_new_from_matches_with_nonce_fail() {
let test_commands = App::new("blockhash_query_test")
.arg(blockhash_arg())
// We can really only hit this case if the arg requirements
// are broken, so unset the requires() to recreate that condition
.arg(sign_only_arg().requires(""));
let nonce_pubkey = Pubkey::new(&[1u8; 32]);
let nonce_string = nonce_pubkey.to_string();
let matches = test_commands.clone().get_matches_from(vec![
"blockhash_query_test",
"--sign-only",
"--nonce",
&nonce_string,
]);
BlockhashQuery::new_from_matches(&matches);
}
#[test]
fn test_blockhash_query_get_blockhash_fee_calc() {
let test_blockhash = hash(&[0u8]);
let rpc_blockhash = hash(&[1u8]);
let rpc_fee_calc = FeeCalculator::new(42);
let get_recent_blockhash_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!((
Value::String(rpc_blockhash.to_string()),
serde_json::to_value(rpc_fee_calc.clone()).unwrap()
)),
});
let get_fee_calculator_for_blockhash_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!(RpcFeeCalculator {
fee_calculator: rpc_fee_calc.clone()
}),
});
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::default()
.get_blockhash_and_fee_calculator(&rpc_client)
.unwrap(),
(rpc_blockhash, rpc_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
mocks.insert(
RpcRequest::GetFeeCalculatorForBlockhash,
get_fee_calculator_for_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::FeeCalculator(Source::Cluster, test_blockhash)
.get_blockhash_and_fee_calculator(&rpc_client)
.unwrap(),
(test_blockhash, rpc_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::None(test_blockhash)
.get_blockhash_and_fee_calculator(&rpc_client)
.unwrap(),
(test_blockhash, FeeCalculator::default()),
);
let rpc_client = RpcClient::new_mock("fails".to_string());
assert!(BlockhashQuery::default()
.get_blockhash_and_fee_calculator(&rpc_client)
.is_err());
let nonce_blockhash = Hash::new(&[2u8; 32]);
let nonce_fee_calc = FeeCalculator::new(4242);
let data = nonce::state::Data {
authority: Pubkey::new(&[3u8; 32]),
blockhash: nonce_blockhash,
fee_calculator: nonce_fee_calc.clone(),
};
let nonce_account = Account::new_data_with_space(
42,
&nonce::state::Versions::new_current(nonce::State::Initialized(data)),
nonce::State::size(),
&system_program::id(),
)
.unwrap();
let nonce_pubkey = Pubkey::new(&[4u8; 32]);
let rpc_nonce_account = RpcAccount::encode(nonce_account);
let get_account_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!(Some(rpc_nonce_account.clone())),
});
let mut mocks = HashMap::new();
mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone());
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::All(Source::NonceAccount(nonce_pubkey))
.get_blockhash_and_fee_calculator(&rpc_client)
.unwrap(),
(nonce_blockhash, nonce_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone());
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::FeeCalculator(Source::NonceAccount(nonce_pubkey), nonce_blockhash)
.get_blockhash_and_fee_calculator(&rpc_client)
.unwrap(),
(nonce_blockhash, nonce_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone());
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert!(
BlockhashQuery::FeeCalculator(Source::NonceAccount(nonce_pubkey), test_blockhash)
.get_blockhash_and_fee_calculator(&rpc_client)
.is_err()
);
let mut mocks = HashMap::new();
mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone());
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::None(nonce_blockhash)
.get_blockhash_and_fee_calculator(&rpc_client)
.unwrap(),
(nonce_blockhash, FeeCalculator::default()),
);
let rpc_client = RpcClient::new_mock("fails".to_string());
assert!(BlockhashQuery::All(Source::NonceAccount(nonce_pubkey))
.get_blockhash_and_fee_calculator(&rpc_client)
.is_err());
}
}

cli/src/offline/mod.rs (new file, 114 lines)

@@ -0,0 +1,114 @@
pub mod blockhash_query;
use crate::nonce;
use clap::{App, Arg, ArgMatches};
use serde_json::Value;
use solana_clap_utils::{
input_parsers::{pubkey_of, value_of},
input_validators::{is_hash, is_pubkey_sig},
keypair::presigner_from_pubkey_sigs,
offline::{BLOCKHASH_ARG, SIGNER_ARG, SIGN_ONLY_ARG},
};
use solana_client::rpc_client::RpcClient;
use solana_sdk::{
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::{Presigner, Signature},
};
use std::str::FromStr;
fn blockhash_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(BLOCKHASH_ARG.name)
.long(BLOCKHASH_ARG.long)
.takes_value(true)
.value_name("BLOCKHASH")
.validator(is_hash)
.help(BLOCKHASH_ARG.help)
}
fn sign_only_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGN_ONLY_ARG.name)
.long(SIGN_ONLY_ARG.long)
.takes_value(false)
.requires(BLOCKHASH_ARG.name)
.help(SIGN_ONLY_ARG.help)
}
fn signer_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGNER_ARG.name)
.long(SIGNER_ARG.long)
.takes_value(true)
.value_name("PUBKEY=SIGNATURE")
.validator(is_pubkey_sig)
.requires(BLOCKHASH_ARG.name)
.multiple(true)
.help(SIGNER_ARG.help)
}
pub trait OfflineArgs {
fn offline_args(self) -> Self;
}
impl OfflineArgs for App<'_, '_> {
fn offline_args(self) -> Self {
self.arg(blockhash_arg())
.arg(sign_only_arg())
.arg(signer_arg())
}
}
pub struct SignOnly {
pub blockhash: Hash,
pub present_signers: Vec<(Pubkey, Signature)>,
pub absent_signers: Vec<Pubkey>,
pub bad_signers: Vec<Pubkey>,
}
impl SignOnly {
pub fn has_all_signers(&self) -> bool {
self.absent_signers.is_empty() && self.bad_signers.is_empty()
}
pub fn presigner_of(&self, pubkey: &Pubkey) -> Option<Presigner> {
presigner_from_pubkey_sigs(pubkey, &self.present_signers)
}
}
pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let present_signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
let signer_strings = object.get("absent").unwrap().as_array().unwrap();
let absent_signers = signer_strings
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
let signer_strings = object.get("badSig").unwrap().as_array().unwrap();
let bad_signers = signer_strings
.iter()
.map(|val| {
let s = val.as_str().unwrap();
Pubkey::from_str(s).unwrap()
})
.collect();
SignOnly {
blockhash,
present_signers,
absent_signers,
bad_signers,
}
}
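A minimal sketch of how a caller is expected to consume the sign-only reply. presigner_from_reply is illustrative and not part of the diff; Pubkey and Presigner come from this module's imports.
// Illustrative only, not part of this change.
fn presigner_from_reply(reply: &str, signer_pubkey: &Pubkey) -> Option<Presigner> {
    let sign_only = parse_sign_only_reply_string(reply);
    if !sign_only.has_all_signers() {
        // Some required signatures are absent or failed verification; the
        // transaction is not ready to submit.
        return None;
    }
    // Wrap the offline party's (pubkey, signature) pair as a Presigner so it
    // can stand in for the absent keypair when the online side submits.
    sign_only.presigner_of(signer_pubkey)
}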

File diff suppressed because it is too large


@@ -1,6 +1,7 @@
use crate::cli::{
check_account_for_fee, check_unique_pubkeys, log_instruction_custom_error, CliCommand,
CliCommandInfo, CliConfig, CliError, ProcessResult, SignerIndex,
check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
SignerIndex,
};
use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::signer_from_path};
@@ -25,18 +26,18 @@ impl StorageSubCommands for App<'_, '_> {
.arg(
Arg::with_name("storage_account_owner")
.index(1)
.value_name("STORAGE ACCOUNT OWNER PUBKEY")
.value_name("AUTHORITY_PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair),
.validator(is_valid_pubkey),
)
.arg(
Arg::with_name("storage_account")
.index(2)
.value_name("STORAGE ACCOUNT")
.value_name("ACCOUNT_KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_keypair_or_ask_keyword),
.validator(is_valid_signer),
),
)
.subcommand(
@@ -45,18 +46,18 @@ impl StorageSubCommands for App<'_, '_> {
.arg(
Arg::with_name("storage_account_owner")
.index(1)
.value_name("STORAGE ACCOUNT OWNER PUBKEY")
.value_name("AUTHORITY_PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair),
.validator(is_valid_pubkey),
)
.arg(
Arg::with_name("storage_account")
.index(2)
.value_name("STORAGE ACCOUNT")
.value_name("ACCOUNT_KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_keypair_or_ask_keyword),
.validator(is_valid_signer),
),
)
.subcommand(
@@ -65,19 +66,19 @@ impl StorageSubCommands for App<'_, '_> {
.arg(
Arg::with_name("node_account_pubkey")
.index(1)
.value_name("NODE PUBKEY")
.value_name("NODE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("The node account to credit the rewards to"),
)
.arg(
Arg::with_name("storage_account_pubkey")
.index(2)
.value_name("STORAGE ACCOUNT PUBKEY")
.value_name("STORAGE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("Storage account address to redeem credits for"),
),
)
@@ -88,11 +89,11 @@ impl StorageSubCommands for App<'_, '_> {
.arg(
Arg::with_name("storage_account_pubkey")
.index(1)
.value_name("STORAGE ACCOUNT PUBKEY")
.value_name("STORAGE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("Storage account pubkey"),
.validator(is_valid_pubkey)
.help("Storage account address"),
),
)
}
@@ -103,18 +104,26 @@ pub fn parse_storage_create_archiver_account(
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let account_owner = pubkey_of(matches, "storage_account_owner").unwrap();
let storage_account = keypair_of(matches, "storage_account").unwrap();
let account_owner =
pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap();
let (storage_account, storage_account_pubkey) =
signer_of(matches, "storage_account", wallet_manager)?;
let payer_provided = None;
let signer_info = generate_unique_signers(
vec![payer_provided, storage_account],
matches,
default_signer_path,
wallet_manager,
)?;
Ok(CliCommandInfo {
command: CliCommand::CreateStorageAccount {
account_owner,
storage_account: 1,
storage_account: signer_info.index_of(storage_account_pubkey).unwrap(),
account_type: StorageAccountType::Archiver,
},
signers: vec![
signer_from_path(matches, default_signer_path, "keypair", wallet_manager)?,
storage_account.into(),
],
signers: signer_info.signers,
})
}
@@ -123,18 +132,26 @@ pub fn parse_storage_create_validator_account(
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let account_owner = pubkey_of(matches, "storage_account_owner").unwrap();
let storage_account = keypair_of(matches, "storage_account").unwrap();
let account_owner =
pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap();
let (storage_account, storage_account_pubkey) =
signer_of(matches, "storage_account", wallet_manager)?;
let payer_provided = None;
let signer_info = generate_unique_signers(
vec![payer_provided, storage_account],
matches,
default_signer_path,
wallet_manager,
)?;
Ok(CliCommandInfo {
command: CliCommand::CreateStorageAccount {
account_owner,
storage_account: 1,
storage_account: signer_info.index_of(storage_account_pubkey).unwrap(),
account_type: StorageAccountType::Validator,
},
signers: vec![
signer_from_path(matches, default_signer_path, "keypair", wallet_manager)?,
storage_account.into(),
],
signers: signer_info.signers,
})
}
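Both parse functions above (and the vote parsers later in this diff) settle on the same signer plumbing. A condensed, illustrative sketch of just that part, assuming the surrounding file's items and imports (signer_of, generate_unique_signers, SignerIndex, CliError, Signer, ArgMatches, Arc, RemoteWalletManager) are in scope; the command construction is elided.
// Illustrative condensation, not part of this change.
fn storage_account_signer_index(
    matches: &ArgMatches<'_>,
    default_signer_path: &str,
    wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<(SignerIndex, Vec<Box<dyn Signer>>), CliError> {
    let (storage_account, storage_account_pubkey) =
        signer_of(matches, "storage_account", wallet_manager)?;
    // None lets generate_unique_signers fall back to the default cli keypair
    // for the payer slot; identical signers are expected to be deduplicated,
    // which is why commands now store a SignerIndex instead of a keypair.
    let signer_info = generate_unique_signers(
        vec![None, storage_account],
        matches,
        default_signer_path,
        wallet_manager,
    )?;
    let index = signer_info.index_of(storage_account_pubkey).unwrap();
    Ok((index, signer_info.signers))
}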
@@ -143,8 +160,10 @@ pub fn parse_storage_claim_reward(
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let node_account_pubkey = pubkey_of(matches, "node_account_pubkey").unwrap();
let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap();
let node_account_pubkey =
pubkey_of_signer(matches, "node_account_pubkey", wallet_manager)?.unwrap();
let storage_account_pubkey =
pubkey_of_signer(matches, "storage_account_pubkey", wallet_manager)?.unwrap();
Ok(CliCommandInfo {
command: CliCommand::ClaimStorageReward {
node_account_pubkey,
@@ -161,8 +180,10 @@ pub fn parse_storage_claim_reward(
pub fn parse_storage_get_account_command(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap();
let storage_account_pubkey =
pubkey_of_signer(matches, "storage_account_pubkey", wallet_manager)?.unwrap();
Ok(CliCommandInfo {
command: CliCommand::ShowStorageAccount(storage_account_pubkey),
signers: vec![],
@@ -212,7 +233,7 @@ pub fn process_create_storage_account(
);
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let message = Message::new(ixs);
let message = Message::new(&ixs);
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
@@ -221,7 +242,7 @@ pub fn process_create_storage_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
}
@@ -236,7 +257,7 @@ pub fn process_claim_storage_reward(
let instruction =
storage_instruction::claim_reward(node_account_pubkey, storage_account_pubkey);
let signers = [config.signers[0]];
let message = Message::new_with_payer(vec![instruction], Some(&signers[0].pubkey()));
let message = Message::new_with_payer(&[instruction], Some(&signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&signers, recent_blockhash)?;
check_account_for_fee(
@@ -245,8 +266,8 @@ pub fn process_claim_storage_reward(
&fee_calculator,
&tx.message,
)?;
let signature_str = rpc_client.send_and_confirm_transaction(&mut tx, &signers)?;
Ok(signature_str)
let signature = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
Ok(signature.to_string())
}
pub fn process_show_storage_account(
@@ -266,7 +287,7 @@ pub fn process_show_storage_account(
use solana_storage_program::storage_contract::StorageContract;
let storage_contract: StorageContract = account.state().map_err(|err| {
CliError::RpcRequestError(format!("Unable to deserialize storage account: {:?}", err))
CliError::RpcRequestError(format!("Unable to deserialize storage account: {}", err))
})?;
println!("{:#?}", storage_contract);
println!("Account Lamports: {}", account.lamports);


@@ -274,7 +274,7 @@ pub fn process_set_validator_info(
println!("--force supplied, ignoring: {:?}", result);
} else {
result.map_err(|err| {
CliError::BadParameter(format!("Invalid validator keybase username: {:?}", err))
CliError::BadParameter(format!("Invalid validator keybase username: {}", err))
})?;
}
}
@@ -339,7 +339,7 @@ pub fn process_set_validator_info(
&validator_info,
)]);
let signers = vec![config.signers[0], &info_keypair];
let message = Message::new(instructions);
let message = Message::new(&instructions);
(message, signers)
} else {
println!(
@@ -353,7 +353,7 @@ pub fn process_set_validator_info(
keys,
&validator_info,
)];
let message = Message::new_with_payer(instructions, Some(&config.signers[0].pubkey()));
let message = Message::new_with_payer(&instructions, Some(&config.signers[0].pubkey()));
let signers = vec![config.signers[0]];
(message, signers)
};
@@ -368,7 +368,7 @@ pub fn process_set_validator_info(
&fee_calculator,
&tx.message,
)?;
let signature_str = rpc_client.send_and_confirm_transaction(&mut tx, &signers)?;
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
println!("Success! Validator info published at: {:?}", info_pubkey);
println!("{}", signature_str);


@@ -1,7 +1,7 @@
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, CliSignerInfo,
ProcessResult,
ProcessResult, SignerIndex,
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*};
@@ -16,7 +16,7 @@ use solana_sdk::{
transaction::Transaction,
};
use solana_vote_program::{
vote_instruction::{self, VoteError},
vote_instruction::{self, withdraw, VoteError},
vote_state::{VoteAuthorize, VoteInit, VoteState},
};
use std::sync::Arc;
@@ -33,25 +33,25 @@ impl VoteSubCommands for App<'_, '_> {
.arg(
Arg::with_name("vote_account")
.index(1)
.value_name("VOTE ACCOUNT KEYPAIR")
.value_name("ACCOUNT_KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_keypair_or_ask_keyword)
.help("Vote account keypair to fund"),
.validator(is_valid_signer)
.help("Vote account keypair to create"),
)
.arg(
Arg::with_name("identity_pubkey")
Arg::with_name("identity_account")
.index(2)
.value_name("VALIDATOR IDENTITY PUBKEY")
.value_name("IDENTITY_KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("Validator that will vote with this account"),
.validator(is_valid_signer)
.help("Keypair of validator that will vote with this account"),
)
.arg(
Arg::with_name("commission")
.long("commission")
.value_name("NUM")
.value_name("PERCENTAGE")
.takes_value(true)
.default_value("100")
.help("The commission taken on reward redemption (0-100)"),
@@ -59,78 +59,47 @@ impl VoteSubCommands for App<'_, '_> {
.arg(
Arg::with_name("authorized_voter")
.long("authorized-voter")
.value_name("PUBKEY")
.value_name("VOTER_PUBKEY")
.takes_value(true)
.validator(is_pubkey_or_keypair)
.help("Public key of the authorized voter (defaults to vote account)"),
.validator(is_valid_pubkey)
.help("Public key of the authorized voter [default: validator identity pubkey]"),
)
.arg(
Arg::with_name("authorized_withdrawer")
.long("authorized-withdrawer")
.value_name("PUBKEY")
.value_name("WITHDRAWER_PUBKEY")
.takes_value(true)
.validator(is_pubkey_or_keypair)
.help("Public key of the authorized withdrawer (defaults to cli config pubkey)"),
.validator(is_valid_pubkey)
.help("Public key of the authorized withdrawer [default: validator identity pubkey]"),
)
.arg(
Arg::with_name("seed")
.long("seed")
.value_name("SEED STRING")
.value_name("STRING")
.takes_value(true)
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the VOTE ACCOUNT pubkey")
),
)
.subcommand(
SubCommand::with_name("vote-update-validator")
.about("Update the vote account's validator identity")
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
.value_name("VOTE ACCOUNT PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("Vote account to update"),
)
.arg(
Arg::with_name("new_identity_pubkey")
.index(2)
.value_name("NEW VALIDATOR IDENTITY PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("New validator that will vote with this account"),
)
.arg(
Arg::with_name("authorized_voter")
.index(3)
.value_name("AUTHORIZED VOTER KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_keypair)
.help("Authorized voter keypair"),
)
)
.subcommand(
SubCommand::with_name("vote-authorize-voter")
.about("Authorize a new vote signing keypair for the given vote account")
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
.value_name("VOTE ACCOUNT PUBKEY")
.value_name("VOTE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("Vote account in which to set the authorized voter"),
)
.arg(
Arg::with_name("new_authorized_pubkey")
.index(2)
.value_name("NEW VOTER PUBKEY")
.value_name("AUTHORIZED_PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("New vote signer to authorize"),
.validator(is_valid_pubkey)
.help("New authorized vote signer"),
),
)
.subcommand(
@@ -139,22 +108,53 @@ impl VoteSubCommands for App<'_, '_> {
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
.value_name("VOTE ACCOUNT PUBKEY")
.value_name("VOTE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("Vote account in which to set the authorized withdrawer"),
)
.arg(
Arg::with_name("new_authorized_pubkey")
.index(2)
.value_name("NEW WITHDRAWER PUBKEY")
.value_name("AUTHORIZED_PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("New withdrawer to authorize"),
.validator(is_valid_pubkey)
.help("New authorized withdrawer"),
),
)
.subcommand(
SubCommand::with_name("vote-update-validator")
.about("Update the vote account's validator identity")
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
.value_name("VOTE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_valid_pubkey)
.help("Vote account to update"),
)
.arg(
Arg::with_name("new_identity_account")
.index(2)
.value_name("IDENTITY_KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_valid_signer)
.help("Keypair of new validator that will vote with this account"),
)
.arg(
Arg::with_name("authorized_withdrawer")
.index(3)
.value_name("AUTHORIZED_KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_valid_signer)
.help("Authorized withdrawer keypair"),
)
)
.subcommand(
SubCommand::with_name("vote-account")
.about("Show the contents of a vote account")
@@ -170,10 +170,10 @@ impl VoteSubCommands for App<'_, '_> {
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
.value_name("VOTE ACCOUNT PUBKEY")
.value_name("VOTE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.validator(is_valid_pubkey)
.help("Vote account pubkey"),
)
.arg(
@@ -183,24 +183,64 @@ impl VoteSubCommands for App<'_, '_> {
.help("Display balance in lamports instead of SOL"),
),
)
.subcommand(
SubCommand::with_name("withdraw-from-vote-account")
.about("Withdraw lamports from a vote account into a specified account")
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
.value_name("VOTE_ACCOUNT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_valid_pubkey)
.help("Vote account from which to withdraw"),
)
.arg(
Arg::with_name("destination_account_pubkey")
.index(2)
.value_name("RECIPIENT_ADDRESS")
.takes_value(true)
.required(true)
.validator(is_valid_pubkey)
.help("The recipient of withdrawn SOL"),
)
.arg(
Arg::with_name("amount")
.index(3)
.value_name("AMOUNT")
.takes_value(true)
.required(true)
.validator(is_amount)
.help("The amount to withdraw, in SOL"),
)
.arg(
Arg::with_name("authorized_withdrawer")
.long("authorized-withdrawer")
.value_name("AUTHORIZED_KEYPAIR")
.takes_value(true)
.validator(is_valid_signer)
.help("Authorized withdrawer [default: cli config keypair]"),
)
)
}
}
pub fn parse_vote_create_account(
pub fn parse_create_vote_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
let seed = matches.value_of("seed").map(|s| s.to_string());
let identity_pubkey = pubkey_of(matches, "identity_pubkey").unwrap();
let (identity_account, identity_pubkey) =
signer_of(matches, "identity_account", wallet_manager)?;
let commission = value_t_or_exit!(matches, "commission", u8);
let authorized_voter = pubkey_of(matches, "authorized_voter");
let authorized_withdrawer = pubkey_of(matches, "authorized_withdrawer");
let authorized_voter = pubkey_of_signer(matches, "authorized_voter", wallet_manager)?;
let authorized_withdrawer = pubkey_of_signer(matches, "authorized_withdrawer", wallet_manager)?;
let payer_provided = None;
let CliSignerInfo { signers } = generate_unique_signers(
vec![payer_provided, vote_account],
let signer_info = generate_unique_signers(
vec![payer_provided, vote_account, identity_account],
matches,
default_signer_path,
wallet_manager,
@@ -209,12 +249,12 @@ pub fn parse_vote_create_account(
Ok(CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed,
node_pubkey: identity_pubkey,
identity_account: signer_info.index_of(identity_pubkey).unwrap(),
authorized_voter,
authorized_withdrawer,
commission,
},
signers,
signers: signer_info.signers,
})
}
@@ -224,8 +264,10 @@ pub fn parse_vote_authorize(
wallet_manager: Option<&Arc<RemoteWalletManager>>,
vote_authorize: VoteAuthorize,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
let new_authorized_pubkey = pubkey_of(matches, "new_authorized_pubkey").unwrap();
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let new_authorized_pubkey =
pubkey_of_signer(matches, "new_authorized_pubkey", wallet_manager)?.unwrap();
let authorized_voter_provided = None;
let CliSignerInfo { signers } = generate_unique_signers(
@@ -250,13 +292,15 @@ pub fn parse_vote_update_validator(
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
let new_identity_pubkey = pubkey_of(matches, "new_identity_pubkey").unwrap();
let (authorized_voter, _) = signer_of(matches, "authorized_voter", wallet_manager)?;
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let (new_identity_account, new_identity_pubkey) =
signer_of(matches, "new_identity_account", wallet_manager)?;
let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
let payer_provided = None;
let CliSignerInfo { signers } = generate_unique_signers(
vec![payer_provided, authorized_voter],
let signer_info = generate_unique_signers(
vec![payer_provided, authorized_withdrawer, new_identity_account],
matches,
default_signer_path,
wallet_manager,
@@ -265,16 +309,18 @@ pub fn parse_vote_update_validator(
Ok(CliCommandInfo {
command: CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_pubkey,
new_identity_account: signer_info.index_of(new_identity_pubkey).unwrap(),
},
signers,
signers: signer_info.signers,
})
}
pub fn parse_vote_get_account_command(
matches: &ArgMatches<'_>,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let use_lamports_unit = matches.is_present("lamports");
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
@@ -291,11 +337,43 @@ pub fn parse_vote_get_account_command(
})
}
pub fn parse_withdraw_from_vote_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey =
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
let destination_account_pubkey =
pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap();
let lamports = lamports_of_sol(matches, "amount").unwrap();
let (withdraw_authority, withdraw_authority_pubkey) =
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
let payer_provided = None;
let signer_info = generate_unique_signers(
vec![payer_provided, withdraw_authority],
matches,
default_signer_path,
wallet_manager,
)?;
Ok(CliCommandInfo {
command: CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
destination_account_pubkey,
withdraw_authority: signer_info.index_of(withdraw_authority_pubkey).unwrap(),
lamports,
},
signers: signer_info.signers,
})
}
pub fn process_create_vote_account(
rpc_client: &RpcClient,
config: &CliConfig,
seed: &Option<String>,
identity_pubkey: &Pubkey,
identity_account: SignerIndex,
authorized_voter: &Option<Pubkey>,
authorized_withdrawer: &Option<Pubkey>,
commission: u8,
@@ -312,6 +390,8 @@ pub fn process_create_vote_account(
(&vote_account_address, "vote_account".to_string()),
)?;
let identity_account = config.signers[identity_account];
let identity_pubkey = identity_account.pubkey();
check_unique_pubkeys(
(&vote_account_address, "vote_account".to_string()),
(&identity_pubkey, "identity_pubkey".to_string()),
@@ -334,9 +414,9 @@ pub fn process_create_vote_account(
.max(1);
let vote_init = VoteInit {
node_pubkey: *identity_pubkey,
authorized_voter: authorized_voter.unwrap_or(vote_account_pubkey),
authorized_withdrawer: authorized_withdrawer.unwrap_or(vote_account_pubkey),
node_pubkey: identity_pubkey,
authorized_voter: authorized_voter.unwrap_or(identity_pubkey),
authorized_withdrawer: authorized_withdrawer.unwrap_or(identity_pubkey),
commission,
};
@@ -359,7 +439,7 @@ pub fn process_create_vote_account(
};
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let message = Message::new(ixs);
let message = Message::new(&ixs);
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
@@ -368,7 +448,7 @@ pub fn process_create_vote_account(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result)
}
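The behavioral change buried in the VoteInit hunk above is the defaults: with no flags, both authorities now fall back to the validator identity rather than the vote account address. A small standalone sketch of the no-flag case, not taken from the diff; the commission value is the clap default shown earlier.
use solana_sdk::pubkey::Pubkey;
use solana_vote_program::vote_state::VoteInit;
// Illustrative only, not part of this change.
fn default_vote_init(identity_pubkey: Pubkey) -> VoteInit {
    VoteInit {
        node_pubkey: identity_pubkey,
        authorized_voter: identity_pubkey,      // previously: the vote account address
        authorized_withdrawer: identity_pubkey, // previously: the vote account address
        commission: 100,                        // clap default for --commission
    }
}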
@@ -391,7 +471,7 @@ pub fn process_vote_authorize(
vote_authorize, // vote or withdraw
)];
let message = Message::new_with_payer(ixs, Some(&config.signers[0].pubkey()));
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
@@ -400,7 +480,8 @@ pub fn process_vote_authorize(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[config.signers[0]]);
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0]]);
log_instruction_custom_error::<VoteError>(result)
}
@@ -408,21 +489,23 @@ pub fn process_vote_update_validator(
rpc_client: &RpcClient,
config: &CliConfig,
vote_account_pubkey: &Pubkey,
new_identity_pubkey: &Pubkey,
new_identity_account: SignerIndex,
) -> ProcessResult {
let authorized_voter = config.signers[1];
let authorized_withdrawer = config.signers[1];
let new_identity_account = config.signers[new_identity_account];
let new_identity_pubkey = new_identity_account.pubkey();
check_unique_pubkeys(
(vote_account_pubkey, "vote_account_pubkey".to_string()),
(new_identity_pubkey, "new_identity_pubkey".to_string()),
(&new_identity_pubkey, "new_identity_account".to_string()),
)?;
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let ixs = vec![vote_instruction::update_node(
vote_account_pubkey,
&authorized_voter.pubkey(),
new_identity_pubkey,
&authorized_withdrawer.pubkey(),
&new_identity_pubkey,
)];
let message = Message::new_with_payer(ixs, Some(&config.signers[0].pubkey()));
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
@@ -431,7 +514,7 @@ pub fn process_vote_update_validator(
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &config.signers);
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<VoteError>(result)
}
@@ -517,6 +600,38 @@ pub fn process_show_vote_account(
Ok("".to_string())
}
pub fn process_withdraw_from_vote_account(
rpc_client: &RpcClient,
config: &CliConfig,
vote_account_pubkey: &Pubkey,
withdraw_authority: SignerIndex,
lamports: u64,
destination_account_pubkey: &Pubkey,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let withdraw_authority = config.signers[withdraw_authority];
let ix = withdraw(
vote_account_pubkey,
&withdraw_authority.pubkey(),
lamports,
destination_account_pubkey,
);
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
let mut transaction = Transaction::new_unsigned(message);
transaction.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
rpc_client,
&config.signers[0].pubkey(),
&fee_calculator,
&transaction.message,
)?;
let result =
rpc_client.send_and_confirm_transaction_with_spinner(&mut transaction, &config.signers);
log_instruction_custom_error::<VoteError>(result)
}
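Every handler touched by this diff converges on the same submit shape: slice-based Message constructors plus send_and_confirm_transaction_with_spinner. A standalone, illustrative sketch of that shape; the real handlers also run check_account_for_fee and map errors through log_instruction_custom_error, both omitted here.
use solana_client::rpc_client::RpcClient;
use solana_sdk::{
    instruction::Instruction, message::Message, signature::Signer, transaction::Transaction,
};
// Illustrative only, not part of this change.
fn sign_and_submit(
    rpc_client: &RpcClient,
    signers: Vec<&dyn Signer>,
    ixs: &[Instruction],
) -> Result<String, Box<dyn std::error::Error>> {
    let (recent_blockhash, _fee_calculator) = rpc_client.get_recent_blockhash()?;
    // Message::new_with_payer now takes a slice of instructions; the first
    // signer is used as the fee payer throughout this PR.
    let message = Message::new_with_payer(ixs, Some(&signers[0].pubkey()));
    let mut tx = Transaction::new_unsigned(message);
    tx.try_sign(&signers, recent_blockhash)?;
    // The *_with_spinner variant displays progress while confirming and
    // yields a Signature on success.
    let signature = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
    Ok(signature.to_string())
}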
#[cfg(test)]
mod tests {
use super::*;
@@ -565,13 +680,14 @@ mod tests {
let keypair = Keypair::new();
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
// Test CreateVoteAccount SubCommand
let node_pubkey = Pubkey::new_rand();
let node_pubkey_string = format!("{}", node_pubkey);
let (identity_keypair_file, mut tmp_file) = make_tmp_file();
let identity_keypair = Keypair::new();
write_keypair(&identity_keypair, tmp_file.as_file_mut()).unwrap();
let test_create_vote_account = test_commands.clone().get_matches_from(vec![
"test",
"create-vote-account",
&keypair_file,
&node_pubkey_string,
&identity_keypair_file,
"--commission",
"10",
]);
@@ -580,14 +696,15 @@ mod tests {
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
node_pubkey,
identity_account: 2,
authorized_voter: None,
authorized_withdrawer: None,
commission: 10,
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
Box::new(keypair)
Box::new(keypair),
read_keypair_file(&identity_keypair_file).unwrap().into(),
],
}
);
@@ -600,21 +717,22 @@ mod tests {
"test",
"create-vote-account",
&keypair_file,
&node_pubkey_string,
&identity_keypair_file,
]);
assert_eq!(
parse_command(&test_create_vote_account2, &default_keypair_file, None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
node_pubkey,
identity_account: 2,
authorized_voter: None,
authorized_withdrawer: None,
commission: 100,
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
Box::new(keypair)
Box::new(keypair),
read_keypair_file(&identity_keypair_file).unwrap().into(),
],
}
);
@@ -629,7 +747,7 @@ mod tests {
"test",
"create-vote-account",
&keypair_file,
&node_pubkey_string,
&identity_keypair_file,
"--authorized-voter",
&authed.to_string(),
]);
@@ -638,14 +756,15 @@ mod tests {
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
node_pubkey,
identity_account: 2,
authorized_voter: Some(authed),
authorized_withdrawer: None,
commission: 100
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
Box::new(keypair)
Box::new(keypair),
read_keypair_file(&identity_keypair_file).unwrap().into(),
],
}
);
@@ -658,7 +777,7 @@ mod tests {
"test",
"create-vote-account",
&keypair_file,
&node_pubkey_string,
&identity_keypair_file,
"--authorized-withdrawer",
&authed.to_string(),
]);
@@ -667,14 +786,15 @@ mod tests {
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
seed: None,
node_pubkey,
identity_account: 2,
authorized_voter: None,
authorized_withdrawer: Some(authed),
commission: 100
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
Box::new(keypair)
Box::new(keypair),
read_keypair_file(&identity_keypair_file).unwrap().into(),
],
}
);
@@ -683,7 +803,7 @@ mod tests {
"test",
"vote-update-validator",
&pubkey_string,
&pubkey2_string,
&identity_keypair_file,
&keypair_file,
]);
assert_eq!(
@@ -691,11 +811,72 @@ mod tests {
CliCommandInfo {
command: CliCommand::VoteUpdateValidator {
vote_account_pubkey: pubkey,
new_identity_pubkey: pubkey2,
new_identity_account: 2,
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
Box::new(read_keypair_file(&keypair_file).unwrap())
Box::new(read_keypair_file(&keypair_file).unwrap()),
read_keypair_file(&identity_keypair_file).unwrap().into(),
],
}
);
// Test WithdrawFromVoteAccount subcommand
let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![
"test",
"withdraw-from-vote-account",
&keypair_file,
&pubkey_string,
"42",
]);
assert_eq!(
parse_command(
&test_withdraw_from_vote_account,
&default_keypair_file,
None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey: read_keypair_file(&keypair_file).unwrap().pubkey(),
destination_account_pubkey: pubkey,
withdraw_authority: 0,
lamports: 42_000_000_000
},
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
}
);
// Test WithdrawFromVoteAccount subcommand with authority
let withdraw_authority = Keypair::new();
let (withdraw_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&withdraw_authority, tmp_file.as_file_mut()).unwrap();
let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![
"test",
"withdraw-from-vote-account",
&keypair_file,
&pubkey_string,
"42",
"--authorized-withdrawer",
&withdraw_authority_file,
]);
assert_eq!(
parse_command(
&test_withdraw_from_vote_account,
&default_keypair_file,
None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey: read_keypair_file(&keypair_file).unwrap().pubkey(),
destination_account_pubkey: pubkey,
withdraw_authority: 1,
lamports: 42_000_000_000
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
read_keypair_file(&withdraw_authority_file).unwrap().into()
],
}
);


@@ -1,6 +1,13 @@
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
parse_sign_only_reply_string,
},
};
use solana_client::rpc_client::RpcClient;
use solana_core::validator::TestValidator;
use solana_core::validator::{TestValidator, TestValidatorOptions};
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
hash::Hash,
@@ -247,3 +254,126 @@ fn full_battery_tests(
check_balance(800, &rpc_client, &nonce_account);
check_balance(200, &rpc_client, &payee_pubkey);
}
#[test]
fn test_create_account_with_seed() {
let TestValidator {
server,
leader_data,
alice: mint_keypair,
ledger_path,
..
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
});
let (sender, receiver) = channel();
run_local_faucet(mint_keypair, sender, None);
let faucet_addr = receiver.recv().unwrap();
let offline_nonce_authority_signer = keypair_from_seed(&[1u8; 32]).unwrap();
let online_nonce_creator_signer = keypair_from_seed(&[2u8; 32]).unwrap();
let to_address = Pubkey::new(&[3u8; 32]);
// Setup accounts
let rpc_client = RpcClient::new_socket(leader_data.rpc);
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&offline_nonce_authority_signer.pubkey(),
42,
)
.unwrap();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&online_nonce_creator_signer.pubkey(),
4242,
)
.unwrap();
check_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
check_balance(4242, &rpc_client, &online_nonce_creator_signer.pubkey());
check_balance(0, &rpc_client, &to_address);
// Create nonce account
let creator_pubkey = online_nonce_creator_signer.pubkey();
let authority_pubkey = offline_nonce_authority_signer.pubkey();
let seed = authority_pubkey.to_string()[0..32].to_string();
let nonce_address =
create_address_with_seed(&creator_pubkey, &seed, &system_program::id()).unwrap();
check_balance(0, &rpc_client, &nonce_address);
let mut creator_config = CliConfig::default();
creator_config.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
creator_config.signers = vec![&online_nonce_creator_signer];
creator_config.command = CliCommand::CreateNonceAccount {
nonce_account: 0,
seed: Some(seed),
nonce_authority: Some(authority_pubkey),
lamports: 241,
};
process_command(&creator_config).unwrap();
check_balance(241, &rpc_client, &nonce_address);
check_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
check_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
check_balance(0, &rpc_client, &to_address);
// Fetch nonce hash
let nonce_hash = nonce::get_account(&rpc_client, &nonce_address)
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.blockhash;
// Test by creating transfer TX with nonce, fully offline
let mut authority_config = CliConfig::default();
authority_config.json_rpc_url = String::default();
authority_config.signers = vec![&offline_nonce_authority_signer];
// Verify we cannot contact the cluster
authority_config.command = CliCommand::ClusterVersion;
process_command(&authority_config).unwrap_err();
authority_config.command = CliCommand::Transfer {
lamports: 10,
to: to_address,
from: 0,
sign_only: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_address),
nonce_authority: 0,
fee_payer: 0,
};
let sign_only_reply = process_command(&authority_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
let authority_presigner = sign_only.presigner_of(&authority_pubkey).unwrap();
assert_eq!(sign_only.blockhash, nonce_hash);
// And submit it
let mut submit_config = CliConfig::default();
submit_config.json_rpc_url =
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
submit_config.signers = vec![&authority_presigner];
submit_config.command = CliCommand::Transfer {
lamports: 10,
to: to_address,
from: 0,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_address),
sign_only.blockhash,
),
nonce_account: Some(nonce_address),
nonce_authority: 0,
fee_payer: 0,
};
process_command(&submit_config).unwrap();
check_balance(241, &rpc_client, &nonce_address);
check_balance(31, &rpc_client, &offline_nonce_authority_signer.pubkey());
check_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
check_balance(10, &rpc_client, &to_address);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
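The seeded derivation is what lets the online creator fund a nonce account at a deterministic address without introducing another keypair. A minimal sketch of the address computation the test relies on (helper name is illustrative):

use solana_sdk::{pubkey::Pubkey, system_instruction::create_address_with_seed, system_program};

// Sketch only: derive the address that CreateNonceAccount { seed: Some(..) }
// above writes to. The call fails if the seed exceeds the SDK's length limit.
fn seeded_nonce_address(creator: &Pubkey, seed: &str) -> Pubkey {
    create_address_with_seed(creator, seed, &system_program::id()).expect("valid seed")
}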

View File

@@ -1,17 +1,18 @@
use chrono::prelude::*;
use serde_json::Value;
use solana_clap_utils::keypair::presigner_from_pubkey_sigs;
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig, PayCommand},
offline::{parse_sign_only_reply_string, BlockhashQuery},
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
parse_sign_only_reply_string,
},
};
use solana_client::rpc_client::RpcClient;
use solana_core::validator::TestValidator;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
fee_calculator::FeeCalculator,
nonce,
nonce::State as NonceState,
pubkey::Pubkey,
signature::{Keypair, Signer},
};
@@ -323,7 +324,7 @@ fn test_offline_pay_tx() {
config_offline.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(blockhash),
sign_only: true,
..PayCommand::default()
});
@@ -333,15 +334,17 @@ fn test_offline_pay_tx() {
check_balance(50, &rpc_client, &config_online.signers[0].pubkey());
check_balance(0, &rpc_client, &bob_pubkey);
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
let offline_presigner =
presigner_from_pubkey_sigs(&config_offline.signers[0].pubkey(), &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[0].pubkey())
.unwrap();
let online_pubkey = config_online.signers[0].pubkey();
config_online.signers = vec![&offline_presigner];
config_online.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
..PayCommand::default()
});
process_command(&config_online).unwrap();
@@ -377,7 +380,7 @@ fn test_nonced_pay_tx() {
config.signers = vec![&default_signer];
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(nonce::State::size())
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
request_and_confirm_airdrop(
@@ -408,21 +411,20 @@ fn test_nonced_pay_tx() {
check_balance(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
let bob_pubkey = Pubkey::new_rand();
config.signers = vec![&default_signer];
config.command = CliCommand::Pay(PayCommand {
lamports: 10,
to: bob_pubkey,
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
nonce_hash,
),
nonce_account: Some(nonce_account.pubkey()),
..PayCommand::default()
});
@@ -432,14 +434,11 @@ fn test_nonced_pay_tx() {
check_balance(10, &rpc_client, &bob_pubkey);
// Verify that nonce has been used
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash2 = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
match nonce_state {
nonce::State::Initialized(ref data) => assert_ne!(data.blockhash, nonce_hash),
_ => assert!(false, "Nonce is not initialized"),
}
.blockhash;
assert_ne!(nonce_hash, nonce_hash2);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
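The repeated StateMut/convert_to_current blocks that this diff removes all collapse into the same two-call chain; a small helper capturing the new pattern might look like this (name and error handling are illustrative):

use solana_cli::nonce;
use solana_client::rpc_client::RpcClient;
use solana_sdk::{hash::Hash, pubkey::Pubkey};

// Sketch of the refactored nonce-hash lookup used throughout these tests:
// fetch the nonce account, decode its data, and read the stored blockhash.
fn current_nonce_hash(rpc_client: &RpcClient, nonce_pubkey: &Pubkey) -> Hash {
    nonce::get_account(rpc_client, nonce_pubkey)
        .and_then(|ref account| nonce::data_from_account(account))
        .expect("nonce account should exist and be initialized")
        .blockhash
}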

View File

@@ -1,15 +1,17 @@
use solana_clap_utils::keypair::presigner_from_pubkey_sigs;
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
offline::{parse_sign_only_reply_string, BlockhashQuery},
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
parse_sign_only_reply_string,
},
};
use solana_client::rpc_client::RpcClient;
use solana_core::validator::{TestValidator, TestValidatorOptions};
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
fee_calculator::FeeCalculator,
nonce,
nonce::State as NonceState,
pubkey::Pubkey,
signature::{keypair_from_seed, Keypair, Signer},
system_instruction::create_address_with_seed,
@@ -66,7 +68,7 @@ fn test_stake_delegation_force() {
config.signers = vec![&default_signer, &vote_keypair];
config.command = CliCommand::CreateVoteAccount {
seed: None,
node_pubkey: config.signers[0].pubkey(),
identity_account: 0,
authorized_voter: None,
authorized_withdrawer: None,
commission: 0,
@@ -84,7 +86,7 @@ fn test_stake_delegation_force() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: false,
blockhash_query: BlockhashQuery::All,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -175,7 +177,7 @@ fn test_seed_stake_delegation_and_deactivation() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: false,
blockhash_query: BlockhashQuery::All,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -258,7 +260,7 @@ fn test_stake_delegation_and_deactivation() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: false,
blockhash_query: BlockhashQuery::All,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -363,7 +365,7 @@ fn test_offline_stake_delegation_and_deactivation() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: false,
blockhash_query: BlockhashQuery::All,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -379,15 +381,17 @@ fn test_offline_stake_delegation_and_deactivation() {
stake_authority: 0,
force: false,
sign_only: true,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
};
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
let offline_presigner =
presigner_from_pubkey_sigs(&config_offline.signers[0].pubkey(), &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[0].pubkey())
.unwrap();
config_payer.signers = vec![&offline_presigner];
config_payer.command = CliCommand::DelegateStake {
stake_account_pubkey: stake_keypair.pubkey(),
@@ -395,7 +399,7 @@ fn test_offline_stake_delegation_and_deactivation() {
stake_authority: 0,
force: false,
sign_only: false,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -408,21 +412,23 @@ fn test_offline_stake_delegation_and_deactivation() {
stake_account_pubkey: stake_keypair.pubkey(),
stake_authority: 0,
sign_only: true,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
};
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
let offline_presigner =
presigner_from_pubkey_sigs(&config_offline.signers[0].pubkey(), &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only
.presigner_of(&config_offline.signers[0].pubkey())
.unwrap();
config_payer.signers = vec![&offline_presigner];
config_payer.command = CliCommand::DeactivateStake {
stake_account_pubkey: stake_keypair.pubkey(),
stake_authority: 0,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -457,7 +463,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(nonce::State::size())
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
request_and_confirm_airdrop(
@@ -479,7 +485,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: false,
blockhash_query: BlockhashQuery::All,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -499,14 +505,10 @@ fn test_nonced_stake_delegation_and_deactivation() {
process_command(&config).unwrap();
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
// Delegate stake
config.signers = vec![&config_keypair];
@@ -516,7 +518,10 @@ fn test_nonced_stake_delegation_and_deactivation() {
stake_authority: 0,
force: false,
sign_only: false,
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
nonce_hash,
),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
fee_payer: 0,
@@ -524,21 +529,20 @@ fn test_nonced_stake_delegation_and_deactivation() {
process_command(&config).unwrap();
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
// Deactivate stake
config.command = CliCommand::DeactivateStake {
stake_account_pubkey: stake_keypair.pubkey(),
stake_authority: 0,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
nonce_hash,
),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
fee_payer: 0,
@@ -608,7 +612,7 @@ fn test_stake_authorize() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: false,
blockhash_query: BlockhashQuery::All,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -622,9 +626,7 @@ fn test_stake_authorize() {
config.signers.pop();
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: online_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: 0,
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 0)],
sign_only: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
@@ -640,13 +642,40 @@ fn test_stake_authorize() {
};
assert_eq!(current_authority, online_authority_pubkey);
// Assign new offline stake authority
// Assign new online stake and withdraw authorities
let online_authority2 = Keypair::new();
let online_authority2_pubkey = online_authority2.pubkey();
let withdraw_authority = Keypair::new();
let withdraw_authority_pubkey = withdraw_authority.pubkey();
config.signers.push(&online_authority);
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: offline_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: 1,
new_authorizations: vec![
(StakeAuthorize::Staker, online_authority2_pubkey, 1),
(StakeAuthorize::Withdrawer, withdraw_authority_pubkey, 0),
],
sign_only: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
let stake_state: StakeState = stake_account.state().unwrap();
let (current_staker, current_withdrawer) = match stake_state {
StakeState::Initialized(meta) => (meta.authorized.staker, meta.authorized.withdrawer),
_ => panic!("Unexpected stake state!"),
};
assert_eq!(current_staker, online_authority2_pubkey);
assert_eq!(current_withdrawer, withdraw_authority_pubkey);
// Assign new offline stake authority
config.signers.pop();
config.signers.push(&online_authority2);
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorizations: vec![(StakeAuthorize::Staker, offline_authority_pubkey, 1)],
sign_only: false,
blockhash_query: BlockhashQuery::default(),
nonce_account: None,
@@ -668,27 +697,23 @@ fn test_stake_authorize() {
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
config_offline.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: nonced_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: 0,
new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)],
sign_only: true,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
};
let sign_reply = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
let offline_presigner =
presigner_from_pubkey_sigs(&offline_authority_pubkey, &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_reply);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only.presigner_of(&offline_authority_pubkey).unwrap();
config.signers = vec![&offline_presigner];
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: nonced_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: 0,
new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)],
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -704,7 +729,7 @@ fn test_stake_authorize() {
// Create nonce account
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(nonce::State::size())
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
let nonce_account = Keypair::new();
config.signers = vec![&default_signer, &nonce_account];
@@ -717,14 +742,10 @@ fn test_stake_authorize() {
process_command(&config).unwrap();
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
// Nonced assignment of new online stake authority
let online_authority = Keypair::new();
@@ -732,30 +753,28 @@ fn test_stake_authorize() {
config_offline.signers.push(&nonced_authority);
config_offline.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: online_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: 1,
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)],
sign_only: true,
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
fee_payer: 0,
};
let sign_reply = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
assert_eq!(blockhash, nonce_hash);
let offline_presigner =
presigner_from_pubkey_sigs(&offline_authority_pubkey, &signers).unwrap();
let nonced_authority_presigner =
presigner_from_pubkey_sigs(&nonced_authority_pubkey, &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_reply);
assert!(sign_only.has_all_signers());
assert_eq!(sign_only.blockhash, nonce_hash);
let offline_presigner = sign_only.presigner_of(&offline_authority_pubkey).unwrap();
let nonced_authority_presigner = sign_only.presigner_of(&nonced_authority_pubkey).unwrap();
config.signers = vec![&offline_presigner, &nonced_authority_presigner];
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: online_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: 1,
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)],
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
sign_only.blockhash,
),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
fee_payer: 0,
@@ -768,14 +787,11 @@ fn test_stake_authorize() {
_ => panic!("Unexpected stake state!"),
};
assert_eq!(current_authority, online_authority_pubkey);
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let new_nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let new_nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
assert_ne!(nonce_hash, new_nonce_hash);
server.close().unwrap();
@@ -819,6 +835,7 @@ fn test_stake_authorize_with_fee_payer() {
let mut config_offline = CliConfig::default();
let offline_signer = Keypair::new();
config_offline.signers = vec![&offline_signer];
config_offline.json_rpc_url = String::new();
let offline_pubkey = config_offline.signers[0].pubkey();
// Verify we're offline
config_offline.command = CliCommand::ClusterVersion;
@@ -845,7 +862,7 @@ fn test_stake_authorize_with_fee_payer() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: false,
blockhash_query: BlockhashQuery::All,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -859,11 +876,9 @@ fn test_stake_authorize_with_fee_payer() {
config.signers = vec![&default_signer, &payer_keypair];
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: offline_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: 0,
new_authorizations: vec![(StakeAuthorize::Staker, offline_pubkey, 0)],
sign_only: false,
blockhash_query: BlockhashQuery::All,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 1,
@@ -879,26 +894,23 @@ fn test_stake_authorize_with_fee_payer() {
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
config_offline.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: payer_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: 0,
new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)],
sign_only: true,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
};
let sign_reply = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_reply);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
config.signers = vec![&offline_presigner];
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: payer_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: 0,
new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)],
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -975,7 +987,7 @@ fn test_stake_split() {
lockup: Lockup::default(),
lamports: 10 * minimum_stake_balance,
sign_only: false,
blockhash_query: BlockhashQuery::All,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -990,7 +1002,7 @@ fn test_stake_split() {
// Create nonce account
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(nonce::State::size())
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
let nonce_account = keypair_from_seed(&[1u8; 32]).unwrap();
config.signers = vec![&default_signer, &nonce_account];
@@ -1004,14 +1016,10 @@ fn test_stake_split() {
check_balance(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
// Nonced offline split
let split_account = keypair_from_seed(&[2u8; 32]).unwrap();
@@ -1021,7 +1029,7 @@ fn test_stake_split() {
stake_account_pubkey: stake_account_pubkey,
stake_authority: 0,
sign_only: true,
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
split_stake_account: 1,
@@ -1030,14 +1038,18 @@ fn test_stake_split() {
fee_payer: 0,
};
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
config.signers = vec![&offline_presigner, &split_account];
config.command = CliCommand::SplitStake {
stake_account_pubkey: stake_account_pubkey,
stake_authority: 0,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
sign_only.blockhash,
),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
split_stake_account: 1,
@@ -1127,7 +1139,7 @@ fn test_stake_set_lockup() {
lockup,
lamports: 10 * minimum_stake_balance,
sign_only: false,
blockhash_query: BlockhashQuery::All,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -1242,7 +1254,7 @@ fn test_stake_set_lockup() {
// Create nonce account
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(nonce::State::size())
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
let nonce_account = keypair_from_seed(&[1u8; 32]).unwrap();
let nonce_account_pubkey = nonce_account.pubkey();
@@ -1257,14 +1269,10 @@ fn test_stake_set_lockup() {
check_balance(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey);
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account_pubkey).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
// Nonced offline set lockup
let lockup = LockupArgs {
@@ -1277,21 +1285,25 @@ fn test_stake_set_lockup() {
lockup,
custodian: 0,
sign_only: true,
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_account_pubkey),
nonce_authority: 0,
fee_payer: 0,
};
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
config.signers = vec![&offline_presigner];
config.command = CliCommand::StakeSetLockup {
stake_account_pubkey,
lockup,
custodian: 0,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account_pubkey),
sign_only.blockhash,
),
nonce_account: Some(nonce_account_pubkey),
nonce_authority: 0,
fee_payer: 0,
@@ -1359,7 +1371,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
// Create nonce account
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(nonce::State::size())
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap();
let nonce_pubkey = nonce_account.pubkey();
@@ -1373,14 +1385,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
process_command(&config).unwrap();
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
// Create stake account offline
let stake_keypair = keypair_from_seed(&[4u8; 32]).unwrap();
@@ -1394,16 +1402,17 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: true,
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_pubkey),
nonce_authority: 0,
fee_payer: 0,
from: 0,
};
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
let stake_presigner = presigner_from_pubkey_sigs(&stake_pubkey, &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
let stake_presigner = sign_only.presigner_of(&stake_pubkey).unwrap();
config.signers = vec![&offline_presigner, &stake_presigner];
config.command = CliCommand::CreateStakeAccount {
stake_account: 1,
@@ -1413,7 +1422,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_pubkey),
sign_only.blockhash,
),
nonce_account: Some(nonce_pubkey),
nonce_authority: 0,
fee_payer: 0,
@@ -1423,14 +1435,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
check_balance(50_000, &rpc_client, &stake_pubkey);
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
// Offline, nonced stake-withdraw
let recipient = keypair_from_seed(&[5u8; 32]).unwrap();
@@ -1442,14 +1450,14 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
lamports: 42,
withdraw_authority: 0,
sign_only: true,
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_pubkey),
nonce_authority: 0,
fee_payer: 0,
};
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
config.signers = vec![&offline_presigner];
config.command = CliCommand::WithdrawStake {
stake_account_pubkey: stake_pubkey,
@@ -1457,7 +1465,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
lamports: 42,
withdraw_authority: 0,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_pubkey),
sign_only.blockhash,
),
nonce_account: Some(nonce_pubkey),
nonce_authority: 0,
fee_payer: 0,
@@ -1466,14 +1477,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
check_balance(42, &rpc_client, &recipient_pubkey);
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
// Create another stake account. This time with seed
let seed = "seedy";
@@ -1486,16 +1493,16 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: true,
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_pubkey),
nonce_authority: 0,
fee_payer: 0,
from: 0,
};
let sig_response = process_command(&config_offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
let stake_presigner = presigner_from_pubkey_sigs(&stake_pubkey, &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sig_response);
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
let stake_presigner = sign_only.presigner_of(&stake_pubkey).unwrap();
config.signers = vec![&offline_presigner, &stake_presigner];
config.command = CliCommand::CreateStakeAccount {
stake_account: 1,
@@ -1505,7 +1512,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
lockup: Lockup::default(),
lamports: 50_000,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_pubkey),
sign_only.blockhash,
),
nonce_account: Some(nonce_pubkey),
nonce_authority: 0,
fee_payer: 0,
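Across the stake tests above, the old new_authorized_pubkey/stake_authorize/authority trio becomes a new_authorizations vector, so one command can rotate several authorities at once. A sketch of building such a command (import paths and the default BlockhashQuery are assumptions):

use solana_cli::{cli::CliCommand, offline::blockhash_query::BlockhashQuery};
use solana_sdk::pubkey::Pubkey;
use solana_stake_program::stake_state::StakeAuthorize;

// Sketch only: rotate both the staker and the withdrawer in one StakeAuthorize
// command. Each tuple is (authorization kind, new authority, signer index of
// the current authority that approves the change).
fn authorize_staker_and_withdrawer(
    stake_account_pubkey: Pubkey,
    new_staker: Pubkey,
    new_withdrawer: Pubkey,
) -> CliCommand {
    CliCommand::StakeAuthorize {
        stake_account_pubkey,
        new_authorizations: vec![
            (StakeAuthorize::Staker, new_staker, 1),
            (StakeAuthorize::Withdrawer, new_withdrawer, 0),
        ],
        sign_only: false,
        blockhash_query: BlockhashQuery::default(),
        nonce_account: None,
        nonce_authority: 0,
        fee_payer: 0,
    }
}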

View File

@@ -1,17 +1,18 @@
use solana_clap_utils::keypair::presigner_from_pubkey_sigs;
use solana_cli::{
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
offline::{parse_sign_only_reply_string, BlockhashQuery},
nonce,
offline::{
blockhash_query::{self, BlockhashQuery},
parse_sign_only_reply_string,
},
};
use solana_client::rpc_client::RpcClient;
use solana_core::validator::{TestValidator, TestValidatorOptions};
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
fee_calculator::FeeCalculator,
nonce,
nonce::State as NonceState,
pubkey::Pubkey,
signature::{keypair_from_seed, Keypair, Signer},
signature::{keypair_from_seed, Keypair, NullSigner, Signer},
};
use std::{fs::remove_dir_all, sync::mpsc::channel, thread::sleep, time::Duration};
@@ -67,7 +68,8 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
blockhash_query: BlockhashQuery::All,
no_wait: false,
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -94,21 +96,24 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: true,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
};
let sign_only_reply = process_command(&offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_only_reply);
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
config.signers = vec![&offline_presigner];
config.command = CliCommand::Transfer {
lamports: 10,
to: recipient_pubkey,
from: 0,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
@@ -120,7 +125,7 @@ fn test_transfer() {
// Create nonce account
let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap();
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(nonce::State::size())
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
config.signers = vec![&default_signer, &nonce_account];
config.command = CliCommand::CreateNonceAccount {
@@ -133,14 +138,10 @@ fn test_transfer() {
check_balance(49_987 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
// Nonced transfer
config.signers = vec![&default_signer];
@@ -149,7 +150,11 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(nonce_hash),
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
nonce_hash,
),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
fee_payer: 0,
@@ -157,14 +162,10 @@ fn test_transfer() {
process_command(&config).unwrap();
check_balance(49_976 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
check_balance(30, &rpc_client, &recipient_pubkey);
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let new_nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let new_nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
assert_ne!(nonce_hash, new_nonce_hash);
// Assign nonce authority to offline
@@ -178,14 +179,10 @@ fn test_transfer() {
check_balance(49_975 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state = StateMut::<nonce::state::Versions>::state(&account)
let nonce_hash = nonce::get_account(&rpc_client, &nonce_account.pubkey())
.and_then(|ref a| nonce::data_from_account(a))
.unwrap()
.convert_to_current();
let nonce_hash = match nonce_state {
nonce::State::Initialized(ref data) => data.blockhash,
_ => panic!("Nonce is not initialized"),
};
.blockhash;
// Offline, nonced transfer
offline.signers = vec![&default_offline_signer];
@@ -194,21 +191,27 @@ fn test_transfer() {
to: recipient_pubkey,
from: 0,
sign_only: true,
blockhash_query: BlockhashQuery::None(nonce_hash, FeeCalculator::default()),
no_wait: false,
blockhash_query: BlockhashQuery::None(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
fee_payer: 0,
};
let sign_only_reply = process_command(&offline).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_only_reply);
let offline_presigner = presigner_from_pubkey_sigs(&offline_pubkey, &signers).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(sign_only.has_all_signers());
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
config.signers = vec![&offline_presigner];
config.command = CliCommand::Transfer {
lamports: 10,
to: recipient_pubkey,
from: 0,
sign_only: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
sign_only.blockhash,
),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: 0,
fee_payer: 0,
@@ -220,3 +223,117 @@ fn test_transfer() {
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
#[test]
fn test_transfer_multisession_signing() {
let TestValidator {
server,
leader_data,
alice: mint_keypair,
ledger_path,
..
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
});
let (sender, receiver) = channel();
run_local_faucet(mint_keypair, sender, None);
let faucet_addr = receiver.recv().unwrap();
let to_pubkey = Pubkey::new(&[1u8; 32]);
let offline_from_signer = keypair_from_seed(&[2u8; 32]).unwrap();
let offline_fee_payer_signer = keypair_from_seed(&[3u8; 32]).unwrap();
let from_null_signer = NullSigner::new(&offline_from_signer.pubkey());
// Setup accounts
let rpc_client = RpcClient::new_socket(leader_data.rpc);
request_and_confirm_airdrop(&rpc_client, &faucet_addr, &offline_from_signer.pubkey(), 43)
.unwrap();
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&offline_fee_payer_signer.pubkey(),
3,
)
.unwrap();
check_balance(43, &rpc_client, &offline_from_signer.pubkey());
check_balance(3, &rpc_client, &offline_fee_payer_signer.pubkey());
check_balance(0, &rpc_client, &to_pubkey);
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
// Offline fee-payer signs first
let mut fee_payer_config = CliConfig::default();
fee_payer_config.json_rpc_url = String::default();
fee_payer_config.signers = vec![&offline_fee_payer_signer, &from_null_signer];
// Verify we cannot contact the cluster
fee_payer_config.command = CliCommand::ClusterVersion;
process_command(&fee_payer_config).unwrap_err();
fee_payer_config.command = CliCommand::Transfer {
lamports: 42,
to: to_pubkey,
from: 1,
sign_only: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
};
let sign_only_reply = process_command(&fee_payer_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(!sign_only.has_all_signers());
let fee_payer_presigner = sign_only
.presigner_of(&offline_fee_payer_signer.pubkey())
.unwrap();
// Now the offline fund source
let mut from_config = CliConfig::default();
from_config.json_rpc_url = String::default();
from_config.signers = vec![&fee_payer_presigner, &offline_from_signer];
// Verify we cannot contact the cluster
from_config.command = CliCommand::ClusterVersion;
process_command(&from_config).unwrap_err();
from_config.command = CliCommand::Transfer {
lamports: 42,
to: to_pubkey,
from: 1,
sign_only: true,
no_wait: false,
blockhash_query: BlockhashQuery::None(blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
};
let sign_only_reply = process_command(&from_config).unwrap();
let sign_only = parse_sign_only_reply_string(&sign_only_reply);
assert!(sign_only.has_all_signers());
let from_presigner = sign_only
.presigner_of(&offline_from_signer.pubkey())
.unwrap();
// Finally submit to the cluster
let mut config = CliConfig::default();
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
config.signers = vec![&fee_payer_presigner, &from_presigner];
config.command = CliCommand::Transfer {
lamports: 42,
to: to_pubkey,
from: 1,
sign_only: false,
no_wait: false,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
nonce_account: None,
nonce_authority: 0,
fee_payer: 0,
};
process_command(&config).unwrap();
check_balance(1, &rpc_client, &offline_from_signer.pubkey());
check_balance(1, &rpc_client, &offline_fee_payer_signer.pubkey());
check_balance(42, &rpc_client, &to_pubkey);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
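For reference, the reworked BlockhashQuery threads a Source through its online variants; the combinations exercised by the transfer tests above look roughly like this (function and argument names are illustrative, and the comments describe how the tests use each form rather than an authoritative API description):

use solana_cli::offline::blockhash_query::{self, BlockhashQuery};
use solana_sdk::{hash::Hash, pubkey::Pubkey};

fn blockhash_query_variants(blockhash: Hash, nonce_account: Pubkey) -> Vec<BlockhashQuery> {
    vec![
        // Offline signing: take the supplied blockhash as-is.
        BlockhashQuery::None(blockhash),
        // Known blockhash; fee data is fetched from the cluster at submit time.
        BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
        // Durable nonce: resolve the hash and fees against the nonce account.
        BlockhashQuery::FeeCalculator(
            blockhash_query::Source::NonceAccount(nonce_account),
            blockhash,
        ),
        // Fully online: fetch a recent blockhash and its fee calculator.
        BlockhashQuery::All(blockhash_query::Source::Cluster),
    ]
}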

118
cli/tests/vote.rs Normal file
View File

@@ -0,0 +1,118 @@
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
use solana_client::rpc_client::RpcClient;
use solana_core::validator::TestValidator;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
pubkey::Pubkey,
signature::{Keypair, Signer},
};
use solana_vote_program::vote_state::{VoteAuthorize, VoteState, VoteStateVersions};
use std::{fs::remove_dir_all, sync::mpsc::channel, thread::sleep, time::Duration};
fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
(0..5).for_each(|tries| {
let balance = client.retry_get_balance(pubkey, 1).unwrap().unwrap();
if balance == expected_balance {
return;
}
if tries == 4 {
assert_eq!(balance, expected_balance);
}
sleep(Duration::from_millis(500));
});
}
#[test]
fn test_vote_authorize_and_withdraw() {
let TestValidator {
server,
leader_data,
alice,
ledger_path,
..
} = TestValidator::run();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
let rpc_client = RpcClient::new_socket(leader_data.rpc);
let default_signer = Keypair::new();
let mut config = CliConfig::default();
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
config.signers = vec![&default_signer];
request_and_confirm_airdrop(
&rpc_client,
&faucet_addr,
&config.signers[0].pubkey(),
100_000,
)
.unwrap();
// Create vote account
let vote_account_keypair = Keypair::new();
let vote_account_pubkey = vote_account_keypair.pubkey();
config.signers = vec![&default_signer, &vote_account_keypair];
config.command = CliCommand::CreateVoteAccount {
seed: None,
identity_account: 0,
authorized_voter: None,
authorized_withdrawer: Some(config.signers[0].pubkey()),
commission: 0,
};
process_command(&config).unwrap();
let vote_account = rpc_client
.get_account(&vote_account_keypair.pubkey())
.unwrap();
let vote_state: VoteStateVersions = vote_account.state().unwrap();
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
assert_eq!(authorized_withdrawer, config.signers[0].pubkey());
let expected_balance = rpc_client
.get_minimum_balance_for_rent_exemption(VoteState::size_of())
.unwrap()
.max(1);
check_balance(expected_balance, &rpc_client, &vote_account_pubkey);
// Authorize vote account withdrawal to another signer
let withdraw_authority = Keypair::new();
config.signers = vec![&default_signer];
config.command = CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey: withdraw_authority.pubkey(),
vote_authorize: VoteAuthorize::Withdrawer,
};
process_command(&config).unwrap();
let vote_account = rpc_client
.get_account(&vote_account_keypair.pubkey())
.unwrap();
let vote_state: VoteStateVersions = vote_account.state().unwrap();
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
assert_eq!(authorized_withdrawer, withdraw_authority.pubkey());
// Withdraw from vote account
let destination_account = Pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
config.signers = vec![&default_signer, &withdraw_authority];
config.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,
withdraw_authority: 1,
lamports: 100,
destination_account_pubkey: destination_account,
};
process_command(&config).unwrap();
check_balance(expected_balance - 100, &rpc_client, &vote_account_pubkey);
check_balance(100, &rpc_client, &destination_account);
// Re-assign validator identity
let new_identity_keypair = Keypair::new();
config.signers.push(&new_identity_keypair);
config.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_account: 2,
};
process_command(&config).unwrap();
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.0.4"
version = "1.0.19"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,6 +11,7 @@ edition = "2018"
[dependencies]
bincode = "1.2.1"
bs58 = "0.3.0"
indicatif = "0.14.0"
jsonrpc-core = "14.0.5"
log = "0.4.8"
rayon = "1.2.0"
@@ -18,8 +19,10 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.46"
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-net-utils = { path = "../net-utils", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.19" }
solana-vote-program = { path = "../programs/vote", version = "1.0.19" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -28,4 +31,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.0.5"
jsonrpc-http-server = "14.0.6"
solana-logger = { path = "../logger", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.19" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,20 +1,161 @@
use crate::rpc_request;
use solana_sdk::{signature::SignerError, transaction::TransactionError};
use std::{fmt, io};
use solana_sdk::{
signature::SignerError, transaction::TransactionError, transport::TransportError,
};
use std::io;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum ClientError {
pub enum ClientErrorKind {
#[error(transparent)]
Io(#[from] io::Error),
#[error(transparent)]
Reqwest(#[from] reqwest::Error),
#[error(transparent)]
RpcError(#[from] rpc_request::RpcError),
#[error(transparent)]
SerdeJson(#[from] serde_json::error::Error),
#[error(transparent)]
SigningError(#[from] SignerError),
#[error(transparent)]
TransactionError(#[from] TransactionError),
#[error("Custom: {0}")]
Custom(String),
}
impl fmt::Display for ClientError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "solana client error")
impl From<TransportError> for ClientErrorKind {
fn from(err: TransportError) -> Self {
match err {
TransportError::IoError(err) => Self::Io(err),
TransportError::TransactionError(err) => Self::TransactionError(err),
TransportError::Custom(err) => Self::Custom(err),
}
}
}
impl Into<TransportError> for ClientErrorKind {
fn into(self) -> TransportError {
match self {
Self::Io(err) => TransportError::IoError(err),
Self::TransactionError(err) => TransportError::TransactionError(err),
Self::Reqwest(err) => TransportError::Custom(format!("{:?}", err)),
Self::RpcError(err) => TransportError::Custom(format!("{:?}", err)),
Self::SerdeJson(err) => TransportError::Custom(format!("{:?}", err)),
Self::SigningError(err) => TransportError::Custom(format!("{:?}", err)),
Self::Custom(err) => TransportError::Custom(format!("{:?}", err)),
}
}
}
#[derive(Error, Debug)]
#[error("{kind}")]
pub struct ClientError {
command: Option<&'static str>,
#[source]
#[error(transparent)]
kind: ClientErrorKind,
}
impl ClientError {
pub fn new_with_command(kind: ClientErrorKind, command: &'static str) -> Self {
Self {
command: Some(command),
kind,
}
}
pub fn into_with_command(self, command: &'static str) -> Self {
Self {
command: Some(command),
..self
}
}
pub fn command(&self) -> Option<&'static str> {
self.command
}
pub fn kind(&self) -> &ClientErrorKind {
&self.kind
}
}
impl From<ClientErrorKind> for ClientError {
fn from(kind: ClientErrorKind) -> Self {
Self {
command: None,
kind,
}
}
}
impl From<TransportError> for ClientError {
fn from(err: TransportError) -> Self {
Self {
command: None,
kind: err.into(),
}
}
}
impl Into<TransportError> for ClientError {
fn into(self) -> TransportError {
self.kind.into()
}
}
impl From<std::io::Error> for ClientError {
fn from(err: std::io::Error) -> Self {
Self {
command: None,
kind: err.into(),
}
}
}
impl From<reqwest::Error> for ClientError {
fn from(err: reqwest::Error) -> Self {
Self {
command: None,
kind: err.into(),
}
}
}
impl From<rpc_request::RpcError> for ClientError {
fn from(err: rpc_request::RpcError) -> Self {
Self {
command: None,
kind: err.into(),
}
}
}
impl From<serde_json::error::Error> for ClientError {
fn from(err: serde_json::error::Error) -> Self {
Self {
command: None,
kind: err.into(),
}
}
}
impl From<SignerError> for ClientError {
fn from(err: SignerError) -> Self {
Self {
command: None,
kind: err.into(),
}
}
}
impl From<TransactionError> for ClientError {
fn from(err: TransactionError) -> Self {
Self {
command: None,
kind: err.into(),
}
}
}
pub type Result<T> = std::result::Result<T, ClientError>;
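A short sketch of how a call site might use the restructured error type: attach the originating request name, then branch on the kind. The "getBalance" label and function names are illustrative, not values from the crate:

use solana_client::client_error::{ClientError, ClientErrorKind};

// Sketch only: inspect a ClientError by kind and report the command it came from.
fn describe(err: &ClientError) -> String {
    match err.kind() {
        ClientErrorKind::Reqwest(e) => format!("transport failure: {}", e),
        ClientErrorKind::RpcError(e) => format!("rpc failure: {}", e),
        other => format!("{} (command: {:?})", other, err.command()),
    }
}

fn example() -> String {
    let err = ClientError::new_with_command(
        ClientErrorKind::Custom("simulated failure".to_string()),
        "getBalance",
    );
    describe(&err)
}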

View File

@@ -1,4 +1,4 @@
use crate::{client_error::ClientError, rpc_request::RpcRequest};
use crate::{client_error::Result, rpc_request::RpcRequest};
pub(crate) trait GenericRpcClientRequest {
fn send(
@@ -6,5 +6,5 @@ pub(crate) trait GenericRpcClientRequest {
request: &RpcRequest,
params: serde_json::Value,
retries: usize,
) -> Result<serde_json::Value, ClientError>;
) -> Result<serde_json::Value>;
}

View File

@@ -1,5 +1,5 @@
use crate::{
client_error::ClientError,
client_error::Result,
generic_rpc_client_request::GenericRpcClientRequest,
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext},
@@ -10,6 +10,7 @@ use solana_sdk::{
instruction::InstructionError,
transaction::{self, TransactionError},
};
use solana_transaction_status::TransactionStatus;
use std::{collections::HashMap, sync::RwLock};
pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
@@ -39,9 +40,9 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: serde_json::Value,
_params: serde_json::Value,
_retries: usize,
) -> Result<serde_json::Value, ClientError> {
) -> Result<serde_json::Value> {
if let Some(value) = self.mocks.write().unwrap().remove(request) {
return Ok(value);
}
@@ -49,17 +50,6 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
return Ok(Value::Null);
}
let val = match request {
RpcRequest::ConfirmTransaction => {
if let Some(params_array) = params.as_array() {
if let Value::String(param_string) = &params_array[0] {
Value::Bool(param_string == SIGNATURE)
} else {
Value::Null
}
} else {
Value::Null
}
}
RpcRequest::GetBalance => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: Value::Number(Number::from(50)),
@@ -71,24 +61,47 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
serde_json::to_value(FeeCalculator::default()).unwrap(),
),
})?,
RpcRequest::GetFeeCalculatorForBlockhash => {
let value = if self.url == "blockhash_expired" {
Value::Null
} else {
serde_json::to_value(Some(FeeCalculator::default())).unwrap()
};
serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value,
})?
}
RpcRequest::GetFeeRateGovernor => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: serde_json::to_value(FeeRateGovernor::default()).unwrap(),
})?,
RpcRequest::GetSignatureStatus => {
let response: Option<transaction::Result<()>> = if self.url == "account_in_use" {
Some(Err(TransactionError::AccountInUse))
RpcRequest::GetSignatureStatuses => {
let status: transaction::Result<()> = if self.url == "account_in_use" {
Err(TransactionError::AccountInUse)
} else if self.url == "instruction_error" {
Some(Err(TransactionError::InstructionError(
Err(TransactionError::InstructionError(
0,
InstructionError::UninitializedAccount,
)))
} else if self.url == "sig_not_found" {
))
} else {
Ok(())
};
let status = if self.url == "sig_not_found" {
None
} else {
Some(Ok(()))
let err = status.clone().err();
Some(TransactionStatus {
status,
slot: 1,
confirmations: None,
err,
})
};
serde_json::to_value(response).unwrap()
serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: vec![status],
})?
}
RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)),
RpcRequest::GetSlot => Value::Number(Number::from(0)),

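For orientation, the `GetSignatureStatuses` branch above now wraps a vector of optional statuses in the standard `Response` envelope. A hedged sketch of roughly what that JSON looks like, with made-up field values and assuming `serde_json` is available as a dependency:

use serde_json::json;

fn main() {
    // Roughly the shape the mock hands back for GetSignatureStatuses:
    // a context slot plus one Option<TransactionStatus> per queried signature.
    let response = json!({
        "context": { "slot": 1 },
        "value": [
            {
                "slot": 1,
                "confirmations": null,
                "status": { "Ok": null },
                "err": null
            }
        ]
    });
    println!("{}", serde_json::to_string_pretty(&response).unwrap());
}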
File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
use crate::{
client_error::ClientError,
client_error::Result,
generic_rpc_client_request::GenericRpcClientRequest,
rpc_request::{RpcError, RpcRequest},
};
@@ -34,7 +34,7 @@ impl GenericRpcClientRequest for RpcClientRequest {
request: &RpcRequest,
params: serde_json::Value,
mut retries: usize,
) -> Result<serde_json::Value, ClientError> {
) -> Result<serde_json::Value> {
// Concurrent requests are not supported so reuse the same request id for all requests
let request_id = 1;

View File

@@ -1,9 +1,8 @@
use serde_json::{json, Value};
use std::{error, fmt};
use thiserror::Error;
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum RpcRequest {
ConfirmTransaction,
DeregisterNode,
ValidatorExit,
GetAccountInfo,
@@ -18,11 +17,11 @@ pub enum RpcRequest {
GetIdentity,
GetInflation,
GetLeaderSchedule,
GetNumBlocksSinceSignatureConfirmation,
GetProgramAccounts,
GetRecentBlockhash,
GetFeeCalculatorForBlockhash,
GetFeeRateGovernor,
GetSignatureStatus,
GetSignatureStatuses,
GetSlot,
GetSlotLeader,
GetStorageTurn,
@@ -44,7 +43,6 @@ impl RpcRequest {
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
let jsonrpc = "2.0";
let method = match self {
RpcRequest::ConfirmTransaction => "confirmTransaction",
RpcRequest::DeregisterNode => "deregisterNode",
RpcRequest::ValidatorExit => "validatorExit",
RpcRequest::GetAccountInfo => "getAccountInfo",
@@ -59,13 +57,11 @@ impl RpcRequest {
RpcRequest::GetIdentity => "getIdentity",
RpcRequest::GetInflation => "getInflation",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
"getNumBlocksSinceSignatureConfirmation"
}
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
RpcRequest::GetSignatureStatus => "getSignatureStatus",
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
RpcRequest::GetSlot => "getSlot",
RpcRequest::GetSlotLeader => "getSlotLeader",
RpcRequest::GetStorageTurn => "getStorageTurn",
@@ -91,26 +87,16 @@ impl RpcRequest {
}
}
#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Error)]
pub enum RpcError {
#[error("rpc request error: {0}")]
RpcRequestError(String),
}
impl fmt::Display for RpcError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "invalid")
}
}
impl error::Error for RpcError {
fn description(&self) -> &str {
"invalid"
}
fn cause(&self) -> Option<&dyn error::Error> {
// Generic error, underlying cause isn't tracked.
None
}
#[error("parse error: expected {0}")]
ParseError(String), /* "expected" */
// Anything in a `ForUser` needs to die. The caller should be
// deciding what to tell their user
#[error("{0}")]
ForUser(String), /* "direct-to-user message" */
}
#[cfg(test)]
@@ -127,7 +113,7 @@ mod tests {
assert_eq!(request["params"], json!([addr]));
let test_request = RpcRequest::GetBalance;
let request = test_request.build_request_json(1, json!([addr]));
let request = test_request.build_request_json(1, json!([addr.clone()]));
assert_eq!(request["method"], "getBalance");
let test_request = RpcRequest::GetEpochInfo;
@@ -142,6 +128,10 @@ mod tests {
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getRecentBlockhash");
let test_request = RpcRequest::GetFeeCalculatorForBlockhash;
let request = test_request.build_request_json(1, json!([addr.clone()]));
assert_eq!(request["method"], "getFeeCalculatorForBlockhash");
let test_request = RpcRequest::GetFeeRateGovernor;
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getFeeRateGovernor");

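As a reminder of what `build_request_json` assembles, each request is a plain JSON-RPC 2.0 envelope. A small sketch for the newly added `getFeeCalculatorForBlockhash` method; the blockhash string is a made-up example and `serde_json` is assumed as a dependency:

use serde_json::json;

fn main() {
    let blockhash = "ExampleBlockhash11111111111111111111111111111";
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "getFeeCalculatorForBlockhash",
        "params": [blockhash]
    });
    assert_eq!(request["method"], "getFeeCalculatorForBlockhash");
    println!("{}", request);
}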
View File

@@ -1,18 +1,14 @@
use crate::rpc_request::RpcError;
use bincode::serialize;
use jsonrpc_core::Result as JsonResult;
use crate::{client_error, rpc_request::RpcError};
use solana_sdk::{
account::Account,
clock::{Epoch, Slot},
fee_calculator::{FeeCalculator, FeeRateGovernor},
message::MessageHeader,
pubkey::Pubkey,
transaction::{Result, Transaction},
transaction::{Result, TransactionError},
};
use std::{collections::HashMap, io, net::SocketAddr, str::FromStr};
use std::{collections::HashMap, net::SocketAddr, str::FromStr};
pub type RpcResponseIn<T> = JsonResult<Response<T>>;
pub type RpcResponse<T> = io::Result<Response<T>>;
pub type RpcResult<T> = client_error::Result<Response<T>>;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RpcResponseContext {
@@ -32,119 +28,6 @@ pub struct RpcBlockCommitment<T> {
pub total_stake: u64,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct RpcReward {
pub pubkey: String,
pub lamports: i64,
}
pub type RpcRewards = Vec<RpcReward>;
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedBlock {
pub previous_blockhash: String,
pub blockhash: String,
pub parent_slot: Slot,
pub transactions: Vec<RpcTransactionWithStatusMeta>,
pub rewards: RpcRewards,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransactionWithStatusMeta {
pub transaction: RpcEncodedTransaction,
pub meta: Option<RpcTransactionStatus>,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum RpcTransactionEncoding {
Binary,
Json,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", untagged)]
pub enum RpcEncodedTransaction {
Binary(String),
Json(RpcTransaction),
}
impl RpcEncodedTransaction {
pub fn encode(transaction: Transaction, encoding: RpcTransactionEncoding) -> Self {
if encoding == RpcTransactionEncoding::Json {
RpcEncodedTransaction::Json(RpcTransaction {
signatures: transaction
.signatures
.iter()
.map(|sig| sig.to_string())
.collect(),
message: RpcMessage {
header: transaction.message.header,
account_keys: transaction
.message
.account_keys
.iter()
.map(|pubkey| pubkey.to_string())
.collect(),
recent_blockhash: transaction.message.recent_blockhash.to_string(),
instructions: transaction
.message
.instructions
.iter()
.map(|instruction| RpcCompiledInstruction {
program_id_index: instruction.program_id_index,
accounts: instruction.accounts.clone(),
data: bs58::encode(instruction.data.clone()).into_string(),
})
.collect(),
},
})
} else {
RpcEncodedTransaction::Binary(
bs58::encode(serialize(&transaction).unwrap()).into_string(),
)
}
}
}
/// A duplicate representation of a Transaction for pretty JSON serialization
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransaction {
pub signatures: Vec<String>,
pub message: RpcMessage,
}
/// A duplicate representation of a Message for pretty JSON serialization
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcMessage {
pub header: MessageHeader,
pub account_keys: Vec<String>,
pub recent_blockhash: String,
pub instructions: Vec<RpcCompiledInstruction>,
}
/// A duplicate representation of a Message for pretty JSON serialization
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcCompiledInstruction {
pub program_id_index: u8,
pub accounts: Vec<u8>,
pub data: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransactionStatus {
pub status: Result<()>,
pub fee: u64,
pub pre_balances: Vec<u64>,
pub post_balances: Vec<u64>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockhashFeeCalculator {
@@ -152,6 +35,12 @@ pub struct RpcBlockhashFeeCalculator {
pub fee_calculator: FeeCalculator,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcFeeCalculator {
pub fee_calculator: FeeCalculator,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcFeeRateGovernor {
@@ -165,6 +54,12 @@ pub struct RpcKeyedAccount {
pub account: RpcAccount,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureResult {
pub err: Option<TransactionError>,
}
/// A duplicate representation of a Message for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]

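The new `RpcResult<T>` alias and `RpcSignatureResult` both ride on the `Response<T>` envelope shown earlier in this file. A stand-in sketch of deserializing that envelope with local structs only, assuming `serde` (derive feature) and `serde_json`:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct RpcResponseContext {
    slot: u64,
}

#[derive(Debug, Deserialize)]
struct Response<T> {
    context: RpcResponseContext,
    value: T,
}

#[derive(Debug, Deserialize)]
struct RpcSignatureResult {
    // In the real type this is Option<TransactionError>; a raw JSON value
    // stands in for it here to keep the sketch self-contained.
    err: Option<serde_json::Value>,
}

fn main() {
    let raw = r#"{ "context": { "slot": 82 }, "value": { "err": null } }"#;
    let parsed: Response<RpcSignatureResult> = serde_json::from_str(raw).unwrap();
    println!("observed at slot {}, err = {:?}", parsed.context.slot, parsed.value.err);
}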
View File

@@ -188,7 +188,7 @@ impl ThinClient {
transaction: &mut Transaction,
tries: usize,
min_confirmed_blocks: usize,
) -> io::Result<Signature> {
) -> TransportResult<Signature> {
self.send_and_confirm_transaction(&[keypair], transaction, tries, min_confirmed_blocks)
}
@@ -198,7 +198,7 @@ impl ThinClient {
keypair: &Keypair,
transaction: &mut Transaction,
tries: usize,
) -> io::Result<Signature> {
) -> TransportResult<Signature> {
self.send_and_confirm_transaction(&[keypair], transaction, tries, 0)
}
@@ -209,7 +209,7 @@ impl ThinClient {
transaction: &mut Transaction,
tries: usize,
pending_confirmations: usize,
) -> io::Result<Signature> {
) -> TransportResult<Signature> {
for x in 0..tries {
let now = Instant::now();
let mut buf = vec![0; serialized_size(&transaction).unwrap() as usize];
@@ -243,13 +243,14 @@ impl ThinClient {
}
}
info!("{} tries failed transfer to {}", x, self.tpu_addr());
let (blockhash, _fee_calculator) = self.rpc_client().get_recent_blockhash()?;
let (blockhash, _fee_calculator) = self.get_recent_blockhash()?;
transaction.sign(keypairs, blockhash);
}
Err(io::Error::new(
io::ErrorKind::Other,
format!("retry_transfer failed in {} retries", tries),
))
)
.into())
}
pub fn poll_balance_with_timeout_and_commitment(
@@ -258,13 +259,15 @@ impl ThinClient {
polling_frequency: &Duration,
timeout: &Duration,
commitment_config: CommitmentConfig,
) -> io::Result<u64> {
self.rpc_client().poll_balance_with_timeout_and_commitment(
pubkey,
polling_frequency,
timeout,
commitment_config,
)
) -> TransportResult<u64> {
self.rpc_client()
.poll_balance_with_timeout_and_commitment(
pubkey,
polling_frequency,
timeout,
commitment_config,
)
.map_err(|e| e.into())
}
pub fn poll_balance_with_timeout(
@@ -272,8 +275,8 @@ impl ThinClient {
pubkey: &Pubkey,
polling_frequency: &Duration,
timeout: &Duration,
) -> io::Result<u64> {
self.rpc_client().poll_balance_with_timeout_and_commitment(
) -> TransportResult<u64> {
self.poll_balance_with_timeout_and_commitment(
pubkey,
polling_frequency,
timeout,
@@ -281,18 +284,18 @@ impl ThinClient {
)
}
pub fn poll_get_balance(&self, pubkey: &Pubkey) -> io::Result<u64> {
self.rpc_client()
.poll_get_balance_with_commitment(pubkey, CommitmentConfig::default())
pub fn poll_get_balance(&self, pubkey: &Pubkey) -> TransportResult<u64> {
self.poll_get_balance_with_commitment(pubkey, CommitmentConfig::default())
}
pub fn poll_get_balance_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> io::Result<u64> {
) -> TransportResult<u64> {
self.rpc_client()
.poll_get_balance_with_commitment(pubkey, commitment_config)
.map_err(|e| e.into())
}
pub fn wait_for_balance(&self, pubkey: &Pubkey, expected_balance: Option<u64>) -> Option<u64> {
@@ -321,9 +324,9 @@ impl ThinClient {
signature: &Signature,
commitment_config: CommitmentConfig,
) -> TransportResult<()> {
Ok(self
.rpc_client()
.poll_for_signature_with_commitment(signature, commitment_config)?)
self.rpc_client()
.poll_for_signature_with_commitment(signature, commitment_config)
.map_err(|e| e.into())
}
/// Check a signature in the bank. This method blocks
@@ -332,16 +335,17 @@ impl ThinClient {
self.rpc_client().check_signature(signature)
}
pub fn validator_exit(&self) -> io::Result<bool> {
self.rpc_client().validator_exit()
pub fn validator_exit(&self) -> TransportResult<bool> {
self.rpc_client().validator_exit().map_err(|e| e.into())
}
pub fn get_num_blocks_since_signature_confirmation(
&mut self,
sig: &Signature,
) -> io::Result<usize> {
) -> TransportResult<usize> {
self.rpc_client()
.get_num_blocks_since_signature_confirmation(sig)
.map_err(|e| e.into())
}
}
@@ -368,7 +372,7 @@ impl SyncClient for ThinClient {
keypair: &Keypair,
instruction: Instruction,
) -> TransportResult<Signature> {
let message = Message::new(vec![instruction]);
let message = Message::new(&[instruction]);
self.send_message(&[keypair], message)
}
@@ -400,14 +404,14 @@ impl SyncClient for ThinClient {
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> TransportResult<Option<Account>> {
Ok(self
.rpc_client()
.get_account_with_commitment(pubkey, commitment_config)?
.value)
self.rpc_client()
.get_account_with_commitment(pubkey, commitment_config)
.map_err(|e| e.into())
.map(|r| r.value)
}
fn get_balance(&self, pubkey: &Pubkey) -> TransportResult<u64> {
Ok(self.rpc_client().get_balance(pubkey)?)
self.rpc_client().get_balance(pubkey).map_err(|e| e.into())
}
fn get_balance_with_commitment(
@@ -415,10 +419,10 @@ impl SyncClient for ThinClient {
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> TransportResult<u64> {
let balance = self
.rpc_client()
.get_balance_with_commitment(pubkey, commitment_config)?;
Ok(balance.value)
self.rpc_client()
.get_balance_with_commitment(pubkey, commitment_config)
.map_err(|e| e.into())
.map(|r| r.value)
}
fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
@@ -445,9 +449,20 @@ impl SyncClient for ThinClient {
}
}
fn get_fee_calculator_for_blockhash(
&self,
blockhash: &Hash,
) -> TransportResult<Option<FeeCalculator>> {
self.rpc_client()
.get_fee_calculator_for_blockhash(blockhash)
.map_err(|e| e.into())
}
fn get_fee_rate_governor(&self) -> TransportResult<FeeRateGovernor> {
let fee_rate_governor = self.rpc_client().get_fee_rate_governor()?;
Ok(fee_rate_governor.value)
self.rpc_client()
.get_fee_rate_governor()
.map_err(|e| e.into())
.map(|r| r.value)
}
fn get_signature_status(
@@ -456,7 +471,7 @@ impl SyncClient for ThinClient {
) -> TransportResult<Option<transaction::Result<()>>> {
let status = self
.rpc_client()
.get_signature_status(&signature.to_string())
.get_signature_status(&signature)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -473,7 +488,7 @@ impl SyncClient for ThinClient {
) -> TransportResult<Option<transaction::Result<()>>> {
let status = self
.rpc_client()
.get_signature_status_with_commitment(&signature.to_string(), commitment_config)
.get_signature_status_with_commitment(&signature, commitment_config)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -545,23 +560,26 @@ impl SyncClient for ThinClient {
signature: &Signature,
min_confirmed_blocks: usize,
) -> TransportResult<usize> {
Ok(self
.rpc_client()
.poll_for_signature_confirmation(signature, min_confirmed_blocks)?)
self.rpc_client()
.poll_for_signature_confirmation(signature, min_confirmed_blocks)
.map_err(|e| e.into())
}
fn poll_for_signature(&self, signature: &Signature) -> TransportResult<()> {
Ok(self.rpc_client().poll_for_signature(signature)?)
self.rpc_client()
.poll_for_signature(signature)
.map_err(|e| e.into())
}
fn get_new_blockhash(&self, blockhash: &Hash) -> TransportResult<(Hash, FeeCalculator)> {
let new_blockhash = self.rpc_client().get_new_blockhash(blockhash)?;
Ok(new_blockhash)
self.rpc_client()
.get_new_blockhash(blockhash)
.map_err(|e| e.into())
}
}
impl AsyncClient for ThinClient {
fn async_send_transaction(&self, transaction: Transaction) -> io::Result<Signature> {
fn async_send_transaction(&self, transaction: Transaction) -> TransportResult<Signature> {
let mut buf = vec![0; serialized_size(&transaction).unwrap() as usize];
let mut wr = std::io::Cursor::new(&mut buf[..]);
serialize_into(&mut wr, &transaction)
@@ -576,7 +594,7 @@ impl AsyncClient for ThinClient {
keypairs: &T,
message: Message,
recent_blockhash: Hash,
) -> io::Result<Signature> {
) -> TransportResult<Signature> {
let transaction = Transaction::new(keypairs, message, recent_blockhash);
self.async_send_transaction(transaction)
}
@@ -585,8 +603,8 @@ impl AsyncClient for ThinClient {
keypair: &Keypair,
instruction: Instruction,
recent_blockhash: Hash,
) -> io::Result<Signature> {
let message = Message::new(vec![instruction]);
) -> TransportResult<Signature> {
let message = Message::new(&[instruction]);
self.async_send_message(&[keypair], message, recent_blockhash)
}
fn async_transfer(
@@ -595,7 +613,7 @@ impl AsyncClient for ThinClient {
keypair: &Keypair,
pubkey: &Pubkey,
recent_blockhash: Hash,
) -> io::Result<Signature> {
) -> TransportResult<Signature> {
let transfer_instruction =
system_instruction::transfer(&keypair.pubkey(), pubkey, lamports);
self.async_send_instruction(keypair, transfer_instruction, recent_blockhash)

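Nearly every ThinClient method above now ends with `.map_err(|e| e.into())`, converting the RPC client's error into the transport error through a `From` impl instead of routing through `io::Error`. A stand-in sketch of that bridging pattern; the types here are illustrative, not the real ones:

#[derive(Debug)]
struct ClientError(String);

#[derive(Debug)]
struct TransportError(String);

impl From<ClientError> for TransportError {
    fn from(err: ClientError) -> Self {
        TransportError(format!("client error: {}", err.0))
    }
}

// Inner client call, returning its own error type.
fn rpc_get_balance() -> Result<u64, ClientError> {
    Err(ClientError("connection refused".to_string()))
}

// Wrapper with the same shape as the ThinClient methods:
// `self.rpc_client().get_balance(pubkey).map_err(|e| e.into())`
fn get_balance() -> Result<u64, TransportError> {
    rpc_get_balance().map_err(|e| e.into())
}

fn main() {
    println!("{:?}", get_balance());
}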
View File

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.4"
version = "1.0.19"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -15,6 +15,7 @@ codecov = { repository = "solana-labs/solana", branch = "master", service = "git
[dependencies]
bincode = "1.2.1"
bv = { version = "0.11.1", features = ["serde"] }
bs58 = "0.3.0"
byteorder = "1.3.2"
chrono = { version = "0.4.10", features = ["serde"] }
@@ -33,6 +34,7 @@ jsonrpc-ws-server = "14.0.6"
libc = "0.2.66"
log = "0.4.8"
nix = "0.17.0"
num_cpus = "1.0.0"
num-traits = "0.2"
rand = "0.6.5"
rand_chacha = "0.1.1"
@@ -41,34 +43,34 @@ regex = "1.3.4"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.46"
solana-budget-program = { path = "../programs/budget", version = "1.0.4" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.4" }
solana-client = { path = "../client", version = "1.0.4" }
solana-faucet = { path = "../faucet", version = "1.0.4" }
solana-budget-program = { path = "../programs/budget", version = "1.0.19" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.19" }
solana-client = { path = "../client", version = "1.0.19" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.19" }
solana-faucet = { path = "../faucet", version = "1.0.19" }
ed25519-dalek = "=1.0.0-pre.1"
solana-ledger = { path = "../ledger", version = "1.0.4" }
solana-logger = { path = "../logger", version = "1.0.4" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.4" }
solana-metrics = { path = "../metrics", version = "1.0.4" }
solana-measure = { path = "../measure", version = "1.0.4" }
solana-net-utils = { path = "../net-utils", version = "1.0.4" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.0.4" }
solana-perf = { path = "../perf", version = "1.0.4" }
solana-runtime = { path = "../runtime", version = "1.0.4" }
solana-sdk = { path = "../sdk", version = "1.0.4" }
solana-stake-program = { path = "../programs/stake", version = "1.0.4" }
solana-storage-program = { path = "../programs/storage", version = "1.0.4" }
solana-vote-program = { path = "../programs/vote", version = "1.0.4" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.4" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.0.4" }
sys-info = "0.5.9"
solana-ledger = { path = "../ledger", version = "1.0.19" }
solana-logger = { path = "../logger", version = "1.0.19" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.19" }
solana-metrics = { path = "../metrics", version = "1.0.19" }
solana-measure = { path = "../measure", version = "1.0.19" }
solana-net-utils = { path = "../net-utils", version = "1.0.19" }
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.0.19" }
solana-perf = { path = "../perf", version = "1.0.19" }
solana-runtime = { path = "../runtime", version = "1.0.19" }
solana-sdk = { path = "../sdk", version = "1.0.19" }
solana-stake-program = { path = "../programs/stake", version = "1.0.19" }
solana-storage-program = { path = "../programs/storage", version = "1.0.19" }
solana-vote-program = { path = "../programs/vote", version = "1.0.19" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.19" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.0.19" }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.4" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.19" }
trees = "0.2.1"
[dev-dependencies]
@@ -102,3 +104,6 @@ name = "cluster_info"
[[bench]]
name = "chacha"
required-features = ["chacha"]
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -0,0 +1,263 @@
// Service to verify accounts hashes with other trusted validator nodes.
//
// Each interval, publish the snapshot hash which is the full accounts state
// hash on gossip. Monitor gossip for messages from validators in the --trusted-validators
// set and halt the node if a mismatch is detected.
use crate::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES};
use solana_ledger::{
snapshot_package::SnapshotPackage, snapshot_package::SnapshotPackageReceiver,
snapshot_package::SnapshotPackageSender,
};
use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey};
use std::collections::{HashMap, HashSet};
use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::RecvTimeoutError,
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
time::Duration,
};
pub struct AccountsHashVerifier {
t_accounts_hash_verifier: JoinHandle<()>,
}
impl AccountsHashVerifier {
pub fn new(
snapshot_package_receiver: SnapshotPackageReceiver,
snapshot_package_sender: Option<SnapshotPackageSender>,
exit: &Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
trusted_validators: Option<HashSet<Pubkey>>,
halt_on_trusted_validators_accounts_hash_mismatch: bool,
fault_injection_rate_slots: u64,
) -> Self {
let exit = exit.clone();
let cluster_info = cluster_info.clone();
let t_accounts_hash_verifier = Builder::new()
.name("solana-accounts-hash".to_string())
.spawn(move || {
let mut hashes = vec![];
loop {
if exit.load(Ordering::Relaxed) {
break;
}
match snapshot_package_receiver.recv_timeout(Duration::from_secs(1)) {
Ok(snapshot_package) => {
Self::process_snapshot(
snapshot_package,
&cluster_info,
&trusted_validators,
halt_on_trusted_validators_accounts_hash_mismatch,
&snapshot_package_sender,
&mut hashes,
&exit,
fault_injection_rate_slots,
);
}
Err(RecvTimeoutError::Disconnected) => break,
Err(RecvTimeoutError::Timeout) => (),
}
}
})
.unwrap();
Self {
t_accounts_hash_verifier,
}
}
fn process_snapshot(
snapshot_package: SnapshotPackage,
cluster_info: &Arc<RwLock<ClusterInfo>>,
trusted_validators: &Option<HashSet<Pubkey>>,
halt_on_trusted_validator_accounts_hash_mismatch: bool,
snapshot_package_sender: &Option<SnapshotPackageSender>,
hashes: &mut Vec<(Slot, Hash)>,
exit: &Arc<AtomicBool>,
fault_injection_rate_slots: u64,
) {
if fault_injection_rate_slots != 0
&& snapshot_package.root % fault_injection_rate_slots == 0
{
// For testing, publish an invalid hash to gossip.
use rand::{thread_rng, Rng};
use solana_sdk::hash::extend_and_hash;
warn!("inserting fault at slot: {}", snapshot_package.root);
let rand = thread_rng().gen_range(0, 10);
let hash = extend_and_hash(&snapshot_package.hash, &[rand]);
hashes.push((snapshot_package.root, hash));
} else {
hashes.push((snapshot_package.root, snapshot_package.hash));
}
while hashes.len() > MAX_SNAPSHOT_HASHES {
hashes.remove(0);
}
if halt_on_trusted_validator_accounts_hash_mismatch {
let mut slot_to_hash = HashMap::new();
for (slot, hash) in hashes.iter() {
slot_to_hash.insert(*slot, *hash);
}
if Self::should_halt(&cluster_info, trusted_validators, &mut slot_to_hash) {
exit.store(true, Ordering::Relaxed);
}
}
if let Some(sender) = snapshot_package_sender.as_ref() {
if sender.send(snapshot_package).is_err() {}
}
cluster_info
.write()
.unwrap()
.push_accounts_hashes(hashes.clone());
}
fn should_halt(
cluster_info: &Arc<RwLock<ClusterInfo>>,
trusted_validators: &Option<HashSet<Pubkey>>,
slot_to_hash: &mut HashMap<Slot, Hash>,
) -> bool {
let mut verified_count = 0;
let mut highest_slot = 0;
if let Some(trusted_validators) = trusted_validators.as_ref() {
for trusted_validator in trusted_validators {
let cluster_info_r = cluster_info.read().unwrap();
if let Some(accounts_hashes) =
cluster_info_r.get_accounts_hash_for_node(trusted_validator)
{
for (slot, hash) in accounts_hashes {
if let Some(reference_hash) = slot_to_hash.get(slot) {
if *hash != *reference_hash {
error!("Trusted validator {} produced conflicting hashes for slot: {} ({} != {})",
trusted_validator,
slot,
hash,
reference_hash,
);
return true;
} else {
verified_count += 1;
}
} else {
highest_slot = std::cmp::max(*slot, highest_slot);
slot_to_hash.insert(*slot, *hash);
}
}
}
}
}
inc_new_counter_info!("accounts_hash_verifier-hashes_verified", verified_count);
datapoint_info!(
"accounts_hash_verifier",
("highest_slot_verified", highest_slot, i64),
);
false
}
pub fn join(self) -> thread::Result<()> {
self.t_accounts_hash_verifier.join()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cluster_info::make_accounts_hashes_message;
use crate::contact_info::ContactInfo;
use solana_sdk::{
hash::hash,
signature::{Keypair, Signer},
};
#[test]
fn test_should_halt() {
let keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let mut trusted_validators = HashSet::new();
let mut slot_to_hash = HashMap::new();
assert!(!AccountsHashVerifier::should_halt(
&cluster_info,
&Some(trusted_validators.clone()),
&mut slot_to_hash,
));
let validator1 = Keypair::new();
let hash1 = hash(&[1]);
let hash2 = hash(&[2]);
{
let message = make_accounts_hashes_message(&validator1, vec![(0, hash1)]).unwrap();
let mut cluster_info_w = cluster_info.write().unwrap();
cluster_info_w.push_message(message);
}
slot_to_hash.insert(0, hash2);
trusted_validators.insert(validator1.pubkey());
assert!(AccountsHashVerifier::should_halt(
&cluster_info,
&Some(trusted_validators.clone()),
&mut slot_to_hash,
));
}
#[test]
fn test_max_hashes() {
solana_logger::setup();
use std::path::PathBuf;
use tempfile::TempDir;
let keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let trusted_validators = HashSet::new();
let exit = Arc::new(AtomicBool::new(false));
let mut hashes = vec![];
for i in 0..MAX_SNAPSHOT_HASHES + 1 {
let snapshot_links = TempDir::new().unwrap();
let snapshot_package = SnapshotPackage {
hash: hash(&[i as u8]),
root: 100 + i as u64,
slot_deltas: vec![],
snapshot_links,
tar_output_file: PathBuf::from("."),
storages: vec![],
};
AccountsHashVerifier::process_snapshot(
snapshot_package,
&cluster_info,
&Some(trusted_validators.clone()),
false,
&None,
&mut hashes,
&exit,
0,
);
}
let cluster_info_r = cluster_info.read().unwrap();
let cluster_hashes = cluster_info_r
.get_accounts_hash_for_node(&keypair.pubkey())
.unwrap();
info!("{:?}", cluster_hashes);
assert_eq!(hashes.len(), MAX_SNAPSHOT_HASHES);
assert_eq!(cluster_hashes.len(), MAX_SNAPSHOT_HASHES);
assert_eq!(cluster_hashes[0], (101, hash(&[1])));
assert_eq!(
cluster_hashes[MAX_SNAPSHOT_HASHES - 1],
(
100 + MAX_SNAPSHOT_HASHES as u64,
hash(&[MAX_SNAPSHOT_HASHES as u8])
)
);
}
}
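One detail of `process_snapshot` above worth calling out: the published hash list is a bounded buffer, trimmed from the front once it exceeds `MAX_SNAPSHOT_HASHES`, which is exactly what `test_max_hashes` asserts. A stand-in sketch of just that behavior; the cap value here is arbitrary:

const MAX: usize = 16;

// Push a new (slot, hash-byte) pair and drop the oldest entries once the
// buffer exceeds the cap, mirroring the trimming loop in process_snapshot.
fn push_capped(hashes: &mut Vec<(u64, u8)>, entry: (u64, u8)) {
    hashes.push(entry);
    while hashes.len() > MAX {
        hashes.remove(0);
    }
}

fn main() {
    let mut hashes = Vec::new();
    for i in 0..=(MAX as u64) {
        push_capped(&mut hashes, (100 + i, i as u8));
    }
    assert_eq!(hashes.len(), MAX);
    // The (100, 0) entry was evicted; slot 101 is now the oldest, as in test_max_hashes.
    assert_eq!(hashes[0], (101, 1));
    println!("{:?}", hashes);
}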

View File

@@ -1017,7 +1017,6 @@ mod tests {
};
use crossbeam_channel::unbounded;
use itertools::Itertools;
use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionWithStatusMeta};
use solana_ledger::{
blockstore::entries_to_test_shreds,
entry::{next_entry, Entry, EntrySlice},
@@ -1030,6 +1029,7 @@ mod tests {
system_transaction,
transaction::TransactionError,
};
use solana_transaction_status::{EncodedTransaction, TransactionWithStatusMeta};
use std::{sync::atomic::Ordering, thread::sleep};
#[test]
@@ -1975,15 +1975,25 @@ mod tests {
let confirmed_block = blockstore.get_confirmed_block(bank.slot(), None).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for RpcTransactionWithStatusMeta { transaction, meta } in
for TransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if let RpcEncodedTransaction::Json(transaction) = transaction {
if let EncodedTransaction::Json(transaction) = transaction {
if transaction.signatures[0] == success_signature.to_string() {
assert_eq!(meta.unwrap().status, Ok(()));
let meta = meta.unwrap();
assert_eq!(meta.err, None);
assert_eq!(meta.status, Ok(()));
} else if transaction.signatures[0] == ix_error_signature.to_string() {
let meta = meta.unwrap();
assert_eq!(
meta.unwrap().status,
meta.err,
Some(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
))
);
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)

View File

@@ -27,6 +27,11 @@ use crate::{
sendmmsg::{multicast, send_mmsg},
weighted_shuffle::{weighted_best, weighted_shuffle},
};
use rand::distributions::{Distribution, WeightedIndex};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use bincode::{serialize, serialized_size};
use compression::prelude::*;
use core::cmp;
@@ -35,6 +40,7 @@ use rayon::iter::IntoParallelIterator;
use rayon::iter::ParallelIterator;
use rayon::ThreadPool;
use solana_ledger::{bank_forks::BankForks, staking_utils};
use solana_measure::measure::Measure;
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
use solana_net_utils::{
@@ -48,7 +54,7 @@ use solana_sdk::timing::duration_as_s;
use solana_sdk::{
clock::{Slot, DEFAULT_MS_PER_SLOT},
pubkey::Pubkey,
signature::{Keypair, Signable, Signature},
signature::{Keypair, Signable, Signature, Signer},
timing::{duration_as_ms, timestamp},
transaction::Transaction,
};
@@ -95,6 +101,12 @@ pub enum ClusterInfoError {
BadGossipAddress,
}
#[derive(Clone)]
pub struct DataBudget {
bytes: usize, // amount of bytes we have in the budget to send
last_timestamp_ms: u64, // Last time that we upped the bytes count,
// used to detect when to up the bytes budget again
}
#[derive(Clone)]
pub struct ClusterInfo {
/// The network
pub gossip: CrdsGossip,
@@ -103,6 +115,8 @@ pub struct ClusterInfo {
/// The network entrypoint
entrypoint: Option<ContactInfo>,
last_datapoint_submit: Instant,
outbound_budget: DataBudget,
}
#[derive(Default, Clone)]
@@ -180,6 +194,22 @@ struct PullData {
pub filter: CrdsFilter,
}
pub fn make_accounts_hashes_message(
keypair: &Keypair,
accounts_hashes: Vec<(Slot, Hash)>,
) -> Option<CrdsValue> {
let message = CrdsData::AccountsHashes(SnapshotHash::new(keypair.pubkey(), accounts_hashes));
Some(CrdsValue::new_signed(message, keypair))
}
fn distance(a: u64, b: u64) -> u64 {
if a > b {
a - b
} else {
b - a
}
}
// TODO These messages should go through the gpu pipeline for spam filtering
#[derive(Serialize, Deserialize, Debug)]
#[allow(clippy::large_enum_variant)]
@@ -191,6 +221,17 @@ enum Protocol {
PruneMessage(Pubkey, PruneData),
}
// Rating for pull requests
// A response table is generated as a
// 2-d table arranged by target nodes and a
// list of responses for that node,
// to/responses_index is a location in that table.
struct ResponseScore {
to: usize, // to, index of who the response is to
responses_index: usize, // index into the list of responses for a given to
score: u64, // Relative score of the response
}
impl ClusterInfo {
/// Without a valid keypair gossip will not function. Only useful for tests.
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
@@ -203,6 +244,10 @@ impl ClusterInfo {
keypair,
entrypoint: None,
last_datapoint_submit: Instant::now(),
outbound_budget: DataBudget {
bytes: 0,
last_timestamp_ms: 0,
},
};
let id = contact_info.id;
me.gossip.set_self(&id);
@@ -443,22 +488,36 @@ impl ClusterInfo {
.process_push_message(&self.id(), vec![entry], now);
}
pub fn push_snapshot_hashes(&mut self, snapshot_hashes: Vec<(Slot, Hash)>) {
if snapshot_hashes.len() > MAX_SNAPSHOT_HASHES {
pub fn push_message(&mut self, message: CrdsValue) {
let now = message.wallclock();
let id = message.pubkey();
self.gossip.process_push_message(&id, vec![message], now);
}
pub fn push_accounts_hashes(&mut self, accounts_hashes: Vec<(Slot, Hash)>) {
if accounts_hashes.len() > MAX_SNAPSHOT_HASHES {
warn!(
"snapshot_hashes too large, ignored: {}",
snapshot_hashes.len()
"accounts hashes too large, ignored: {}",
accounts_hashes.len(),
);
return;
}
let now = timestamp();
let entry = CrdsValue::new_signed(
CrdsData::SnapshotHash(SnapshotHash::new(self.id(), snapshot_hashes, now)),
&self.keypair,
);
self.gossip
.process_push_message(&self.id(), vec![entry], now);
let message = CrdsData::AccountsHashes(SnapshotHash::new(self.id(), accounts_hashes));
self.push_message(CrdsValue::new_signed(message, &self.keypair));
}
pub fn push_snapshot_hashes(&mut self, snapshot_hashes: Vec<(Slot, Hash)>) {
if snapshot_hashes.len() > MAX_SNAPSHOT_HASHES {
warn!(
"snapshot hashes too large, ignored: {}",
snapshot_hashes.len(),
);
return;
}
let message = CrdsData::SnapshotHashes(SnapshotHash::new(self.id(), snapshot_hashes));
self.push_message(CrdsValue::new_signed(message, &self.keypair));
}
pub fn push_vote(&mut self, tower_index: usize, vote: Transaction) {
@@ -518,11 +577,19 @@ impl ClusterInfo {
.collect()
}
pub fn get_accounts_hash_for_node(&self, pubkey: &Pubkey) -> Option<&Vec<(Slot, Hash)>> {
self.gossip
.crds
.table
.get(&CrdsValueLabel::AccountsHashes(*pubkey))
.map(|x| &x.value.accounts_hash().unwrap().hashes)
}
pub fn get_snapshot_hash_for_node(&self, pubkey: &Pubkey) -> Option<&Vec<(Slot, Hash)>> {
self.gossip
.crds
.table
.get(&CrdsValueLabel::SnapshotHash(*pubkey))
.get(&CrdsValueLabel::SnapshotHashes(*pubkey))
.map(|x| &x.value.snapshot_hash().unwrap().hashes)
}
@@ -973,7 +1040,7 @@ impl ClusterInfo {
let mut num_live_peers = 1i64;
peers.iter().for_each(|p| {
// A peer is considered live if they generated their contact info recently
if timestamp() - p.wallclock <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
if distance(timestamp(), p.wallclock) <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
num_live_peers += 1;
}
});
@@ -1389,20 +1456,43 @@ impl ClusterInfo {
})
});
// process the collected pulls together
let rsp = Self::handle_pull_requests(me, recycler, gossip_pull_data);
let rsp = Self::handle_pull_requests(me, recycler, gossip_pull_data, stakes);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
}
}
// Pull requests take an incoming bloom filter of contained entries from a node
// and tries to send back to them the values it detects are missing.
fn handle_pull_requests(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
requests: Vec<PullData>,
stakes: &HashMap<Pubkey, u64>,
) -> Option<Packets> {
// split the requests into addrs and filters
let mut caller_and_filters = vec![];
let mut addrs = vec![];
let mut time = Measure::start("handle_pull_requests");
{
let mut cluster_info = me.write().unwrap();
let now = timestamp();
const INTERVAL_MS: u64 = 100;
// allow 50kBps per staked validator, epoch slots + votes ~= 1.5kB/slot ~= 4kB/s
const BYTES_PER_INTERVAL: usize = 5000;
const MAX_BUDGET_MULTIPLE: usize = 5; // allow budget build-up to 5x the interval default
if now - cluster_info.outbound_budget.last_timestamp_ms > INTERVAL_MS {
let len = std::cmp::max(stakes.len(), 2);
cluster_info.outbound_budget.bytes += len * BYTES_PER_INTERVAL;
cluster_info.outbound_budget.bytes = std::cmp::min(
cluster_info.outbound_budget.bytes,
MAX_BUDGET_MULTIPLE * len * BYTES_PER_INTERVAL,
);
cluster_info.outbound_budget.last_timestamp_ms = now;
}
}
for pull_data in requests {
caller_and_filters.push((pull_data.caller, pull_data.filter));
addrs.push(pull_data.from_addr);
@@ -1414,30 +1504,101 @@ impl ClusterInfo {
.unwrap()
.gossip
.process_pull_requests(caller_and_filters, now);
let mut packets = Packets::new_with_recycler(recycler.clone(), 64, "handle_pull_requests");
pull_responses
// Filter bad to addresses
let pull_responses: Vec<_> = pull_responses
.into_iter()
.zip(addrs.into_iter())
.for_each(|(response, from_addr)| {
if !from_addr.ip().is_unspecified() && from_addr.port() != 0 {
let len = response.len();
trace!("get updates since response {}", len);
inc_new_counter_debug!("cluster_info-pull_request-rsp", len);
Self::split_gossip_messages(response)
.into_iter()
.for_each(|payload| {
let protocol = Protocol::PullResponse(self_id, payload);
// The remote node may not know its public IP:PORT. Instead of responding to the caller's
// gossip addr, respond to the origin addr. The last origin addr is picked from the list of
// addrs.
packets
.packets
.push(Packet::from_data(&from_addr, protocol))
})
.filter_map(|(responses, from_addr)| {
if !from_addr.ip().is_unspecified()
&& from_addr.port() != 0
&& !responses.is_empty()
{
Some((responses, from_addr))
} else {
trace!("Dropping Gossip pull response, as destination is unknown");
None
}
});
})
.collect();
if pull_responses.is_empty() {
return None;
}
let mut stats: Vec<_> = pull_responses
.iter()
.enumerate()
.map(|(i, (responses, _from_addr))| {
let score: u64 = if stakes.get(&responses[0].pubkey()).is_some() {
2
} else {
1
};
responses
.iter()
.enumerate()
.map(|(j, _response)| ResponseScore {
to: i,
responses_index: j,
score,
})
.collect::<Vec<ResponseScore>>()
})
.flatten()
.collect();
stats.sort_by(|a, b| a.score.cmp(&b.score));
let weights: Vec<_> = stats.iter().map(|stat| stat.score).collect();
let seed = [48u8; 32];
let rng = &mut ChaChaRng::from_seed(seed);
let weighted_index = WeightedIndex::new(weights).unwrap();
let mut packets = Packets::new_with_recycler(recycler.clone(), 64, "handle_pull_requests");
let mut total_bytes = 0;
let outbound_budget = me.read().unwrap().outbound_budget.bytes;
let mut sent = HashSet::new();
while sent.len() < stats.len() {
let index = weighted_index.sample(rng);
if sent.contains(&index) {
continue;
}
sent.insert(index);
let stat = &stats[index];
let from_addr = pull_responses[stat.to].1;
let response = pull_responses[stat.to].0[stat.responses_index].clone();
let protocol = Protocol::PullResponse(self_id, vec![response]);
packets
.packets
.push(Packet::from_data(&from_addr, protocol));
let len = packets.packets.len();
total_bytes += packets.packets[len - 1].meta.size;
if total_bytes > outbound_budget {
inc_new_counter_info!("gossip_pull_request-no_budget", 1);
break;
}
}
{
let mut cluster_info = me.write().unwrap();
cluster_info.outbound_budget.bytes = cluster_info
.outbound_budget
.bytes
.saturating_sub(total_bytes);
}
time.stop();
inc_new_counter_info!("gossip_pull_request-sent_requests", sent.len());
inc_new_counter_info!(
"gossip_pull_request-dropped_requests",
stats.len() - sent.len()
);
debug!(
"handle_pull_requests: {} sent: {} total: {} total_bytes: {}",
time,
sent.len(),
stats.len(),
total_bytes
);
if packets.is_empty() {
return None;
}
@@ -1615,7 +1776,7 @@ impl ClusterInfo {
.unwrap()
}
fn gossip_contact_info(id: &Pubkey, gossip: SocketAddr) -> ContactInfo {
pub fn gossip_contact_info(id: &Pubkey, gossip: SocketAddr) -> ContactInfo {
ContactInfo {
id: *id,
gossip,

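To summarize the new `handle_pull_requests` flow: each pull response gets a score (responses to staked requesters score higher), responses are sampled without replacement by weight, and sending stops once the outbound byte budget is spent. A stand-in sketch of that selection loop with made-up sizes and budget, assuming `rand` and `rand_chacha` versions compatible with the imports added above:

use rand::distributions::{Distribution, WeightedIndex};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use std::collections::HashSet;

fn main() {
    // (peer index, serialized response size in bytes, requester is staked)
    let responses = [(0usize, 800usize, true), (1, 1200, false), (2, 600, true), (3, 900, false)];
    let weights: Vec<u64> = responses.iter().map(|r| if r.2 { 2 } else { 1 }).collect();
    let outbound_budget: usize = 2000;

    let mut rng = ChaChaRng::from_seed([48u8; 32]);
    let weighted_index = WeightedIndex::new(&weights).unwrap();

    let mut sent = HashSet::new();
    let mut total_bytes = 0usize;
    while sent.len() < responses.len() {
        let index = weighted_index.sample(&mut rng);
        if !sent.insert(index) {
            continue; // already picked; sample again
        }
        total_bytes += responses[index].1;
        println!("send response to peer {} ({} bytes)", responses[index].0, responses[index].1);
        if total_bytes > outbound_budget {
            println!("budget exhausted at {} bytes; dropping the rest", total_bytes);
            break;
        }
    }
}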
View File

@@ -51,7 +51,8 @@ impl ClusterInfoVoteListener {
if exit.load(Ordering::Relaxed) {
return Ok(());
}
if let Some(bank) = poh_recorder.lock().unwrap().bank() {
let poh_bank = poh_recorder.lock().unwrap().bank();
if let Some(bank) = poh_bank {
let last_ts = bank.last_vote_sync.load(Ordering::Relaxed);
let (votes, new_ts) = cluster_info.read().unwrap().get_votes(last_ts);
bank.last_vote_sync

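The one-line change above is about lock scope: binding `poh_recorder.lock().unwrap().bank()` to a local drops the mutex guard before the vote-processing block runs, whereas putting the call directly in the `if let` scrutinee keeps the guard alive for the whole block. A stand-in sketch of the pattern, using a std `Mutex` over a plain `Option` instead of the real PohRecorder:

use std::sync::Mutex;

fn main() {
    let poh_recorder = Mutex::new(Some(42u64));

    // The guard returned by lock() is a temporary dropped at the end of this
    // statement, so the lock is not held during the work below.
    let poh_bank = poh_recorder.lock().unwrap().clone();

    if let Some(bank) = poh_bank {
        // Long-running work here no longer blocks other users of the mutex.
        println!("processing bank {}", bank);
    }
}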
View File

@@ -1,3 +1,6 @@
use crate::consensus::VOTE_THRESHOLD_SIZE;
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
@@ -10,9 +13,11 @@ use std::{
time::Duration,
};
pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub struct BlockCommitment {
pub commitment: [u64; MAX_LOCKOUT_HISTORY],
pub commitment: BlockCommitmentArray,
}
impl BlockCommitment {
@@ -25,23 +30,55 @@ impl BlockCommitment {
assert!(confirmation_count > 0 && confirmation_count <= MAX_LOCKOUT_HISTORY);
self.commitment[confirmation_count - 1]
}
pub fn increase_rooted_stake(&mut self, stake: u64) {
self.commitment[MAX_LOCKOUT_HISTORY] += stake;
}
pub fn get_rooted_stake(&self) -> u64 {
self.commitment[MAX_LOCKOUT_HISTORY]
}
#[cfg(test)]
pub(crate) fn new(commitment: [u64; MAX_LOCKOUT_HISTORY]) -> Self {
pub(crate) fn new(commitment: BlockCommitmentArray) -> Self {
Self { commitment }
}
}
#[derive(Debug, Default)]
#[derive(Default)]
pub struct BlockCommitmentCache {
block_commitment: HashMap<Slot, BlockCommitment>,
total_stake: u64,
bank: Arc<Bank>,
root: Slot,
}
impl std::fmt::Debug for BlockCommitmentCache {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("BlockCommitmentCache")
.field("block_commitment", &self.block_commitment)
.field("total_stake", &self.total_stake)
.field(
"bank",
&format_args!("Bank({{current_slot: {:?}}})", self.bank.slot()),
)
.field("root", &self.root)
.finish()
}
}
impl BlockCommitmentCache {
pub fn new(block_commitment: HashMap<Slot, BlockCommitment>, total_stake: u64) -> Self {
pub fn new(
block_commitment: HashMap<Slot, BlockCommitment>,
total_stake: u64,
bank: Arc<Bank>,
root: Slot,
) -> Self {
Self {
block_commitment,
total_stake,
bank,
root,
}
}
@@ -53,38 +90,72 @@ impl BlockCommitmentCache {
self.total_stake
}
pub fn get_block_with_depth_commitment(
&self,
minimum_depth: usize,
minimum_stake_percentage: f64,
) -> Option<Slot> {
self.block_commitment
.iter()
.filter(|&(_, block_commitment)| {
let fork_stake_minimum_depth: u64 = block_commitment.commitment[minimum_depth..]
.iter()
.cloned()
.sum();
fork_stake_minimum_depth as f64 / self.total_stake as f64
>= minimum_stake_percentage
})
.map(|(slot, _)| *slot)
.max()
pub fn bank(&self) -> Arc<Bank> {
self.bank.clone()
}
pub fn get_rooted_block_with_commitment(&self, minimum_stake_percentage: f64) -> Option<u64> {
self.get_block_with_depth_commitment(MAX_LOCKOUT_HISTORY - 1, minimum_stake_percentage)
pub fn slot(&self) -> Slot {
self.bank.slot()
}
pub fn root(&self) -> Slot {
self.root
}
pub fn get_confirmation_count(&self, slot: Slot) -> Option<usize> {
self.get_lockout_count(slot, VOTE_THRESHOLD_SIZE)
}
// Returns the lowest level at which at least `minimum_stake_percentage` of the total epoch
// stake is locked out
fn get_lockout_count(&self, slot: Slot, minimum_stake_percentage: f64) -> Option<usize> {
self.get_block_commitment(slot).map(|block_commitment| {
let iterator = block_commitment.commitment.iter().enumerate().rev();
let mut sum = 0;
for (i, stake) in iterator {
sum += stake;
if (sum as f64 / self.total_stake as f64) > minimum_stake_percentage {
return i + 1;
}
}
0
})
}
pub fn is_confirmed_rooted(&self, slot: Slot) -> bool {
self.get_block_commitment(slot)
.map(|block_commitment| {
(block_commitment.get_rooted_stake() as f64 / self.total_stake as f64)
> VOTE_THRESHOLD_SIZE
})
.unwrap_or(false)
}
#[cfg(test)]
pub fn new_for_tests() -> Self {
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
block_commitment.insert(0, BlockCommitment::default());
Self {
block_commitment,
total_stake: 42,
..Self::default()
}
}
}
pub struct CommitmentAggregationData {
bank: Arc<Bank>,
root: Slot,
total_staked: u64,
}
impl CommitmentAggregationData {
pub fn new(bank: Arc<Bank>, total_staked: u64) -> Self {
Self { bank, total_staked }
pub fn new(bank: Arc<Bank>, root: Slot, total_staked: u64) -> Self {
Self {
bank,
root,
total_staked,
}
}
}
@@ -144,14 +215,24 @@ impl AggregateCommitmentService {
continue;
}
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
let block_commitment = Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
let mut new_block_commitment =
BlockCommitmentCache::new(block_commitment, aggregation_data.total_staked);
let mut new_block_commitment = BlockCommitmentCache::new(
block_commitment,
aggregation_data.total_staked,
aggregation_data.bank,
aggregation_data.root,
);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
aggregate_commitment_time.stop();
inc_new_counter_info!(
"aggregate-commitment-ms",
aggregate_commitment_time.as_ms() as usize
);
}
}
@@ -199,7 +280,7 @@ impl AggregateCommitmentService {
commitment
.entry(*a)
.or_insert_with(BlockCommitment::default)
.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
.increase_rooted_stake(lamports);
} else {
ancestors_index = i;
break;
@@ -246,84 +327,31 @@ mod tests {
}
#[test]
fn test_get_block_with_depth_commitment() {
fn test_get_confirmations() {
let bank = Arc::new(Bank::default());
// Build BlockCommitmentCache with votes at depths 0 and 1 for 2 slots
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 15);
cache0.increase_confirmation_stake(2, 25);
cache0.increase_confirmation_stake(1, 5);
cache0.increase_confirmation_stake(2, 40);
let mut cache1 = BlockCommitment::default();
cache1.increase_confirmation_stake(1, 10);
cache1.increase_confirmation_stake(2, 20);
cache1.increase_confirmation_stake(1, 40);
cache1.increase_confirmation_stake(2, 5);
let mut cache2 = BlockCommitment::default();
cache2.increase_confirmation_stake(1, 20);
cache2.increase_confirmation_stake(2, 5);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
block_commitment.entry(1).or_insert(cache1.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
block_commitment.entry(2).or_insert(cache2.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50, bank, 0);
// Neither slot has rooted votes
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.1),
None
);
// Neither slot meets the minimum level of commitment 0.6 at depth 1
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(1, 0.6),
None
);
// Only slot 0 meets the minimum level of commitment 0.5 at depth 1
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(1, 0.5),
Some(0)
);
// If multiple slots meet the minimum level of commitment, method should return the most recent
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(1, 0.4),
Some(1)
);
// If multiple slots meet the minimum level of commitment, method should return the most recent
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(0, 0.6),
Some(1)
);
// Neither slot meets the minimum level of commitment 0.9 at depth 0
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(0, 0.9),
None
);
}
#[test]
fn test_get_rooted_block_with_commitment() {
// Build BlockCommitmentCache with rooted votes
let mut cache0 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 40);
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
let mut cache1 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 30);
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 2, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
block_commitment.entry(1).or_insert(cache1.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
// Only slot 0 meets the minimum level of commitment 0.66 at root
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.66),
Some(0)
);
// If multiple slots meet the minimum level of commitment, method should return the most recent
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.6),
Some(1)
);
// Neither slot meets the minimum level of commitment 0.9 at root
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.9),
None
);
assert_eq!(block_commitment_cache.get_confirmation_count(0), Some(2));
assert_eq!(block_commitment_cache.get_confirmation_count(1), Some(1));
assert_eq!(block_commitment_cache.get_confirmation_count(2), Some(0),);
assert_eq!(block_commitment_cache.get_confirmation_count(3), None,);
}
#[test]
@@ -344,7 +372,7 @@ mod tests {
for a in ancestors {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
}
}
@@ -369,7 +397,7 @@ mod tests {
for a in ancestors {
if a <= root {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else {
let mut expected = BlockCommitment::default();
@@ -401,7 +429,7 @@ mod tests {
for (i, a) in ancestors.iter().enumerate() {
if *a <= root {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else if i <= 4 {
let mut expected = BlockCommitment::default();

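The numbers in `test_get_confirmations` follow directly from the `get_lockout_count` walk: accumulate stake from the deepest confirmation level toward the shallowest and return the first depth where the running total crosses the threshold. A worked sketch of that arithmetic with total stake 50, assuming the 2/3 threshold that `VOTE_THRESHOLD_SIZE` represents:

fn confirmation_count(commitment: &[u64], total_stake: u64, threshold: f64) -> usize {
    let mut sum = 0u64;
    // Walk from the deepest confirmation bucket back toward depth 1.
    for (i, stake) in commitment.iter().enumerate().rev() {
        sum += stake;
        if (sum as f64 / total_stake as f64) > threshold {
            return i + 1;
        }
    }
    0
}

fn main() {
    let threshold = 2.0 / 3.0;
    // commitment[0] is stake locked to depth 1, commitment[1] to depth 2.
    assert_eq!(confirmation_count(&[5, 40], 50, threshold), 2); // 40/50 = 0.8 crosses 2/3 at depth 2
    assert_eq!(confirmation_count(&[40, 5], 50, threshold), 1); // only 45/50 = 0.9 at depth 1 crosses
    assert_eq!(confirmation_count(&[20, 5], 50, threshold), 0); // never crosses 2/3
    println!("ok");
}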
View File

@@ -1,5 +1,6 @@
use crate::contact_info::ContactInfo;
use bincode::{serialize, serialized_size};
use solana_sdk::timing::timestamp;
use solana_sdk::{
clock::Slot,
hash::Hash,
@@ -62,7 +63,8 @@ pub enum CrdsData {
ContactInfo(ContactInfo),
Vote(VoteIndex, Vote),
EpochSlots(EpochSlotIndex, EpochSlots),
SnapshotHash(SnapshotHash),
SnapshotHashes(SnapshotHash),
AccountsHashes(SnapshotHash),
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@@ -93,11 +95,11 @@ pub struct SnapshotHash {
}
impl SnapshotHash {
pub fn new(from: Pubkey, hashes: Vec<(Slot, Hash)>, wallclock: u64) -> Self {
pub fn new(from: Pubkey, hashes: Vec<(Slot, Hash)>) -> Self {
Self {
from,
hashes,
wallclock,
wallclock: timestamp(),
}
}
}
@@ -156,7 +158,8 @@ pub enum CrdsValueLabel {
ContactInfo(Pubkey),
Vote(VoteIndex, Pubkey),
EpochSlots(Pubkey),
SnapshotHash(Pubkey),
SnapshotHashes(Pubkey),
AccountsHashes(Pubkey),
}
impl fmt::Display for CrdsValueLabel {
@@ -165,7 +168,8 @@ impl fmt::Display for CrdsValueLabel {
CrdsValueLabel::ContactInfo(_) => write!(f, "ContactInfo({})", self.pubkey()),
CrdsValueLabel::Vote(ix, _) => write!(f, "Vote({}, {})", ix, self.pubkey()),
CrdsValueLabel::EpochSlots(_) => write!(f, "EpochSlots({})", self.pubkey()),
CrdsValueLabel::SnapshotHash(_) => write!(f, "SnapshotHash({})", self.pubkey()),
CrdsValueLabel::SnapshotHashes(_) => write!(f, "SnapshotHashes({})", self.pubkey()),
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
}
}
}
@@ -176,7 +180,8 @@ impl CrdsValueLabel {
CrdsValueLabel::ContactInfo(p) => *p,
CrdsValueLabel::Vote(_, p) => *p,
CrdsValueLabel::EpochSlots(p) => *p,
CrdsValueLabel::SnapshotHash(p) => *p,
CrdsValueLabel::SnapshotHashes(p) => *p,
CrdsValueLabel::AccountsHashes(p) => *p,
}
}
}
@@ -202,7 +207,8 @@ impl CrdsValue {
CrdsData::ContactInfo(contact_info) => contact_info.wallclock,
CrdsData::Vote(_, vote) => vote.wallclock,
CrdsData::EpochSlots(_, vote) => vote.wallclock,
CrdsData::SnapshotHash(hash) => hash.wallclock,
CrdsData::SnapshotHashes(hash) => hash.wallclock,
CrdsData::AccountsHashes(hash) => hash.wallclock,
}
}
pub fn pubkey(&self) -> Pubkey {
@@ -210,7 +216,8 @@ impl CrdsValue {
CrdsData::ContactInfo(contact_info) => contact_info.id,
CrdsData::Vote(_, vote) => vote.from,
CrdsData::EpochSlots(_, slots) => slots.from,
CrdsData::SnapshotHash(hash) => hash.from,
CrdsData::SnapshotHashes(hash) => hash.from,
CrdsData::AccountsHashes(hash) => hash.from,
}
}
pub fn label(&self) -> CrdsValueLabel {
@@ -218,7 +225,8 @@ impl CrdsValue {
CrdsData::ContactInfo(_) => CrdsValueLabel::ContactInfo(self.pubkey()),
CrdsData::Vote(ix, _) => CrdsValueLabel::Vote(*ix, self.pubkey()),
CrdsData::EpochSlots(_, _) => CrdsValueLabel::EpochSlots(self.pubkey()),
CrdsData::SnapshotHash(_) => CrdsValueLabel::SnapshotHash(self.pubkey()),
CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()),
CrdsData::AccountsHashes(_) => CrdsValueLabel::AccountsHashes(self.pubkey()),
}
}
pub fn contact_info(&self) -> Option<&ContactInfo> {
@@ -250,7 +258,14 @@ impl CrdsValue {
pub fn snapshot_hash(&self) -> Option<&SnapshotHash> {
match &self.data {
CrdsData::SnapshotHash(slots) => Some(slots),
CrdsData::SnapshotHashes(slots) => Some(slots),
_ => None,
}
}
pub fn accounts_hash(&self) -> Option<&SnapshotHash> {
match &self.data {
CrdsData::AccountsHashes(slots) => Some(slots),
_ => None,
}
}
@@ -260,7 +275,8 @@ impl CrdsValue {
let mut labels = vec![
CrdsValueLabel::ContactInfo(*key),
CrdsValueLabel::EpochSlots(*key),
CrdsValueLabel::SnapshotHash(*key),
CrdsValueLabel::SnapshotHashes(*key),
CrdsValueLabel::AccountsHashes(*key),
];
labels.extend((0..MAX_VOTES).map(|ix| CrdsValueLabel::Vote(ix, *key)));
labels
@@ -310,14 +326,15 @@ mod test {
#[test]
fn test_labels() {
let mut hits = [false; 3 + MAX_VOTES as usize];
let mut hits = [false; 4 + MAX_VOTES as usize];
// this method should cover all the possible labels
for v in &CrdsValue::record_labels(&Pubkey::default()) {
match v {
CrdsValueLabel::ContactInfo(_) => hits[0] = true,
CrdsValueLabel::EpochSlots(_) => hits[1] = true,
CrdsValueLabel::SnapshotHash(_) => hits[2] = true,
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 3] = true,
CrdsValueLabel::SnapshotHashes(_) => hits[2] = true,
CrdsValueLabel::AccountsHashes(_) => hits[3] = true,
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 4] = true,
}
}
assert!(hits.iter().all(|x| *x));

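The reason the label enum gains a second variant is that the crds table is keyed by `CrdsValueLabel`, so a node's accounts hashes and snapshot hashes must map to different keys or one would overwrite the other. A stand-in sketch of that keying idea with plain std types:

use std::collections::HashMap;

#[derive(Debug, Clone, PartialEq, Eq, Hash)]
enum Label {
    SnapshotHashes(&'static str),
    AccountsHashes(&'static str),
}

fn main() {
    let mut table: HashMap<Label, Vec<(u64, &'static str)>> = HashMap::new();
    // Same node publishes both kinds of hashes; distinct labels keep both entries.
    table.insert(Label::SnapshotHashes("node-a"), vec![(10, "snapshot-hash")]);
    table.insert(Label::AccountsHashes("node-a"), vec![(10, "accounts-hash")]);
    assert_eq!(table.len(), 2);
    println!("{:?}", table);
}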
View File

@@ -1,6 +1,8 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
@@ -11,13 +13,25 @@ use std::thread;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
// - To try and keep the RocksDB size under 400GB:
// Seeing about 1600b/shred, using 2000b/shred for margin, so 200m shreds can be stored in 400gb.
// At 5k shreds/slot at 50k tps, this is 40k slots (~4.4 hours).
// At idle, 60 shreds/slot this is about 3.3m slots (~15 days)
// This is chosen to allow enough time for
// - To try and keep the RocksDB size under 512GB at 50k tps (100 slots take ~2GB).
// - A validator to download a snapshot from a peer and boot from it
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
// to catch back up to where it was when it stopped
pub const DEFAULT_MAX_LEDGER_SLOTS: u64 = 270_000;
// Remove a fixed number of slots at a time, it's more efficient than doing it one-by-one
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 200_000_000;
// Allow down to 50m, or 3.5 days at idle, 1hr at 50k load, around ~100GB
pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// Check for removing slots at this interval so we don't purge too often
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
pub struct LedgerCleanupService {
@@ -36,7 +50,7 @@ impl LedgerCleanupService {
max_ledger_slots
);
let exit = exit.clone();
let mut next_purge_batch = max_ledger_slots;
let mut last_purge_slot = 0;
let t_cleanup = Builder::new()
.name("solana-ledger-cleanup".to_string())
.spawn(move || loop {
@@ -47,7 +61,8 @@ impl LedgerCleanupService {
&new_root_receiver,
&blockstore,
max_ledger_slots,
&mut next_purge_batch,
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
) {
match e {
RecvTimeoutError::Disconnected => break,
@@ -59,45 +74,123 @@ impl LedgerCleanupService {
Self { t_cleanup }
}
fn find_slots_to_clean(
blockstore: &Arc<Blockstore>,
root: Slot,
max_ledger_shreds: u64,
) -> (u64, Slot, Slot) {
let mut shreds = Vec::new();
let mut iterate_time = Measure::start("iterate_time");
let mut total_shreds = 0;
let mut first_slot = 0;
for (i, (slot, meta)) in blockstore.slot_meta_iterator(0).unwrap().enumerate() {
if i == 0 {
first_slot = slot;
debug!("purge: searching from slot: {}", slot);
}
// Not exact since non-full slots will have holes
total_shreds += meta.received;
shreds.push((slot, meta.received));
if slot > root {
break;
}
}
iterate_time.stop();
info!(
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
max_ledger_shreds,
shreds.len(),
total_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
return (0, 0, 0);
}
let mut cur_shreds = 0;
let mut lowest_slot_to_clean = shreds[0].0;
for (slot, num_shreds) in shreds.iter().rev() {
cur_shreds += *num_shreds as u64;
if cur_shreds > max_ledger_shreds {
lowest_slot_to_clean = *slot;
break;
}
}
(cur_shreds, lowest_slot_to_clean, first_slot)
}
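// A self-contained restatement of the scan above; the helper name and the example
// numbers are hypothetical, and `shreds` is the same oldest-first (slot, shred_count)
// list that find_slots_to_clean builds (assumed non-empty, as in the code above).
fn lowest_slot_to_clean(shreds: &[(u64, u64)], max_shreds: u64) -> u64 {
    let mut total: u64 = 0;
    let mut lowest = shreds[0].0;
    // Walk from the newest slot backwards, keeping the newest `max_shreds` worth of data.
    for (slot, count) in shreds.iter().rev() {
        total += *count;
        if total > max_shreds {
            lowest = *slot;
            break;
        }
    }
    lowest
}
// e.g. with shreds = [(10, 40), (11, 40), (12, 40), (13, 40)] and max_shreds = 100,
// the running total first exceeds 100 at slot 11, so slots 10..=11 become purgeable.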
fn cleanup_ledger(
new_root_receiver: &Receiver<Slot>,
blockstore: &Arc<Blockstore>,
max_ledger_slots: u64,
next_purge_batch: &mut u64,
max_ledger_shreds: u64,
last_purge_slot: &mut u64,
purge_interval: u64,
) -> Result<(), RecvTimeoutError> {
let disk_utilization_pre = blockstore.storage_size();
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Notify blockstore of impending purge
if root > *next_purge_batch {
//cleanup
let lowest_slot = root - max_ledger_slots;
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_slot;
blockstore.purge_slots(0, Some(lowest_slot));
*next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
let disk_utilization_post = blockstore.storage_size();
if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) =
(disk_utilization_pre, disk_utilization_post)
{
datapoint_debug!(
"ledger_disk_utilization",
("disk_utilization_pre", disk_utilization_pre as i64, i64),
("disk_utilization_post", disk_utilization_post as i64, i64),
(
"disk_utilization_delta",
(disk_utilization_pre as i64 - disk_utilization_post as i64),
i64
)
if root - *last_purge_slot > purge_interval {
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
);
*last_purge_slot = root;
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
if num_shreds_to_clean > 0 {
debug!(
"cleaning up to: {} shreds: {} first: {}",
lowest_slot_to_clean, num_shreds_to_clean, first_slot
);
loop {
let current_lowest =
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
slot_update_time.stop();
let mut clean_time = Measure::start("ledger_clean");
blockstore.purge_slots(first_slot, Some(current_lowest));
clean_time.stop();
debug!(
"ledger purge {} -> {}: {} {}",
first_slot, current_lowest, slot_update_time, clean_time
);
first_slot += DEFAULT_PURGE_BATCH_SIZE;
if current_lowest == lowest_slot_to_clean {
break;
}
thread::sleep(Duration::from_millis(500));
}
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
}
Ok(())
}
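// A standalone sketch of how the batched purge loop above advances; the function
// name and example numbers are hypothetical. Each (start, end) pair mirrors the
// bounds handed to purge_slots(), and the service sleeps 500ms between batches so
// other blockstore users are not starved.
fn purge_batches(mut first_slot: u64, lowest_slot_to_clean: u64, batch: u64) -> Vec<(u64, u64)> {
    let mut ranges = Vec::new();
    loop {
        let current_lowest = std::cmp::min(lowest_slot_to_clean, first_slot + batch);
        ranges.push((first_slot, current_lowest));
        first_slot += batch;
        if current_lowest == lowest_slot_to_clean {
            break;
        }
    }
    ranges
}
// e.g. purge_batches(0, 1_000, 256) -> [(0, 256), (256, 512), (512, 768), (768, 1_000)]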
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
if let (Ok(pre), Ok(post)) = (pre, post) {
datapoint_debug!(
"ledger_disk_utilization",
("disk_utilization_pre", pre as i64, i64),
("disk_utilization_post", post as i64, i64),
("disk_utilization_delta", (pre as i64 - post as i64), i64)
);
}
}
pub fn join(self) -> thread::Result<()> {
self.t_cleanup.join()
}
@@ -111,6 +204,7 @@ mod tests {
#[test]
fn test_cleanup() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 5);
@@ -118,10 +212,10 @@ mod tests {
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
//send a signal to kill slots 0-40
let mut next_purge_slot = 0;
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot)
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
.unwrap();
//check that 0-40 don't exist
@@ -134,6 +228,62 @@ mod tests {
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_cleanup_speed() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let mut blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore.set_no_compaction(true);
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
let mut first_insert = Measure::start("first_insert");
let initial_slots = 50;
let initial_entries = 5;
let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
blockstore.insert_shreds(shreds, None, false).unwrap();
first_insert.stop();
info!("{}", first_insert);
let mut last_purge_slot = 0;
let mut slot = initial_slots;
let mut num_slots = 6;
for _ in 0..5 {
let mut insert_time = Measure::start("insert time");
let batch_size = 2;
let batches = num_slots / batch_size;
for i in 0..batches {
let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
blockstore.insert_shreds(shreds, None, false).unwrap();
if i % 100 == 0 {
info!("inserting..{} of {}", i, batches);
}
}
insert_time.stop();
let mut time = Measure::start("purge time");
sender.send(slot + num_slots).unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
initial_slots,
&mut last_purge_slot,
10,
)
.unwrap();
time.stop();
info!(
"slot: {} size: {} {} {}",
slot, num_slots, insert_time, time
);
slot += num_slots;
num_slots *= 2;
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_compaction() {
let blockstore_path = get_tmp_ledger_path!();
@@ -142,7 +292,7 @@ mod tests {
let n = 10_000;
let batch_size = 100;
let batches = n / batch_size;
let max_ledger_slots = 100;
let max_ledger_shreds = 100;
for i in 0..batches {
let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1);
@@ -158,8 +308,9 @@ mod tests {
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
max_ledger_slots,
max_ledger_shreds,
&mut next_purge_batch,
10,
)
.unwrap();
@@ -170,7 +321,7 @@ mod tests {
assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,);
// check that early slots don't exist
let max_slot = n - max_ledger_slots;
let max_slot = n - max_ledger_shreds - 1;
blockstore
.slot_meta_iterator(0)
.unwrap()

@@ -5,6 +5,7 @@
//! command-line tools to spin up validators and a Rust library
//!
pub mod accounts_hash_verifier;
pub mod banking_stage;
pub mod broadcast_stage;
pub mod cluster_info_vote_listener;

@@ -213,49 +213,4 @@ mod tests {
assert_eq!(packets[i].meta.addr(), saddr2);
}
}
#[cfg(target_os = "linux")]
#[test]
pub fn test_recv_mmsg_batch_size() {
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = reader.local_addr().unwrap();
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
const TEST_BATCH_SIZE: usize = 64;
let sent = TEST_BATCH_SIZE;
let mut elapsed_in_max_batch = 0;
(0..1000).for_each(|_| {
for _ in 0..sent {
let data = [0; PACKET_DATA_SIZE];
sender.send_to(&data[..], &addr).unwrap();
}
let mut packets = vec![Packet::default(); TEST_BATCH_SIZE];
let now = Instant::now();
let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1;
elapsed_in_max_batch += now.elapsed().as_nanos();
assert_eq!(TEST_BATCH_SIZE, recv);
});
let mut elapsed_in_small_batch = 0;
(0..1000).for_each(|_| {
for _ in 0..sent {
let data = [0; PACKET_DATA_SIZE];
sender.send_to(&data[..], &addr).unwrap();
}
let mut packets = vec![Packet::default(); 4];
let mut recv = 0;
let now = Instant::now();
while let Ok(num) = recv_mmsg(&reader, &mut packets[..]) {
recv += num.1;
if recv >= TEST_BATCH_SIZE {
break;
}
}
elapsed_in_small_batch += now.elapsed().as_nanos();
assert_eq!(TEST_BATCH_SIZE, recv);
});
assert!(elapsed_in_max_batch <= elapsed_in_small_batch);
}
}

@@ -11,6 +11,7 @@ use crate::{
};
use solana_ledger::{
bank_forks::BankForks,
block_error::BlockError,
blockstore::Blockstore,
blockstore_processor::{
self, BlockstoreProcessorError, ConfirmationProgress, ConfirmationTiming,
@@ -25,6 +26,7 @@ use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::Bank;
use solana_sdk::{
clock::Slot,
genesis_config::GenesisConfig,
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
@@ -75,10 +77,11 @@ pub struct ReplayStageConfig {
pub leader_schedule_cache: Arc<LeaderScheduleCache>,
pub slot_full_senders: Vec<Sender<(u64, Pubkey)>>,
pub latest_root_senders: Vec<Sender<Slot>>,
pub snapshot_package_sender: Option<SnapshotPackageSender>,
pub accounts_hash_sender: Option<SnapshotPackageSender>,
pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
pub transaction_status_sender: Option<TransactionStatusSender>,
pub rewards_recorder_sender: Option<RewardsRecorderSender>,
pub genesis_config: GenesisConfig,
}
pub struct ReplayStage {
@@ -178,10 +181,11 @@ impl ReplayStage {
leader_schedule_cache,
slot_full_senders,
latest_root_senders,
snapshot_package_sender,
accounts_hash_sender,
block_commitment_cache,
transaction_status_sender,
rewards_recorder_sender,
genesis_config,
} = config;
let (root_bank_sender, root_bank_receiver) = channel();
@@ -191,7 +195,7 @@ impl ReplayStage {
// Start the replay stage loop
let (lockouts_sender, commitment_service) =
AggregateCommitmentService::new(&exit, block_commitment_cache);
AggregateCommitmentService::new(&exit, block_commitment_cache.clone());
#[allow(clippy::cognitive_complexity)]
let t_replay = Builder::new()
@@ -245,6 +249,7 @@ impl ReplayStage {
&slot_full_senders,
transaction_status_sender.clone(),
&verify_recyclers,
&genesis_config,
);
datapoint_debug!(
"replay_stage-memory",
@@ -308,7 +313,10 @@ impl ReplayStage {
let start = allocated.get();
if !is_locked_out && vote_threshold {
info!("voting: {} {}", bank.slot(), fork_weight);
subscriptions.notify_subscribers(bank.slot(), &bank_forks);
subscriptions.notify_subscribers(
block_commitment_cache.read().unwrap().slot(),
&bank_forks,
);
if let Some(votable_leader) =
leader_schedule_cache.slot_leader_at(bank.slot(), Some(&bank))
{
@@ -333,8 +341,9 @@ impl ReplayStage {
&root_bank_sender,
total_staked,
&lockouts_sender,
&snapshot_package_sender,
&accounts_hash_sender,
&latest_root_senders,
&subscriptions,
)?;
}
datapoint_debug!(
@@ -395,7 +404,8 @@ impl ReplayStage {
rewards_recorder_sender.clone(),
);
if let Some(bank) = poh_recorder.lock().unwrap().bank() {
let poh_bank = poh_recorder.lock().unwrap().bank();
if let Some(bank) = poh_bank {
Self::log_leader_change(
&my_pubkey,
bank.slot(),
@@ -545,6 +555,7 @@ impl ReplayStage {
bank_progress: &mut ForkProgress,
transaction_status_sender: Option<TransactionStatusSender>,
verify_recyclers: &VerifyRecyclers,
genesis_config: &GenesisConfig,
) -> result::Result<usize, BlockstoreProcessorError> {
let tx_count_before = bank_progress.replay_progress.num_txs;
let confirm_result = blockstore_processor::confirm_slot(
@@ -556,6 +567,7 @@ impl ReplayStage {
transaction_status_sender,
None,
verify_recyclers,
Some(genesis_config),
);
let tx_count_after = bank_progress.replay_progress.num_txs;
let tx_count = tx_count_after - tx_count_before;
@@ -566,11 +578,19 @@ impl ReplayStage {
// errors related to the slot being purged
let slot = bank.slot();
warn!("Fatal replay error in slot: {}, err: {:?}", slot, err);
datapoint_error!(
"replay-stage-mark_dead_slot",
("error", format!("error: {:?}", err), String),
("slot", slot, i64)
);
if let BlockstoreProcessorError::InvalidBlock(BlockError::InvalidTickCount) = err {
datapoint_info!(
"replay-stage-mark_dead_slot",
("error", format!("error: {:?}", err), String),
("slot", slot, i64)
);
} else {
datapoint_error!(
"replay-stage-mark_dead_slot",
("error", format!("error: {:?}", err), String),
("slot", slot, i64)
);
}
bank_progress.is_dead = true;
blockstore
.set_dead_slot(slot)
@@ -595,8 +615,9 @@ impl ReplayStage {
root_bank_sender: &Sender<Vec<Arc<Bank>>>,
total_staked: u64,
lockouts_sender: &Sender<CommitmentAggregationData>,
snapshot_package_sender: &Option<SnapshotPackageSender>,
accounts_hash_sender: &Option<SnapshotPackageSender>,
latest_root_senders: &[Sender<Slot>],
subscriptions: &Arc<RpcSubscriptions>,
) -> Result<()> {
if bank.is_empty() {
inc_new_counter_info!("replay_stage-voted_empty_bank", 1);
@@ -622,7 +643,8 @@ impl ReplayStage {
blockstore
.set_roots(&rooted_slots)
.expect("Ledger set roots failed");
Self::handle_new_root(new_root, &bank_forks, progress, snapshot_package_sender);
Self::handle_new_root(new_root, &bank_forks, progress, accounts_hash_sender);
subscriptions.notify_roots(rooted_slots);
latest_root_senders.iter().for_each(|s| {
if let Err(e) = s.send(new_root) {
trace!("latest root send failed: {:?}", e);
@@ -634,7 +656,13 @@ impl ReplayStage {
return Err(e.into());
}
}
Self::update_commitment_cache(bank.clone(), total_staked, lockouts_sender);
Self::update_commitment_cache(
bank.clone(),
bank_forks.read().unwrap().root(),
total_staked,
lockouts_sender,
);
if let Some(ref voting_keypair) = voting_keypair {
let node_keypair = cluster_info.read().unwrap().keypair.clone();
@@ -662,10 +690,13 @@ impl ReplayStage {
fn update_commitment_cache(
bank: Arc<Bank>,
root: Slot,
total_staked: u64,
lockouts_sender: &Sender<CommitmentAggregationData>,
) {
if let Err(e) = lockouts_sender.send(CommitmentAggregationData::new(bank, total_staked)) {
if let Err(e) =
lockouts_sender.send(CommitmentAggregationData::new(bank, root, total_staked))
{
trace!("lockouts_sender failed: {:?}", e);
}
}
@@ -712,6 +743,7 @@ impl ReplayStage {
slot_full_senders: &[Sender<(u64, Pubkey)>],
transaction_status_sender: Option<TransactionStatusSender>,
verify_recyclers: &VerifyRecyclers,
genesis_config: &GenesisConfig,
) -> bool {
let mut did_complete_bank = false;
let mut tx_count = 0;
@@ -740,6 +772,7 @@ impl ReplayStage {
bank_progress,
transaction_status_sender.clone(),
verify_recyclers,
genesis_config,
);
match replay_result {
Ok(replay_tx_count) => tx_count += replay_tx_count,
@@ -949,12 +982,12 @@ impl ReplayStage {
new_root: u64,
bank_forks: &RwLock<BankForks>,
progress: &mut HashMap<u64, ForkProgress>,
snapshot_package_sender: &Option<SnapshotPackageSender>,
accounts_hash_sender: &Option<SnapshotPackageSender>,
) {
bank_forks
.write()
.unwrap()
.set_root(new_root, snapshot_package_sender);
.set_root(new_root, accounts_hash_sender);
let r_bank_forks = bank_forks.read().unwrap();
progress.retain(|k, _| r_bank_forks.get(*k).is_some());
}
@@ -1056,9 +1089,7 @@ pub(crate) mod tests {
transaction_status_service::TransactionStatusService,
};
use crossbeam_channel::unbounded;
use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionWithStatusMeta};
use solana_ledger::{
block_error::BlockError,
blockstore::make_slot_entries,
blockstore::{entries_to_test_shreds, BlockstoreError},
create_new_tmp_ledger,
@@ -1081,6 +1112,7 @@ pub(crate) mod tests {
transaction::TransactionError,
};
use solana_stake_program::stake_state;
use solana_transaction_status::{EncodedTransaction, TransactionWithStatusMeta};
use solana_vote_program::{
vote_state::{self, Vote, VoteState, VoteStateVersions},
vote_transaction,
@@ -1385,7 +1417,10 @@ pub(crate) mod tests {
let bank0 = Bank::new(&genesis_config);
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
));
let bank_forks = BankForks::new(0, bank0);
bank_forks.working_bank().freeze();
@@ -1682,6 +1717,7 @@ pub(crate) mod tests {
&mut bank0_progress,
None,
&VerifyRecyclers::default(),
&genesis_config,
);
// Check that the erroring bank was marked as dead in the progress map
@@ -1754,7 +1790,12 @@ pub(crate) mod tests {
bank_forks.write().unwrap().insert(bank1);
let arc_bank1 = bank_forks.read().unwrap().get(1).unwrap().clone();
leader_vote(&arc_bank1, &leader_voting_pubkey);
ReplayStage::update_commitment_cache(arc_bank1.clone(), leader_lamports, &lockouts_sender);
ReplayStage::update_commitment_cache(
arc_bank1.clone(),
0,
leader_lamports,
&lockouts_sender,
);
let bank2 = Bank::new_from_parent(&arc_bank1, &Pubkey::default(), arc_bank1.slot() + 1);
let _res = bank2.transfer(10, &genesis_config_info.mint_keypair, &Pubkey::new_rand());
@@ -1765,7 +1806,12 @@ pub(crate) mod tests {
bank_forks.write().unwrap().insert(bank2);
let arc_bank2 = bank_forks.read().unwrap().get(2).unwrap().clone();
leader_vote(&arc_bank2, &leader_voting_pubkey);
ReplayStage::update_commitment_cache(arc_bank2.clone(), leader_lamports, &lockouts_sender);
ReplayStage::update_commitment_cache(
arc_bank2.clone(),
0,
leader_lamports,
&lockouts_sender,
);
thread::sleep(Duration::from_millis(200));
let mut expected0 = BlockCommitment::default();
@@ -1890,15 +1936,25 @@ pub(crate) mod tests {
let confirmed_block = blockstore.get_confirmed_block(slot, None).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for RpcTransactionWithStatusMeta { transaction, meta } in
for TransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if let RpcEncodedTransaction::Json(transaction) = transaction {
if let EncodedTransaction::Json(transaction) = transaction {
if transaction.signatures[0] == signatures[0].to_string() {
assert_eq!(meta.unwrap().status, Ok(()));
let meta = meta.unwrap();
assert_eq!(meta.err, None);
assert_eq!(meta.status, Ok(()));
} else if transaction.signatures[0] == signatures[1].to_string() {
let meta = meta.unwrap();
assert_eq!(
meta.unwrap().status,
meta.err,
Some(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
))
);
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)

@@ -1,7 +1,7 @@
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_client::rpc_response::RpcReward;
use solana_ledger::blockstore::Blockstore;
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use solana_transaction_status::Reward;
use std::{
sync::{
atomic::{AtomicBool, Ordering},
@@ -49,7 +49,7 @@ impl RewardsRecorderService {
let (slot, rewards) = rewards_receiver.recv_timeout(Duration::from_secs(1))?;
let rpc_rewards = rewards
.into_iter()
.map(|(pubkey, lamports)| RpcReward {
.map(|(pubkey, lamports)| Reward {
pubkey: pubkey.to_string(),
lamports,
})

@@ -1,22 +1,19 @@
//! The `rpc` module implements the Solana RPC interface.
use crate::{
cluster_info::ClusterInfo, commitment::BlockCommitmentCache, contact_info::ContactInfo,
packet::PACKET_DATA_SIZE, storage_stage::StorageState, validator::ValidatorExit,
cluster_info::ClusterInfo,
commitment::{BlockCommitmentArray, BlockCommitmentCache},
contact_info::ContactInfo,
packet::PACKET_DATA_SIZE,
storage_stage::StorageState,
validator::ValidatorExit,
};
use bincode::serialize;
use jsonrpc_core::{Error, Metadata, Result};
use jsonrpc_derive::rpc;
use solana_client::rpc_response::{
Response, RpcAccount, RpcBlockCommitment, RpcBlockhashFeeCalculator, RpcConfirmedBlock,
RpcContactInfo, RpcEpochInfo, RpcFeeRateGovernor, RpcIdentity, RpcKeyedAccount,
RpcLeaderSchedule, RpcResponseContext, RpcSignatureConfirmation, RpcStorageTurn,
RpcTransactionEncoding, RpcVersionInfo, RpcVoteAccountInfo, RpcVoteAccountStatus,
};
use solana_client::rpc_response::*;
use solana_faucet::faucet::request_airdrop_transaction;
use solana_ledger::{
bank_forks::BankForks, blockstore::Blockstore, rooted_slot_iterator::RootedSlotIterator,
};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_runtime::bank::Bank;
use solana_sdk::{
clock::{Slot, UnixTimestamp},
@@ -29,15 +26,22 @@ use solana_sdk::{
timing::slot_duration_from_slots_per_year,
transaction::{self, Transaction},
};
use solana_transaction_status::{
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
};
use solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY};
use std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
str::FromStr,
sync::{Arc, RwLock},
thread::sleep,
time::{Duration, Instant},
};
const MAX_QUERY_ITEMS: usize = 256;
const MAX_SLOT_RANGE: u64 = 10_000;
type RpcResponse<T> = Result<Response<T>>;
fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
@@ -48,11 +52,21 @@ fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
#[derive(Debug, Default, Clone)]
pub struct JsonRpcConfig {
pub enable_validator_exit: bool,
pub enable_get_confirmed_block: bool,
pub enable_set_log_filter: bool,
pub enable_rpc_transaction_history: bool,
pub identity_pubkey: Pubkey,
pub faucet_addr: Option<SocketAddr>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureStatusConfig {
pub search_transaction_history: Option<bool>,
// DEPRECATED
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
#[derive(Clone)]
pub struct JsonRpcRequestProcessor {
bank_forks: Arc<RwLock<BankForks>>,
@@ -165,6 +179,18 @@ impl JsonRpcRequestProcessor {
)
}
fn get_fee_calculator_for_blockhash(
&self,
blockhash: &Hash,
) -> RpcResponse<Option<RpcFeeCalculator>> {
let bank = &*self.bank(None);
let fee_calculator = bank.get_fee_calculator(blockhash);
new_response(
bank,
fee_calculator.map(|fee_calculator| RpcFeeCalculator { fee_calculator }),
)
}
fn get_fee_rate_governor(&self) -> RpcResponse<RpcFeeRateGovernor> {
let bank = &*self.bank(None);
let fee_rate_governor = bank.get_fee_rate_governor();
@@ -185,16 +211,16 @@ impl JsonRpcRequestProcessor {
match signature {
Err(e) => Err(e),
Ok(sig) => {
let status = bank.get_signature_confirmation_status(&sig);
let status = bank.get_signature_status(&sig);
match status {
Some((_, result)) => new_response(bank, result.is_ok()),
Some(status) => new_response(bank, status.is_ok()),
None => new_response(bank, false),
}
}
}
}
fn get_block_commitment(&self, block: Slot) -> RpcBlockCommitment<[u64; MAX_LOCKOUT_HISTORY]> {
fn get_block_commitment(&self, block: Slot) -> RpcBlockCommitment<BlockCommitmentArray> {
let r_block_commitment = self.block_commitment_cache.read().unwrap();
RpcBlockCommitment {
commitment: r_block_commitment
@@ -204,19 +230,6 @@ impl JsonRpcRequestProcessor {
}
}
pub fn get_signature_confirmation_status(
&self,
signature: Signature,
commitment: Option<CommitmentConfig>,
) -> Option<RpcSignatureConfirmation> {
self.bank(commitment)
.get_signature_confirmation_status(&signature)
.map(|(confirmations, status)| RpcSignatureConfirmation {
confirmations,
status,
})
}
fn get_slot(&self, commitment: Option<CommitmentConfig>) -> Result<u64> {
Ok(self.bank(commitment).slot())
}
@@ -325,6 +338,13 @@ impl JsonRpcRequestProcessor {
Ok(pubkeys)
}
pub fn set_log_filter(&self, filter: String) -> Result<()> {
if self.config.enable_set_log_filter {
solana_logger::setup_with(&filter);
}
Ok(())
}
pub fn validator_exit(&self) -> Result<bool> {
if self.config.enable_validator_exit {
warn!("validator_exit request...");
@@ -341,9 +361,9 @@ impl JsonRpcRequestProcessor {
pub fn get_confirmed_block(
&self,
slot: Slot,
encoding: Option<RpcTransactionEncoding>,
) -> Result<Option<RpcConfirmedBlock>> {
if self.config.enable_get_confirmed_block {
encoding: Option<TransactionEncoding>,
) -> Result<Option<ConfirmedBlock>> {
if self.config.enable_rpc_transaction_history {
Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
} else {
Ok(None)
@@ -359,18 +379,12 @@ impl JsonRpcRequestProcessor {
if end_slot < start_slot {
return Ok(vec![]);
}
let start_slot = (start_slot..end_slot).find(|&slot| self.blockstore.is_root(slot));
if let Some(start_slot) = start_slot {
let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blockstore)
.unwrap()
.map(|(slot, _)| slot)
.collect();
slots.retain(|&x| x <= end_slot);
Ok(slots)
} else {
Ok(vec![])
}
Ok(self
.blockstore
.rooted_slot_iterator(start_slot)
.map_err(|_| Error::internal_error())?
.filter(|&slot| slot <= end_slot)
.collect())
}
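// The rewrite above boils down to: take the blockstore's rooted-slot iterator
// starting at start_slot and keep every slot up to end_slot. A sketch of the same
// filtering over a plain iterator (the slot values in the example are hypothetical):
fn confirmed_blocks_in_range(rooted: impl Iterator<Item = u64>, end_slot: u64) -> Vec<u64> {
    rooted.filter(|&slot| slot <= end_slot).collect()
}
// e.g. confirmed_blocks_in_range(vec![5, 6, 8, 11].into_iter(), 10) -> [5, 6, 8]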
pub fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
@@ -391,6 +405,143 @@ impl JsonRpcRequestProcessor {
.ok()
.unwrap_or(None))
}
pub fn get_signature_confirmation_status(
&self,
signature: Signature,
commitment: Option<CommitmentConfig>,
) -> Option<RpcSignatureConfirmation> {
self.get_transaction_status(signature, &self.bank(commitment))
.map(
|TransactionStatus {
status,
confirmations,
..
}| RpcSignatureConfirmation {
confirmations: confirmations.unwrap_or(MAX_LOCKOUT_HISTORY + 1),
status,
},
)
}
pub fn get_signature_status(
&self,
signature: Signature,
commitment: Option<CommitmentConfig>,
) -> Option<transaction::Result<()>> {
self.bank(commitment).get_signature_status(&signature)
}
pub fn get_signature_statuses(
&self,
signatures: Vec<Signature>,
config: Option<RpcSignatureStatusConfig>,
) -> RpcResponse<Vec<Option<TransactionStatus>>> {
let mut statuses: Vec<Option<TransactionStatus>> = vec![];
// DEPRECATED
let commitment = config
.clone()
.and_then(|x| x.commitment)
.or_else(|| Some(CommitmentConfig::recent()));
let search_transaction_history = config
.and_then(|x| x.search_transaction_history)
.unwrap_or(false);
let bank = self.bank(commitment);
for signature in signatures {
let status = if let Some(status) = self.get_transaction_status(signature, &bank) {
Some(status)
} else if self.config.enable_rpc_transaction_history && search_transaction_history {
self.blockstore
.get_transaction_status(signature)
.map_err(|_| Error::internal_error())?
.map(|(slot, status_meta)| {
let err = status_meta.status.clone().err();
TransactionStatus {
slot,
status: status_meta.status,
confirmations: None,
err,
}
})
} else {
None
};
statuses.push(status);
}
Ok(Response {
context: RpcResponseContext { slot: bank.slot() },
value: statuses,
})
}
fn get_transaction_status(
&self,
signature: Signature,
bank: &Arc<Bank>,
) -> Option<TransactionStatus> {
bank.get_signature_status_slot(&signature)
.map(|(slot, status)| {
let r_block_commitment_cache = self.block_commitment_cache.read().unwrap();
let confirmations = if r_block_commitment_cache.root() >= slot
&& r_block_commitment_cache.is_confirmed_rooted(slot)
{
None
} else {
r_block_commitment_cache
.get_confirmation_count(slot)
.or(Some(0))
};
let err = status.clone().err();
TransactionStatus {
slot,
status,
confirmations,
err,
}
})
}
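// A minimal client-side sketch of how the confirmations value produced above is
// meant to be read; the helper is hypothetical, while TransactionStatus is the
// solana_transaction_status type already imported at the top of this file.
fn is_finalized(status: &TransactionStatus) -> bool {
    // None means the slot is at or below a confirmed root; Some(n) is the current
    // cluster confirmation count (0 when the commitment cache has no entry yet).
    status.confirmations.is_none()
}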
pub fn get_confirmed_transaction(
&self,
signature: Signature,
encoding: Option<TransactionEncoding>,
) -> Result<Option<ConfirmedTransaction>> {
if self.config.enable_rpc_transaction_history {
Ok(self
.blockstore
.get_confirmed_transaction(signature, encoding)
.unwrap_or(None))
} else {
Ok(None)
}
}
pub fn get_confirmed_signatures_for_address(
&self,
pubkey: Pubkey,
start_slot: Slot,
end_slot: Slot,
) -> Result<Vec<Signature>> {
if self.config.enable_rpc_transaction_history {
Ok(self
.blockstore
.get_confirmed_signatures_for_address(pubkey, start_slot, end_slot)
.unwrap_or_else(|_| vec![]))
} else {
Ok(vec![])
}
}
pub fn get_first_available_block(&self) -> Result<Slot> {
Ok(self
.blockstore
.get_first_available_block()
.unwrap_or_default())
}
}
fn get_tpu_addr(cluster_info: &Arc<RwLock<ClusterInfo>>) -> Result<SocketAddr> {
@@ -418,6 +569,7 @@ impl Metadata for Meta {}
pub trait RpcSol {
type Metadata;
// DEPRECATED
#[rpc(meta, name = "confirmTransaction")]
fn confirm_transaction(
&self,
@@ -426,6 +578,24 @@ pub trait RpcSol {
commitment: Option<CommitmentConfig>,
) -> RpcResponse<bool>;
// DEPRECATED
#[rpc(meta, name = "getSignatureStatus")]
fn get_signature_status(
&self,
meta: Self::Metadata,
signature_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Option<transaction::Result<()>>>;
// DEPRECATED (used by Trust Wallet)
#[rpc(meta, name = "getSignatureConfirmation")]
fn get_signature_confirmation(
&self,
meta: Self::Metadata,
signature_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Option<RpcSignatureConfirmation>>;
#[rpc(meta, name = "getAccountInfo")]
fn get_account_info(
&self,
@@ -483,7 +653,7 @@ pub trait RpcSol {
&self,
meta: Self::Metadata,
block: Slot,
) -> Result<RpcBlockCommitment<[u64; MAX_LOCKOUT_HISTORY]>>;
) -> Result<RpcBlockCommitment<BlockCommitmentArray>>;
#[rpc(meta, name = "getGenesisHash")]
fn get_genesis_hash(&self, meta: Self::Metadata) -> Result<String>;
@@ -503,16 +673,23 @@ pub trait RpcSol {
commitment: Option<CommitmentConfig>,
) -> RpcResponse<RpcBlockhashFeeCalculator>;
#[rpc(meta, name = "getFeeCalculatorForBlockhash")]
fn get_fee_calculator_for_blockhash(
&self,
meta: Self::Metadata,
blockhash: String,
) -> RpcResponse<Option<RpcFeeCalculator>>;
#[rpc(meta, name = "getFeeRateGovernor")]
fn get_fee_rate_governor(&self, meta: Self::Metadata) -> RpcResponse<RpcFeeRateGovernor>;
#[rpc(meta, name = "getSignatureStatus")]
fn get_signature_status(
#[rpc(meta, name = "getSignatureStatuses")]
fn get_signature_statuses(
&self,
meta: Self::Metadata,
signature_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Option<transaction::Result<()>>>;
signature_strs: Vec<String>,
config: Option<RpcSignatureStatusConfig>,
) -> RpcResponse<Vec<Option<TransactionStatus>>>;
#[rpc(meta, name = "getSlot")]
fn get_slot(&self, meta: Self::Metadata, commitment: Option<CommitmentConfig>) -> Result<u64>;
@@ -579,22 +756,6 @@ pub trait RpcSol {
#[rpc(meta, name = "validatorExit")]
fn validator_exit(&self, meta: Self::Metadata) -> Result<bool>;
#[rpc(meta, name = "getNumBlocksSinceSignatureConfirmation")]
fn get_num_blocks_since_signature_confirmation(
&self,
meta: Self::Metadata,
signature_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Option<usize>>;
#[rpc(meta, name = "getSignatureConfirmation")]
fn get_signature_confirmation(
&self,
meta: Self::Metadata,
signature_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Option<RpcSignatureConfirmation>>;
#[rpc(meta, name = "getIdentity")]
fn get_identity(&self, meta: Self::Metadata) -> Result<RpcIdentity>;
@@ -609,8 +770,8 @@ pub trait RpcSol {
&self,
meta: Self::Metadata,
slot: Slot,
encoding: Option<RpcTransactionEncoding>,
) -> Result<Option<RpcConfirmedBlock>>;
encoding: Option<TransactionEncoding>,
) -> Result<Option<ConfirmedBlock>>;
#[rpc(meta, name = "getBlockTime")]
fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result<Option<UnixTimestamp>>;
@@ -622,6 +783,26 @@ pub trait RpcSol {
start_slot: Slot,
end_slot: Option<Slot>,
) -> Result<Vec<Slot>>;
#[rpc(meta, name = "getConfirmedTransaction")]
fn get_confirmed_transaction(
&self,
meta: Self::Metadata,
signature_str: String,
encoding: Option<TransactionEncoding>,
) -> Result<Option<ConfirmedTransaction>>;
#[rpc(meta, name = "getConfirmedSignaturesForAddress")]
fn get_confirmed_signatures_for_address(
&self,
meta: Self::Metadata,
pubkey_str: String,
start_slot: Slot,
end_slot: Slot,
) -> Result<Vec<String>>;
#[rpc(meta, name = "getFirstAvailableBlock")]
fn get_first_available_block(&self, meta: Self::Metadata) -> Result<Slot>;
}
pub struct RpcSolImpl;
@@ -779,7 +960,7 @@ impl RpcSol for RpcSolImpl {
&self,
meta: Self::Metadata,
block: Slot,
) -> Result<RpcBlockCommitment<[u64; MAX_LOCKOUT_HISTORY]>> {
) -> Result<RpcBlockCommitment<BlockCommitmentArray>> {
Ok(meta
.request_processor
.read()
@@ -831,6 +1012,20 @@ impl RpcSol for RpcSolImpl {
.get_recent_blockhash(commitment)
}
fn get_fee_calculator_for_blockhash(
&self,
meta: Self::Metadata,
blockhash: String,
) -> RpcResponse<Option<RpcFeeCalculator>> {
debug!("get_fee_calculator_for_blockhash rpc request received");
let blockhash =
Hash::from_str(&blockhash).map_err(|e| Error::invalid_params(format!("{:?}", e)))?;
meta.request_processor
.read()
.unwrap()
.get_fee_calculator_for_blockhash(&blockhash)
}
fn get_fee_rate_governor(&self, meta: Self::Metadata) -> RpcResponse<RpcFeeRateGovernor> {
debug!("get_fee_rate_governor rpc request received");
meta.request_processor
@@ -839,30 +1034,6 @@ impl RpcSol for RpcSolImpl {
.get_fee_rate_governor()
}
fn get_signature_status(
&self,
meta: Self::Metadata,
signature_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Option<transaction::Result<()>>> {
self.get_signature_confirmation(meta, signature_str, commitment)
.map(|res| res.map(|x| x.status))
}
fn get_slot(&self, meta: Self::Metadata, commitment: Option<CommitmentConfig>) -> Result<u64> {
meta.request_processor.read().unwrap().get_slot(commitment)
}
fn get_num_blocks_since_signature_confirmation(
&self,
meta: Self::Metadata,
signature_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Option<usize>> {
self.get_signature_confirmation(meta, signature_str, commitment)
.map(|res| res.map(|x| x.confirmations))
}
fn get_signature_confirmation(
&self,
meta: Self::Metadata,
@@ -881,6 +1052,46 @@ impl RpcSol for RpcSolImpl {
.get_signature_confirmation_status(signature, commitment))
}
fn get_signature_status(
&self,
meta: Self::Metadata,
signature_str: String,
commitment: Option<CommitmentConfig>,
) -> Result<Option<transaction::Result<()>>> {
let signature = verify_signature(&signature_str)?;
Ok(meta
.request_processor
.read()
.unwrap()
.get_signature_status(signature, commitment))
}
fn get_signature_statuses(
&self,
meta: Self::Metadata,
signature_strs: Vec<String>,
config: Option<RpcSignatureStatusConfig>,
) -> RpcResponse<Vec<Option<TransactionStatus>>> {
if signature_strs.len() > MAX_QUERY_ITEMS {
return Err(Error::invalid_params(format!(
"Too many inputs provided; max {}",
MAX_QUERY_ITEMS
)));
}
let mut signatures: Vec<Signature> = vec![];
for signature_str in signature_strs {
signatures.push(verify_signature(&signature_str)?);
}
meta.request_processor
.read()
.unwrap()
.get_signature_statuses(signatures, config)
}
fn get_slot(&self, meta: Self::Metadata, commitment: Option<CommitmentConfig>) -> Result<u64> {
meta.request_processor.read().unwrap().get_slot(commitment)
}
fn get_transaction_count(
&self,
meta: Self::Metadata,
@@ -967,7 +1178,10 @@ impl RpcSol for RpcSolImpl {
.request_processor
.read()
.unwrap()
.get_signature_confirmation_status(signature, commitment.clone())
.get_signature_statuses(vec![signature], None)?
.value[0]
.clone()
.filter(|result| result.satisfies_commitment(commitment.unwrap_or_default()))
.map(|x| x.status);
if signature_status == Some(Ok(())) {
@@ -1098,17 +1312,19 @@ impl RpcSol for RpcSolImpl {
})
}
fn set_log_filter(&self, _meta: Self::Metadata, filter: String) -> Result<()> {
solana_logger::setup_with(&filter);
Ok(())
fn set_log_filter(&self, meta: Self::Metadata, filter: String) -> Result<()> {
meta.request_processor
.read()
.unwrap()
.set_log_filter(filter)
}
fn get_confirmed_block(
&self,
meta: Self::Metadata,
slot: Slot,
encoding: Option<RpcTransactionEncoding>,
) -> Result<Option<RpcConfirmedBlock>> {
encoding: Option<TransactionEncoding>,
) -> Result<Option<ConfirmedBlock>> {
meta.request_processor
.read()
.unwrap()
@@ -1130,6 +1346,58 @@ impl RpcSol for RpcSolImpl {
fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result<Option<UnixTimestamp>> {
meta.request_processor.read().unwrap().get_block_time(slot)
}
fn get_confirmed_transaction(
&self,
meta: Self::Metadata,
signature_str: String,
encoding: Option<TransactionEncoding>,
) -> Result<Option<ConfirmedTransaction>> {
let signature = verify_signature(&signature_str)?;
meta.request_processor
.read()
.unwrap()
.get_confirmed_transaction(signature, encoding)
}
fn get_confirmed_signatures_for_address(
&self,
meta: Self::Metadata,
pubkey_str: String,
start_slot: Slot,
end_slot: Slot,
) -> Result<Vec<String>> {
let pubkey = verify_pubkey(pubkey_str)?;
if end_slot <= start_slot {
return Err(Error::invalid_params(format!(
"start_slot {} must be smaller than end_slot {}",
start_slot, end_slot
)));
}
if end_slot - start_slot > MAX_SLOT_RANGE {
return Err(Error::invalid_params(format!(
"Slot range too large; max {}",
MAX_SLOT_RANGE
)));
}
meta.request_processor
.read()
.unwrap()
.get_confirmed_signatures_for_address(pubkey, start_slot, end_slot)
.map(|signatures| {
signatures
.iter()
.map(|signature| signature.to_string())
.collect()
})
}
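// A standalone restatement of the two request checks above; the helper name is
// hypothetical, and MAX_SLOT_RANGE is the 10_000-slot cap defined near the top
// of this file.
fn validate_slot_range(start_slot: u64, end_slot: u64, max_range: u64) -> std::result::Result<(), String> {
    if end_slot <= start_slot {
        return Err(format!(
            "start_slot {} must be smaller than end_slot {}",
            start_slot, end_slot
        ));
    }
    if end_slot - start_slot > max_range {
        return Err(format!("Slot range too large; max {}", max_range));
    }
    Ok(())
}
// e.g. validate_slot_range(100, 5_100, 10_000) passes, while (100, 100, 10_000) and
// (100, 20_000, 10_000) are rejected with the same messages returned above.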
fn get_first_available_block(&self, meta: Self::Metadata) -> Result<Slot> {
meta.request_processor
.read()
.unwrap()
.get_first_available_block()
}
}
#[cfg(test)]
@@ -1143,7 +1411,6 @@ pub mod tests {
};
use bincode::deserialize;
use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
use solana_client::rpc_response::{RpcEncodedTransaction, RpcTransactionWithStatusMeta};
use solana_ledger::{
blockstore::entries_to_test_shreds, blockstore_processor::fill_blockstore_slot_with_ticks,
entry::next_entry_mut, get_tmp_ledger_path,
@@ -1155,8 +1422,9 @@ pub mod tests {
rpc_port,
signature::{Keypair, Signer},
system_transaction,
transaction::TransactionError,
transaction::{self, TransactionError},
};
use solana_transaction_status::{EncodedTransaction, TransactionWithStatusMeta};
use solana_vote_program::{
vote_instruction,
vote_state::{Vote, VoteInit, MAX_LOCKOUT_HISTORY},
@@ -1194,18 +1462,6 @@ pub mod tests {
) -> RpcHandler {
let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
let bank = bank_forks.read().unwrap().working_bank();
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY]);
let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY]);
let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new();
block_commitment
.entry(0)
.or_insert(commitment_slot0.clone());
block_commitment
.entry(1)
.or_insert(commitment_slot1.clone());
let block_commitment_cache =
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let blockstore = Arc::new(blockstore);
@@ -1221,6 +1477,24 @@ pub mod tests {
blockstore.clone(),
);
let mut commitment_slot0 = BlockCommitment::default();
commitment_slot0.increase_confirmation_stake(2, 9);
let mut commitment_slot1 = BlockCommitment::default();
commitment_slot1.increase_confirmation_stake(1, 9);
let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new();
block_commitment
.entry(0)
.or_insert(commitment_slot0.clone());
block_commitment
.entry(1)
.or_insert(commitment_slot1.clone());
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(
block_commitment,
10,
bank.clone(),
0,
)));
// Add timestamp vote to blockstore
let vote = Vote {
slots: vec![1],
@@ -1284,7 +1558,7 @@ pub mod tests {
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
JsonRpcConfig {
enable_get_confirmed_block: true,
enable_rpc_transaction_history: true,
identity_pubkey: *pubkey,
..JsonRpcConfig::default()
},
@@ -1790,6 +2064,62 @@ pub mod tests {
assert_eq!(expected, result);
}
#[test]
fn test_rpc_get_signature_statuses() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler {
io,
meta,
blockhash,
alice,
confirmed_block_signatures,
..
} = start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#,
confirmed_block_signatures[0]
);
let res = io.handle_request_sync(&req, meta.clone());
let expected_res: transaction::Result<()> = Ok(());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let result: Option<TransactionStatus> =
serde_json::from_value(json["result"]["value"][0].clone())
.expect("actual response deserialization");
let result = result.as_ref().unwrap();
assert_eq!(expected_res, result.status);
assert_eq!(Some(2), result.confirmations);
// Test getSignatureStatus request on unprocessed tx
let tx = system_transaction::transfer(&alice, &bob_pubkey, 10, blockhash);
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#,
tx.signatures[0]
);
let res = io.handle_request_sync(&req, meta.clone());
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let result: Option<TransactionStatus> =
serde_json::from_value(json["result"]["value"][0].clone())
.expect("actual response deserialization");
assert!(result.is_none());
// Test getSignatureStatus request on a TransactionError
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#,
confirmed_block_signatures[1]
);
let res = io.handle_request_sync(&req, meta.clone());
let expected_res: transaction::Result<()> = Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1),
));
let json: Value = serde_json::from_str(&res.unwrap()).unwrap();
let result: Option<TransactionStatus> =
serde_json::from_value(json["result"]["value"][0].clone())
.expect("actual response deserialization");
assert_eq!(expected_res, result.as_ref().unwrap().status);
}
#[test]
fn test_rpc_get_recent_blockhash() {
let bob_pubkey = Pubkey::new_rand();
@@ -1821,6 +2151,54 @@ pub mod tests {
assert_eq!(expected, result);
}
#[test]
fn test_rpc_get_fee_calculator_for_blockhash() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
let fee_calculator = RpcFeeCalculator { fee_calculator };
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getFeeCalculatorForBlockhash","params":["{:?}"]}}"#,
blockhash
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":fee_calculator,
},
"id": 1
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
// Expired (non-existent) blockhash
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getFeeCalculatorForBlockhash","params":["{:?}"]}}"#,
Hash::default()
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":Value::Null,
},
"id": 1
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
}
#[test]
fn test_rpc_get_fee_rate_governor() {
let bob_pubkey = Pubkey::new_rand();
@@ -2059,8 +2437,10 @@ pub mod tests {
fn test_rpc_processor_get_block_commitment() {
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY]);
let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY]);
let bank_forks = new_bank_forks().0;
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY + 1]);
let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY + 1]);
let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new();
block_commitment
.entry(0)
@@ -2068,8 +2448,12 @@ pub mod tests {
block_commitment
.entry(1)
.or_insert(commitment_slot1.clone());
let block_commitment_cache =
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(
block_commitment,
42,
bank_forks.read().unwrap().working_bank(),
0,
)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
@@ -2077,7 +2461,7 @@ pub mod tests {
config.enable_validator_exit = true;
let request_processor = JsonRpcRequestProcessor::new(
config,
new_bank_forks().0,
bank_forks,
block_commitment_cache,
Arc::new(blockstore),
StorageState::default(),
@@ -2141,14 +2525,14 @@ pub mod tests {
.get_block_commitment(0)
.map(|block_commitment| block_commitment.commitment)
);
assert_eq!(total_stake, 42);
assert_eq!(total_stake, 10);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[2]}}"#);
let res = io.handle_request_sync(&req, meta);
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let commitment_response: RpcBlockCommitment<[u64; MAX_LOCKOUT_HISTORY]> =
let commitment_response: RpcBlockCommitment<BlockCommitmentArray> =
if let Response::Single(res) = result {
if let Output::Success(res) = res {
serde_json::from_value(res.result).unwrap()
@@ -2159,7 +2543,7 @@ pub mod tests {
panic!("Expected single response");
};
assert_eq!(commitment_response.commitment, None);
assert_eq!(commitment_response.total_stake, 42);
assert_eq!(commitment_response.total_stake, 10);
}
#[test]
@@ -2178,21 +2562,31 @@ pub mod tests {
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let confirmed_block: Option<RpcConfirmedBlock> =
let confirmed_block: Option<ConfirmedBlock> =
serde_json::from_value(result["result"].clone()).unwrap();
let confirmed_block = confirmed_block.unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for RpcTransactionWithStatusMeta { transaction, meta } in
for TransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if let RpcEncodedTransaction::Json(transaction) = transaction {
if let EncodedTransaction::Json(transaction) = transaction {
if transaction.signatures[0] == confirmed_block_signatures[0].to_string() {
let meta = meta.unwrap();
assert_eq!(transaction.message.recent_blockhash, blockhash.to_string());
assert_eq!(meta.unwrap().status, Ok(()));
assert_eq!(meta.status, Ok(()));
assert_eq!(meta.err, None);
} else if transaction.signatures[0] == confirmed_block_signatures[1].to_string() {
let meta = meta.unwrap();
assert_eq!(
meta.unwrap().status,
meta.err,
Some(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
))
);
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
@@ -2210,23 +2604,33 @@ pub mod tests {
let res = io.handle_request_sync(&req, meta);
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let confirmed_block: Option<RpcConfirmedBlock> =
let confirmed_block: Option<ConfirmedBlock> =
serde_json::from_value(result["result"].clone()).unwrap();
let confirmed_block = confirmed_block.unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for RpcTransactionWithStatusMeta { transaction, meta } in
for TransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if let RpcEncodedTransaction::Binary(transaction) = transaction {
if let EncodedTransaction::Binary(transaction) = transaction {
let decoded_transaction: Transaction =
deserialize(&bs58::decode(&transaction).into_vec().unwrap()).unwrap();
if decoded_transaction.signatures[0] == confirmed_block_signatures[0] {
let meta = meta.unwrap();
assert_eq!(decoded_transaction.message.recent_blockhash, blockhash);
assert_eq!(meta.unwrap().status, Ok(()));
assert_eq!(meta.status, Ok(()));
assert_eq!(meta.err, None);
} else if decoded_transaction.signatures[0] == confirmed_block_signatures[1] {
let meta = meta.unwrap();
assert_eq!(
meta.unwrap().status,
meta.err,
Some(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)
))
);
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::CustomError(1)

@@ -4,8 +4,10 @@ use crate::rpc_subscriptions::{Confirmations, RpcSubscriptions, SlotInfo};
use jsonrpc_core::{Error, ErrorCode, Result};
use jsonrpc_derive::rpc;
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
use solana_client::rpc_response::{RpcAccount, RpcKeyedAccount};
use solana_sdk::{pubkey::Pubkey, signature::Signature, transaction};
use solana_client::rpc_response::{
Response as RpcResponse, RpcAccount, RpcKeyedAccount, RpcSignatureResult,
};
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
use std::sync::{atomic, Arc};
// Suppress needless_return due to
@@ -26,7 +28,7 @@ pub trait RpcSolPubSub {
fn account_subscribe(
&self,
meta: Self::Metadata,
subscriber: Subscriber<RpcAccount>,
subscriber: Subscriber<RpcResponse<RpcAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
);
@@ -50,7 +52,7 @@ pub trait RpcSolPubSub {
fn program_subscribe(
&self,
meta: Self::Metadata,
subscriber: Subscriber<RpcKeyedAccount>,
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
);
@@ -74,7 +76,7 @@ pub trait RpcSolPubSub {
fn signature_subscribe(
&self,
meta: Self::Metadata,
subscriber: Subscriber<transaction::Result<()>>,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
confirmations: Option<Confirmations>,
);
@@ -102,6 +104,18 @@ pub trait RpcSolPubSub {
name = "slotUnsubscribe"
)]
fn slot_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
// Get notification when a new root is set
#[pubsub(subscription = "rootNotification", subscribe, name = "rootSubscribe")]
fn root_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber<Slot>);
// Unsubscribe from slot notification subscription.
#[pubsub(
subscription = "rootNotification",
unsubscribe,
name = "rootUnsubscribe"
)]
fn root_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
}
#[derive(Default)]
@@ -133,7 +147,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
fn account_subscribe(
&self,
_meta: Self::Metadata,
subscriber: Subscriber<RpcAccount>,
subscriber: Subscriber<RpcResponse<RpcAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
) {
@@ -142,10 +156,12 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("account_subscribe: account={:?} id={:?}", pubkey, sub_id);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
self.subscriptions
.add_account_subscription(&pubkey, confirmations, &sub_id, &sink)
self.subscriptions.add_account_subscription(
pubkey,
confirmations,
sub_id,
subscriber,
)
}
Err(e) => subscriber.reject(e).unwrap(),
}
@@ -171,7 +187,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
fn program_subscribe(
&self,
_meta: Self::Metadata,
subscriber: Subscriber<RpcKeyedAccount>,
subscriber: Subscriber<RpcResponse<RpcKeyedAccount>>,
pubkey_str: String,
confirmations: Option<Confirmations>,
) {
@@ -180,10 +196,12 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("program_subscribe: account={:?} id={:?}", pubkey, sub_id);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
self.subscriptions
.add_program_subscription(&pubkey, confirmations, &sub_id, &sink)
self.subscriptions.add_program_subscription(
pubkey,
confirmations,
sub_id,
subscriber,
)
}
Err(e) => subscriber.reject(e).unwrap(),
}
@@ -209,7 +227,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
fn signature_subscribe(
&self,
_meta: Self::Metadata,
subscriber: Subscriber<transaction::Result<()>>,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
confirmations: Option<Confirmations>,
) {
@@ -222,13 +240,11 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
"signature_subscribe: signature={:?} id={:?}",
signature, sub_id
);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
self.subscriptions.add_signature_subscription(
&signature,
signature,
confirmations,
&sub_id,
&sink,
sub_id,
subscriber,
);
}
Err(e) => subscriber.reject(e).unwrap(),
@@ -257,9 +273,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("slot_subscribe: id={:?}", sub_id);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
self.subscriptions.add_slot_subscription(&sub_id, &sink);
self.subscriptions.add_slot_subscription(sub_id, subscriber);
}
fn slot_unsubscribe(&self, _meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> {
@@ -274,15 +288,40 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
})
}
}
fn root_subscribe(&self, _meta: Self::Metadata, subscriber: Subscriber<Slot>) {
info!("root_subscribe");
let id = self.uid.fetch_add(1, atomic::Ordering::Relaxed);
let sub_id = SubscriptionId::Number(id as u64);
info!("root_subscribe: id={:?}", sub_id);
self.subscriptions.add_root_subscription(sub_id, subscriber);
}
fn root_unsubscribe(&self, _meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> {
info!("root_unsubscribe");
if self.subscriptions.remove_root_subscription(&id) {
Ok(true)
} else {
Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid Request: Subscription id does not exist".into(),
data: None,
})
}
}
}
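// Sketch of exercising the new rootSubscribe / rootUnsubscribe pair in the style
// of the tests below; create_session, robust_poll_or_panic and the commitment-cache
// wiring are the helpers those tests already use, and the payload noted at the end
// is illustrative rather than a literal expected string.
//
//   let rpc = RpcSolPubSubImpl {
//       subscriptions: Arc::new(RpcSubscriptions::new(
//           &Arc::new(AtomicBool::new(false)),
//           Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
//       )),
//       ..RpcSolPubSubImpl::default()
//   };
//   let (subscriber, _id_receiver, receiver) = Subscriber::new_test("rootNotification");
//   rpc.root_subscribe(create_session(), subscriber);
//   rpc.subscriptions.notify_roots(vec![0]);
//   let (response, _) = robust_poll_or_panic(receiver); // rootNotification carrying slot 0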
#[cfg(test)]
mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use crate::rpc_subscriptions::tests::robust_poll_or_panic;
use crate::{
commitment::{BlockCommitment, BlockCommitmentCache},
genesis_utils::{create_genesis_config, GenesisConfigInfo},
rpc_subscriptions::tests::robust_poll_or_panic,
};
use jsonrpc_core::{futures::sync::mpsc, Response};
use jsonrpc_pubsub::{PubSubHandler, Session};
use serial_test_derive::serial;
use solana_budget_program::{self, budget_instruction};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
@@ -292,7 +331,12 @@ mod tests {
system_program, system_transaction,
transaction::{self, Transaction},
};
use std::{sync::RwLock, thread::sleep, time::Duration};
use std::{
collections::HashMap,
sync::{atomic::AtomicBool, RwLock},
thread::sleep,
time::Duration,
};
fn process_transaction_and_notify(
bank_forks: &Arc<RwLock<BankForks>>,
@@ -314,6 +358,7 @@ mod tests {
}
#[test]
#[serial]
fn test_signature_subscribe() {
let GenesisConfigInfo {
genesis_config,
@@ -325,8 +370,13 @@ mod tests {
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default();
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
)),
..RpcSolPubSubImpl::default()
};
// Test signature subscriptions
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
@@ -338,18 +388,24 @@ mod tests {
process_transaction_and_notify(&bank_forks, &tx, &rpc.subscriptions).unwrap();
// Test signature confirmation notification
let response = robust_poll_or_panic(receiver);
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
let expected_res_str =
serde_json::to_string(&serde_json::to_value(expected_res).unwrap()).unwrap();
let expected = format!(
r#"{{"jsonrpc":"2.0","method":"signatureNotification","params":{{"result":{},"subscription":0}}}}"#,
expected_res_str
);
assert_eq!(expected, response);
let (response, _) = robust_poll_or_panic(receiver);
let expected_res = RpcSignatureResult { err: None };
let expected = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": 0 },
"value": expected_res,
},
"subscription": 0,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
}
#[test]
#[serial]
fn test_signature_unsubscribe() {
let GenesisConfigInfo {
genesis_config,
@@ -398,6 +454,7 @@ mod tests {
}
#[test]
#[serial]
fn test_account_subscribe() {
let GenesisConfigInfo {
mut genesis_config,
@@ -419,7 +476,13 @@ mod tests {
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default();
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
)),
..RpcSolPubSubImpl::default()
};
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(
@@ -462,17 +525,20 @@ mod tests {
"method": "accountNotification",
"params": {
"result": {
"owner": budget_program_id.to_string(),
"lamports": 51,
"data": bs58::encode(expected_data).into_string(),
"executable": false,
"rentEpoch": 1,
"context": { "slot": 0 },
"value": {
"owner": budget_program_id.to_string(),
"lamports": 51,
"data": bs58::encode(expected_data).into_string(),
"executable": false,
"rentEpoch": 1,
},
},
"subscription": 0,
}
});
let response = robust_poll_or_panic(receiver);
let (response, _) = robust_poll_or_panic(receiver);
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
let tx = system_transaction::transfer(&alice, &witness.pubkey(), 1, blockhash);
@@ -499,6 +565,7 @@ mod tests {
}
#[test]
#[serial]
fn test_account_unsubscribe() {
let bob_pubkey = Pubkey::new_rand();
let session = create_session();
@@ -550,7 +617,13 @@ mod tests {
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bob = Keypair::new();
let rpc = RpcSolPubSubImpl::default();
let mut rpc = RpcSolPubSubImpl::default();
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
@@ -581,7 +654,12 @@ mod tests {
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bob = Keypair::new();
let rpc = RpcSolPubSubImpl::default();
let mut rpc = RpcSolPubSubImpl::default();
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests()));
let subscriptions = RpcSubscriptions::new(&exit, block_commitment_cache.clone());
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
@@ -599,30 +677,56 @@ mod tests {
let bank0 = bank_forks.read().unwrap()[0].clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
rpc.subscriptions.notify_subscribers(1, &bank_forks);
let bank1 = bank_forks.read().unwrap()[1].clone();
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
let mut new_block_commitment =
BlockCommitmentCache::new(block_commitment, 10, bank1.clone(), 0);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
drop(w_block_commitment_cache);
rpc.subscriptions.notify_subscribers(1, &bank_forks);
let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2);
bank_forks.write().unwrap().insert(bank2);
let bank2 = bank_forks.read().unwrap()[2].clone();
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(2, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
let mut new_block_commitment = BlockCommitmentCache::new(block_commitment, 10, bank2, 0);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
drop(w_block_commitment_cache);
rpc.subscriptions.notify_subscribers(2, &bank_forks);
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"owner": system_program::id().to_string(),
"lamports": 100,
"data": "",
"executable": false,
"rentEpoch": 1,
"context": { "slot": 0 },
"value": {
"owner": system_program::id().to_string(),
"lamports": 100,
"data": "",
"executable": false,
"rentEpoch": 1,
},
},
"subscription": 0,
}
});
let response = robust_poll_or_panic(receiver);
let (response, _) = robust_poll_or_panic(receiver);
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
}
#[test]
#[serial]
fn test_slot_subscribe() {
let rpc = RpcSolPubSubImpl::default();
let session = create_session();
@@ -631,7 +735,7 @@ mod tests {
rpc.subscriptions.notify_slot(0, 0, 0);
// Test slot confirmation notification
let response = robust_poll_or_panic(receiver);
let (response, _) = robust_poll_or_panic(receiver);
let expected_res = SlotInfo {
parent: 0,
slot: 0,
@@ -647,13 +751,14 @@ mod tests {
}
#[test]
#[serial]
fn test_slot_unsubscribe() {
let rpc = RpcSolPubSubImpl::default();
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
rpc.slot_subscribe(session, subscriber);
rpc.subscriptions.notify_slot(0, 0, 0);
let response = robust_poll_or_panic(receiver);
let (response, _) = robust_poll_or_panic(receiver);
let expected_res = SlotInfo {
parent: 0,
slot: 0,

View File

@@ -1,14 +1,20 @@
//! The `pubsub` module implements a threaded subscription service on client RPC request
use crate::rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl};
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::{
rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl},
rpc_subscriptions::RpcSubscriptions,
};
use jsonrpc_pubsub::{PubSubHandler, Session};
use jsonrpc_ws_server::{RequestContext, ServerBuilder};
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Duration;
use std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{self, sleep, Builder, JoinHandle},
time::Duration,
};
pub struct PubSubService {
thread_hdl: JoinHandle<()>,
@@ -66,13 +72,20 @@ impl PubSubService {
#[cfg(test)]
mod tests {
use super::*;
use std::net::{IpAddr, Ipv4Addr};
use crate::commitment::BlockCommitmentCache;
use std::{
net::{IpAddr, Ipv4Addr},
sync::RwLock,
};
#[test]
fn test_pubsub_new() {
let pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
));
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
let thread = pubsub_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-pubsub");

View File

@@ -15,8 +15,9 @@ use solana_ledger::{
blockstore::Blockstore,
snapshot_utils,
};
use solana_sdk::hash::Hash;
use solana_sdk::{hash::Hash, pubkey::Pubkey};
use std::{
collections::HashSet,
net::SocketAddr,
path::{Path, PathBuf},
sync::{mpsc::channel, Arc, RwLock},
@@ -24,6 +25,10 @@ use std::{
};
use tokio::prelude::Future;
// If trusted validators are specified, consider this validator healthy if its latest account hash
// is no further behind than this distance from the latest trusted validator account hash
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;
pub struct JsonRpcService {
thread_hdl: JoinHandle<()>,
@@ -37,15 +42,24 @@ struct RpcRequestMiddleware {
ledger_path: PathBuf,
snapshot_archive_path_regex: Regex,
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<RwLock<ClusterInfo>>,
trusted_validators: Option<HashSet<Pubkey>>,
}
impl RpcRequestMiddleware {
pub fn new(ledger_path: PathBuf, snapshot_config: Option<SnapshotConfig>) -> Self {
pub fn new(
ledger_path: PathBuf,
snapshot_config: Option<SnapshotConfig>,
cluster_info: Arc<RwLock<ClusterInfo>>,
trusted_validators: Option<HashSet<Pubkey>>,
) -> Self {
Self {
ledger_path,
snapshot_archive_path_regex: Regex::new(r"/snapshot-\d+-[[:alnum:]]+\.tar\.bz2$")
.unwrap(),
snapshot_config,
cluster_info,
trusted_validators,
}
}
@@ -104,6 +118,58 @@ impl RpcRequestMiddleware {
),
}
}
fn health_check(&self) -> &'static str {
let response = if let Some(trusted_validators) = &self.trusted_validators {
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
let cluster_info = self.cluster_info.read().unwrap();
(
cluster_info
.get_accounts_hash_for_node(&cluster_info.id())
.map(|hashes| hashes.iter().max_by(|a, b| a.0.cmp(&b.0)))
.flatten()
.map(|slot_hash| slot_hash.0)
.unwrap_or(0),
trusted_validators
.iter()
.map(|trusted_validator| {
cluster_info
.get_accounts_hash_for_node(&trusted_validator)
.map(|hashes| hashes.iter().max_by(|a, b| a.0.cmp(&b.0)))
.flatten()
.map(|slot_hash| slot_hash.0)
.unwrap_or(0)
})
.max()
.unwrap_or(0),
)
};
// This validator is considered healthy if its latest account hash slot is within
// `HEALTH_CHECK_SLOT_DISTANCE` of the latest trusted validator's account hash slot
if latest_account_hash_slot > 0
&& latest_trusted_validator_account_hash_slot > 0
&& latest_account_hash_slot
> latest_trusted_validator_account_hash_slot
.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
{
"ok"
} else {
warn!(
"health check: me={}, latest trusted_validator={}",
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
);
"behind"
}
} else {
// No trusted validators provide a point of reference, so this validator is
// considered healthy simply because it is running
"ok"
};
info!("health check: {}", response);
response
}
}
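The /health handler above answers "ok" only when both sides have published an accounts hash (a slot of 0 means none has been seen yet) and this node's latest accounts-hash slot is strictly greater than the best trusted validator's slot minus HEALTH_CHECK_SLOT_DISTANCE. A minimal sketch of just that comparison, with the boundary cases spelled out:

```rust
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;

/// Same criterion as the /health handler: both sides must have reported an accounts
/// hash (slot > 0) and this node must be no more than HEALTH_CHECK_SLOT_DISTANCE
/// slots behind the best trusted validator.
fn health(my_latest_slot: u64, trusted_latest_slot: u64) -> &'static str {
    if my_latest_slot > 0
        && trusted_latest_slot > 0
        && my_latest_slot > trusted_latest_slot.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
    {
        "ok"
    } else {
        "behind"
    }
}

fn main() {
    assert_eq!(health(0, 1000), "behind");   // no local accounts hash observed yet
    assert_eq!(health(1000, 0), "behind");   // no trusted observation yet
    assert_eq!(health(851, 1000), "ok");     // 149 slots behind: inside the window
    assert_eq!(health(850, 1000), "behind"); // exactly 150 behind: the strict '>' fails
    assert_eq!(health(1200, 1000), "ok");    // ahead of the trusted validators
}
```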
impl RequestMiddleware for RpcRequestMiddleware {
@@ -138,6 +204,16 @@ impl RequestMiddleware for RpcRequestMiddleware {
}
if self.is_get_path(request.uri().path()) {
self.get(request.uri().path())
} else if request.uri().path() == "/health" {
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
response: Box::new(jsonrpc_core::futures::future::ok(
hyper::Response::builder()
.status(hyper::StatusCode::OK)
.body(hyper::Body::from(self.health_check()))
.unwrap(),
)),
}
} else {
RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors: false,
@@ -161,6 +237,7 @@ impl JsonRpcService {
ledger_path: &Path,
storage_state: StorageState,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
trusted_validators: Option<HashSet<Pubkey>>,
) -> Self {
info!("rpc bound to {:?}", rpc_addr);
info!("rpc configuration: {:?}", config);
@@ -186,20 +263,35 @@ impl JsonRpcService {
let rpc = RpcSolImpl;
io.extend_with(rpc.to_delegate());
let server =
ServerBuilder::with_meta_extractor(io, move |_req: &hyper::Request<hyper::Body>| Meta {
let request_middleware = RpcRequestMiddleware::new(
ledger_path,
snapshot_config,
cluster_info.clone(),
trusted_validators,
);
let server = ServerBuilder::with_meta_extractor(
io,
move |_req: &hyper::Request<hyper::Body>| Meta {
request_processor: request_processor.clone(),
cluster_info: cluster_info.clone(),
genesis_hash
}).threads(4)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
.cors_max_age(86400)
.request_middleware(RpcRequestMiddleware::new(ledger_path, snapshot_config))
.start_http(&rpc_addr);
genesis_hash,
},
)
.threads(4)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Any,
]))
.cors_max_age(86400)
.request_middleware(request_middleware)
.start_http(&rpc_addr);
if let Err(e) = server {
warn!("JSON RPC service unavailable error: {:?}. \nAlso, check that port {} is not already in use by another application", e, rpc_addr.port());
warn!(
"JSON RPC service unavailable error: {:?}. \n\
Also, check that port {} is not already in use by another application",
e,
rpc_addr.port()
);
return;
}
@@ -240,6 +332,7 @@ mod tests {
use super::*;
use crate::{
contact_info::ContactInfo,
crds_value::{CrdsData, CrdsValue, SnapshotHash},
genesis_utils::{create_genesis_config, GenesisConfigInfo},
rpc::tests::create_validator_exit,
};
@@ -283,6 +376,7 @@ mod tests {
&PathBuf::from("farf"),
StorageState::default(),
validator_exit,
None,
);
let thread = rpc_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
@@ -303,7 +397,11 @@ mod tests {
#[test]
fn test_is_get_path() {
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None);
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::default(),
)));
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
let rrm_with_snapshot_config = RpcRequestMiddleware::new(
PathBuf::from("/"),
Some(SnapshotConfig {
@@ -311,6 +409,8 @@ mod tests {
snapshot_package_output_path: PathBuf::from("/"),
snapshot_path: PathBuf::from("/"),
}),
cluster_info,
None,
);
assert!(rrm.is_get_path("/genesis.tar.bz2"));
@@ -332,4 +432,95 @@ mod tests {
assert!(!rrm.is_get_path(".."));
assert!(!rrm.is_get_path("🎣"));
}
#[test]
fn test_health_check_with_no_trusted_validators() {
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::default(),
)));
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
assert_eq!(rm.health_check(), "ok");
}
#[test]
fn test_health_check_with_trusted_validators() {
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::default(),
)));
let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];
let rm = RpcRequestMiddleware::new(
PathBuf::from("/"),
None,
cluster_info.clone(),
Some(trusted_validators.clone().into_iter().collect()),
);
// No account hashes for this node or any trusted validators == "behind"
assert_eq!(rm.health_check(), "behind");
// No account hashes for any trusted validators == "behind"
{
let mut cluster_info = cluster_info.write().unwrap();
cluster_info
.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]);
}
assert_eq!(rm.health_check(), "behind");
// This node is ahead of the trusted validators == "ok"
{
let mut cluster_info = cluster_info.write().unwrap();
cluster_info
.gossip
.crds
.insert(
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
trusted_validators[0].clone(),
vec![
(1, Hash::default()),
(1001, Hash::default()),
(2, Hash::default()),
],
))),
1,
)
.unwrap();
}
assert_eq!(rm.health_check(), "ok");
// Node is slightly behind the trusted validators == "ok"
{
let mut cluster_info = cluster_info.write().unwrap();
cluster_info
.gossip
.crds
.insert(
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
trusted_validators[1].clone(),
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE - 1, Hash::default())],
))),
1,
)
.unwrap();
}
assert_eq!(rm.health_check(), "ok");
// Node is far behind the trusted validators == "behind"
{
let mut cluster_info = cluster_info.write().unwrap();
cluster_info
.gossip
.crds
.insert(
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
trusted_validators[2].clone(),
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE, Hash::default())],
))),
1,
)
.unwrap();
}
assert_eq!(rm.health_check(), "behind");
}
}

View File

@@ -1,21 +1,30 @@
//! The `pubsub` module implements a threaded subscription service on client RPC request
use crate::commitment::BlockCommitmentCache;
use core::hash::Hash;
use jsonrpc_core::futures::Future;
use jsonrpc_pubsub::{typed::Sink, SubscriptionId};
use jsonrpc_pubsub::{
typed::{Sink, Subscriber},
SubscriptionId,
};
use serde::Serialize;
use solana_client::rpc_response::{RpcAccount, RpcKeyedAccount};
use solana_client::rpc_response::{
Response, RpcAccount, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult,
};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
use solana_sdk::{
account::Account, clock::Slot, pubkey::Pubkey, signature::Signature, transaction,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError, SendError, Sender};
use std::sync::{
atomic::{AtomicBool, Ordering},
mpsc::{Receiver, RecvTimeoutError, SendError, Sender},
};
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use std::{
cmp::min,
collections::{HashMap, HashSet},
iter,
sync::{Arc, Mutex, RwLock},
@@ -35,12 +44,14 @@ pub struct SlotInfo {
enum NotificationEntry {
Slot(SlotInfo),
Root(Slot),
Bank((Slot, Arc<RwLock<BankForks>>)),
}
impl std::fmt::Debug for NotificationEntry {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
NotificationEntry::Root(root) => write!(f, "Root({})", root),
NotificationEntry::Slot(slot_info) => write!(f, "Slot({:?})", slot_info),
NotificationEntry::Bank((current_slot, _)) => {
write!(f, "Bank({{current_slot: {:?}}})", current_slot)
@@ -50,37 +61,39 @@ impl std::fmt::Debug for NotificationEntry {
}
type RpcAccountSubscriptions =
RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<RpcAccount>, Confirmations)>>>;
type RpcProgramSubscriptions =
RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<RpcKeyedAccount>, Confirmations)>>>;
RwLock<HashMap<Pubkey, HashMap<SubscriptionId, (Sink<Response<RpcAccount>>, Confirmations)>>>;
type RpcProgramSubscriptions = RwLock<
HashMap<Pubkey, HashMap<SubscriptionId, (Sink<Response<RpcKeyedAccount>>, Confirmations)>>,
>;
type RpcSignatureSubscriptions = RwLock<
HashMap<Signature, HashMap<SubscriptionId, (Sink<transaction::Result<()>>, Confirmations)>>,
HashMap<
Signature,
HashMap<SubscriptionId, (Sink<Response<RpcSignatureResult>>, Confirmations)>,
>,
>;
type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>;
type RpcRootSubscriptions = RwLock<HashMap<SubscriptionId, Sink<Slot>>>;
fn add_subscription<K, S>(
subscriptions: &mut HashMap<K, HashMap<SubscriptionId, (Sink<S>, Confirmations)>>,
hashmap_key: &K,
hashmap_key: K,
confirmations: Option<Confirmations>,
sub_id: &SubscriptionId,
sink: &Sink<S>,
sub_id: SubscriptionId,
subscriber: Subscriber<S>,
) where
K: Eq + Hash + Clone + Copy,
K: Eq + Hash,
S: Clone,
{
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let confirmations = confirmations.unwrap_or(0);
let confirmations = if confirmations > MAX_LOCKOUT_HISTORY {
MAX_LOCKOUT_HISTORY
} else {
confirmations
};
if let Some(current_hashmap) = subscriptions.get_mut(hashmap_key) {
current_hashmap.insert(sub_id.clone(), (sink.clone(), confirmations));
let confirmations = min(confirmations, MAX_LOCKOUT_HISTORY + 1);
if let Some(current_hashmap) = subscriptions.get_mut(&hashmap_key) {
current_hashmap.insert(sub_id, (sink, confirmations));
return;
}
let mut hashmap = HashMap::new();
hashmap.insert(sub_id.clone(), (sink.clone(), confirmations));
subscriptions.insert(*hashmap_key, hashmap);
hashmap.insert(sub_id, (sink, confirmations));
subscriptions.insert(hashmap_key, hashmap);
}
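add_subscription above now consumes the key, id, and subscriber, assigns the sink itself, and clamps the requested confirmation count to MAX_LOCKOUT_HISTORY + 1, where the extra level means "only notify once the slot is rooted". A std-only sketch of the clamp and the nested-map insert (the sink is replaced by a plain generic value and the constant is hard-coded for illustration):

```rust
use std::cmp::min;
use std::collections::HashMap;

const MAX_LOCKOUT_HISTORY: usize = 31; // illustrative; the real constant comes from the vote program

type Confirmations = usize;
type SubId = u64;

/// Insert (sub_id, sink) under `key`, clamping confirmations to MAX_LOCKOUT_HISTORY + 1,
/// where the +1 level stands for "notify only once the slot is rooted".
fn add_subscription<K: std::hash::Hash + Eq, S>(
    subs: &mut HashMap<K, HashMap<SubId, (S, Confirmations)>>,
    key: K,
    confirmations: Option<Confirmations>,
    sub_id: SubId,
    sink: S,
) {
    let confirmations = min(confirmations.unwrap_or(0), MAX_LOCKOUT_HISTORY + 1);
    subs.entry(key).or_default().insert(sub_id, (sink, confirmations));
}

fn main() {
    let mut subs: HashMap<&str, HashMap<SubId, (String, Confirmations)>> = HashMap::new();
    add_subscription(&mut subs, "alice", Some(1_000), 0, "sink-0".to_string());
    // An oversized request is clamped to MAX_LOCKOUT_HISTORY + 1 == 32
    assert_eq!(subs["alice"][&0].1, MAX_LOCKOUT_HISTORY + 1);
}
```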
fn remove_subscription<K, S>(
@@ -88,13 +101,13 @@ fn remove_subscription<K, S>(
sub_id: &SubscriptionId,
) -> bool
where
K: Eq + Hash + Clone + Copy,
K: Eq + Hash,
S: Clone,
{
let mut found = false;
subscriptions.retain(|_, v| {
v.retain(|k, _| {
let retain = *k != *sub_id;
let retain = k != sub_id;
if !retain {
found = true;
}
@@ -105,11 +118,12 @@ where
found
}
#[allow(clippy::type_complexity)]
fn check_confirmations_and_notify<K, S, B, F, X>(
subscriptions: &HashMap<K, HashMap<SubscriptionId, (Sink<S>, Confirmations)>>,
subscriptions: &HashMap<K, HashMap<SubscriptionId, (Sink<Response<S>>, Confirmations)>>,
hashmap_key: &K,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
bank_method: B,
filter_results: F,
notifier: &RpcNotifier,
@@ -121,6 +135,10 @@ where
F: Fn(X, u64) -> Box<dyn Iterator<Item = S>>,
X: Clone + Serialize,
{
let mut confirmation_slots: HashMap<usize, Slot> = HashMap::new();
let r_block_commitment_cache = block_commitment_cache.read().unwrap();
let current_slot = r_block_commitment_cache.slot();
let root = r_block_commitment_cache.root();
let current_ancestors = bank_forks
.read()
.unwrap()
@@ -128,34 +146,38 @@ where
.unwrap()
.ancestors
.clone();
for (slot, _) in current_ancestors.iter() {
if let Some(confirmations) = r_block_commitment_cache.get_confirmation_count(*slot) {
confirmation_slots.entry(confirmations).or_insert(*slot);
}
}
drop(r_block_commitment_cache);
let mut notified_set: HashSet<SubscriptionId> = HashSet::new();
if let Some(hashmap) = subscriptions.get(hashmap_key) {
for (bank_sub_id, (sink, confirmations)) in hashmap.iter() {
let desired_slot: Vec<u64> = current_ancestors
.iter()
.filter(|(_, &v)| v == *confirmations)
.map(|(k, _)| k)
.cloned()
.collect();
let root: Vec<u64> = current_ancestors
.iter()
.filter(|(_, &v)| v == 32)
.map(|(k, _)| k)
.cloned()
.collect();
let root = if root.len() == 1 { root[0] } else { 0 };
if desired_slot.len() == 1 {
let desired_bank = bank_forks
.read()
.unwrap()
.get(desired_slot[0])
.unwrap()
.clone();
let results = bank_method(&desired_bank, hashmap_key);
for (sub_id, (sink, confirmations)) in hashmap.iter() {
let desired_slot = if *confirmations == 0 {
Some(&current_slot)
} else if *confirmations == MAX_LOCKOUT_HISTORY + 1 {
Some(&root)
} else {
confirmation_slots.get(confirmations)
};
if let Some(&slot) = desired_slot {
let results = {
let bank_forks = bank_forks.read().unwrap();
let desired_bank = bank_forks.get(slot).unwrap();
bank_method(&desired_bank, hashmap_key)
};
for result in filter_results(results, root) {
notifier.notify(result, sink);
notified_set.insert(bank_sub_id.clone());
notifier.notify(
Response {
context: RpcResponseContext { slot },
value: result,
},
sink,
);
notified_set.insert(sub_id.clone());
}
}
}
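check_confirmations_and_notify now resolves each subscription's confirmation level to a concrete slot before calling the bank method: 0 means the current slot, MAX_LOCKOUT_HISTORY + 1 means the root, and anything in between is looked up in the confirmations-to-slot map built from the block commitment cache over the current bank's ancestors; if no ancestor has reached that level yet, nothing is notified. A sketch of just that selection step (the map contents and constant are illustrative):

```rust
use std::collections::HashMap;

const MAX_LOCKOUT_HISTORY: usize = 31; // illustrative; the real constant comes from the vote program

/// Resolve a requested confirmation level to the slot whose bank should be inspected.
fn desired_slot(
    confirmations: usize,
    current_slot: u64,
    root: u64,
    confirmation_slots: &HashMap<usize, u64>, // confirmation count -> ancestor slot
) -> Option<u64> {
    if confirmations == 0 {
        Some(current_slot)
    } else if confirmations == MAX_LOCKOUT_HISTORY + 1 {
        Some(root)
    } else {
        confirmation_slots.get(&confirmations).copied()
    }
}

fn main() {
    // Suppose slot 10 is current, slot 5 is rooted, and slot 9 has 1 confirmation.
    let confirmation_slots: HashMap<usize, u64> = vec![(1, 9)].into_iter().collect();
    assert_eq!(desired_slot(0, 10, 5, &confirmation_slots), Some(10));
    assert_eq!(desired_slot(1, 10, 5, &confirmation_slots), Some(9));
    assert_eq!(desired_slot(32, 10, 5, &confirmation_slots), Some(5));
    // A confirmation level no ancestor has reached yet yields no slot, so no notification.
    assert_eq!(desired_slot(2, 10, 5, &confirmation_slots), None);
}
```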
@@ -187,11 +209,15 @@ fn filter_account_result(
Box::new(iter::empty())
}
fn filter_signature_result<S>(result: Option<S>, _root: Slot) -> Box<dyn Iterator<Item = S>>
where
S: 'static + Clone + Serialize,
{
Box::new(result.into_iter())
fn filter_signature_result(
result: Option<transaction::Result<()>>,
_root: Slot,
) -> Box<dyn Iterator<Item = RpcSignatureResult>> {
Box::new(
result
.into_iter()
.map(|result| RpcSignatureResult { err: result.err() }),
)
}
fn filter_program_results(
@@ -213,6 +239,7 @@ pub struct RpcSubscriptions {
program_subscriptions: Arc<RpcProgramSubscriptions>,
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
slot_subscriptions: Arc<RpcSlotSubscriptions>,
root_subscriptions: Arc<RpcRootSubscriptions>,
notification_sender: Arc<Mutex<Sender<NotificationEntry>>>,
t_cleanup: Option<JoinHandle<()>>,
notifier_runtime: Option<Runtime>,
@@ -221,7 +248,10 @@ pub struct RpcSubscriptions {
impl Default for RpcSubscriptions {
fn default() -> Self {
Self::new(&Arc::new(AtomicBool::new(false)))
Self::new(
&Arc::new(AtomicBool::new(false)),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
)
}
}
@@ -234,7 +264,10 @@ impl Drop for RpcSubscriptions {
}
impl RpcSubscriptions {
pub fn new(exit: &Arc<AtomicBool>) -> Self {
pub fn new(
exit: &Arc<AtomicBool>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) -> Self {
let (notification_sender, notification_receiver): (
Sender<NotificationEntry>,
Receiver<NotificationEntry>,
@@ -244,6 +277,7 @@ impl RpcSubscriptions {
let program_subscriptions = Arc::new(RpcProgramSubscriptions::default());
let signature_subscriptions = Arc::new(RpcSignatureSubscriptions::default());
let slot_subscriptions = Arc::new(RpcSlotSubscriptions::default());
let root_subscriptions = Arc::new(RpcRootSubscriptions::default());
let notification_sender = Arc::new(Mutex::new(notification_sender));
let exit_clone = exit.clone();
@@ -251,6 +285,7 @@ impl RpcSubscriptions {
let program_subscriptions_clone = program_subscriptions.clone();
let signature_subscriptions_clone = signature_subscriptions.clone();
let slot_subscriptions_clone = slot_subscriptions.clone();
let root_subscriptions_clone = root_subscriptions.clone();
let notifier_runtime = RuntimeBuilder::new()
.core_threads(1)
@@ -270,6 +305,8 @@ impl RpcSubscriptions {
program_subscriptions_clone,
signature_subscriptions_clone,
slot_subscriptions_clone,
root_subscriptions_clone,
block_commitment_cache,
);
})
.unwrap();
@@ -279,6 +316,7 @@ impl RpcSubscriptions {
program_subscriptions,
signature_subscriptions,
slot_subscriptions,
root_subscriptions,
notification_sender,
notifier_runtime: Some(notifier_runtime),
t_cleanup: Some(t_cleanup),
@@ -288,8 +326,8 @@ impl RpcSubscriptions {
fn check_account(
pubkey: &Pubkey,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
account_subscriptions: Arc<RpcAccountSubscriptions>,
notifier: &RpcNotifier,
) {
@@ -297,8 +335,8 @@ impl RpcSubscriptions {
check_confirmations_and_notify(
&subscriptions,
pubkey,
current_slot,
bank_forks,
block_commitment_cache,
Bank::get_account_modified_since_parent,
filter_account_result,
notifier,
@@ -307,8 +345,8 @@ impl RpcSubscriptions {
fn check_program(
program_id: &Pubkey,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
program_subscriptions: Arc<RpcProgramSubscriptions>,
notifier: &RpcNotifier,
) {
@@ -316,8 +354,8 @@ impl RpcSubscriptions {
check_confirmations_and_notify(
&subscriptions,
program_id,
current_slot,
bank_forks,
block_commitment_cache,
Bank::get_program_accounts_modified_since_parent,
filter_program_results,
notifier,
@@ -326,8 +364,8 @@ impl RpcSubscriptions {
fn check_signature(
signature: &Signature,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
notifier: &RpcNotifier,
) {
@@ -335,9 +373,9 @@ impl RpcSubscriptions {
let notified_ids = check_confirmations_and_notify(
&subscriptions,
signature,
current_slot,
bank_forks,
Bank::get_signature_status,
block_commitment_cache,
Bank::get_signature_status_processed_since_parent,
filter_signature_result,
notifier,
);
@@ -351,13 +389,19 @@ impl RpcSubscriptions {
pub fn add_account_subscription(
&self,
pubkey: &Pubkey,
pubkey: Pubkey,
confirmations: Option<Confirmations>,
sub_id: &SubscriptionId,
sink: &Sink<RpcAccount>,
sub_id: SubscriptionId,
subscriber: Subscriber<Response<RpcAccount>>,
) {
let mut subscriptions = self.account_subscriptions.write().unwrap();
add_subscription(&mut subscriptions, pubkey, confirmations, sub_id, sink);
add_subscription(
&mut subscriptions,
pubkey,
confirmations,
sub_id,
subscriber,
);
}
pub fn remove_account_subscription(&self, id: &SubscriptionId) -> bool {
@@ -367,13 +411,19 @@ impl RpcSubscriptions {
pub fn add_program_subscription(
&self,
program_id: &Pubkey,
program_id: Pubkey,
confirmations: Option<Confirmations>,
sub_id: &SubscriptionId,
sink: &Sink<RpcKeyedAccount>,
sub_id: SubscriptionId,
subscriber: Subscriber<Response<RpcKeyedAccount>>,
) {
let mut subscriptions = self.program_subscriptions.write().unwrap();
add_subscription(&mut subscriptions, program_id, confirmations, sub_id, sink);
add_subscription(
&mut subscriptions,
program_id,
confirmations,
sub_id,
subscriber,
);
}
pub fn remove_program_subscription(&self, id: &SubscriptionId) -> bool {
@@ -383,13 +433,19 @@ impl RpcSubscriptions {
pub fn add_signature_subscription(
&self,
signature: &Signature,
signature: Signature,
confirmations: Option<Confirmations>,
sub_id: &SubscriptionId,
sink: &Sink<transaction::Result<()>>,
sub_id: SubscriptionId,
subscriber: Subscriber<Response<RpcSignatureResult>>,
) {
let mut subscriptions = self.signature_subscriptions.write().unwrap();
add_subscription(&mut subscriptions, signature, confirmations, sub_id, sink);
add_subscription(
&mut subscriptions,
signature,
confirmations,
sub_id,
subscriber,
);
}
pub fn remove_signature_subscription(&self, id: &SubscriptionId) -> bool {
@@ -403,9 +459,10 @@ impl RpcSubscriptions {
self.enqueue_notification(NotificationEntry::Bank((current_slot, bank_forks.clone())));
}
pub fn add_slot_subscription(&self, sub_id: &SubscriptionId, sink: &Sink<SlotInfo>) {
pub fn add_slot_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<SlotInfo>) {
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let mut subscriptions = self.slot_subscriptions.write().unwrap();
subscriptions.insert(sub_id.clone(), sink.clone());
subscriptions.insert(sub_id, sink);
}
pub fn remove_slot_subscription(&self, id: &SubscriptionId) -> bool {
@@ -417,6 +474,24 @@ impl RpcSubscriptions {
self.enqueue_notification(NotificationEntry::Slot(SlotInfo { slot, parent, root }));
}
pub fn add_root_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<Slot>) {
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let mut subscriptions = self.root_subscriptions.write().unwrap();
subscriptions.insert(sub_id, sink);
}
pub fn remove_root_subscription(&self, id: &SubscriptionId) -> bool {
let mut subscriptions = self.root_subscriptions.write().unwrap();
subscriptions.remove(id).is_some()
}
pub fn notify_roots(&self, mut rooted_slots: Vec<Slot>) {
rooted_slots.sort();
rooted_slots.into_iter().for_each(|root| {
self.enqueue_notification(NotificationEntry::Root(root));
});
}
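notify_roots sorts the newly rooted slots before enqueueing them, so a subscriber whose node roots several slots at once still sees rootNotification messages in increasing order, which is what the root-subscribe test later in this file checks for the input 2, 1, 3. A small std-only sketch of that ordering guarantee, with an mpsc sender standing in for the notification queue:

```rust
use std::sync::mpsc::{channel, Sender};

fn notify_roots(mut rooted_slots: Vec<u64>, sender: &Sender<u64>) {
    // Sort so subscribers always observe roots in increasing order.
    rooted_slots.sort();
    for root in rooted_slots {
        sender.send(root).unwrap();
    }
}

fn main() {
    let (tx, rx) = channel();
    notify_roots(vec![2, 1, 3], &tx);
    assert_eq!(rx.iter().take(3).collect::<Vec<_>>(), vec![1, 2, 3]);
}
```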
fn enqueue_notification(&self, notification_entry: NotificationEntry) {
match self
.notification_sender
@@ -442,6 +517,8 @@ impl RpcSubscriptions {
program_subscriptions: Arc<RpcProgramSubscriptions>,
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
slot_subscriptions: Arc<RpcSlotSubscriptions>,
root_subscriptions: Arc<RpcRootSubscriptions>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) {
loop {
if exit.load(Ordering::Relaxed) {
@@ -455,7 +532,13 @@ impl RpcSubscriptions {
notifier.notify(slot_info, sink);
}
}
NotificationEntry::Bank((current_slot, bank_forks)) => {
NotificationEntry::Root(root) => {
let subscriptions = root_subscriptions.read().unwrap();
for (_, sink) in subscriptions.iter() {
notifier.notify(root, sink);
}
}
NotificationEntry::Bank((_current_slot, bank_forks)) => {
let pubkeys: Vec<_> = {
let subs = account_subscriptions.read().unwrap();
subs.keys().cloned().collect()
@@ -463,8 +546,8 @@ impl RpcSubscriptions {
for pubkey in &pubkeys {
Self::check_account(
pubkey,
current_slot,
&bank_forks,
&block_commitment_cache,
account_subscriptions.clone(),
&notifier,
);
@@ -477,8 +560,8 @@ impl RpcSubscriptions {
for program_id in &programs {
Self::check_program(
program_id,
current_slot,
&bank_forks,
&block_commitment_cache,
program_subscriptions.clone(),
&notifier,
);
@@ -491,8 +574,8 @@ impl RpcSubscriptions {
for signature in &signatures {
Self::check_signature(
signature,
current_slot,
&bank_forks,
&block_commitment_cache,
signature_subscriptions.clone(),
&notifier,
);
@@ -533,37 +616,48 @@ impl RpcSubscriptions {
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use jsonrpc_core::futures;
use crate::{
commitment::BlockCommitment,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
};
use jsonrpc_core::futures::{self, stream::Stream};
use jsonrpc_pubsub::typed::Subscriber;
use serial_test_derive::serial;
use solana_budget_program;
use solana_sdk::{
signature::{Keypair, Signer},
system_transaction,
};
use tokio::prelude::{Async, Stream};
use std::{fmt::Debug, sync::mpsc::channel, time::Instant};
use tokio::{prelude::FutureExt, runtime::Runtime, timer::Delay};
pub(crate) fn robust_poll<T>(
mut receiver: futures::sync::mpsc::Receiver<T>,
) -> Result<T, RecvTimeoutError> {
const INITIAL_DELAY_MS: u64 = RECEIVE_DELAY_MILLIS * 2;
pub(crate) fn robust_poll_or_panic<T: Debug + Send + 'static>(
receiver: futures::sync::mpsc::Receiver<T>,
) -> (T, futures::sync::mpsc::Receiver<T>) {
let (inner_sender, inner_receiver) = channel();
let mut rt = Runtime::new().unwrap();
rt.spawn(futures::lazy(|| {
let recv_timeout = receiver
.into_future()
.timeout(Duration::from_millis(RECEIVE_DELAY_MILLIS))
.map(move |result| match result {
(Some(value), receiver) => {
inner_sender.send((value, receiver)).expect("send error")
}
(None, _) => panic!("unexpected end of stream"),
})
.map_err(|err| panic!("stream error {:?}", err));
std::thread::sleep(Duration::from_millis(INITIAL_DELAY_MS));
for _i in 0..5 {
let found = receiver.poll();
if let Ok(Async::Ready(Some(result))) = found {
return Ok(result);
}
std::thread::sleep(Duration::from_millis(RECEIVE_DELAY_MILLIS));
}
Err(RecvTimeoutError::Timeout)
}
pub(crate) fn robust_poll_or_panic<T>(receiver: futures::sync::mpsc::Receiver<T>) -> T {
robust_poll(receiver).unwrap_or_else(|err| panic!("expected response! {}", err))
const INITIAL_DELAY_MS: u64 = RECEIVE_DELAY_MILLIS * 2;
Delay::new(Instant::now() + Duration::from_millis(INITIAL_DELAY_MS))
.and_then(|_| recv_timeout)
.map_err(|err| panic!("timer error {:?}", err))
}));
inner_receiver.recv().expect("recv error")
}
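The reworked robust_poll_or_panic helper spawns the receiver's first item onto a tokio runtime, waits an initial delay, applies a timeout, and hands back the value together with the remaining receiver so a test can poll again; it panics on timeout or end of stream. A std-only sketch of the same contract using a blocking mpsc receiver (the delay constant is illustrative here; the real helper works on the futures/tokio channel types):

```rust
use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::thread::sleep;
use std::time::Duration;

const RECEIVE_DELAY_MILLIS: u64 = 300; // illustrative; the real constant is defined elsewhere in the module

/// Wait briefly for the subscription machinery to run, then require a value within the timeout.
/// Returns the value together with the receiver so the caller can poll for further notifications.
fn robust_poll_or_panic<T>(receiver: Receiver<T>) -> (T, Receiver<T>) {
    sleep(Duration::from_millis(RECEIVE_DELAY_MILLIS * 2)); // initial delay
    match receiver.recv_timeout(Duration::from_millis(RECEIVE_DELAY_MILLIS)) {
        Ok(value) => (value, receiver),
        Err(RecvTimeoutError::Timeout) => panic!("expected response, got timeout"),
        Err(RecvTimeoutError::Disconnected) => panic!("unexpected end of stream"),
    }
}

fn main() {
    let (tx, rx) = std::sync::mpsc::channel();
    tx.send(42u32).unwrap();
    let (value, _rx) = robust_poll_or_panic(rx);
    assert_eq!(value, 42);
}
```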
#[test]
#[serial]
fn test_check_account_subscribe() {
let GenesisConfigInfo {
genesis_config,
@@ -593,10 +687,12 @@ pub(crate) mod tests {
let (subscriber, _id_receiver, transport_receiver) =
Subscriber::new_test("accountNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
subscriptions.add_account_subscription(&alice.pubkey(), None, &sub_id, &sink);
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
subscriptions.add_account_subscription(alice.pubkey(), None, sub_id.clone(), subscriber);
assert!(subscriptions
.account_subscriptions
@@ -605,11 +701,25 @@ pub(crate) mod tests {
.contains_key(&alice.pubkey()));
subscriptions.notify_subscribers(0, &bank_forks);
let response = robust_poll_or_panic(transport_receiver);
let expected = format!(
r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"data":"1111111111111111","executable":false,"lamports":1,"owner":"Budget1111111111111111111111111111111111111","rentEpoch":1}},"subscription":0}}}}"#
);
assert_eq!(expected, response);
let (response, _) = robust_poll_or_panic(transport_receiver);
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
"params": {
"result": {
"context": { "slot": 0 },
"value": {
"data": "1111111111111111",
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
},
},
"subscription": 0,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
subscriptions.remove_account_subscription(&sub_id);
assert!(!subscriptions
@@ -620,6 +730,7 @@ pub(crate) mod tests {
}
#[test]
#[serial]
fn test_check_program_subscribe() {
let GenesisConfigInfo {
genesis_config,
@@ -649,10 +760,17 @@ pub(crate) mod tests {
let (subscriber, _id_receiver, transport_receiver) =
Subscriber::new_test("programNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
subscriptions.add_program_subscription(&solana_budget_program::id(), None, &sub_id, &sink);
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
subscriptions.add_program_subscription(
solana_budget_program::id(),
None,
sub_id.clone(),
subscriber,
);
assert!(subscriptions
.program_subscriptions
@@ -661,12 +779,28 @@ pub(crate) mod tests {
.contains_key(&solana_budget_program::id()));
subscriptions.notify_subscribers(0, &bank_forks);
let response = robust_poll_or_panic(transport_receiver);
let expected = format!(
r#"{{"jsonrpc":"2.0","method":"programNotification","params":{{"result":{{"account":{{"data":"1111111111111111","executable":false,"lamports":1,"owner":"Budget1111111111111111111111111111111111111","rentEpoch":1}},"pubkey":"{:?}"}},"subscription":0}}}}"#,
alice.pubkey()
);
assert_eq!(expected, response);
let (response, _) = robust_poll_or_panic(transport_receiver);
let expected = json!({
"jsonrpc": "2.0",
"method": "programNotification",
"params": {
"result": {
"context": { "slot": 0 },
"value": {
"account": {
"data": "1111111111111111",
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
},
"pubkey": alice.pubkey().to_string(),
},
},
"subscription": 0,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
subscriptions.remove_program_subscription(&sub_id);
assert!(!subscriptions
@@ -677,6 +811,7 @@ pub(crate) mod tests {
}
#[test]
#[serial]
fn test_check_signature_subscribe() {
let GenesisConfigInfo {
genesis_config,
@@ -685,77 +820,150 @@ pub(crate) mod tests {
} = create_genesis_config(100);
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let mut bank_forks = BankForks::new(0, bank);
let alice = Keypair::new();
let tx = system_transaction::transfer(&mint_keypair, &alice.pubkey(), 20, blockhash);
let signature = tx.signatures[0];
let past_bank_tx =
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 1, blockhash);
let unprocessed_tx =
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 10, blockhash);
let not_ready_signature = unprocessed_tx.signatures[0];
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 2, blockhash);
let processed_tx =
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 3, blockhash);
bank_forks
.write()
.unwrap()
.get(0)
.unwrap()
.process_transaction(&tx)
.process_transaction(&past_bank_tx)
.unwrap();
let (subscriber, _id_receiver, transport_receiver) =
Subscriber::new_test("signatureNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let remaining_sub_id = SubscriptionId::Number(1 as u64);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let next_bank =
Bank::new_from_parent(&bank_forks.banks[&0].clone(), &Pubkey::new_rand(), 1);
bank_forks.insert(next_bank);
bank_forks
.get(1)
.unwrap()
.process_transaction(&processed_tx)
.unwrap();
let bank1 = bank_forks[1].clone();
let bank_forks = Arc::new(RwLock::new(bank_forks));
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 10);
let cache1 = BlockCommitment::default();
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
block_commitment.entry(1).or_insert(cache1.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 10, bank1, 0);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
subscriptions.add_signature_subscription(&signature, None, &sub_id, &sink.clone());
let subscriptions =
RpcSubscriptions::new(&exit, Arc::new(RwLock::new(block_commitment_cache)));
let (past_bank_sub1, _id_receiver, past_bank_recv1) =
Subscriber::new_test("signatureNotification");
let (past_bank_sub2, _id_receiver, past_bank_recv2) =
Subscriber::new_test("signatureNotification");
let (processed_sub, _id_receiver, processed_recv) =
Subscriber::new_test("signatureNotification");
subscriptions.add_signature_subscription(
&not_ready_signature,
None,
&remaining_sub_id,
&sink.clone(),
past_bank_tx.signatures[0],
Some(0),
SubscriptionId::Number(1 as u64),
past_bank_sub1,
);
subscriptions.add_signature_subscription(
past_bank_tx.signatures[0],
Some(1),
SubscriptionId::Number(2 as u64),
past_bank_sub2,
);
subscriptions.add_signature_subscription(
processed_tx.signatures[0],
Some(0),
SubscriptionId::Number(3 as u64),
processed_sub,
);
subscriptions.add_signature_subscription(
unprocessed_tx.signatures[0],
Some(0),
SubscriptionId::Number(4 as u64),
Subscriber::new_test("signatureNotification").0,
);
{
let sig_subs = subscriptions.signature_subscriptions.read().unwrap();
assert!(sig_subs.contains_key(&signature));
assert!(sig_subs.contains_key(&not_ready_signature));
assert_eq!(sig_subs.get(&past_bank_tx.signatures[0]).unwrap().len(), 2);
assert!(sig_subs.contains_key(&unprocessed_tx.signatures[0]));
assert!(sig_subs.contains_key(&processed_tx.signatures[0]));
}
subscriptions.notify_subscribers(0, &bank_forks);
let response = robust_poll_or_panic(transport_receiver);
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
let expected_res_str =
serde_json::to_string(&serde_json::to_value(expected_res).unwrap()).unwrap();
let expected = format!(
r#"{{"jsonrpc":"2.0","method":"signatureNotification","params":{{"result":{},"subscription":0}}}}"#,
expected_res_str
);
subscriptions.notify_subscribers(1, &bank_forks);
let expected_res = RpcSignatureResult { err: None };
struct Notification {
slot: Slot,
id: u64,
}
let expected_notification = |exp: Notification| -> String {
let json = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": exp.slot },
"value": &expected_res,
},
"subscription": exp.id,
}
});
serde_json::to_string(&json).unwrap()
};
// Expect to receive a notification from bank 1 because this subscription is
// looking for 0 confirmations and so checks the current bank
let expected = expected_notification(Notification { slot: 1, id: 1 });
let (response, _) = robust_poll_or_panic(past_bank_recv1);
assert_eq!(expected, response);
// Expect to receive a notification from bank 0 because this subscription is
// looking for 1 confirmation and so checks the past bank
let expected = expected_notification(Notification { slot: 0, id: 2 });
let (response, _) = robust_poll_or_panic(past_bank_recv2);
assert_eq!(expected, response);
let expected = expected_notification(Notification { slot: 1, id: 3 });
let (response, _) = robust_poll_or_panic(processed_recv);
assert_eq!(expected, response);
// Subscription should be automatically removed after notification
assert!(!subscriptions
.signature_subscriptions
.read()
.unwrap()
.contains_key(&signature));
let sig_subs = subscriptions.signature_subscriptions.read().unwrap();
assert!(!sig_subs.contains_key(&processed_tx.signatures[0]));
assert!(!sig_subs.contains_key(&past_bank_tx.signatures[0]));
// Unprocessed signature subscription should not be removed
assert!(subscriptions
.signature_subscriptions
.read()
.unwrap()
.contains_key(&not_ready_signature));
assert_eq!(
sig_subs.get(&unprocessed_tx.signatures[0]).unwrap().len(),
1
);
}
#[test]
#[serial]
fn test_check_slot_subscribe() {
let (subscriber, _id_receiver, transport_receiver) =
Subscriber::new_test("slotNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
subscriptions.add_slot_subscription(&sub_id, &sink);
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
subscriptions.add_slot_subscription(sub_id.clone(), subscriber);
assert!(subscriptions
.slot_subscriptions
@@ -764,7 +972,7 @@ pub(crate) mod tests {
.contains_key(&sub_id));
subscriptions.notify_slot(0, 0, 0);
let response = robust_poll_or_panic(transport_receiver);
let (response, _) = robust_poll_or_panic(transport_receiver);
let expected_res = SlotInfo {
parent: 0,
slot: 0,
@@ -787,28 +995,72 @@ pub(crate) mod tests {
}
#[test]
#[serial]
fn test_check_root_subscribe() {
let (subscriber, _id_receiver, mut transport_receiver) =
Subscriber::new_test("rootNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
subscriptions.add_root_subscription(sub_id.clone(), subscriber);
assert!(subscriptions
.root_subscriptions
.read()
.unwrap()
.contains_key(&sub_id));
subscriptions.notify_roots(vec![2, 1, 3]);
for expected_root in 1..=3 {
let (response, receiver) = robust_poll_or_panic(transport_receiver);
transport_receiver = receiver;
let expected_res_str =
serde_json::to_string(&serde_json::to_value(expected_root).unwrap()).unwrap();
let expected = format!(
r#"{{"jsonrpc":"2.0","method":"rootNotification","params":{{"result":{},"subscription":0}}}}"#,
expected_res_str
);
assert_eq!(expected, response);
}
subscriptions.remove_root_subscription(&sub_id);
assert!(!subscriptions
.root_subscriptions
.read()
.unwrap()
.contains_key(&sub_id));
}
#[test]
#[serial]
fn test_add_and_remove_subscription() {
let (subscriber, _id_receiver, _transport_receiver) = Subscriber::new_test("notification");
let sink = subscriber
.assign_id(SubscriptionId::String("test".to_string()))
.unwrap();
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, (Sink<()>, Confirmations)>> =
HashMap::new();
let num_keys = 5;
let mut next_id: u64 = 0;
for _ in 0..num_keys {
let key = next_id;
let sub_id = SubscriptionId::Number(next_id);
add_subscription(&mut subscriptions, &key, None, &sub_id, &sink.clone());
next_id += 1;
for key in 0..num_keys {
let (subscriber, _id_receiver, _transport_receiver) =
Subscriber::new_test("notification");
let sub_id = SubscriptionId::Number(key);
add_subscription(&mut subscriptions, key, None, sub_id, subscriber);
}
// Add another subscription to the "0" key
let sub_id = SubscriptionId::Number(next_id);
add_subscription(&mut subscriptions, &0, None, &sub_id, &sink.clone());
let (subscriber, _id_receiver, _transport_receiver) = Subscriber::new_test("notification");
let extra_sub_id = SubscriptionId::Number(num_keys);
add_subscription(
&mut subscriptions,
0,
None,
extra_sub_id.clone(),
subscriber,
);
assert_eq!(subscriptions.len(), num_keys);
assert_eq!(subscriptions.len(), num_keys as usize);
assert_eq!(subscriptions.get(&0).unwrap().len(), 2);
assert_eq!(subscriptions.get(&1).unwrap().len(), 1);
@@ -816,18 +1068,15 @@ pub(crate) mod tests {
remove_subscription(&mut subscriptions, &SubscriptionId::Number(0)),
true
);
assert_eq!(subscriptions.len(), num_keys);
assert_eq!(subscriptions.len(), num_keys as usize);
assert_eq!(subscriptions.get(&0).unwrap().len(), 1);
assert_eq!(
remove_subscription(&mut subscriptions, &SubscriptionId::Number(0)),
false
);
assert_eq!(
remove_subscription(&mut subscriptions, &SubscriptionId::Number(next_id)),
true
);
assert_eq!(subscriptions.len(), num_keys - 1);
assert_eq!(remove_subscription(&mut subscriptions, &extra_sub_id), true);
assert_eq!(subscriptions.len(), (num_keys - 1) as usize);
assert!(subscriptions.get(&0).is_none());
}
}

View File

@@ -9,6 +9,7 @@ use crate::{
use bincode::serialize;
use rand::{thread_rng, Rng};
use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure;
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug};
use solana_perf::packet::{Packets, PacketsRecycler};
@@ -184,12 +185,33 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
max_packets: &mut usize,
) -> Result<()> {
// TODO: cache connections
let timeout = Duration::new(1, 0);
let reqs = requests_receiver.recv_timeout(timeout)?;
let mut reqs_v = vec![requests_receiver.recv_timeout(timeout)?];
let mut total_packets = reqs_v[0].packets.len();
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
while let Ok(more) = requests_receiver.try_recv() {
total_packets += more.packets.len();
if total_packets < *max_packets {
// Anything beyond max_packets is drained and dropped to guard against DoS
reqs_v.push(more);
}
}
let mut time = Measure::start("repair::handle_packets");
for reqs in reqs_v {
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
}
time.stop();
if total_packets >= *max_packets {
if time.as_ms() > 1000 {
*max_packets = (*max_packets * 9) / 10;
} else {
*max_packets = (*max_packets * 10) / 9;
}
}
Ok(())
}
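run_listen now drains the request channel into a batch capped at max_packets and then adapts that cap from how long handling took: a saturated batch that needed more than a second shrinks the budget by 10%, a saturated batch handled quickly grows it by roughly 11%, and an unsaturated batch leaves it alone. A standalone sketch of that feedback rule, with the elapsed time passed in directly instead of measured:

```rust
use std::time::Duration;

/// Adjust the packet budget after handling a batch, mirroring the 9/10 and 10/9 scaling
/// in run_listen: back off when a saturated batch was slow, open up when it was fast.
fn adjust_budget(max_packets: &mut usize, total_packets: usize, elapsed: Duration) {
    if total_packets >= *max_packets {
        if elapsed > Duration::from_millis(1000) {
            *max_packets = (*max_packets * 9) / 10;
        } else {
            *max_packets = (*max_packets * 10) / 9;
        }
    }
}

fn main() {
    let mut budget = 1024;

    // A saturated batch that took too long shrinks the budget.
    adjust_budget(&mut budget, 2000, Duration::from_millis(1500));
    assert_eq!(budget, 921);

    // A saturated batch handled quickly grows it again.
    adjust_budget(&mut budget, 1000, Duration::from_millis(200));
    assert_eq!(budget, 1023);

    // An unsaturated batch leaves the budget alone.
    adjust_budget(&mut budget, 10, Duration::from_millis(5000));
    assert_eq!(budget, 1023);
}
```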
@@ -204,22 +226,26 @@ impl ServeRepair {
let recycler = PacketsRecycler::default();
Builder::new()
.name("solana-repair-listen".to_string())
.spawn(move || loop {
let result = Self::run_listen(
&me,
&recycler,
blockstore.as_ref(),
&requests_receiver,
&response_sender,
);
match result {
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
Err(err) => info!("repair listener error: {:?}", err),
};
if exit.load(Ordering::Relaxed) {
return;
.spawn(move || {
let mut max_packets = 1024;
loop {
let result = Self::run_listen(
&me,
&recycler,
blockstore.as_ref(),
&requests_receiver,
&response_sender,
&mut max_packets,
);
match result {
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
Err(err) => info!("repair listener error: {:?}", err),
};
if exit.load(Ordering::Relaxed) {
return;
}
thread_mem_usage::datapoint("solana-repair-listen");
}
thread_mem_usage::datapoint("solana-repair-listen");
})
.unwrap()
}

View File

@@ -2,43 +2,128 @@
use crate::packet::{Packet, PacketsRecycler};
use crate::streamer::{self, PacketReceiver, PacketSender};
use bv::BitVec;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blockstore::MAX_DATA_SHREDS_PER_SLOT;
use solana_ledger::shred::{OFFSET_OF_SHRED_INDEX, SIZE_OF_SHRED_INDEX};
use solana_ledger::shred::{
OFFSET_OF_SHRED_INDEX, OFFSET_OF_SHRED_SLOT, SIZE_OF_SHRED_INDEX, SIZE_OF_SHRED_SLOT,
};
use solana_perf::cuda_runtime::PinnedVec;
use solana_perf::packet::limited_deserialize;
use solana_perf::recycler::Recycler;
use solana_sdk::clock::Slot;
use std::collections::HashMap;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::sync::RwLock;
use std::thread::{self, Builder, JoinHandle};
use std::time::Instant;
pub type ShredsReceived = HashMap<Slot, BitVec<u64>>;
pub struct ShredFetchStage {
thread_hdls: Vec<JoinHandle<()>>,
}
impl ShredFetchStage {
// updates packets received on a channel and sends them on another channel
fn modify_packets<F>(recvr: PacketReceiver, sendr: PacketSender, modify: F)
where
F: Fn(&mut Packet),
{
while let Some(mut p) = recvr.iter().next() {
let index_start = OFFSET_OF_SHRED_INDEX;
let index_end = index_start + SIZE_OF_SHRED_INDEX;
p.packets.iter_mut().for_each(|p| {
p.meta.discard = true;
if index_end <= p.meta.size {
if let Ok(index) = limited_deserialize::<u32>(&p.data[index_start..index_end]) {
if index < MAX_DATA_SHREDS_PER_SLOT as u32 {
p.meta.discard = false;
modify(p);
} else {
inc_new_counter_warn!("shred_fetch_stage-shred_index_overrun", 1);
}
fn get_slot_index(p: &Packet, index_overrun: &mut usize) -> Option<(u64, u32)> {
let index_start = OFFSET_OF_SHRED_INDEX;
let index_end = index_start + SIZE_OF_SHRED_INDEX;
let slot_start = OFFSET_OF_SHRED_SLOT;
let slot_end = slot_start + SIZE_OF_SHRED_SLOT;
if index_end <= p.meta.size {
if let Ok(index) = limited_deserialize::<u32>(&p.data[index_start..index_end]) {
if index < MAX_DATA_SHREDS_PER_SLOT as u32 && slot_end <= p.meta.size {
if let Ok(slot) = limited_deserialize::<Slot>(&p.data[slot_start..slot_end]) {
return Some((slot, index));
}
}
}
} else {
*index_overrun += 1;
}
None
}
fn process_packet<F>(
p: &mut Packet,
shreds_received: &mut ShredsReceived,
index_overrun: &mut usize,
last_root: Slot,
last_slot: Slot,
slots_per_epoch: u64,
modify: &F,
) where
F: Fn(&mut Packet),
{
p.meta.discard = true;
if let Some((slot, index)) = Self::get_slot_index(p, index_overrun) {
// It seems reasonable to limit shreds to within 2 epochs of the working bank's slot
if slot > last_root && slot < (last_slot + 2 * slots_per_epoch) {
// Shred filter
let slot_received = shreds_received
.entry(slot)
.or_insert_with(|| BitVec::new_fill(false, MAX_DATA_SHREDS_PER_SLOT as u64));
if !slot_received.get(index.into()) {
p.meta.discard = false;
modify(p);
slot_received.set(index.into(), true);
}
}
}
}
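process_packet above keeps a shred only if its slot is strictly ahead of the last root and less than two epochs past the working bank's slot, and it deduplicates by remembering, per slot, which indexes have already been seen (the 200ms clear further down bounds that memory). The real code uses a bv::BitVec per slot; the sketch below uses a HashSet and hypothetical names to show the same filter:

```rust
use std::collections::{HashMap, HashSet};

type Slot = u64;

/// Per-slot record of shred indexes already accepted.
type ShredsSeen = HashMap<Slot, HashSet<u32>>;

/// Returns true if the shred should be kept (and records it), false if it should be discarded.
fn accept_shred(
    seen: &mut ShredsSeen,
    slot: Slot,
    index: u32,
    last_root: Slot,
    last_slot: Slot,
    slots_per_epoch: u64,
) -> bool {
    // Same window as process_packet: newer than the root, no more than ~2 epochs ahead.
    if slot > last_root && slot < last_slot + 2 * slots_per_epoch {
        // The first sighting of (slot, index) is kept; duplicates are dropped.
        seen.entry(slot).or_default().insert(index)
    } else {
        false
    }
}

fn main() {
    let mut seen = ShredsSeen::default();
    let (last_root, last_slot, slots_per_epoch) = (0, 100, 10);

    assert!(accept_shred(&mut seen, 1, 3, last_root, last_slot, slots_per_epoch));
    // The same (slot, index) arriving again is filtered out.
    assert!(!accept_shred(&mut seen, 1, 3, last_root, last_slot, slots_per_epoch));
    // A slot at or below the root is rejected.
    assert!(!accept_shred(&mut seen, 1, 3, 3, last_slot, slots_per_epoch));
    // A slot far beyond two epochs past the working bank is rejected.
    assert!(!accept_shred(&mut seen, 1_000_000, 0, last_root, last_slot, slots_per_epoch));
}
```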
// updates packets received on a channel and sends them on another channel
fn modify_packets<F>(
recvr: PacketReceiver,
sendr: PacketSender,
bank_forks: Option<Arc<RwLock<BankForks>>>,
modify: F,
) where
F: Fn(&mut Packet),
{
let mut shreds_received = ShredsReceived::default();
let mut last_cleared = Instant::now();
// In the case of bank_forks=None, set up to accept any slot range
let mut last_root = 0;
let mut last_slot = std::u64::MAX;
let mut slots_per_epoch = 0;
while let Some(mut p) = recvr.iter().next() {
if last_cleared.elapsed().as_millis() > 200 {
shreds_received.clear();
last_cleared = Instant::now();
if let Some(bank_forks) = bank_forks.as_ref() {
let bank_forks_r = bank_forks.read().unwrap();
last_root = bank_forks_r.root();
let working_bank = bank_forks_r.working_bank();
last_slot = working_bank.slot();
let root_bank = bank_forks_r
.get(bank_forks_r.root())
.expect("Root bank should exist");
slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch());
}
}
let mut index_overrun = 0;
let mut shred_count = 0;
p.packets.iter_mut().for_each(|mut packet| {
shred_count += 1;
Self::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
last_root,
last_slot,
slots_per_epoch,
&modify,
);
});
inc_new_counter_warn!("shred_fetch_stage-shred_index_overrun", index_overrun);
inc_new_counter_info!("shred_fetch_stage-shred_count", shred_count);
if sendr.send(p).is_err() {
break;
}
@@ -50,6 +135,7 @@ impl ShredFetchStage {
exit: &Arc<AtomicBool>,
sender: PacketSender,
recycler: Recycler<PinnedVec<Packet>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
modify: F,
) -> (Vec<JoinHandle<()>>, JoinHandle<()>)
where
@@ -71,7 +157,7 @@ impl ShredFetchStage {
let modifier_hdl = Builder::new()
.name("solana-tvu-fetch-stage-packet-modifier".to_string())
.spawn(|| Self::modify_packets(packet_receiver, sender, modify))
.spawn(move || Self::modify_packets(packet_receiver, sender, bank_forks, modify))
.unwrap();
(streamers, modifier_hdl)
}
@@ -81,6 +167,7 @@ impl ShredFetchStage {
forward_sockets: Vec<Arc<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
sender: &PacketSender,
bank_forks: Option<Arc<RwLock<BankForks>>>,
exit: &Arc<AtomicBool>,
) -> Self {
let recycler: PacketsRecycler = Recycler::warmed(100, 1024);
@@ -100,6 +187,7 @@ impl ShredFetchStage {
&exit,
sender.clone(),
recycler.clone(),
bank_forks.clone(),
|p| p.meta.forward = true,
);
@@ -108,6 +196,7 @@ impl ShredFetchStage {
&exit,
sender.clone(),
recycler.clone(),
bank_forks,
|p| p.meta.repair = true,
);
@@ -128,3 +217,111 @@ impl ShredFetchStage {
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::shred::Shred;
#[test]
fn test_shred_filter() {
solana_logger::setup();
let mut shreds_received = ShredsReceived::default();
let mut packet = Packet::default();
let mut index_overrun = 0;
let last_root = 0;
let last_slot = 100;
let slots_per_epoch = 10;
// packet size is 0, so cannot get index
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
last_root,
last_slot,
slots_per_epoch,
&|_p| {},
);
assert_eq!(index_overrun, 1);
assert!(packet.meta.discard);
let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
shred.copy_to_packet(&mut packet);
// The shred's slot is 1, which is rejected because the root passed in is 3
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
3,
last_slot,
slots_per_epoch,
&|_p| {},
);
assert!(packet.meta.discard);
// With the root back at 0, the shred for slot 1, index 3 is accepted
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
last_root,
last_slot,
slots_per_epoch,
&|_p| {},
);
assert!(!packet.meta.discard);
// shreds_received should filter out the duplicate
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
last_root,
last_slot,
slots_per_epoch,
&|_p| {},
);
assert!(packet.meta.discard);
let shred = Shred::new_from_data(1_000_000, 3, 0, None, true, true, 0, 0, 0);
shred.copy_to_packet(&mut packet);
// Slot 1 million is too high
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
last_root,
last_slot,
slots_per_epoch,
&|_p| {},
);
assert!(packet.meta.discard);
let index = MAX_DATA_SHREDS_PER_SLOT as u32;
let shred = Shred::new_from_data(5, index, 0, None, true, true, 0, 0, 0);
shred.copy_to_packet(&mut packet);
ShredFetchStage::process_packet(
&mut packet,
&mut shreds_received,
&mut index_overrun,
last_root,
last_slot,
slots_per_epoch,
&|_p| {},
);
assert!(packet.meta.discard);
}
#[test]
fn test_shred_offsets() {
let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
let mut packet = Packet::default();
shred.copy_to_packet(&mut packet);
let mut index_overrun = 0;
assert_eq!(
Some((1, 3)),
ShredFetchStage::get_slot_index(&packet, &mut index_overrun)
);
}
}

View File

@@ -4,6 +4,7 @@
use crate::{
cluster_info::ClusterInfo,
commitment::BlockCommitmentCache,
contact_info::ContactInfo,
result::{Error, Result},
};
@@ -28,6 +29,7 @@ use solana_storage_program::{
storage_instruction,
storage_instruction::proof_validation,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
cmp,
collections::HashMap,
@@ -183,6 +185,7 @@ impl StorageStage {
exit: &Arc<AtomicBool>,
bank_forks: &Arc<RwLock<BankForks>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) -> Self {
let (instruction_sender, instruction_receiver) = channel();
@@ -254,6 +257,7 @@ impl StorageStage {
&keypair,
&storage_keypair,
&transactions_socket,
&block_commitment_cache,
)
.unwrap_or_else(|err| {
info!("failed to send storage transaction: {:?}", err)
@@ -287,6 +291,7 @@ impl StorageStage {
keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
transactions_socket: &UdpSocket,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
) -> io::Result<()> {
let working_bank = bank_forks.read().unwrap().working_bank();
let blockhash = working_bank.confirmed_last_blockhash().0;
@@ -312,7 +317,7 @@ impl StorageStage {
}
let signer_keys = vec![keypair.as_ref(), storage_keypair.as_ref()];
- let message = Message::new_with_payer(vec![instruction], Some(&signer_keys[0].pubkey()));
+ let message = Message::new_with_payer(&[instruction], Some(&signer_keys[0].pubkey()));
let transaction = Transaction::new(&signer_keys, message, blockhash);
// try sending the transaction up to 5 times
for _ in 0..5 {
@@ -321,8 +326,13 @@ impl StorageStage {
cluster_info.read().unwrap().my_data().tpu,
)?;
sleep(Duration::from_millis(100));
- if Self::poll_for_signature_confirmation(bank_forks, &transaction.signatures[0], 0)
-     .is_ok()
+ if Self::poll_for_signature_confirmation(
+     bank_forks,
+     block_commitment_cache,
+     &transaction.signatures[0],
+     0,
+ )
+ .is_ok()
{
break;
};
@@ -332,19 +342,25 @@ impl StorageStage {
fn poll_for_signature_confirmation(
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
signature: &Signature,
min_confirmed_blocks: usize,
) -> Result<()> {
let mut now = Instant::now();
let mut confirmed_blocks = 0;
loop {
- let response = bank_forks
-     .read()
-     .unwrap()
-     .working_bank()
-     .get_signature_confirmation_status(signature);
- if let Some((confirmations, res)) = response {
-     if res.is_ok() {
+ let working_bank = bank_forks.read().unwrap().working_bank();
+ let response = working_bank.get_signature_status_slot(signature);
+ if let Some((slot, status)) = response {
+     let confirmations = if working_bank.src.roots().contains(&slot) {
+         MAX_LOCKOUT_HISTORY + 1
+     } else {
+         let r_block_commitment_cache = block_commitment_cache.read().unwrap();
+         r_block_commitment_cache
+             .get_confirmation_count(slot)
+             .unwrap_or(0)
+     };
+     if status.is_ok() {
if confirmed_blocks != confirmations {
now = Instant::now();
confirmed_blocks = confirmations;
@@ -648,12 +664,18 @@ mod tests {
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use rayon::prelude::*;
use solana_runtime::bank::Bank;
- use solana_sdk::hash::Hasher;
- use solana_sdk::signature::{Keypair, Signer};
- use std::cmp::{max, min};
- use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
- use std::sync::mpsc::channel;
- use std::sync::{Arc, RwLock};
+ use solana_sdk::{
+     hash::Hasher,
+     signature::{Keypair, Signer},
+ };
+ use std::{
+     cmp::{max, min},
+     sync::{
+         atomic::{AtomicBool, AtomicUsize, Ordering},
+         mpsc::channel,
+         Arc, RwLock,
+     },
+ };
#[test]
fn test_storage_stage_none_ledger() {
@@ -668,6 +690,7 @@ mod tests {
&[bank.clone()],
vec![0],
)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let (_slot_sender, slot_receiver) = channel();
let storage_state = StorageState::new(
&bank.last_blockhash(),
@@ -683,6 +706,7 @@ mod tests {
&exit.clone(),
&bank_forks,
&cluster_info,
block_commitment_cache,
);
exit.store(true, Ordering::Relaxed);
storage_stage.join().unwrap();
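
The poll_for_signature_confirmation rewrite above stops asking the bank for a per-signature confirmation count and instead treats a rooted slot as fully confirmed, falling back to the BlockCommitmentCache otherwise. A self-contained sketch of that decision, with toy stand-ins for the root set and the cache (MAX_LOCKOUT_HISTORY is 31 in the vote program):

use std::collections::{HashMap, HashSet};

const MAX_LOCKOUT_HISTORY: usize = 31;

// Toy stand-in for BlockCommitmentCache.
struct CommitmentCache {
    confirmations_by_slot: HashMap<u64, usize>,
}

impl CommitmentCache {
    fn get_confirmation_count(&self, slot: u64) -> Option<usize> {
        self.confirmations_by_slot.get(&slot).copied()
    }
}

/// A rooted slot counts as fully confirmed; anything else asks the cache.
fn confirmations_for(slot: u64, roots: &HashSet<u64>, cache: &CommitmentCache) -> usize {
    if roots.contains(&slot) {
        MAX_LOCKOUT_HISTORY + 1
    } else {
        cache.get_confirmation_count(slot).unwrap_or(0)
    }
}

fn main() {
    let roots: HashSet<u64> = [5u64].iter().copied().collect();
    let cache = CommitmentCache {
        confirmations_by_slot: [(7u64, 3usize)].iter().copied().collect(),
    };
    assert_eq!(confirmations_for(5, &roots, &cache), MAX_LOCKOUT_HISTORY + 1);
    assert_eq!(confirmations_for(7, &roots, &cache), 3);
    assert_eq!(confirmations_for(9, &roots, &cache), 0);
}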


@@ -1,10 +1,10 @@
use crossbeam_channel::{Receiver, RecvTimeoutError};
use solana_client::rpc_response::RpcTransactionStatus;
use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusBatch};
use solana_runtime::{
bank::{Bank, HashAgeKind},
nonce_utils,
};
use solana_transaction_status::TransactionStatusMeta;
use std::{
sync::{
atomic::{AtomicBool, Ordering},
@@ -70,10 +70,15 @@ impl TransactionStatusService {
}
.expect("FeeCalculator must exist");
let fee = fee_calculator.calculate_fee(transaction.message());
let (writable_keys, readonly_keys) =
transaction.message.get_account_keys_by_lock_type();
blockstore
.write_transaction_status(
- (slot, transaction.signatures[0]),
- &RpcTransactionStatus {
+ slot,
+ transaction.signatures[0],
+ writable_keys,
+ readonly_keys,
+ &TransactionStatusMeta {
status,
fee,
pre_balances,


@@ -2,6 +2,7 @@
//! validation pipeline in software.
use crate::{
accounts_hash_verifier::AccountsHashVerifier,
blockstream_service::BlockstreamService,
cluster_info::ClusterInfo,
commitment::BlockCommitmentCache,
@@ -25,9 +26,11 @@ use solana_ledger::{
snapshot_package::SnapshotPackageSender,
};
use solana_sdk::{
genesis_config::GenesisConfig,
pubkey::Pubkey,
signature::{Keypair, Signer},
};
use std::collections::HashSet;
use std::{
net::UdpSocket,
path::PathBuf,
@@ -47,6 +50,7 @@ pub struct Tvu {
blockstream_service: Option<BlockstreamService>,
ledger_cleanup_service: Option<LedgerCleanupService>,
storage_stage: StorageStage,
accounts_hash_verifier: AccountsHashVerifier,
}
pub struct Sockets {
@@ -56,6 +60,17 @@ pub struct Sockets {
pub forwards: Vec<UdpSocket>,
}
#[derive(Default)]
pub struct TvuConfig {
pub max_ledger_slots: Option<u64>,
pub sigverify_disabled: bool,
pub shred_version: u16,
pub halt_on_trusted_validators_accounts_hash_mismatch: bool,
pub trusted_validators: Option<HashSet<Pubkey>>,
pub accounts_hash_fault_injection_slots: u64,
pub genesis_config: GenesisConfig,
}
impl Tvu {
/// This service receives messages from a leader in the network and processes the transactions
/// on the bank state.
@@ -74,7 +89,6 @@ impl Tvu {
blockstore: Arc<Blockstore>,
storage_state: &StorageState,
blockstream_unix_socket: Option<&PathBuf>,
max_ledger_slots: Option<u64>,
ledger_signal_receiver: Receiver<bool>,
subscriptions: &Arc<RpcSubscriptions>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
@@ -82,12 +96,11 @@ impl Tvu {
exit: &Arc<AtomicBool>,
completed_slots_receiver: CompletedSlotsReceiver,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
sigverify_disabled: bool,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
transaction_status_sender: Option<TransactionStatusSender>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
snapshot_package_sender: Option<SnapshotPackageSender>,
tvu_config: TvuConfig,
) -> Self {
let keypair: Arc<Keypair> = cluster_info
.read()
@@ -113,11 +126,12 @@ impl Tvu {
forward_sockets,
repair_socket.clone(),
&fetch_sender,
Some(bank_forks.clone()),
&exit,
);
let (verified_sender, verified_receiver) = unbounded();
- let sigverify_stage = if !sigverify_disabled {
+ let sigverify_stage = if !tvu_config.sigverify_disabled {
SigVerifyStage::new(
fetch_receiver,
verified_sender,
@@ -143,12 +157,23 @@ impl Tvu {
completed_slots_receiver,
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
cfg,
- shred_version,
+ tvu_config.shred_version,
);
let (blockstream_slot_sender, blockstream_slot_receiver) = channel();
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();
let (accounts_hash_sender, accounts_hash_receiver) = channel();
let accounts_hash_verifier = AccountsHashVerifier::new(
accounts_hash_receiver,
snapshot_package_sender,
exit,
cluster_info,
tvu_config.trusted_validators.clone(),
tvu_config.halt_on_trusted_validators_accounts_hash_mismatch,
tvu_config.accounts_hash_fault_injection_slots,
);
let replay_stage_config = ReplayStageConfig {
my_pubkey: keypair.pubkey(),
vote_account: *vote_account,
@@ -158,10 +183,11 @@ impl Tvu {
leader_schedule_cache: leader_schedule_cache.clone(),
slot_full_senders: vec![blockstream_slot_sender],
latest_root_senders: vec![ledger_cleanup_slot_sender],
- snapshot_package_sender,
- block_commitment_cache,
+ accounts_hash_sender: Some(accounts_hash_sender),
+ block_commitment_cache: block_commitment_cache.clone(),
transaction_status_sender,
rewards_recorder_sender,
genesis_config: tvu_config.genesis_config,
};
let (replay_stage, root_bank_receiver) = ReplayStage::new(
@@ -185,7 +211,7 @@ impl Tvu {
None
};
- let ledger_cleanup_service = max_ledger_slots.map(|max_ledger_slots| {
+ let ledger_cleanup_service = tvu_config.max_ledger_slots.map(|max_ledger_slots| {
LedgerCleanupService::new(
ledger_cleanup_slot_receiver,
blockstore.clone(),
@@ -203,6 +229,7 @@ impl Tvu {
&exit,
&bank_forks,
&cluster_info,
block_commitment_cache,
);
Tvu {
@@ -213,6 +240,7 @@ impl Tvu {
blockstream_service,
ledger_cleanup_service,
storage_stage,
accounts_hash_verifier,
}
}
@@ -228,6 +256,7 @@ impl Tvu {
self.ledger_cleanup_service.unwrap().join()?;
}
self.replay_stage.join()?;
self.accounts_hash_verifier.join()?;
Ok(())
}
}
@@ -238,11 +267,13 @@ pub mod tests {
use crate::banking_stage::create_test_recorder;
use crate::cluster_info::{ClusterInfo, Node};
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use serial_test_derive::serial;
use solana_ledger::create_new_tmp_ledger;
use solana_runtime::bank::Bank;
use std::sync::atomic::Ordering;
#[test]
#[serial]
fn test_tvu_exit() {
solana_logger::setup();
let leader = Node::new_localhost();
@@ -288,20 +319,21 @@ pub mod tests {
blockstore,
&StorageState::default(),
None,
None,
l_receiver,
&Arc::new(RpcSubscriptions::new(&exit)),
&Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
)),
&poh_recorder,
&leader_schedule_cache,
&exit,
completed_slots_receiver,
block_commitment_cache,
false,
None,
0,
None,
None,
None,
None,
TvuConfig::default(),
);
exit.store(true, Ordering::Relaxed);
tvu.join().unwrap();
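
Much of this diff folds Tvu::new()'s growing list of flags into the new TvuConfig struct. Stripped of Solana types, the pattern looks like the sketch below; the field names are a representative subset chosen for illustration.

use std::collections::HashSet;

// Flags that used to be individual parameters become fields with defaults, so
// call sites can write TvuConfig::default() and override only what they need.
#[derive(Default, Debug)]
struct TvuConfig {
    max_ledger_slots: Option<u64>,
    sigverify_disabled: bool,
    shred_version: u16,
    trusted_validators: Option<HashSet<String>>, // String stands in for Pubkey
}

fn start_tvu(config: TvuConfig) {
    if !config.sigverify_disabled {
        println!("signature verification enabled");
    }
    println!(
        "shred version {}, max ledger slots {:?}, trusted validators {:?}",
        config.shred_version, config.max_ledger_slots, config.trusted_validators
    );
}

fn main() {
    // Tests can take all defaults, as the updated test_tvu_exit now does.
    start_tvu(TvuConfig::default());
    // Production call sites override only the fields they care about.
    start_tvu(TvuConfig {
        shred_version: 42,
        sigverify_disabled: true,
        ..TvuConfig::default()
    });
}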


@@ -20,7 +20,7 @@ use crate::{
storage_stage::StorageState,
tpu::Tpu,
transaction_status_service::TransactionStatusService,
tvu::{Sockets, Tvu},
tvu::{Sockets, Tvu, TvuConfig},
};
use crossbeam_channel::unbounded;
use solana_ledger::{
@@ -76,6 +76,10 @@ pub struct ValidatorConfig {
pub wait_for_supermajority: Option<Slot>,
pub new_hard_forks: Option<Vec<Slot>>,
pub trusted_validators: Option<HashSet<Pubkey>>, // None = trust all
pub halt_on_trusted_validators_accounts_hash_mismatch: bool,
pub accounts_hash_fault_injection_slots: u64, // 0 = no fault injection
pub frozen_accounts: Vec<Pubkey>,
pub no_rocksdb_compaction: bool,
}
impl Default for ValidatorConfig {
@@ -99,6 +103,10 @@ impl Default for ValidatorConfig {
wait_for_supermajority: None,
new_hard_forks: None,
trusted_validators: None,
halt_on_trusted_validators_accounts_hash_mismatch: false,
accounts_hash_fault_injection_slots: 0,
frozen_accounts: vec![],
no_rocksdb_compaction: false,
}
}
}
@@ -143,7 +151,7 @@ impl Validator {
keypair: &Arc<Keypair>,
ledger_path: &Path,
vote_account: &Pubkey,
- voting_keypair: &Arc<Keypair>,
+ authorized_voter: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
entrypoint_info_option: Option<&ContactInfo>,
poh_verify: bool,
@@ -152,8 +160,9 @@ impl Validator {
let id = keypair.pubkey();
assert_eq!(id, node.info.id);
- warn!("identity pubkey: {:?}", id);
- warn!("vote pubkey: {:?}", vote_account);
+ warn!("identity: {}", id);
+ warn!("vote account: {}", vote_account);
+ warn!("authorized voter: {}", authorized_voter.pubkey());
report_target_features();
info!("entrypoint: {:?}", entrypoint_info_option);
@@ -230,7 +239,7 @@ impl Validator {
let blockstore = Arc::new(blockstore);
- let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
+ let subscriptions = Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone()));
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
@@ -253,6 +262,7 @@ impl Validator {
ledger_path,
storage_state.clone(),
validator_exit.clone(),
config.trusted_validators.clone(),
),
PubSubService::new(
&subscriptions,
@@ -263,7 +273,7 @@ impl Validator {
});
let (transaction_status_sender, transaction_status_service) =
- if rpc_service.is_some() && config.rpc_config.enable_get_confirmed_block {
+ if rpc_service.is_some() && config.rpc_config.enable_rpc_transaction_history {
let (transaction_status_sender, transaction_status_receiver) = unbounded();
(
Some(transaction_status_sender),
@@ -278,7 +288,7 @@ impl Validator {
};
let (rewards_recorder_sender, rewards_recorder_service) =
- if rpc_service.is_some() && config.rpc_config.enable_get_confirmed_block {
+ if rpc_service.is_some() && config.rpc_config.enable_rpc_transaction_history {
let (rewards_recorder_sender, rewards_receiver) = unbounded();
(
Some(rewards_recorder_sender),
@@ -307,7 +317,7 @@ impl Validator {
std::thread::park();
}
- let poh_config = Arc::new(genesis_config.poh_config);
+ let poh_config = Arc::new(genesis_config.poh_config.clone());
let (mut poh_recorder, entry_receiver) = PohRecorder::new_with_clear_signal(
bank.tick_height(),
bank.last_blockhash(),
@@ -357,34 +367,6 @@ impl Validator {
.set_entrypoint(entrypoint_info.clone());
}
if let Some(ref snapshot_hash) = snapshot_hash {
if let Some(ref trusted_validators) = config.trusted_validators {
let mut trusted = false;
for _ in 0..10 {
trusted = cluster_info
.read()
.unwrap()
.get_snapshot_hash(snapshot_hash.0)
.iter()
.any(|(pubkey, hash)| {
trusted_validators.contains(pubkey) && snapshot_hash.1 == *hash
});
if trusted {
break;
}
sleep(Duration::from_secs(1));
}
if !trusted {
error!(
"The snapshot hash for slot {} is not published by your trusted validators: {:?}",
snapshot_hash.0, trusted_validators
);
process::exit(1);
}
}
}
let (snapshot_packager_service, snapshot_package_sender) =
if config.snapshot_config.is_some() {
// Start a snapshot packaging service
@@ -398,12 +380,6 @@ impl Validator {
wait_for_supermajority(config, &bank, &cluster_info);
- let voting_keypair = if config.voting_disabled {
- None
- } else {
- Some(voting_keypair.clone())
- };
let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
assert_eq!(
blockstore.new_shreds_signals.len(),
@@ -413,7 +389,11 @@ impl Validator {
let tvu = Tvu::new(
vote_account,
- voting_keypair,
+ if config.voting_disabled {
+ None
+ } else {
+ Some(authorized_voter.clone())
+ },
storage_keypair,
&bank_forks,
&cluster_info,
@@ -445,7 +425,6 @@ impl Validator {
blockstore.clone(),
&storage_state,
config.blockstream_unix_socket.as_ref(),
config.max_ledger_slots,
ledger_signal_receiver,
&subscriptions,
&poh_recorder,
@@ -453,12 +432,20 @@ impl Validator {
&exit,
completed_slots_receiver,
block_commitment_cache,
config.dev_sigverify_disabled,
config.enable_partition.clone(),
node.info.shred_version,
transaction_status_sender.clone(),
rewards_recorder_sender,
snapshot_package_sender,
TvuConfig {
max_ledger_slots: config.max_ledger_slots,
sigverify_disabled: config.dev_sigverify_disabled,
halt_on_trusted_validators_accounts_hash_mismatch: config
.halt_on_trusted_validators_accounts_hash_mismatch,
shred_version: node.info.shred_version,
trusted_validators: config.trusted_validators.clone(),
accounts_hash_fault_injection_slots: config.accounts_hash_fault_injection_slots,
genesis_config,
},
);
if config.dev_sigverify_disabled {
@@ -606,13 +593,15 @@ fn new_banks_from_blockstore(
}
}
- let (blockstore, ledger_signal_receiver, completed_slots_receiver) =
+ let (mut blockstore, ledger_signal_receiver, completed_slots_receiver) =
Blockstore::open_with_signal(blockstore_path).expect("Failed to open ledger database");
blockstore.set_no_compaction(config.no_rocksdb_compaction);
let process_options = blockstore_processor::ProcessOptions {
poh_verify,
dev_halt_at_slot: config.dev_halt_at_slot,
new_hard_forks: config.new_hard_forks.clone(),
frozen_accounts: config.frozen_accounts.clone(),
..blockstore_processor::ProcessOptions::default()
};
@@ -655,14 +644,14 @@ fn wait_for_supermajority(
}
info!(
- "Waiting for more than 75% of activated stake at slot {} to be in gossip...",
+ "Waiting for 80% of activated stake at slot {} to be in gossip...",
bank.slot()
);
loop {
let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info);
info!("{}% of activated stake in gossip", gossip_stake_percent,);
- if gossip_stake_percent > 75 {
+ if gossip_stake_percent >= 80 {
break;
}
sleep(Duration::new(1, 0));
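
The supermajority gate now waits until at least 80% of activated stake is visible in gossip rather than strictly more than 75%. A self-contained sketch of that polling loop, with the stake measurement stubbed out as a closure:

use std::{thread::sleep, time::Duration};

/// Block until at least 80% of activated stake is visible in gossip.
/// The stake probe is a stub; the real code derives it from vote accounts
/// and the contact info currently present in gossip.
fn wait_for_supermajority(mut gossip_stake_percent: impl FnMut() -> u64) {
    loop {
        let percent = gossip_stake_percent();
        println!("{}% of activated stake in gossip", percent);
        if percent >= 80 {
            break;
        }
        sleep(Duration::from_secs(1));
    }
}

fn main() {
    // Simulated probe that crosses the threshold on the third poll.
    let mut samples = vec![60u64, 75, 81].into_iter();
    wait_for_supermajority(move || samples.next().unwrap_or(100));
}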


@@ -47,6 +47,7 @@ mod tests {
let bank0 = Bank::new_with_paths(
&genesis_config_info.genesis_config,
vec![accounts_dir.path().to_path_buf()],
&[],
);
bank0.freeze();
let mut bank_forks = BankForks::new(0, bank0);
@@ -82,6 +83,7 @@ mod tests {
let deserialized_bank = snapshot_utils::bank_from_archive(
&account_paths,
&[],
&old_bank_forks
.snapshot_config
.as_ref()


@@ -3,8 +3,8 @@ use solana_client::{
rpc_client::RpcClient,
};
use solana_core::{
- rpc_pubsub_service::PubSubService, rpc_subscriptions::RpcSubscriptions,
- validator::TestValidator,
+ commitment::BlockCommitmentCache, rpc_pubsub_service::PubSubService,
+ rpc_subscriptions::RpcSubscriptions, validator::TestValidator,
};
use solana_sdk::{
commitment_config::CommitmentConfig, pubkey::Pubkey, rpc_port, signature::Signer,
@@ -15,7 +15,7 @@ use std::{
net::{IpAddr, SocketAddr},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
Arc, RwLock,
},
thread::sleep,
time::{Duration, Instant},
@@ -58,7 +58,7 @@ fn test_rpc_client() {
let now = Instant::now();
while now.elapsed().as_secs() <= 20 {
let response = client
- .confirm_transaction_with_commitment(signature.as_str(), CommitmentConfig::default())
+ .confirm_transaction_with_commitment(&signature, CommitmentConfig::default())
.unwrap();
if response.value {
@@ -85,7 +85,10 @@ fn test_slot_subscription() {
rpc_port::DEFAULT_RPC_PUBSUB_PORT,
);
let exit = Arc::new(AtomicBool::new(false));
- let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
+ let subscriptions = Arc::new(RpcSubscriptions::new(
+ &exit,
+ Arc::new(RwLock::new(BlockCommitmentCache::default())),
+ ));
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
std::thread::sleep(Duration::from_millis(400));
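
As at the other call sites in this diff, RpcSubscriptions::new() now also takes a shared BlockCommitmentCache. The sharing itself is plain Arc<RwLock<T>> plumbing; a toy cache type stands in for the real one in this sketch:

use std::sync::{Arc, RwLock};

#[derive(Default, Debug)]
struct BlockCommitmentCache {
    highest_confirmed_slot: u64,
}

struct Subscriptions {
    cache: Arc<RwLock<BlockCommitmentCache>>,
}

impl Subscriptions {
    fn new(cache: Arc<RwLock<BlockCommitmentCache>>) -> Self {
        Self { cache }
    }

    fn highest_confirmed_slot(&self) -> u64 {
        self.cache.read().unwrap().highest_confirmed_slot
    }
}

fn main() {
    // Tests can hand in a default cache, exactly like the updated call sites above.
    let cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
    let subs = Subscriptions::new(cache.clone());
    // A writer elsewhere updates the shared value...
    cache.write().unwrap().highest_confirmed_slot = 42;
    // ...and every holder of the Arc observes it.
    assert_eq!(subs.highest_confirmed_slot(), 42);
}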


@@ -15,7 +15,7 @@ use std::time::Instant;
type Nodes = HashMap<Pubkey, (bool, HashSet<i32>, Receiver<(i32, bool)>)>;
fn num_threads() -> usize {
- sys_info::cpu_num().unwrap_or(10) as usize
+ num_cpus::get()
}
/// Search for a node with the given balance


@@ -7,9 +7,15 @@ use jsonrpc_core_client::transports::ws;
use log::*;
use reqwest::{self, header::CONTENT_TYPE};
use serde_json::{json, Value};
- use solana_client::rpc_client::get_rpc_request_str;
+ use solana_client::{
+ rpc_client::{get_rpc_request_str, RpcClient},
+ rpc_response::{Response, RpcSignatureResult},
+ };
use solana_core::{rpc_pubsub::gen_client::Client as PubsubClient, validator::TestValidator};
- use solana_sdk::{hash::Hash, pubkey::Pubkey, system_transaction, transaction};
+ use solana_sdk::{
+ commitment_config::CommitmentConfig, hash::Hash, pubkey::Pubkey, system_transaction,
+ transaction::Transaction,
+ };
use std::{
collections::HashSet,
fs::remove_dir_all,
@@ -17,8 +23,7 @@ use std::{
sync::mpsc::channel,
sync::{Arc, Mutex},
thread::sleep,
- time::Duration,
- time::SystemTime,
+ time::{Duration, Instant},
};
use tokio::runtime::Runtime;
@@ -204,35 +209,43 @@ fn test_rpc_subscriptions() {
// Create transaction signatures to subscribe to
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
- let mut signature_set: HashSet<String> = (0..1000)
- .map(|_| {
- let tx = system_transaction::transfer(&alice, &Pubkey::new_rand(), 1, genesis_hash);
- transactions_socket
- .send_to(&bincode::serialize(&tx).unwrap(), leader_data.tpu)
- .unwrap();
- tx.signatures[0].to_string()
- })
+ let transactions: Vec<Transaction> = (0..100)
+ .map(|_| system_transaction::transfer(&alice, &Pubkey::new_rand(), 1, genesis_hash))
.collect();
+ let mut signature_set: HashSet<String> = transactions
+ .iter()
+ .map(|tx| tx.signatures[0].to_string())
+ .collect();
// Create the pub sub runtime
let mut rt = Runtime::new().unwrap();
let rpc_pubsub_url = format!("ws://{}/", leader_data.rpc_pubsub);
- let (sender, receiver) = channel::<(String, transaction::Result<()>)>();
- let sender = Arc::new(Mutex::new(sender));
+ let (status_sender, status_receiver) = channel::<(String, Response<RpcSignatureResult>)>();
+ let status_sender = Arc::new(Mutex::new(status_sender));
+ let (sent_sender, sent_receiver) = channel::<()>();
+ let sent_sender = Arc::new(Mutex::new(sent_sender));
// Subscribe to all signatures
rt.spawn({
let connect = ws::try_connect::<PubsubClient>(&rpc_pubsub_url).unwrap();
let signature_set = signature_set.clone();
connect
.and_then(move |client| {
for sig in signature_set {
- let sender = sender.clone();
+ let status_sender = status_sender.clone();
+ let sent_sender = sent_sender.clone();
tokio::spawn(
client
.signature_subscribe(sig.clone(), None)
.and_then(move |sig_stream| {
sent_sender.lock().unwrap().send(()).unwrap();
sig_stream.for_each(move |result| {
- sender.lock().unwrap().send((sig.clone(), result)).unwrap();
+ status_sender
+ .lock()
+ .unwrap()
+ .send((sig.clone(), result))
+ .unwrap();
future::ok(())
})
})
@@ -246,18 +259,49 @@ fn test_rpc_subscriptions() {
.map_err(|_| ())
});
// Wait for signature subscriptions
let deadline = Instant::now() + Duration::from_secs(2);
(0..transactions.len()).for_each(|_| {
sent_receiver
.recv_timeout(deadline.saturating_duration_since(Instant::now()))
.unwrap();
});
let rpc_client = RpcClient::new_socket(leader_data.rpc);
let mut transaction_count = rpc_client
.get_transaction_count_with_commitment(CommitmentConfig::recent())
.unwrap();
// Send all transactions to tpu socket for processing
transactions.iter().for_each(|tx| {
transactions_socket
.send_to(&bincode::serialize(&tx).unwrap(), leader_data.tpu)
.unwrap();
});
let now = Instant::now();
let expected_transaction_count = transaction_count + transactions.len() as u64;
while transaction_count < expected_transaction_count && now.elapsed() < Duration::from_secs(5) {
transaction_count = rpc_client
.get_transaction_count_with_commitment(CommitmentConfig::recent())
.unwrap();
sleep(Duration::from_millis(200));
}
// Wait for all signature subscriptions
- let now = SystemTime::now();
- let timeout = Duration::from_secs(5);
+ let deadline = Instant::now() + Duration::from_secs(5);
while !signature_set.is_empty() {
- assert!(now.elapsed().unwrap() < timeout);
- match receiver.recv_timeout(Duration::from_secs(1)) {
+ let timeout = deadline.saturating_duration_since(Instant::now());
+ match status_receiver.recv_timeout(timeout) {
Ok((sig, result)) => {
- assert!(result.is_ok());
+ assert!(result.value.err.is_none());
assert!(signature_set.remove(&sig));
}
Err(_err) => {
- eprintln!("unexpected receive timeout");
+ eprintln!(
+ "recv_timeout, {}/{} signatures remaining",
+ signature_set.len(),
+ transactions.len()
+ );
assert!(false)
}
}
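
The rewritten test replaces a SystemTime stopwatch and a fixed one-second recv_timeout with a single Instant-based deadline whose remaining time is recomputed before every receive. A small, self-contained illustration of that pattern:

use std::{
    sync::mpsc::channel,
    thread,
    time::{Duration, Instant},
};

fn main() {
    let (sender, receiver) = channel();
    thread::spawn(move || {
        for i in 0..3u32 {
            thread::sleep(Duration::from_millis(50));
            sender.send(i).unwrap();
        }
    });

    // One fixed deadline for the whole batch: each receive waits only for the
    // time remaining, so the loop cannot overshoot the overall budget.
    let deadline = Instant::now() + Duration::from_secs(2);
    let mut received = Vec::new();
    while received.len() < 3 {
        let timeout = deadline.saturating_duration_since(Instant::now());
        match receiver.recv_timeout(timeout) {
            Ok(v) => received.push(v),
            Err(err) => panic!("timed out with {} of 3 received: {:?}", received.len(), err),
        }
    }
    assert_eq!(received, vec![0, 1, 2]);
}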


@@ -3,28 +3,35 @@
#[cfg(test)]
mod tests {
use log::*;
- use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
- use solana_core::storage_stage::{test_cluster_info, SLOTS_PER_TURN_TEST};
- use solana_core::storage_stage::{StorageStage, StorageState};
- use solana_ledger::bank_forks::BankForks;
- use solana_ledger::blockstore_processor;
- use solana_ledger::entry;
- use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger};
+ use solana_core::{
+ commitment::BlockCommitmentCache,
+ genesis_utils::{create_genesis_config, GenesisConfigInfo},
+ storage_stage::{test_cluster_info, StorageStage, StorageState, SLOTS_PER_TURN_TEST},
+ };
+ use solana_ledger::{
+ bank_forks::BankForks, blockstore::Blockstore, blockstore_processor, create_new_tmp_ledger,
+ entry,
+ };
use solana_runtime::bank::Bank;
- use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
- use solana_sdk::hash::Hash;
- use solana_sdk::message::Message;
- use solana_sdk::pubkey::Pubkey;
- use solana_sdk::signature::{Keypair, Signer};
- use solana_sdk::transaction::Transaction;
- use solana_storage_program::storage_instruction;
- use solana_storage_program::storage_instruction::StorageAccountType;
- use std::fs::remove_dir_all;
- use std::sync::atomic::{AtomicBool, Ordering};
- use std::sync::mpsc::channel;
- use std::sync::{Arc, RwLock};
- use std::thread::sleep;
- use std::time::Duration;
+ use solana_sdk::{
+ clock::DEFAULT_TICKS_PER_SLOT,
+ hash::Hash,
+ message::Message,
+ pubkey::Pubkey,
+ signature::{Keypair, Signer},
+ transaction::Transaction,
+ };
+ use solana_storage_program::storage_instruction::{self, StorageAccountType};
+ use std::{
+ fs::remove_dir_all,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ mpsc::channel,
+ Arc, RwLock,
+ },
+ thread::sleep,
+ time::Duration,
+ };
#[test]
fn test_storage_stage_process_account_proofs() {
@@ -52,6 +59,7 @@ mod tests {
&[bank.clone()],
vec![0],
)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let cluster_info = test_cluster_info(&keypair.pubkey());
let (bank_sender, bank_receiver) = channel();
@@ -69,6 +77,7 @@ mod tests {
&exit.clone(),
&bank_forks,
&cluster_info,
block_commitment_cache,
);
bank_sender.send(vec![bank.clone()]).unwrap();
@@ -120,7 +129,7 @@ mod tests {
None,
)
.unwrap();
- let message = Message::new_with_payer(vec![mining_proof_ix], Some(&mint_keypair.pubkey()));
+ let message = Message::new_with_payer(&[mining_proof_ix], Some(&mint_keypair.pubkey()));
let mining_proof_tx = Transaction::new(
&[&mint_keypair, archiver_keypair.as_ref()],
message,
@@ -171,6 +180,7 @@ mod tests {
&[bank.clone()],
vec![0],
)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let cluster_info = test_cluster_info(&keypair.pubkey());
let (bank_sender, bank_receiver) = channel();
@@ -188,6 +198,7 @@ mod tests {
&exit.clone(),
&bank_forks,
&cluster_info,
block_commitment_cache,
);
bank_sender.send(vec![bank.clone()]).unwrap();
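
Both storage-stage tests also pick up the Message::new_with_payer change from an owned vec! to a borrowed slice. The general shape of that API migration, with toy types standing in for Instruction and Message, is sketched below; the real solana_sdk types are of course richer.

#[derive(Clone, Debug)]
struct Instruction {
    program: &'static str,
}

#[derive(Debug)]
struct Message {
    instructions: Vec<Instruction>,
    payer: String,
}

impl Message {
    // Borrowing a slice lets callers pass `&[ix]` or `&existing_vec` without
    // giving up ownership, which is why call sites switch from vec![ix] to &[ix].
    fn new_with_payer(instructions: &[Instruction], payer: Option<&str>) -> Self {
        Self {
            instructions: instructions.to_vec(),
            payer: payer.unwrap_or("unknown").to_string(),
        }
    }
}

fn main() {
    let ix = Instruction { program: "storage" };
    let from_slice = Message::new_with_payer(&[ix.clone()], Some("payer"));
    let owned = vec![ix];
    let from_vec_ref = Message::new_with_payer(&owned, Some("payer"));
    println!("{} paid by {}", from_slice.instructions[0].program, from_slice.payer);
    assert_eq!(from_slice.instructions.len(), from_vec_ref.instructions.len());
}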


@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
- version = "1.0.4"
+ version = "1.0.19"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -26,3 +26,6 @@ syn_0_15 = { package = "syn", version = "0.15.42", features = ["extra-traits", "
syn_1_0 = { package = "syn", version = "1.0.3", features = ["extra-traits", "fold", "full"] }
tokio = { version = "0.1.22",features=["bytes", "codec", "default", "fs", "io", "mio", "num_cpus", "reactor", "rt-full", "sync", "tcp", "timer", "tokio-codec", "tokio-current-thread", "tokio-executor", "tokio-io", "tokio-io", "tokio-reactor", "tokio-tcp", "tokio-tcp", "tokio-threadpool", "tokio-timer", "tokio-udp", "tokio-uds", "udp", "uds"] }
winapi = { version = "0.3.8", features=["basetsd", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "libloaderapi", "memoryapi", "minwinbase", "minwindef", "ntdef", "ntsecapi", "ntstatus", "objbase", "processenv", "processthreadsapi", "profileapi", "shlobj", "std", "synchapi", "sysinfoapi", "timezoneapi", "utilapiset", "winbase", "wincon", "windef", "winerror", "winnls", "winnt", "winreg", "winsock2", "winuser", "ws2def", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -3,7 +3,9 @@ set -e
cd "$(dirname "$0")"
- usage=$(cargo -q run -p solana-cli -- -C ~/.foo --help | sed 's|'"$HOME"'|~|g')
+ : "${rust_stable:=}" # Pacify shellcheck
+ usage=$(cargo +"$rust_stable" -q run -p solana-cli -- -C ~/.foo --help | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')
out=${1:-src/cli/usage.md}
@@ -29,6 +31,6 @@ in_subcommands=0
while read -r subcommand rest; do
[[ $subcommand == "SUBCOMMANDS:" ]] && in_subcommands=1 && continue
if ((in_subcommands)); then
- section "$(cargo -q run -p solana-cli -- help "$subcommand" | sed 's|'"$HOME"'|~|g')" "####" >> "$out"
+ section "$(cargo +"$rust_stable" -q run -p solana-cli -- help "$subcommand" | sed 's|'"$HOME"'|~|g')" "####" >> "$out"
fi
done <<<"$usage">>"$out"


@@ -8,6 +8,7 @@ TARGET=html/index.html
TEST_STAMP=src/tests.ok
all: $(TARGET)
./set-solana-release-tag.sh
svg: $(SVG_IMGS)

docs/set-solana-release-tag.sh (new executable file, 20 lines)

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"
if [[ -n $CI_TAG ]]; then
LATEST_SOLANA_RELEASE_VERSION=$CI_TAG
else
LATEST_SOLANA_RELEASE_VERSION=$(\
curl -sSfL https://api.github.com/repos/solana-labs/solana/releases/latest \
| grep -m 1 tag_name \
| sed -ne 's/^ *"tag_name": "\([^"]*\)",$/\1/p' \
)
fi
set -x
find html/ -name \*.html -exec sed -i "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \;
if [[ -n $CI ]]; then
find src/ -name \*.md -exec sed -i "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \;
fi

Some files were not shown because too many files have changed in this diff.