Compare commits

...

176 Commits

Author SHA1 Message Date
mergify[bot]
65a1884c7b Enable stricter check on rent-exempt accounts on testnet (#12224) (#12226)
(cherry picked from commit 241e6f1ecf)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-15 08:07:23 +00:00
mergify[bot]
c1a9c826f3 Enable retirement of rent collection in Bank::deposit() on testnet (#12223) (#12227)
(cherry picked from commit 629572831b)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-15 07:53:36 +00:00
mergify[bot]
d9d8ec480a Enable eager-rent-collect-across-gapped-epochs bugfix (#12219) (#12222)
(cherry picked from commit 7d48339b7c)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-15 06:14:35 +00:00
mergify[bot]
b5c7ad3a9b Add new validator options for running in more restrictive environments (bp #12191) (#12218)
* Add --restricted-repair-only-mode flag

(cherry picked from commit 63a67f415e)

* Add --gossip-validator argument

(cherry picked from commit daae638781)

* Document how to reduce validator port exposure

(cherry picked from commit c8f03c7f6d)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-15 04:26:23 +00:00
mergify[bot]
771ff65fb4 Faucet: Improve error handling (#12215)
(cherry picked from commit af2262cbba)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-09-15 01:31:02 +00:00
mergify[bot]
a5bf59b92a patches default impl for crds filter (#12199) (#12200)
In CrdsFilter.mask all bits after mask_bits are set to 1:
https://github.com/solana-labs/solana/blob/555252f4/core/src/crds_gossip_pull.rs#L65
However, the default implementation sets both mask and mask_bits to zero,
which is inconsistent with CrdsFilter::compute_mask for a mask_bits of
zero.

This commit changes the default implementation by setting mask to
`!0u64` (i.e. all bits set to one). As a result, for the default crds
filter, `test_mask` always returns true, whereas previously it always
returned false.
https://github.com/solana-labs/solana/blob/555252f4/core/src/crds_gossip_pull.rs#L85

This is only used in tests and benchmarks, but it makes some benchmarks
misleading by short-circuiting at this line:
https://github.com/solana-labs/solana/blob/555252f4/core/src/crds_gossip_pull.rs#L429

(cherry picked from commit d6ec03f13c)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-13 14:10:42 +00:00
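
The entry above describes the Default change completely, so a minimal self-contained sketch may help. The struct and method names follow the commit message; the bodies are an approximation for illustration, not the actual code in `core/src/crds_gossip_pull.rs`.

```rust
// Sketch only: all bits of `mask` after the first `mask_bits` bits are treated as 1.
struct CrdsFilter {
    mask: u64,
    mask_bits: u32,
}

impl Default for CrdsFilter {
    fn default() -> Self {
        // Previously `mask: 0`, which disagreed with compute_mask for mask_bits == 0.
        Self { mask: !0u64, mask_bits: 0 }
    }
}

impl CrdsFilter {
    // Bits after the first `mask_bits` bits are forced to 1 before comparing, so with
    // mask_bits == 0 every hash collapses to !0u64 and matches the default mask.
    fn test_mask(&self, hash_as_u64: u64) -> bool {
        let ones = (!0u64).checked_shr(self.mask_bits).unwrap_or(!0u64);
        (hash_as_u64 | ones) == self.mask
    }
}

fn main() {
    // The default filter now matches everything (test_mask is always true) instead of nothing.
    assert!(CrdsFilter::default().test_mask(0x1234_5678_9abc_def0));
}
```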
mergify[bot]
8dc019ae98 Gate pointer alignment enforcement (bp #12176) (#12188)
* Gate pointer alignment enforcement (#12176)

(cherry picked from commit ae7b15f062)

# Conflicts:
#	programs/bpf/tests/programs.rs

* Fix conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-09-12 00:06:39 +00:00
mergify[bot]
61dcab8c07 Update commitment options (#12173) (#12189)
(cherry picked from commit 3c69cd6d61)

Co-authored-by: carllin <wumu727@gmail.com>
2020-09-11 19:24:16 +00:00
mergify[bot]
640bf7015f Check bank capitalization (bp #11927) (#12184)
* Check bank capitalization (#11927)

* Check bank capitalization

* Simplify and unify capitalization calculation

* Improve and add tests

* Avoid overflow and inhibit automatic restart

* Fix test

* Tweak checked sum for cap. and add tests

* Fix broken build after merge conflicts..

* Rename to ClusterType

* Rename confusing method

* Clarify comment

* Verify cap. in rent and inflation tests

Co-authored-by: Stephen Akridge <sakridge@gmail.com>
(cherry picked from commit de4a613610)

# Conflicts:
#	Cargo.lock
#	accounts-bench/Cargo.toml

* Fix conflict 1/2

* Fix conflict 2/2

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-11 18:04:10 +00:00
mergify[bot]
db1f57162a Fix propagation on startup from snapshot (#12177) (#12182)
(cherry picked from commit 9c490e06b0)

Co-authored-by: carllin <wumu727@gmail.com>
2020-09-11 10:04:55 +00:00
mergify[bot]
bbddffa805 solana-validator --rpc-bind-address argument now works as expected (bp #12168) (#12174)
* `solana-validator --rpc-bind-address` argument now works as expected

(cherry picked from commit 6f325d4594)

* Update bootstrap-validator.sh

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-10 22:02:50 +00:00
mergify[bot]
61cf432477 Update commitment options (#12171) (#12172)
(cherry picked from commit 361e5322e4)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-10 13:55:48 -06:00
mergify[bot]
9396618c12 Calc size ahead of time to alloc once (#12154) (#12169)
(cherry picked from commit fd47d38e59)

Co-authored-by: Jack May <jack@solana.com>
2020-09-10 19:17:20 +00:00
mergify[bot]
398f12dcc5 Program subscriptions now properly check results len and token program id (#12139) (#12141)
(cherry picked from commit 4431080066)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-09 22:23:11 +00:00
mergify[bot]
796624adf9 uses rust intrinsics to convert hashes to u64 (#12097) (#12133)
(cherry picked from commit 28f2fa3fd5)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-09 16:33:15 +00:00
mergify[bot]
f6e266eff0 Activate new bpf loader on devnet (#12124)
(cherry picked from commit f54941fa15)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-09 05:48:18 +00:00
mergify[bot]
ed237af4d8 Prevent unbounded memory growth by blockstore_processor (#12110) (#12122)
* Prevent unbounded memory growth by blockstore_processor

* Promote log to info! considering infrequency

* Exclude the time of freeing from interval...

* Skip not-shrinkable slots even if forced

* Add comment

(cherry picked from commit c274e26eb8)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-09 04:09:27 +00:00
Michael Vines
415c80c204 Bump version to v1.3.10 2020-09-09 01:29:39 +00:00
mergify[bot]
e45f1df5dd Reduce cap by rent's leftover as temporary measure (#12111) (#12118)
* Reduce cap by rent's leftover as temporary measure

* Reset testnet cap. on start and more logs

(cherry picked from commit 5b2442d54e)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-08 20:02:46 +00:00
mergify[bot]
d26a809f1f getMinimumBalanceForRentExemption now only responds to valid account lengths (#12116)
(cherry picked from commit 9e96180ce4)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-08 18:50:22 +00:00
mergify[bot]
d1fdc96969 Add support for deprecated loader (bp #11946) (#12114)
* Add support for deprecated loader (#11946)


(cherry picked from commit ae0fd3043a)

* fix version

Co-authored-by: Jack May <jack@solana.com>
2020-09-08 18:41:29 +00:00
mergify[bot]
a5cad340ed Forward transactions to the expected leader instead of your own TPU port (bp #12004) (#12108)
* Forward transactions to the expected leader instead of your own TPU port (#12004)

* Use PoHRecorder to send to the right leader

* cleanup

* fmt

* clippy

* Cleanup, fix bug

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit c67f8bd821)

# Conflicts:
#	banks-server/Cargo.toml

* Update Cargo.toml

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-08 16:51:33 +00:00
Ryo Onodera
2341394e8b Rename to ClusterType and restore devnet compat. (manual bp) (#12069)
* Rename to ClusterType and restore devnet compat.

* De-duplicate parse code and add comments

* Adjust default Devnet genesis & reduce it in tests
2020-09-08 23:54:54 +09:00
mergify[bot]
180224114a Fix RPC transaction method configs serialization (#12100) (#12102)
(cherry picked from commit 9940870c89)

Co-authored-by: Justin Starry <justin@solana.com>
2020-09-08 06:12:26 +00:00
Justin Starry
58312655b4 Fix signature subscription panic (#12077) (#12092) 2020-09-07 10:31:25 +00:00
mergify[bot]
f3f86f43ee Compress snapshot archive within the validator to reduce system dependencies, and default to zstd compression (bp #12085) (#12087)
* Compress snapshot archive within the validator to reduce system dependencies

(cherry picked from commit d3750b47d2)

* Default snapshot compression to zstd instead of bzip2 for quicker snapshot generation

(cherry picked from commit 9ade73841f)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-07 06:55:38 +00:00
mergify[bot]
aac38516da cli: add block and first-available-block commands (bp #12083) (#12084)
* Add first-available-block command

(cherry picked from commit 6677996369)

* Add block command

(cherry picked from commit 27752c4e4d)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-06 20:53:44 +00:00
mergify[bot]
cd139b8185 Fix bad predicate with malformed gossip votes (#12072) (#12078)
(cherry picked from commit eabc63cdcd)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-06 05:35:00 +00:00
mergify[bot]
6a74bc1297 validator: Add --enable-bigtable-ledger-upload flag (bp #12040) (#12057)
* Add --enable-bigtable-ledger-upload flag

(cherry picked from commit d8e2038dda)

* Relocate BigTable uploader to ledger/ crate

(cherry picked from commit 91a56caed2)

# Conflicts:
#	ledger/Cargo.toml

* Cargo.lock

(cherry picked from commit 2b8a521562)

* Add BigTableUploadService

(cherry picked from commit bc7731b969)

* Add BigTableUploadService

(cherry picked from commit bafdcf24f5)

* Add exit flag for bigtable upload operations

(cherry picked from commit d3611f74c8)

* Remove dead code

(cherry picked from commit cd3c134b58)

* Request correct access

(cherry picked from commit 4ba43c29ce)

* Add LARGEST_CONFIRMED_ROOT_UPLOAD_DELAY

(cherry picked from commit b64fb295a1)

* Resolve merge conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-05 17:09:32 +00:00
mergify[bot]
e9a2dd0bc1 Add --show-transactions flag to transaction-history command (#12071)
(cherry picked from commit 2332dd774f)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-05 16:52:38 +00:00
mergify[bot]
feed960ef3 Bigtable bug fixes (#12058) (#12060)
* Accommodate stricter get_bincode_cell in get_confirmed_signatures_for_address

* Sort signatures newest-oldest, even within slot

(cherry picked from commit 879c98efeb)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-05 03:31:35 +00:00
mergify[bot]
ae8acada8c Add unlock epochs for blake3 (#12054) (#12056)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit a13efc52b3)

Co-authored-by: carllin <wumu727@gmail.com>
2020-09-04 23:33:53 +00:00
mergify[bot]
357a341db5 Bump getMultipleAccounts input limit (#12050) (#12052)
(cherry picked from commit 954b017f85)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-04 18:58:45 +00:00
mergify[bot]
d0f715cb23 adds new CrdsFilterSet type for Vec<CrdsFilter> (#12029) (#12048)
(cherry picked from commit 114c211b66)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-04 14:10:12 +00:00
mergify[bot]
0fc0378347 fix solana-install syntax (#12043)
```
solana-install info
solana-install deploy
solana-install update
solana-install run
```

(cherry picked from commit 38f36a7a7a)

Co-authored-by: pk <4514654+pkrasam@users.noreply.github.com>
2020-09-04 05:40:47 +00:00
mergify[bot]
aa3bdd3730 Revert signature-notification format change (bp #12032) (#12038)
* Revert signature-notification format change (#12032)

* Use untagged RpcSignatureResult enum to avoid breaking downstream consumers of current signature subscriptions

* Clean up client duplication

* Clippy

(cherry picked from commit 39246f9dd7)

# Conflicts:
#	core/src/rpc_pubsub.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-09-04 01:34:49 +00:00
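
The first bullet names the serde pattern that restores compatibility: an untagged enum serializes each variant as its bare contents, so the original notification shape is preserved. Below is a hedged sketch of that pattern; the variant and field names are illustrative stand-ins, not the actual `RpcSignatureResult` definition.

```rust
use serde::{Deserialize, Serialize};
use serde_json::json;

#[derive(Serialize, Deserialize, Debug)]
struct ProcessedSignatureResult {
    err: Option<String>,
}

// Hypothetical payload for a newer notification kind, used only for illustration.
#[derive(Serialize, Deserialize, Debug)]
struct ReceivedSignatureResult {
    received: bool,
}

// With `untagged`, adding a variant does not wrap existing notifications in a tag,
// so downstream consumers of the current signature subscriptions keep working.
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
enum RpcSignatureResult {
    Processed(ProcessedSignatureResult),
    Received(ReceivedSignatureResult),
}

fn main() {
    let old_style = RpcSignatureResult::Processed(ProcessedSignatureResult { err: None });
    // Serializes as {"err":null}, not {"processed":{"err":null}}.
    assert_eq!(serde_json::to_value(&old_style).unwrap(), json!({ "err": null }));
}
```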
mergify[bot]
962aed0961 Mark a withdraw authority as non-circulating (#12033) (#12036)
(cherry picked from commit 2c091e4fca)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-09-04 00:46:10 +00:00
Josh
bd76810623 Bump version to 1.3.9 (#12034) 2020-09-03 16:55:24 -07:00
mergify[bot]
58fe45f3e5 Don't query modern Ledger wallet app version with deprecated payload size (#12031)
(cherry picked from commit dff8242887)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-09-03 21:55:55 +00:00
mergify[bot]
01a9360dbe builds crds filters without looping over filters (#11998) (#12028)
(cherry picked from commit bc7adb97ed)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-03 21:44:00 +00:00
mergify[bot]
f44d9d770a Rpc: add getMultipleAccounts endpoint (bp #12005) (#12024)
* Rpc: add getMultipleAccounts endpoint (#12005)

* Add rpc endpoint to return the state of multiple accounts from the same bank

* Add docs

* Review comments: Dedupe account code, default to base64, add max const

* Add get_multiple_accounts to rpc-client

(cherry picked from commit b22de369b7)

* Use new_response for consistency

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-09-03 19:52:46 +00:00
mergify[bot]
79d9d28e7d Update token amounts in parsed instructions to retain full precision (#12020) (#12022)
(cherry picked from commit b940da4040)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-03 19:22:24 +00:00
mergify[bot]
d273d1acb2 Fix forwarding calculation (#12014) (#12019)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 3f39ab1e04)

Co-authored-by: carllin <wumu727@gmail.com>
2020-09-03 10:36:56 +00:00
mergify[bot]
991f188f23 Fix test (#12013) (#12017)
(cherry picked from commit 36a294aae0)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-03 17:16:59 +09:00
mergify[bot]
8f02fdcc11 Purge storage rewards from accounts db for testnet (#11996) (#12011)
* Purge storage rewards from accounts db for testnet

* Fix test failing only on stable

(cherry picked from commit fb71ee60aa)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-03 08:01:37 +00:00
mergify[bot]
3e78850331 Move forward token2 native mint testnet epoch (#12007) (#12010)
(cherry picked from commit 4b1cb51a3e)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-03 07:48:55 +00:00
mergify[bot]
f29a741582 Clarify comments and names in inflation code (#11977) (#12008)
(cherry picked from commit 89bca6110a)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-03 06:47:13 +00:00
mergify[bot]
3e6052faca Bigtable method to return a single row of data (#11999) (#12002)
(cherry picked from commit b041afe1be)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-02 21:01:58 +00:00
mergify[bot]
0b0710d522 Use conventional special self notation (#11990) (#11997)
(cherry picked from commit 46aac4819a)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-02 11:59:16 +00:00
mergify[bot]
2f3fced8a8 Switch account hashing to blake3 (#11969) (#11992)
* Switch account hashing to blake3

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit af08221aec)

Co-authored-by: carllin <wumu727@gmail.com>
2020-09-02 08:40:07 +00:00
mergify[bot]
a5832366a7 Detect and notify when deserializable shreds are available (bp #11816) (#11988)
* Detect and notify when deserializable shreds are available (#11816)

* Add logic to check for complete data ranges

* Add RPC signature notification

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 1c1a3f979d)

# Conflicts:
#	accounts-bench/Cargo.toml
#	core/src/rpc_pubsub.rs

* Fix conflicts

Co-authored-by: carllin <wumu727@gmail.com>
Co-authored-by: Carl <carl@solana.com>
2020-09-02 06:38:10 +00:00
mergify[bot]
f26ff6d6b2 Docs.rs version replacement (#11981) (#11982)
(cherry picked from commit b720921c83)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-01 13:52:57 -06:00
mergify[bot]
57f149e415 Ensure that the spl-token 2 native mint account is owned by the spl-token 2 program. (#11974)
Workaround for https://github.com/solana-labs/solana-program-library/issues/374 until spl-token 3 is shipped

(cherry picked from commit 7341e60043)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-01 18:16:22 +00:00
mergify[bot]
8ece3847f9 Add tests for the Debug and activation Vecs (#11926) (#11968)
* Add tests for the Debug and activation Vecs

* Rename a bit

(cherry picked from commit 11ac4eb21d)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-09-01 09:54:27 +00:00
mergify[bot]
0d23ad00b1 Update to rayon 1.4.0 (#11898) (#11902)
(cherry picked from commit c4253dc0f9)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-09-01 04:53:39 +00:00
mergify[bot]
d9684f99c3 Remove log (#11949) (#11961)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 7641b60a2b)

Co-authored-by: carllin <wumu727@gmail.com>
2020-09-01 01:05:20 +00:00
mergify[bot]
6ea9c249d7 Add missing backslash to solana-validator command (#11958)
(cherry picked from commit a19f696a42)

Co-authored-by: Richard Ayotte <rich.ayotte@gmail.com>
2020-08-31 23:43:22 +00:00
mergify[bot]
1e02069f86 Increase message_processor logging to error level (#11945) (#11948)
(cherry picked from commit 9b9d559312)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-08-31 21:26:35 +00:00
mergify[bot]
b179ed0b90 Remove encrypted secrets not required by the main public CI (bp #11942) (#11944)
* Remove unused GEOLOCATION_API_KEY

(cherry picked from commit f78594dfc1)

* Remove secrets not required by the main public CI

(cherry picked from commit 278f2fe078)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-31 18:59:52 +00:00
mergify[bot]
eb65ff750e Simplify get_programs(), specify a real Preview activation epoch for new BPFLoader (#11930)
(cherry picked from commit f385af25e5)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-31 17:21:07 +00:00
Michael Vines
663dc9959f Bump version to v1.3.8 2020-08-31 10:18:42 -07:00
mergify[bot]
53ed6a2298 Fix use-deprecated-loader arg (#11921) (#11929)
(cherry picked from commit 6234909373)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-31 09:53:34 +00:00
mergify[bot]
47962f3e80 Use DNS for devnet/testnet entrypoints (#11922)
(cherry picked from commit e4d7e1fe3f)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-31 07:53:02 +00:00
Michael Vines
d3a16e4e13 Bump RPC banks up 1 port to avoid web3.js wss port conflict
(cherry picked from commit f8bb93a0f4)
2020-08-30 23:56:19 -07:00
Michael Vines
250f5a8196 Add new trusted validator for testnet 2020-08-30 22:51:42 -07:00
Michael Vines
83a17acc17 Add methods to adjust rent burn percentage, and hashes per tick 2020-08-30 16:50:43 -07:00
Michael Vines
ed34b930e5 modify-genesis now writes elsewhere and produces a full genesis.tar.bz2 2020-08-30 16:47:27 -07:00
Michael Vines
95816f7db9 Add ability to fork a local cluster from the latest mainnet-beta snapshot 2020-08-30 16:47:18 -07:00
mergify[bot]
e652c27142 Add bank-hash subcommand (#11915)
(cherry picked from commit a07980536a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-30 19:05:04 +00:00
mergify[bot]
0811cb2966 Fix get_parsed_token_accounts (#11907) (#11909)
(cherry picked from commit 60c7ac6f95)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-29 19:53:05 +00:00
Michael Vines
311b419f8a Bump version to v1.3.7 2020-08-29 11:42:14 -07:00
Michael Vines
acb992c3be Update to spl-token 2 2020-08-29 09:23:20 -07:00
Tyera Eulberg
61aca235a3 Bump spl-token version 2020-08-29 03:24:36 -06:00
mergify[bot]
bad0709ff1 Update to token pack/unpack changes (bp #11900) (#11903)
* Update to token pack/unpack changes (#11900)

(cherry picked from commit 2eff9a19c3)

# Conflicts:
#	account-decoder/Cargo.toml
#	core/Cargo.toml

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-08-29 05:46:07 +00:00
mergify[bot]
2a649e990d Update spl-token to v2.0 (bp #11884) (#11897)
* Update spl-token to v2.0 (#11884)

* Update account-decoder to spl-token v2.0

* Update transaction-status to spl-token v2.0

* Update rpc to spl-token v2.0

* Update getTokenSupply to pull from Mint directly

* Fixup to spl-token v2.0.1

(cherry picked from commit 76be36c9ce)

# Conflicts:
#	Cargo.lock
#	account-decoder/Cargo.toml
#	core/Cargo.toml

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-08-28 23:36:53 +00:00
mergify[bot]
1a25889f72 Take v0.19.3 of perf libs which improves sigverify perf 2x (#11894) (#11895)
(cherry picked from commit 9393dce1ff)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-08-28 20:37:26 +00:00
Trent Nelson
e15dca6961 Update devnet cluster docs since reboot 2020-08-28 10:36:11 -07:00
mergify[bot]
bb12f48014 Add SolFlare as stake-supporting wallet (#11891) (#11892)
Co-authored-by: publish-docs.sh <maintainers@solana.com>
(cherry picked from commit 8ba3a33129)

Co-authored-by: Dan Albert <dan@solana.com>
2020-08-28 16:17:24 +00:00
Jon Cinque
c6416fca6e Bump version to 1.3.6 (#11890) 2020-08-28 16:58:59 +02:00
Jack May
2f5d60bef7 Update epoch gating (#11880) 2020-08-27 21:17:45 -07:00
mergify[bot]
ef1a9df507 Small cleaning around consensus/bank_forks (#11873) (#11881)
(cherry picked from commit d8c529a9b8)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-28 01:06:21 +00:00
mergify[bot]
d2611f54a0 Make ledger-tool accounts print rent_epoch and slot (#11845) (#11870)
(cherry picked from commit 57174cdabe)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-27 04:49:24 +00:00
mergify[bot]
6117f8d64e Add SolFlare guide to docs (#11843) (#11868)
(cherry picked from commit 36e8441149)

Co-authored-by: Dan Albert <dan@solana.com>
2020-08-26 23:13:43 +00:00
mergify[bot]
67d9faaefc Bump compute budget (#11864) (#11867)
* Bump compute budget

* nudge

(cherry picked from commit ea179ad762)

Co-authored-by: Jack May <jack@solana.com>
2020-08-26 23:12:01 +00:00
mergify[bot]
aaa551ca7c Merge pull request #11857 from mvines/cache (#11863)
ci: cargo-target-cache is now channel specific
(cherry picked from commit 5c7080c1f4)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-26 19:50:06 +00:00
mergify[bot]
85df8cb4c5 Accounts hash calculation metrics (#11433) (#11860)
(cherry picked from commit 770d3d383c)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-08-26 19:31:52 +00:00
mergify[bot]
65f189d932 Rpc: Filter accounts with invalid mints from get_parsed_token_accounts (#11844) (#11859)
* Filter out accounts with invalid mints from get_parsed_token_accounts

* Explicit docs

(cherry picked from commit 1988ee9cd6)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-26 19:02:48 +00:00
mergify[bot]
99001f7f2e Bump MacOS nofile recommendation message (#11835)
(cherry picked from commit 8841c3398c)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-08-25 19:57:08 +00:00
mergify[bot]
39eeb0142e Add SystemInstruction::CreateAccount support to CPI (bp #11649) (#11831)
* Add SystemInstruction::CreateAccount support to CPI (#11649)

(cherry picked from commit e9b610b8df)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs
#	runtime/src/bank.rs
#	sdk/src/instruction.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 19:52:27 +00:00
mergify[bot]
d5d1a344c3 Switch programs activation to whole-set based gating (bp #11750) (#11837)
* Switch programs activation to whole-set based gating (#11750)

* Implement Debug for MessageProcessor

* Switch from delta-based gating to whole-set gating

* Remove dbg!

* Fix clippy

* Clippy

* Add test

* add loader to stable operating mode at proper epoch

* refresh_programs_and_inflation after ancestor setup

* Callback via snapshot; avoid account re-add; Debug

* Fix test

* Fix test and fix the past history

* Make callback management stricter and cleaner

* Fix test

* Test overwrite and frozen for native programs

* Test epoch callback with genesis-programs

* Add assertions for parent bank

* Add tests and some minor cleaning

* Remove unsteady assertion...

* Fix test...

* Fix DOS

* Skip ensuring account by dual (whole/delta) gating

* Fix frozen abi implementation...

* Move compute budget constant init back into bank

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
(cherry picked from commit db4bbb3569)

# Conflicts:
#	genesis-programs/src/lib.rs

* Fix conflicts

Co-authored-by: Jack May <jack@solana.com>
Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-25 19:00:41 +00:00
mergify[bot]
62e3c084d3 Update system tuning and docs (bp #11680) (#11830)
* Sync FD limit and max maps to 500k

(cherry picked from commit 11951eb009)

* Expand system tuning docs

(cherry picked from commit 5354df8c1c)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-08-25 17:13:27 +00:00
mergify[bot]
3027ceb53a Document how to validate account pubkey (#11821) (#11833)
(cherry picked from commit 2c5366f259)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-25 17:11:09 +00:00
mergify[bot]
4e604e1211 Add (hidden) --use-deprecated-loader flag to solana deploy (#11828)
(cherry picked from commit de736e00ad)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-25 16:56:29 +00:00
mergify[bot]
6749bfd1d2 Gate aligned program heap (bp #11808) (#11814)
* Gate aligned program heap (#11808)


(cherry picked from commit c2e5dae7ba)

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 15:57:15 +00:00
mergify[bot]
d05b39c4f6 Specify loader when bootstrapping bpf programs (#11571) (#11822)
(cherry picked from commit 0a94e7e7fa)

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 07:56:11 -07:00
mergify[bot]
c8e1fbd568 Update comment (#11826) (#11827)
(cherry picked from commit dbd079f54c)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-25 14:48:00 +00:00
mergify[bot]
ad36ddedf1 CPI support for bpf_loader_deprecated (bp #11695) (#11824)
* CPI support for bpf_loader_deprecated (#11695)

(cherry picked from commit 46830124f8)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 11:02:06 +00:00
mergify[bot]
08bece7651 More efficient padding (#11656) (#11823)
(cherry picked from commit f1ba2387d3)

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 10:10:59 +00:00
mergify[bot]
f162c6d1d0 Align host addresses (bp #11384) (#11817)
* Align host addresses (#11384)

* Align host addresses

* support new program abi

* update epoch rollout

* Enforce aligned pointers in cross-program invocations

(cherry picked from commit 9290e561e1)

# Conflicts:
#	core/src/validator.rs
#	genesis-programs/src/lib.rs
#	programs/bpf_loader/src/deprecated.rs
#	programs/bpf_loader/src/lib.rs
#	sdk/src/entrypoint_native.rs
#	sdk/src/lib.rs

* resolve conflicts

* nudge

Co-authored-by: Jack May <jack@solana.com>
2020-08-25 07:23:20 +00:00
mergify[bot]
f3904b5765 sdk: Make PubKey::create_program_address available in program unit tests (bp #11745) (#11810)
* sdk: Make PubKey::create_program_address available in program unit tests (#11745)

* sdk: Make PubKey::create_program_address available in program unit tests

This finishes the work started in #11604 to have
`create_program_address` available when `target_arch` is not `bpf` and
`program` is enabled.  Otherwise, there is an undefined reference error
to `sol_create_program_address`, which is only defined in `bpf`.

A small test to simply call the function has been added in order to catch
the problem in the future.

The default dependency on `solana-sdk/default` doesn't cause a problem with
existing programs since `build.sh` always specifies
`--no-default-features`, and programs in `solana-program-library` all
use it too.

* Add `default-features = false` for inter-program dependencies

Fix the build error found during CI.  The `--no-default-features` flag
only applies to the top-level package, not to dependencies.  A program that
depends on another program, e.g. `128bit` which depends on `128bit_dep`,
must specify `default-features = false` when including that package;
otherwise the `bpf` build will try to pull in default features, which
include `std`.

(cherry picked from commit 9a366281d3)

# Conflicts:
#	programs/bpf/rust/128bit/Cargo.toml
#	programs/bpf/rust/invoke/Cargo.toml
#	programs/bpf/rust/many_args/Cargo.toml
#	programs/bpf/rust/param_passing/Cargo.toml

* resolve conflicts

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
Co-authored-by: Jack May <jack@solana.com>
2020-08-24 20:41:54 +00:00
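
The commit message above mentions that a small test simply calling the function was added to catch future regressions. Here is a sketch of that kind of test, assuming `solana-sdk` with its default features on a host (non-BPF) target; the seeds and program id are arbitrary placeholders, not the test the commit actually added.

```rust
use solana_sdk::pubkey::Pubkey;

#[test]
fn create_program_address_is_callable_off_chain() {
    let program_id = Pubkey::default(); // placeholder program id
    let seeds: &[&[u8]] = &[b"seed", &[1u8]]; // arbitrary seeds
    // The call may legitimately return an error for seeds that land on the curve; the
    // point is only that the symbol resolves and the function runs off-chain.
    let _ = Pubkey::create_program_address(seeds, &program_id);
}
```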
mergify[bot]
67bf7515a7 Aligned program heap (#11657) (#11812)
(cherry picked from commit f8606fca4f)

Co-authored-by: Jack May <jack@solana.com>
2020-08-24 20:12:58 +00:00
mergify[bot]
0dcbc6d4d1 The constraints on compute power a program can consume are limited only to its instruction count (bp #11717) (#11800)
* The constraints on compute power a program can consume are limited only to its instruction count (#11717)

(cherry picked from commit 8d362f682b)

# Conflicts:
#	programs/bpf/Cargo.toml
#	programs/bpf_loader/Cargo.toml
#	programs/bpf_loader/src/lib.rs
#	programs/bpf_loader/src/syscalls.rs
#	runtime/src/bank.rs
#	sdk/src/instruction.rs

* Resolve conflicts

* nudge

Co-authored-by: Jack May <jack@solana.com>
2020-08-24 17:27:40 +00:00
mergify[bot]
79a2ccabb4 Return an error from create_program_address syscall (bp #11658) (#11789)
* Return an error from create_program_address syscall (#11658)

(cherry picked from commit 750e5344f1)

# Conflicts:
#	programs/bpf_loader/src/syscalls.rs

* resolve conflicts

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-08-24 04:30:29 +00:00
mergify[bot]
f5e583ef0d solana-gossip spy can now be given an identity keypair (--identity argument) (#11797)
(cherry picked from commit a1e2357d12)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-23 01:14:42 +00:00
mergify[bot]
132550cd7a RPC: Allow the sendTransaction preflight commitment level to be configured (bp #11792) (#11794)
* Allow the sendTransaction preflight commitment level to be configured

(cherry picked from commit b660704faa)

* rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-22 16:41:25 +00:00
Michael Vines
08a789323f Fix typo 2020-08-22 09:22:41 -07:00
mergify[bot]
2d9781b101 Add option for repairing only from trusted validators (#11752) (#11773)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit c8d67aa8eb)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-21 20:40:37 -07:00
mergify[bot]
6540d3c63e Make BPF Loader static (bp #11516) (#11790)
* Make BPF Loader static (#11516)

(cherry picked from commit 7c736f71fe)

# Conflicts:
#	Cargo.lock
#	core/Cargo.toml
#	core/src/lib.rs
#	core/src/validator.rs
#	genesis-programs/src/lib.rs
#	programs/bpf_loader/src/deprecated.rs
#	programs/bpf_loader/src/lib.rs
#	sdk/src/entrypoint_native.rs
#	sdk/src/lib.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-08-22 01:54:50 +00:00
mergify[bot]
a227b813d8 Feature check CPI up front (#11652) (#11787)
(cherry picked from commit 4196686acf)

Co-authored-by: Jack May <jack@solana.com>
2020-08-22 01:27:33 +00:00
mergify[bot]
2b4e0abb43 fix region checks (#11651) (#11785)
(cherry picked from commit 768b386f0a)

Co-authored-by: Jack May <jack@solana.com>
2020-08-22 01:14:13 +00:00
mergify[bot]
cdf6ff7907 Fix filter_crds_values output alignment with the inputs (#11734) (#11781)
(cherry picked from commit 418b483af6)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-08-21 20:56:26 +00:00
mergify[bot]
6775e01747 Add StakeInstruction::AuthorizeWithSeed (#11700) (#11779)
* Add StakeInstruction::AuthorizeWithSeed

* chore: add authorize-with-seed to web.js

* fix: add address_owner

* Add SystemInstruction::TransferWithSeed

* Update ABI hash

* chore: better variable names

* Add AuthorizeWithSeedArgs

* Reorder and rename arguments for clarity

(cherry picked from commit f02a78d8ff)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-08-21 19:39:36 +00:00
mergify[bot]
4be9d5030d Squash supermajority root on blockstore replay at startup (#11727) (#11765)
(cherry picked from commit f7adb68599)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-21 08:20:09 +00:00
carllin
a9f914da8e Cleanup test utilities (#11766)
* Add voting utility

* Add blockstore utility

Co-authored-by: Carl <carl@solana.com>
2020-08-21 06:52:01 +00:00
mergify[bot]
096375584b Rpc: Return error if block does not exist (#11743) (#11749)
* Return error if block does not exist

* Update docs

(cherry picked from commit 747f8d5877)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-20 23:26:55 +00:00
mergify[bot]
d3ab1ec9cf Do not delete any ledger when --limit-ledger-size is not provided (#11741)
(cherry picked from commit ea88bbdc33)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-20 19:27:48 +00:00
mergify[bot]
e41d9c87c5 Bump spl-token to clean up magic number (bp #11726) (#11738)
* Bump spl-token to clean up magic number (#11726)

(cherry picked from commit 2fd2aceeb2)

# Conflicts:
#	account-decoder/Cargo.toml
#	core/Cargo.toml

* Fix conflicts and toml order

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-08-20 18:02:28 +00:00
Greg Fitzgerald
e938925b95 Add BanksClient (#11721)
Cherry-picked from #10728, but without the changes to solana-tokens
2020-08-19 22:24:24 -06:00
mergify[bot]
6c03e6c4b5 Allow votes to timestamp subsequent slots with the same timestamp (#11715) (#11720)
(cherry picked from commit b1bc901a66)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-20 00:40:04 +00:00
Trent Nelson
abce60efdf Bump version to 1.3.5 2020-08-19 20:19:34 +00:00
mergify[bot]
b36510a565 Skip grace blocks if previous leader was on different fork (#11679) (#11709)
* Start leader blocks if previous leader was on different fork

* Fix test

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 5f8d34feb3)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-19 09:29:08 +00:00
mergify[bot]
a8220ae653 The end_slot argument to purge is now optional (#11707)
(cherry picked from commit d1500ae229)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-19 04:41:54 +00:00
mergify[bot]
2c642d4639 Filter push/pulls from spies (#11620) (#11703)
* Filter push/pulls from spies

* Don't pull from peers with shred version == 0, don't push to people with shred_version == 0

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 0f0a2ddafe)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-19 03:20:43 +00:00
mergify[bot]
04eb35e679 Remove old signatureSubscribe info (#11704) (#11706)
(cherry picked from commit 35828e8fe7)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-19 02:44:29 +00:00
mergify[bot]
8738241567 Get index (#11694) (#11697)
(cherry picked from commit 55ce2ebd53)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-18 18:54:17 +00:00
Justin Starry
0ef9185c9e Fully enable cross program support in mainnet-beta
(cherry picked from commit 9e89a963d9)
2020-08-18 06:39:24 -07:00
mergify[bot]
e41004f185 rpc: rework binary encoding. BREAKING CHANGE (bp #11646) (#11674)
* Add base64 (binary64) encoding for getConfirmedTransaction/getConfirmedBlock

(cherry picked from commit b5f3ced860)

* decode-transaction now supports binary64

(cherry picked from commit 2ebc68a9e2)

# Conflicts:
#	cli/src/cli.rs

* Rework UiAccountData encode/decode such that it works from Rust

(cherry picked from commit 757e147b3b)

# Conflicts:
#	account-decoder/src/lib.rs
#	cli/src/cli.rs

* Rename Binary64 to Base64.  Establish Base58 encoding

(cherry picked from commit adc984a225)

# Conflicts:
#	account-decoder/src/lib.rs

* Remove "binary" encoding. Document "encoding" as required

(cherry picked from commit e5281157fa)

* resolve conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-18 02:53:10 +00:00
mergify[bot]
41cad9ccd5 Faucet: Add per-request cap (#11665) (#11669)
* Add per-request cap; also use clap-utils

* Clean up arg names and take cap inputs as SOL

(cherry picked from commit 71d5409b3b)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-18 01:00:11 +00:00
mergify[bot]
d9ae092637 Re-do rent collection check on rent-exempt account (bp #11349) (#11655)
* Re-do rent collection check on rent-exempt account (#11349)

* wip: re-do rent collection check on rent-exempt account

* Let's see how the ci goes

* Restore previous code

* Well, almost all new changes are revertable

* Update doc

* Add test and gating

* Fix tests

* Fix tests, especially avoid to change abi...

* Fix more tests...

* Fix snapshot restore

* Align to _new_ with better uninitialized detection

(cherry picked from commit 23fa84b322)

# Conflicts:
#	core/src/rpc_subscriptions.rs

* Fix conflicts

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-17 07:20:24 +00:00
mergify[bot]
9015e47cc5 Rpc: Add until parameter for getConfirmedSignaturesForAddress2 (#11644) (#11648)
* Refactor bigtable apis to accept start and end keys

* Make helper fn to deserialize cell data

* Refactor get_confirmed_signatures_for_address to use get_row_data range

* Add until param to get_confirmed_signatures_for_address

* Add until param to blockstore api

* Plumb until through client/cli

* Simplify client params

(cherry picked from commit 6c5b8f324a)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-15 17:59:35 +00:00
mergify[bot]
15b92e9c8d Bigtable: Use index to filter address-signatures correctly (#11622) (#11643)
* Use index to filter address-signatures correctly

* Pull additional keys to account for filtered records

* Clarify variable name

(cherry picked from commit 820af533a4)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-14 21:02:13 +00:00
Trent Nelson
c72bd900cd Bump version to 1.3.4 2020-08-14 18:29:46 +00:00
mergify[bot]
ecb75ccdcf short_vec::decode_len() returns wrong size for aliased values (bp #11624) (#11631)
* Add failing test for decoding ShortU16 alias values

(cherry picked from commit 338f66f9aa)

* Factor out ShortU16 deser visitor logic to helper

(cherry picked from commit 6222fbcc66)

* Reimplement decode_len() with ShortU16 visitor helper

(cherry picked from commit 30dbe257cf)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-08-14 15:33:00 +00:00
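
For context (not stated in the entry itself), ShortU16 is the compact length prefix used by `short_vec`: a little-endian base-128 varint of a `u16`, at most three bytes, where the high bit of each byte marks continuation. An "aliased" value is a non-canonical byte sequence that decodes to the same number. The sketch below illustrates the format only, not the solana-sdk visitor implementation, and shows why the consumed size matters for aliases.

```rust
use std::convert::TryFrom;

// Minimal decoder for the format described above; returns (value, bytes_consumed).
fn decode_short_u16(bytes: &[u8]) -> Option<(u16, usize)> {
    let mut value: u32 = 0;
    for (i, &byte) in bytes.iter().enumerate().take(3) {
        value |= u32::from(byte & 0x7f) << (7 * i);
        if byte & 0x80 == 0 {
            return u16::try_from(value).ok().map(|v| (v, i + 1));
        }
    }
    None
}

fn main() {
    assert_eq!(decode_short_u16(&[0x00]), Some((0, 1)));       // canonical encoding of 0
    assert_eq!(decode_short_u16(&[0x80, 0x00]), Some((0, 2))); // aliased: same value, extra byte
    // A decoder must report the size it actually consumed for such aliases (or reject
    // them outright), otherwise callers misparse whatever bytes follow the length prefix.
}
```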
mergify[bot]
cf22e7a273 Restore CLI usage page (#11619) (#11621)
Co-authored-by: publish-docs.sh <maintainers@solana.com>
(cherry picked from commit 6194a29875)

Co-authored-by: Dan Albert <dan@solana.com>
2020-08-13 22:17:47 +00:00
mergify[bot]
4f7bfbdbe9 RPC: getConfirmedSignaturesForAddress2 only returns confirmed signatures (#11615) (#11618)
* Add failing test case

* Limit to only rooted slots

(cherry picked from commit 99fb36fe45)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-13 18:19:54 +00:00
mergify[bot]
011e325359 Ensure highest_confirmed_root only grows (#11596) (#11608)
* Split out commitment-cache update for unit testing

* Add failing test

* Ensure highest_confirmed_root only grows

(cherry picked from commit 4da1e9833c)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-13 08:13:00 +00:00
mergify[bot]
ba05852475 generate_pull_response optimization (#11597) (#11606)
(cherry picked from commit f519fdecc2)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-08-13 06:53:45 +00:00
mergify[bot]
8ee656edde Return blockstore signatures-for-address despite bigtable error (#11594) (#11599)
(cherry picked from commit b1e452f876)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-13 01:12:16 +00:00
mergify[bot]
311d9a56c4 Add incoming pull response counter (#11591) (#11592)
(cherry picked from commit 54137e3446)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-08-12 22:17:50 +00:00
mergify[bot]
b9f46fd904 Fix assertion failure (#11572) (#11590)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 473b5249e3)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-12 20:19:54 +00:00
mergify[bot]
39f2d346b8 Fix typo: epoch => slot... (#11573) (#11580)
(cherry picked from commit 51e818ad64)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-12 09:27:01 +00:00
Trent Nelson
4a27bfa2fe Bump version to 1.3.3 2020-08-12 00:43:50 +00:00
mergify[bot]
69e53ec92a Gossip log (#11555) (#11562)
Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 1b238dd63e)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-11 22:55:18 +00:00
mergify[bot]
b8ac76066c Move cluster slots update to separate thread (#11523) (#11558)
* Add cluster_slots_service

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 7ef50a9352)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-11 21:24:34 +00:00
mergify[bot]
1a1d7744bd Add getTokenLargestAccounts to docs (#11560) (#11564)
(cherry picked from commit 697a0e2947)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-11 21:21:35 +00:00
mergify[bot]
61af485732 filter out old gossip pull requests (#11448) (#11553)
* init

* builds

* stats

* revert

* tests

* clippy

* add some jitter

* shorter jitter timer

* update

* fixup! update

* use saturating_sub

* fix filters

(cherry picked from commit 713851b68d)

Co-authored-by: anatoly yakovenko <anatoly@solana.com>
2020-08-11 19:42:32 +00:00
mergify[bot]
6a60d7bf8e Adapt RpcClient to recent token method changes (#11519) (#11548)
* Avoid skip_serializing_if since that breaks deserialization

* Adapt RpcClient to recent token method changes

(cherry picked from commit 17645ee20c)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-11 17:31:57 +00:00
mergify[bot]
53e917b54f Fix bad rent in Bank::deposit as if since epoch 0 (#10468) (#11539)
* Fix bad rent in Bank::deposit as if since epoch 0

* Remove redundant predicate

* Rename

* Start to add tests with some cleanup

* Forgot to add refactor code...

* Enhance test

* Really fix rent timing in deposit with robust test

* Simplify new behavior by disabling rent altogether

(cherry picked from commit 6c242f3fec)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-08-11 16:20:06 +00:00
mergify[bot]
4d49c99ac9 Fix simulateTransaction JSON-RPC docs (#11533) (#11535)
(cherry picked from commit f12fc66a69)

Co-authored-by: Justin Starry <justin@solana.com>
2020-08-11 10:26:20 +00:00
mergify[bot]
83597a5ce1 Fix solana CLI deploy (#11520) (#11530)
* Refresh blockhash for program writes and finalize transactions

* Refactor to use current api, eliminating an rpc call

* Review comment

(cherry picked from commit c0d6761f63)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-11 09:52:11 +00:00
mergify[bot]
1589a41178 Add config param to specify offset/length for single and program account info (bp #11515) (#11518)
* Add config param to specify offset/length for single and program account info (#11515)

* Add config param to specify dataSlice for account info and program accounts

* Use match instead of if

(cherry picked from commit 88ca04dbdb)

# Conflicts:
#	cli/src/cli.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-08-11 00:03:18 +00:00
mergify[bot]
23a381b995 Fix parsing of spl-token Mint (#11512) (#11514)
* Add failing test

* Fix jsonParsed mint

(cherry picked from commit da210ddd51)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-10 21:51:20 +00:00
mergify[bot]
eb7ac42b2e Return account data size with parsed accounts (#11506) (#11511)
(cherry picked from commit 1925b0bd0b)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-10 20:09:13 +00:00
mergify[bot]
88cf5e79f5 Unified signature for create_program_address (#11460) (#11509)
(cherry picked from commit 140b2392f6)

Co-authored-by: Jack May <jack@solana.com>
2020-08-10 18:31:51 +00:00
mergify[bot]
520453e1f3 Blockstore address signatures: handle slots that cross primary indexes, and refactor get_confirmed_signatures_for_address2 (#11497) (#11508)
* Freeze address-signature index in the middle of slot to show failure case

* Secondary filter on signature

* Use AddressSignatures iterator instead of manually decrementing slots

* Remove unused method

* Add metrics

* Add transaction-status-index documentation

(cherry picked from commit de5fb3ba0e)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-10 17:43:00 +00:00
mergify[bot]
27815555a1 Fallback to base64 account encoding if json parse fails (#11483) (#11488)
* Fallback to base64 account encoding if json parse fails

* Remove default binary conversion

(cherry picked from commit ebc45bd73f)

Co-authored-by: Justin Starry <justin@solana.com>
2020-08-09 16:15:09 +00:00
mergify[bot]
3e483314b6 Decode native-program and sysvar accounts (bp #11463) (#11485)
* Decode native-program and sysvar accounts (#11463)

* Pass pubkey in to account-decoder for sysvars

* Decode sysvar accounts

* Decode config accounts; move validator-info lower

* Decode stake accounts

* Review comments

* Stringify any account lamports and epochs that can be set to u64::MAX

(cherry picked from commit a9f76862fb)

# Conflicts:
#	Cargo.lock
#	account-decoder/Cargo.toml

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-08-09 09:18:17 +00:00
mergify[bot]
8a67504578 Add Binary64 option for account data (#11474) (#11481)
* Add Binary64 option for account data

* Decode into binary64

* Reword docs

(cherry picked from commit 068d23f298)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-08-09 06:50:24 +00:00
mergify[bot]
a1b238280b Return delegated amount as UiTokenAmount (#11475) (#11477)
* Return delegated amount as UiTokenAmount

* Omit delegate and delegatedAmount when none

(cherry picked from commit 88d8d3d02a)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-09 00:01:01 +00:00
Trent Nelson
f9e07f575e Bump version to v1.3.2 2020-08-08 06:26:20 +00:00
Trent Nelson
c8bad57455 Bump version to v1.3.1 2020-08-07 19:21:41 -06:00
mergify[bot]
71654c0457 Fix cbindgen compatibility (#11455) (#11459)
(cherry picked from commit 5a7e99f283)

Co-authored-by: Jack May <jack@solana.com>
2020-08-07 23:44:49 +00:00
mergify[bot]
f9d6fb48a4 Send votes from banking stage to vote listener (#11434) (#11454)
*  Send votes from banking stage to vote listener

Co-authored-by: Carl <carl@solana.com>
(cherry picked from commit 7e25130529)

Co-authored-by: carllin <wumu727@gmail.com>
2020-08-07 19:45:40 +00:00
mergify[bot]
fa9aa0a1d7 Token Accounts: return ui_amount, decimals with decoded account (#11407) (#11453)
* Return ui_amount, decimals from token client methods

* Return ui_amount, decimals in RPC jsonParsed token accounts

* Fixup docs

* Return ui_amount, decimals in pubsub jsonParsed token accounts

* Remove unnecessary duplicate struct

* StringAmount rename

(cherry picked from commit b7c2681903)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-07 19:05:35 +00:00
mergify[bot]
9758ebfc99 Only run web3.js/explorer CI when targeting master branch (#11440)
(cherry picked from commit a3165c6a61)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-08-07 06:24:42 +00:00
mergify[bot]
8be23a2bb2 Add address-based lower bound to get_confirmed_signatures_for_address2 loop (#11426) (#11432)
(cherry picked from commit 5530ee4c95)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-07 00:06:44 +00:00
mergify[bot]
4ff9a6910d Fix blockstore empty panic (#11423) (#11430)
* Add panicking test

* Add failing test: fresh transaction-status column shouldn't point at valid root 0

* Prevent transaction status match outside of primary-index bounds

* Initialize transaction-status and address-signature primer entries with Slot::MAX

* Revert "Add failing test: fresh transaction-status column shouldn't point at valid root 0"

This reverts commit cbad2a9fae.

* Revert "Initialize transaction-status and address-signature primer entries with Slot::MAX"

This reverts commit ffaeac0669.

(cherry picked from commit 1061b50665)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-08-06 23:39:01 +00:00
mergify[bot]
fd192e3641 Link fix (#11368) (#11425)
* fixes logo

* cleans up homepage on docs

* adds icon files and tightens margins

* cleans up sidenav, adds top nav items

* fixes a link

* removes icon files

Co-authored-by: Dan Albert <dan@solana.com>
(cherry picked from commit 14dcaaee6c)

Co-authored-by: Raj Gokal <rajivgokal@gmail.com>
2020-08-06 19:37:04 +00:00
mergify[bot]
a8de467ef8 Realloc not supported (#11418)
(cherry picked from commit bc4c5c5a97)

Co-authored-by: Jack May <jack@solana.com>
2020-08-06 16:24:14 +00:00
Michael Vines
1a186beb5c Update lib.rs
(cherry picked from commit 5a63c9d535)
2020-08-06 07:58:05 -07:00
Michael Vines
66a21ed65e Enable cross program support in mainnet-beta epoch 63
(cherry picked from commit c9b1d08218)
2020-08-06 07:58:05 -07:00
mergify[bot]
1a42a40492 RPC: Plug getConfirmedSignaturesForAddress2 into bigtable storage (bp #11395) (#11406)
* Plug getConfirmedSignaturesForAddress2 into bigtable storage

(cherry picked from commit 4222932e08)

* Upgrade help description

(cherry picked from commit 9abb7db5f8)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-06 07:14:46 +00:00
mergify[bot]
5841e4d665 Long-term ledger storage with BigTable (bp #11222) (#11392)
* ledger-storage-bigtable boilerplate

(cherry picked from commit 9d2293bb32)

* $ wget https://pki.goog/roots.pem -O pki-goog-roots.pem

(cherry picked from commit 1617a025ce)

* Add access_token module

(cherry picked from commit 59d266a111)

* Add root_ca_certificate

(cherry picked from commit faa016e4b7)

* Add build-proto

(cherry picked from commit c31e1f5bf0)

* UiTransactionEncoding is now copy

(cherry picked from commit 494968be66)

* Increase timeout

(cherry picked from commit 57dfebc5ba)

* Add build-proto/build.sh output

(cherry picked from commit 54dae6ba2c)

* Suppress doctest errors

(cherry picked from commit 019c75797d)

* Add compression

(cherry picked from commit 243e05d59f)

* Add bigtable

(cherry picked from commit 6e0353965a)

* Add configuration info

(cherry picked from commit 98cca1e774)

* Add ledger-tool bigtable subcommands

(cherry picked from commit f9049d6ee4)

# Conflicts:
#	ledger-tool/Cargo.toml

* Make room for tokio 0.2

(cherry picked from commit b876fb84ba)

# Conflicts:
#	core/Cargo.toml

* Setup a tokio 0.2 runtime for RPC usage

(cherry picked from commit 0e02740565)

# Conflicts:
#	core/Cargo.toml

* Plumb Bigtable ledger storage into the RPC subsystem

(cherry picked from commit dfae9a9864)

# Conflicts:
#	core/Cargo.toml

* Add RPC transaction history design

(cherry picked from commit e56ea138c7)

* Simplify access token refreshing

(cherry picked from commit 1f7af14386)

* Report block status more frequently

(cherry picked from commit 22c46ebf96)

* after -> before

(cherry picked from commit 227ea934ff)

* Rebase

* Cargo.lock

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-06 04:06:44 +00:00
mergify[bot]
6542a04521 Mark token-specific rpcs as unstable (#11402)
(cherry picked from commit 7430896c79)

Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-08-06 04:00:18 +00:00
mergify[bot]
5c27009758 Force program address off the curve (#11323) (#11398)
(cherry picked from commit 03263c850a)

Co-authored-by: Jack May <jack@solana.com>
2020-08-06 01:00:42 +00:00
mergify[bot]
888f3522d8 Add getConfirmedSignaturesForAddress2 RPC method (bp #11259) (#11394)
* Add getConfirmedSignaturesForAddress2 RPC method

(cherry picked from commit 1b2276520b)

* Reimplement transaction-history command with getConfirmedSignaturesForAddress2

(cherry picked from commit 087fd32ce3)

* Rework get_confirmed_signatures_for_address2

(cherry picked from commit a11f137810)

* Rename startAfter to before

(cherry picked from commit 02c0981ecf)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-08-05 22:47:50 +00:00
312 changed files with 22889 additions and 4457 deletions

View File

@@ -1,12 +1,6 @@
{
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
"environment": {
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]",
"CRATES_IO_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:GGRTYDjMXksevzR6kq4Jx+FaIQZz50RU:xkbwDxcgoCyU+aT2tiI9mymigrEl6YiOr3axe3aX70ELIBKbCdPGilXP/wixvKi94g2u]",
"GEOLOCATION_API_KEY": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:U2PZLi5MU3Ru/zK1SilianEeizcMvxml:AJKf2OAtDHmJh0KyXrBnNnistItZvVVP3cZ7ZLtrVupjmWN/PzmKwSsXeCNObWS+]",
"GITHUB_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:0NJNlpD/O19mvOakCGBYDhIDfySxWFSC:Dz4NXv9x6ncRQ1u9sVoWOcqmkg0sI09qmefghB0GXZgPcFGgn6T0mw7ynNnbUvjyH8dLruKHauk=]",
"INFLUX_DATABASE": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:SzwHIeOVpmbTcGQOGngoFgYumsLZJUGq:t7Rpk49njsWvoM+ztv5Uwuiz]",
"INFLUX_PASSWORD": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:/MUs+q7pdGrUjzwcq+6pgIFxur4hxdqu:am22z2E2dtmw1f1J1Mq5JLcUHZsrEjQAJ0pp21M4AZeJbNO6bVb44d9zSkHj7xdN6U+GNlCk+wU=]",
"INFLUX_USERNAME": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:XjghH20xGVWro9B+epGlJaJcW8Wze0Bi:ZIdOtXudTY5TqKseDU7gVvQXfmXV99Xh]"
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]"
}
}

View File

@@ -3,16 +3,19 @@
#
# Save target/ for the next CI build on this machine
#
(
set -x
d=$HOME/cargo-target-cache/"$BUILDKITE_LABEL"
mkdir -p "$d"
set -x
rsync -a --delete --link-dest="$PWD" target "$d"
du -hs "$d"
read -r cacheSizeInGB _ < <(du -s --block-size=1800000000 "$d")
echo "--- ${cacheSizeInGB}GB: $d"
)
if [[ -z $CARGO_TARGET_CACHE ]]; then
echo "+++ CARGO_TARGET_CACHE not defined" # pre-command should have defined it
else
(
set -x
mkdir -p "$CARGO_TARGET_CACHE"
set -x
rsync -a --delete --link-dest="$PWD" target "$CARGO_TARGET_CACHE"
du -hs "$CARGO_TARGET_CACHE"
read -r cacheSizeInGB _ < <(du -s --block-size=1800000000 "$CARGO_TARGET_CACHE")
echo "--- ${cacheSizeInGB}GB: $CARGO_TARGET_CACHE"
)
fi
#
# Add job_stats data point

View File

@@ -11,23 +11,24 @@ export PS4="++"
#
# Restore target/ from the previous CI build on this machine
#
eval "$(ci/channel-info.sh)"
export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
(
set -x
d=$HOME/cargo-target-cache/"$BUILDKITE_LABEL"
MAX_CACHE_SIZE=18 # gigabytes
if [[ -d $d ]]; then
du -hs "$d"
read -r cacheSizeInGB _ < <(du -s --block-size=1800000000 "$d")
echo "--- ${cacheSizeInGB}GB: $d"
if [[ -d $CARGO_TARGET_CACHE ]]; then
du -hs "$CARGO_TARGET_CACHE"
read -r cacheSizeInGB _ < <(du -s --block-size=1800000000 "$CARGO_TARGET_CACHE")
echo "--- ${cacheSizeInGB}GB: $CARGO_TARGET_CACHE"
if [[ $cacheSizeInGB -gt $MAX_CACHE_SIZE ]]; then
echo "--- $d is too large, removing it"
rm -rf "$d"
echo "--- $CARGO_TARGET_CACHE is too large, removing it"
rm -rf "$CARGO_TARGET_CACHE"
fi
else
echo "--- $d not present"
echo "--- $CARGO_TARGET_CACHE not present"
fi
mkdir -p "$d"/target
rsync -a --delete --link-dest="$d" "$d"/target .
mkdir -p "$CARGO_TARGET_CACHE"/target
rsync -a --delete --link-dest="$CARGO_TARGET_CACHE" "$CARGO_TARGET_CACHE"/target .
)

View File

@@ -67,7 +67,8 @@ jobs:
# explorer pull request
- name: "explorer"
if: type = pull_request
if: type = pull_request AND branch = master
language: node_js
node_js:
- "node"
@@ -86,7 +87,8 @@ jobs:
# web3.js pull request
- name: "web3.js"
if: type = pull_request
if: type = pull_request AND branch = master
language: node_js
node_js:
- "lts/*"

Cargo.lock (generated): 1453 lines changed

File diff suppressed because it is too large.

View File

@@ -5,6 +5,9 @@ members = [
"bench-tps",
"accounts-bench",
"banking-bench",
"banks-client",
"banks-interface",
"banks-server",
"clap-utils",
"cli-config",
"client",
@@ -26,6 +29,7 @@ members = [
"log-analyzer",
"merkle-tree",
"stake-o-matic",
"storage-bigtable",
"streamer",
"measure",
"metrics",

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.3.0"
version = "1.3.10"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,16 +9,20 @@ license = "Apache-2.0"
edition = "2018"
[dependencies]
base64 = "0.12.3"
bincode = "1.3.1"
bs58 = "0.3.1"
bv = "0.11.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
solana-sdk = { path = "../sdk", version = "1.3.0" }
solana-vote-program = { path = "../programs/vote", version = "1.3.0" }
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
solana-stake-program = { path = "../programs/stake", version = "1.3.10" }
solana-vote-program = { path = "../programs/vote", version = "1.3.10" }
spl-token-v2-0 = { package = "spl-token", version = "2.0.3", features = ["skip-no-mangle"] }
thiserror = "1.0"
[package.metadata.docs.rs]

View File

@@ -4,14 +4,20 @@ extern crate lazy_static;
extern crate serde_derive;
pub mod parse_account_data;
pub mod parse_config;
pub mod parse_nonce;
pub mod parse_stake;
pub mod parse_sysvar;
pub mod parse_token;
pub mod parse_vote;
pub mod validator_info;
use crate::parse_account_data::{parse_account_data, ParsedAccount};
use solana_sdk::{account::Account, clock::Epoch, pubkey::Pubkey};
use crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount};
use solana_sdk::{account::Account, clock::Epoch, fee_calculator::FeeCalculator, pubkey::Pubkey};
use std::str::FromStr;
pub type StringAmount = String;
/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
@@ -26,32 +32,47 @@ pub struct UiAccount {
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", untagged)]
pub enum UiAccountData {
Binary(String),
LegacyBinary(String), // Legacy. Retained for RPC backwards compatibility
Json(ParsedAccount),
}
impl From<Vec<u8>> for UiAccountData {
fn from(data: Vec<u8>) -> Self {
Self::Binary(bs58::encode(data).into_string())
}
Binary(String, UiAccountEncoding),
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountEncoding {
Binary,
Binary, // Legacy. Retained for RPC backwards compatibility
Base58,
Base64,
JsonParsed,
}
impl UiAccount {
pub fn encode(account: Account, encoding: UiAccountEncoding) -> Self {
pub fn encode(
pubkey: &Pubkey,
account: Account,
encoding: UiAccountEncoding,
additional_data: Option<AccountAdditionalData>,
data_slice_config: Option<UiDataSliceConfig>,
) -> Self {
let data = match encoding {
UiAccountEncoding::Binary => account.data.into(),
UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
bs58::encode(slice_data(&account.data, data_slice_config)).into_string(),
),
UiAccountEncoding::Base58 => UiAccountData::Binary(
bs58::encode(slice_data(&account.data, data_slice_config)).into_string(),
encoding,
),
UiAccountEncoding::Base64 => UiAccountData::Binary(
base64::encode(slice_data(&account.data, data_slice_config)),
encoding,
),
UiAccountEncoding::JsonParsed => {
if let Ok(parsed_data) = parse_account_data(&account.owner, &account.data) {
if let Ok(parsed_data) =
parse_account_data(pubkey, &account.owner, &account.data, additional_data)
{
UiAccountData::Json(parsed_data)
} else {
account.data.into()
UiAccountData::Binary(base64::encode(&account.data), UiAccountEncoding::Base64)
}
}
};
@@ -67,7 +88,12 @@ impl UiAccount {
pub fn decode(&self) -> Option<Account> {
let data = match &self.data {
UiAccountData::Json(_) => None,
UiAccountData::Binary(blob) => bs58::decode(blob).into_vec().ok(),
UiAccountData::LegacyBinary(blob) => bs58::decode(blob).into_vec().ok(),
UiAccountData::Binary(blob, encoding) => match encoding {
UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
UiAccountEncoding::Base64 => base64::decode(blob).ok(),
UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
},
}?;
Some(Account {
lamports: self.lamports,
@@ -78,3 +104,79 @@ impl UiAccount {
})
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiFeeCalculator {
pub lamports_per_signature: StringAmount,
}
impl From<FeeCalculator> for UiFeeCalculator {
fn from(fee_calculator: FeeCalculator) -> Self {
Self {
lamports_per_signature: fee_calculator.lamports_per_signature.to_string(),
}
}
}
impl Default for UiFeeCalculator {
fn default() -> Self {
Self {
lamports_per_signature: "0".to_string(),
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UiDataSliceConfig {
pub offset: usize,
pub length: usize,
}
fn slice_data(data: &[u8], data_slice_config: Option<UiDataSliceConfig>) -> &[u8] {
if let Some(UiDataSliceConfig { offset, length }) = data_slice_config {
if offset >= data.len() {
&[]
} else if length > data.len() - offset {
&data[offset..]
} else {
&data[offset..offset + length]
}
} else {
data
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_slice_data() {
let data = vec![1, 2, 3, 4, 5];
let slice_config = Some(UiDataSliceConfig {
offset: 0,
length: 5,
});
assert_eq!(slice_data(&data, slice_config), &data[..]);
let slice_config = Some(UiDataSliceConfig {
offset: 0,
length: 10,
});
assert_eq!(slice_data(&data, slice_config), &data[..]);
let slice_config = Some(UiDataSliceConfig {
offset: 1,
length: 2,
});
assert_eq!(slice_data(&data, slice_config), &data[1..3]);
let slice_config = Some(UiDataSliceConfig {
offset: 10,
length: 2,
});
assert_eq!(slice_data(&data, slice_config), &[] as &[u8]);
}
}
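
A minimal usage sketch of the new `UiAccount::encode` signature shown above, with a `UiDataSliceConfig` window and base64 output; the account contents and lamport value are made up for illustration, not taken from the diff.

use solana_account_decoder::{UiAccount, UiAccountEncoding, UiDataSliceConfig};
use solana_sdk::{account::Account, pubkey::Pubkey, system_program};

fn encode_sliced(pubkey: &Pubkey) -> UiAccount {
    // Illustrative account: 5 bytes of data owned by the system program.
    let account = Account {
        lamports: 42,
        data: vec![1, 2, 3, 4, 5],
        owner: system_program::id(),
        executable: false,
        rent_epoch: 0,
    };
    // Base64-encode only bytes [1..3) of the data; no AccountAdditionalData is
    // needed because the owner is not the spl-token program.
    UiAccount::encode(
        pubkey,
        account,
        UiAccountEncoding::Base64,
        None,
        Some(UiDataSliceConfig {
            offset: 1,
            length: 2,
        }),
    )
}

The returned `UiAccountData::Binary(blob, UiAccountEncoding::Base64)` round-trips through `decode()`; `Json`-parsed data does not.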

View File

@@ -1,22 +1,31 @@
use crate::{
parse_config::parse_config,
parse_nonce::parse_nonce,
parse_token::{parse_token, spl_token_id_v1_0},
parse_stake::parse_stake,
parse_sysvar::parse_sysvar,
parse_token::{parse_token, spl_token_id_v2_0},
parse_vote::parse_vote,
};
use inflector::Inflector;
use serde_json::Value;
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program};
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
use std::collections::HashMap;
use thiserror::Error;
lazy_static! {
static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v1_0();
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0();
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
let mut m = HashMap::new();
m.insert(*CONFIG_PROGRAM_ID, ParsableAccount::Config);
m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
m.insert(*STAKE_PROGRAM_ID, ParsableAccount::Stake);
m.insert(*SYSVAR_PROGRAM_ID, ParsableAccount::Sysvar);
m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);
m
};
@@ -30,6 +39,9 @@ pub enum ParseAccountError {
#[error("Program not parsable")]
ProgramNotParsable,
#[error("Additional data required to parse: {0}")]
AdditionalDataMissing(String),
#[error("Instruction error")]
InstructionError(#[from] InstructionError),
@@ -42,31 +54,49 @@ pub enum ParseAccountError {
pub struct ParsedAccount {
pub program: String,
pub parsed: Value,
pub space: u64,
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ParsableAccount {
Config,
Nonce,
SplToken,
Stake,
Sysvar,
Vote,
}
#[derive(Default)]
pub struct AccountAdditionalData {
pub spl_token_decimals: Option<u8>,
}
pub fn parse_account_data(
pubkey: &Pubkey,
program_id: &Pubkey,
data: &[u8],
additional_data: Option<AccountAdditionalData>,
) -> Result<ParsedAccount, ParseAccountError> {
let program_name = PARSABLE_PROGRAM_IDS
.get(program_id)
.ok_or_else(|| ParseAccountError::ProgramNotParsable)?;
let additional_data = additional_data.unwrap_or_default();
let parsed_json = match program_name {
ParsableAccount::Config => serde_json::to_value(parse_config(data, pubkey)?)?,
ParsableAccount::Nonce => serde_json::to_value(parse_nonce(data)?)?,
ParsableAccount::SplToken => serde_json::to_value(parse_token(data)?)?,
ParsableAccount::SplToken => {
serde_json::to_value(parse_token(data, additional_data.spl_token_decimals)?)?
}
ParsableAccount::Stake => serde_json::to_value(parse_stake(data)?)?,
ParsableAccount::Sysvar => serde_json::to_value(parse_sysvar(data, pubkey)?)?,
ParsableAccount::Vote => serde_json::to_value(parse_vote(data)?)?,
};
Ok(ParsedAccount {
program: format!("{:?}", program_name).to_kebab_case(),
parsed: parsed_json,
space: data.len() as u64,
})
}
@@ -81,20 +111,35 @@ mod test {
#[test]
fn test_parse_account_data() {
let account_pubkey = Pubkey::new_rand();
let other_program = Pubkey::new_rand();
let data = vec![0; 4];
assert!(parse_account_data(&other_program, &data).is_err());
assert!(parse_account_data(&account_pubkey, &other_program, &data, None).is_err());
let vote_state = VoteState::default();
let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
let versioned = VoteStateVersions::Current(Box::new(vote_state));
VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
let parsed = parse_account_data(&solana_vote_program::id(), &vote_account_data).unwrap();
let parsed = parse_account_data(
&account_pubkey,
&solana_vote_program::id(),
&vote_account_data,
None,
)
.unwrap();
assert_eq!(parsed.program, "vote".to_string());
assert_eq!(parsed.space, VoteState::size_of() as u64);
let nonce_data = Versions::new_current(State::Initialized(Data::default()));
let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
let parsed = parse_account_data(&system_program::id(), &nonce_account_data).unwrap();
let parsed = parse_account_data(
&account_pubkey,
&system_program::id(),
&nonce_account_data,
None,
)
.unwrap();
assert_eq!(parsed.program, "nonce".to_string());
assert_eq!(parsed.space, State::size() as u64);
}
}
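
For spl-token accounts the parser now needs the mint's decimals, supplied through `AccountAdditionalData`. A hedged sketch of the call shape; the wrapper function and the decimals value (6) are placeholders, not part of the diff.

use solana_account_decoder::parse_account_data::{
    parse_account_data, AccountAdditionalData, ParseAccountError, ParsedAccount,
};
use solana_sdk::pubkey::Pubkey;

// Hypothetical wrapper: in practice the RPC layer looks up the mint's decimals
// before parsing a token account.
fn parse_with_decimals(
    pubkey: &Pubkey,
    owner: &Pubkey,
    data: &[u8],
) -> Result<ParsedAccount, ParseAccountError> {
    let additional_data = AccountAdditionalData {
        spl_token_decimals: Some(6),
    };
    parse_account_data(pubkey, owner, data, Some(additional_data))
}

Passing `None` is fine for every other parsable program; only spl-token accounts hit the `AdditionalDataMissing` error without it (mints and multisigs parse regardless).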

View File

@@ -0,0 +1,146 @@
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
validator_info,
};
use bincode::deserialize;
use serde_json::Value;
use solana_config_program::{get_config_data, ConfigKeys};
use solana_sdk::pubkey::Pubkey;
use solana_stake_program::config::Config as StakeConfig;
pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
let parsed_account = if pubkey == &solana_stake_program::config::id() {
get_config_data(data)
.ok()
.and_then(|data| deserialize::<StakeConfig>(data).ok())
.map(|config| ConfigAccountType::StakeConfig(config.into()))
} else {
deserialize::<ConfigKeys>(data).ok().and_then(|key_list| {
if !key_list.keys.is_empty() && key_list.keys[0].0 == validator_info::id() {
parse_config_data::<String>(data, key_list.keys).and_then(|validator_info| {
Some(ConfigAccountType::ValidatorInfo(UiConfig {
keys: validator_info.keys,
config_data: serde_json::from_str(&validator_info.config_data).ok()?,
}))
})
} else {
None
}
})
};
parsed_account.ok_or(ParseAccountError::AccountNotParsable(
ParsableAccount::Config,
))
}
fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConfig<T>>
where
T: serde::de::DeserializeOwned,
{
let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
let keys = keys
.iter()
.map(|key| UiConfigKey {
pubkey: key.0.to_string(),
signer: key.1,
})
.collect();
Some(UiConfig { keys, config_data })
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum ConfigAccountType {
StakeConfig(UiStakeConfig),
ValidatorInfo(UiConfig<Value>),
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiConfigKey {
pub pubkey: String,
pub signer: bool,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiStakeConfig {
pub warmup_cooldown_rate: f64,
pub slash_penalty: u8,
}
impl From<StakeConfig> for UiStakeConfig {
fn from(config: StakeConfig) -> Self {
Self {
warmup_cooldown_rate: config.warmup_cooldown_rate,
slash_penalty: config.slash_penalty,
}
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiConfig<T> {
pub keys: Vec<UiConfigKey>,
pub config_data: T,
}
#[cfg(test)]
mod test {
use super::*;
use crate::validator_info::ValidatorInfo;
use serde_json::json;
use solana_config_program::create_config_account;
#[test]
fn test_parse_config() {
let stake_config = StakeConfig {
warmup_cooldown_rate: 0.25,
slash_penalty: 50,
};
let stake_config_account = create_config_account(vec![], &stake_config, 10);
assert_eq!(
parse_config(
&stake_config_account.data,
&solana_stake_program::config::id()
)
.unwrap(),
ConfigAccountType::StakeConfig(UiStakeConfig {
warmup_cooldown_rate: 0.25,
slash_penalty: 50,
}),
);
let validator_info = ValidatorInfo {
info: serde_json::to_string(&json!({
"name": "Solana",
}))
.unwrap(),
};
let info_pubkey = Pubkey::new_rand();
let validator_info_config_account = create_config_account(
vec![(validator_info::id(), false), (info_pubkey, true)],
&validator_info,
10,
);
assert_eq!(
parse_config(&validator_info_config_account.data, &info_pubkey).unwrap(),
ConfigAccountType::ValidatorInfo(UiConfig {
keys: vec![
UiConfigKey {
pubkey: validator_info::id().to_string(),
signer: false,
},
UiConfigKey {
pubkey: info_pubkey.to_string(),
signer: true,
}
],
config_data: serde_json::from_str(r#"{"name":"Solana"}"#).unwrap(),
}),
);
let bad_data = vec![0; 4];
assert!(parse_config(&bad_data, &info_pubkey).is_err());
}
}

View File

@@ -1,6 +1,5 @@
use crate::parse_account_data::ParseAccountError;
use crate::{parse_account_data::ParseAccountError, UiFeeCalculator};
use solana_sdk::{
fee_calculator::FeeCalculator,
instruction::InstructionError,
nonce::{state::Versions, State},
};
@@ -14,7 +13,7 @@ pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
authority: data.authority.to_string(),
blockhash: data.blockhash.to_string(),
fee_calculator: data.fee_calculator,
fee_calculator: data.fee_calculator.into(),
})),
}
}
@@ -32,7 +31,7 @@ pub enum UiNonceState {
pub struct UiNonceData {
pub authority: String,
pub blockhash: String,
pub fee_calculator: FeeCalculator,
pub fee_calculator: UiFeeCalculator,
}
#[cfg(test)]
@@ -56,7 +55,9 @@ mod test {
UiNonceState::Initialized(UiNonceData {
authority: Pubkey::default().to_string(),
blockhash: Hash::default().to_string(),
fee_calculator: FeeCalculator::default(),
fee_calculator: UiFeeCalculator {
lamports_per_signature: 0.to_string(),
},
}),
);

View File

@@ -0,0 +1,235 @@
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount,
};
use bincode::deserialize;
use solana_sdk::clock::{Epoch, UnixTimestamp};
use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};
pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
let stake_state: StakeState = deserialize(data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::Stake))?;
let parsed_account = match stake_state {
StakeState::Uninitialized => StakeAccountType::Uninitialized,
StakeState::Initialized(meta) => StakeAccountType::Initialized(UiStakeAccount {
meta: meta.into(),
stake: None,
}),
StakeState::Stake(meta, stake) => StakeAccountType::Delegated(UiStakeAccount {
meta: meta.into(),
stake: Some(stake.into()),
}),
StakeState::RewardsPool => StakeAccountType::RewardsPool,
};
Ok(parsed_account)
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum StakeAccountType {
Uninitialized,
Initialized(UiStakeAccount),
Delegated(UiStakeAccount),
RewardsPool,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiStakeAccount {
pub meta: UiMeta,
pub stake: Option<UiStake>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiMeta {
pub rent_exempt_reserve: StringAmount,
pub authorized: UiAuthorized,
pub lockup: UiLockup,
}
impl From<Meta> for UiMeta {
fn from(meta: Meta) -> Self {
Self {
rent_exempt_reserve: meta.rent_exempt_reserve.to_string(),
authorized: meta.authorized.into(),
lockup: meta.lockup.into(),
}
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiLockup {
pub unix_timestamp: UnixTimestamp,
pub epoch: Epoch,
pub custodian: String,
}
impl From<Lockup> for UiLockup {
fn from(lockup: Lockup) -> Self {
Self {
unix_timestamp: lockup.unix_timestamp,
epoch: lockup.epoch,
custodian: lockup.custodian.to_string(),
}
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiAuthorized {
pub staker: String,
pub withdrawer: String,
}
impl From<Authorized> for UiAuthorized {
fn from(authorized: Authorized) -> Self {
Self {
staker: authorized.staker.to_string(),
withdrawer: authorized.withdrawer.to_string(),
}
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiStake {
pub delegation: UiDelegation,
pub credits_observed: u64,
}
impl From<Stake> for UiStake {
fn from(stake: Stake) -> Self {
Self {
delegation: stake.delegation.into(),
credits_observed: stake.credits_observed,
}
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiDelegation {
pub voter: String,
pub stake: StringAmount,
pub activation_epoch: StringAmount,
pub deactivation_epoch: StringAmount,
pub warmup_cooldown_rate: f64,
}
impl From<Delegation> for UiDelegation {
fn from(delegation: Delegation) -> Self {
Self {
voter: delegation.voter_pubkey.to_string(),
stake: delegation.stake.to_string(),
activation_epoch: delegation.activation_epoch.to_string(),
deactivation_epoch: delegation.deactivation_epoch.to_string(),
warmup_cooldown_rate: delegation.warmup_cooldown_rate,
}
}
}
#[cfg(test)]
mod test {
use super::*;
use bincode::serialize;
use solana_sdk::pubkey::Pubkey;
#[test]
fn test_parse_stake() {
let stake_state = StakeState::Uninitialized;
let stake_data = serialize(&stake_state).unwrap();
assert_eq!(
parse_stake(&stake_data).unwrap(),
StakeAccountType::Uninitialized
);
let pubkey = Pubkey::new_rand();
let custodian = Pubkey::new_rand();
let authorized = Authorized::auto(&pubkey);
let lockup = Lockup {
unix_timestamp: 0,
epoch: 1,
custodian,
};
let meta = Meta {
rent_exempt_reserve: 42,
authorized,
lockup,
};
let stake_state = StakeState::Initialized(meta);
let stake_data = serialize(&stake_state).unwrap();
assert_eq!(
parse_stake(&stake_data).unwrap(),
StakeAccountType::Initialized(UiStakeAccount {
meta: UiMeta {
rent_exempt_reserve: 42.to_string(),
authorized: UiAuthorized {
staker: pubkey.to_string(),
withdrawer: pubkey.to_string(),
},
lockup: UiLockup {
unix_timestamp: 0,
epoch: 1,
custodian: custodian.to_string(),
}
},
stake: None,
})
);
let voter_pubkey = Pubkey::new_rand();
let stake = Stake {
delegation: Delegation {
voter_pubkey,
stake: 20,
activation_epoch: 2,
deactivation_epoch: std::u64::MAX,
warmup_cooldown_rate: 0.25,
},
credits_observed: 10,
};
let stake_state = StakeState::Stake(meta, stake);
let stake_data = serialize(&stake_state).unwrap();
assert_eq!(
parse_stake(&stake_data).unwrap(),
StakeAccountType::Delegated(UiStakeAccount {
meta: UiMeta {
rent_exempt_reserve: 42.to_string(),
authorized: UiAuthorized {
staker: pubkey.to_string(),
withdrawer: pubkey.to_string(),
},
lockup: UiLockup {
unix_timestamp: 0,
epoch: 1,
custodian: custodian.to_string(),
}
},
stake: Some(UiStake {
delegation: UiDelegation {
voter: voter_pubkey.to_string(),
stake: 20.to_string(),
activation_epoch: 2.to_string(),
deactivation_epoch: std::u64::MAX.to_string(),
warmup_cooldown_rate: 0.25,
},
credits_observed: 10,
})
})
);
let stake_state = StakeState::RewardsPool;
let stake_data = serialize(&stake_state).unwrap();
assert_eq!(
parse_stake(&stake_data).unwrap(),
StakeAccountType::RewardsPool
);
let bad_data = vec![1, 2, 3, 4];
assert!(parse_stake(&bad_data).is_err());
}
}
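
Downstream code can match on the parsed enum instead of the raw `StakeState`; a hedged sketch (the helper name is made up) that pulls the delegated vote account out of stake-account data:

use solana_account_decoder::parse_stake::{parse_stake, StakeAccountType};

// Returns the voter pubkey (as a string) if the stake account is delegated.
fn delegated_voter(stake_account_data: &[u8]) -> Option<String> {
    match parse_stake(stake_account_data).ok()? {
        StakeAccountType::Delegated(ui_stake_account) => {
            Some(ui_stake_account.stake?.delegation.voter)
        }
        _ => None,
    }
}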

View File

@@ -0,0 +1,328 @@
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount, UiFeeCalculator,
};
use bincode::deserialize;
use bv::BitVec;
use solana_sdk::{
clock::{Clock, Epoch, Slot, UnixTimestamp},
epoch_schedule::EpochSchedule,
pubkey::Pubkey,
rent::Rent,
slot_hashes::SlotHashes,
slot_history::{self, SlotHistory},
stake_history::{StakeHistory, StakeHistoryEntry},
sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
};
pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
let parsed_account = {
if pubkey == &sysvar::clock::id() {
deserialize::<Clock>(data)
.ok()
.map(|clock| SysvarAccountType::Clock(clock.into()))
} else if pubkey == &sysvar::epoch_schedule::id() {
deserialize(data).ok().map(SysvarAccountType::EpochSchedule)
} else if pubkey == &sysvar::fees::id() {
deserialize::<Fees>(data)
.ok()
.map(|fees| SysvarAccountType::Fees(fees.into()))
} else if pubkey == &sysvar::recent_blockhashes::id() {
deserialize::<RecentBlockhashes>(data)
.ok()
.map(|recent_blockhashes| {
let recent_blockhashes = recent_blockhashes
.iter()
.map(|entry| UiRecentBlockhashesEntry {
blockhash: entry.blockhash.to_string(),
fee_calculator: entry.fee_calculator.clone().into(),
})
.collect();
SysvarAccountType::RecentBlockhashes(recent_blockhashes)
})
} else if pubkey == &sysvar::rent::id() {
deserialize::<Rent>(data)
.ok()
.map(|rent| SysvarAccountType::Rent(rent.into()))
} else if pubkey == &sysvar::rewards::id() {
deserialize::<Rewards>(data)
.ok()
.map(|rewards| SysvarAccountType::Rewards(rewards.into()))
} else if pubkey == &sysvar::slot_hashes::id() {
deserialize::<SlotHashes>(data).ok().map(|slot_hashes| {
let slot_hashes = slot_hashes
.iter()
.map(|slot_hash| UiSlotHashEntry {
slot: slot_hash.0,
hash: slot_hash.1.to_string(),
})
.collect();
SysvarAccountType::SlotHashes(slot_hashes)
})
} else if pubkey == &sysvar::slot_history::id() {
deserialize::<SlotHistory>(data).ok().map(|slot_history| {
SysvarAccountType::SlotHistory(UiSlotHistory {
next_slot: slot_history.next_slot,
bits: format!("{:?}", SlotHistoryBits(slot_history.bits)),
})
})
} else if pubkey == &sysvar::stake_history::id() {
deserialize::<StakeHistory>(data).ok().map(|stake_history| {
let stake_history = stake_history
.iter()
.map(|entry| UiStakeHistoryEntry {
epoch: entry.0,
stake_history: entry.1.clone(),
})
.collect();
SysvarAccountType::StakeHistory(stake_history)
})
} else {
None
}
};
parsed_account.ok_or(ParseAccountError::AccountNotParsable(
ParsableAccount::Sysvar,
))
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum SysvarAccountType {
Clock(UiClock),
EpochSchedule(EpochSchedule),
Fees(UiFees),
RecentBlockhashes(Vec<UiRecentBlockhashesEntry>),
Rent(UiRent),
Rewards(UiRewards),
SlotHashes(Vec<UiSlotHashEntry>),
SlotHistory(UiSlotHistory),
StakeHistory(Vec<UiStakeHistoryEntry>),
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub struct UiClock {
pub slot: Slot,
pub epoch: Epoch,
pub leader_schedule_epoch: Epoch,
pub unix_timestamp: UnixTimestamp,
}
impl From<Clock> for UiClock {
fn from(clock: Clock) -> Self {
Self {
slot: clock.slot,
epoch: clock.epoch,
leader_schedule_epoch: clock.leader_schedule_epoch,
unix_timestamp: clock.unix_timestamp,
}
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub struct UiFees {
pub fee_calculator: UiFeeCalculator,
}
impl From<Fees> for UiFees {
fn from(fees: Fees) -> Self {
Self {
fee_calculator: fees.fee_calculator.into(),
}
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub struct UiRent {
pub lamports_per_byte_year: StringAmount,
pub exemption_threshold: f64,
pub burn_percent: u8,
}
impl From<Rent> for UiRent {
fn from(rent: Rent) -> Self {
Self {
lamports_per_byte_year: rent.lamports_per_byte_year.to_string(),
exemption_threshold: rent.exemption_threshold,
burn_percent: rent.burn_percent,
}
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub struct UiRewards {
pub validator_point_value: f64,
}
impl From<Rewards> for UiRewards {
fn from(rewards: Rewards) -> Self {
Self {
validator_point_value: rewards.validator_point_value,
}
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiRecentBlockhashesEntry {
pub blockhash: String,
pub fee_calculator: UiFeeCalculator,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiSlotHashEntry {
pub slot: Slot,
pub hash: String,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiSlotHistory {
pub next_slot: Slot,
pub bits: String,
}
struct SlotHistoryBits(BitVec<u64>);
impl std::fmt::Debug for SlotHistoryBits {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for i in 0..slot_history::MAX_ENTRIES {
if self.0.get(i) {
write!(f, "1")?;
} else {
write!(f, "0")?;
}
}
Ok(())
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiStakeHistoryEntry {
pub epoch: Epoch,
pub stake_history: StakeHistoryEntry,
}
#[cfg(test)]
mod test {
use super::*;
use solana_sdk::{
fee_calculator::FeeCalculator,
hash::Hash,
sysvar::{recent_blockhashes::IterItem, Sysvar},
};
use std::iter::FromIterator;
#[test]
fn test_parse_sysvars() {
let clock_sysvar = Clock::default().create_account(1);
assert_eq!(
parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
SysvarAccountType::Clock(UiClock::default()),
);
let epoch_schedule = EpochSchedule {
slots_per_epoch: 12,
leader_schedule_slot_offset: 0,
warmup: false,
first_normal_epoch: 1,
first_normal_slot: 12,
};
let epoch_schedule_sysvar = epoch_schedule.create_account(1);
assert_eq!(
parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
SysvarAccountType::EpochSchedule(epoch_schedule),
);
let fees_sysvar = Fees::default().create_account(1);
assert_eq!(
parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
SysvarAccountType::Fees(UiFees::default()),
);
let hash = Hash::new(&[1; 32]);
let fee_calculator = FeeCalculator {
lamports_per_signature: 10,
};
let recent_blockhashes =
RecentBlockhashes::from_iter(vec![IterItem(0, &hash, &fee_calculator)].into_iter());
let recent_blockhashes_sysvar = recent_blockhashes.create_account(1);
assert_eq!(
parse_sysvar(
&recent_blockhashes_sysvar.data,
&sysvar::recent_blockhashes::id()
)
.unwrap(),
SysvarAccountType::RecentBlockhashes(vec![UiRecentBlockhashesEntry {
blockhash: hash.to_string(),
fee_calculator: fee_calculator.into(),
}]),
);
let rent = Rent {
lamports_per_byte_year: 10,
exemption_threshold: 2.0,
burn_percent: 5,
};
let rent_sysvar = rent.create_account(1);
assert_eq!(
parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
SysvarAccountType::Rent(rent.into()),
);
let rewards_sysvar = Rewards::default().create_account(1);
assert_eq!(
parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
SysvarAccountType::Rewards(UiRewards::default()),
);
let mut slot_hashes = SlotHashes::default();
slot_hashes.add(1, hash);
let slot_hashes_sysvar = slot_hashes.create_account(1);
assert_eq!(
parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
slot: 1,
hash: hash.to_string(),
}]),
);
let mut slot_history = SlotHistory::default();
slot_history.add(42);
let slot_history_sysvar = slot_history.create_account(1);
assert_eq!(
parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
SysvarAccountType::SlotHistory(UiSlotHistory {
next_slot: slot_history.next_slot,
bits: format!("{:?}", SlotHistoryBits(slot_history.bits)),
}),
);
let mut stake_history = StakeHistory::default();
let stake_history_entry = StakeHistoryEntry {
effective: 10,
activating: 2,
deactivating: 3,
};
stake_history.add(1, stake_history_entry.clone());
let stake_history_sysvar = stake_history.create_account(1);
assert_eq!(
parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {
epoch: 1,
stake_history: stake_history_entry,
}]),
);
let bad_pubkey = Pubkey::new_rand();
assert!(parse_sysvar(&stake_history_sysvar.data, &bad_pubkey).is_err());
let bad_data = vec![0; 4];
assert!(parse_sysvar(&bad_data, &sysvar::stake_history::id()).is_err());
}
}

View File

@@ -1,54 +1,85 @@
use crate::parse_account_data::{ParsableAccount, ParseAccountError};
use solana_sdk::pubkey::Pubkey;
use spl_token_v1_0::{
option::COption,
solana_sdk::pubkey::Pubkey as SplTokenPubkey,
state::{unpack, Account, Mint, Multisig},
use crate::{
parse_account_data::{ParsableAccount, ParseAccountError},
StringAmount,
};
use std::{mem::size_of, str::FromStr};
use solana_sdk::pubkey::Pubkey;
use spl_token_v2_0::{
option::COption,
pack::Pack,
solana_sdk::pubkey::Pubkey as SplTokenPubkey,
state::{Account, AccountState, Mint, Multisig},
};
use std::str::FromStr;
// A helper function to convert spl_token_v1_0::id() as spl_sdk::pubkey::Pubkey to
// A helper function to convert spl_token_v2_0::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_id_v1_0() -> Pubkey {
Pubkey::from_str(&spl_token_v1_0::id().to_string()).unwrap()
pub fn spl_token_id_v2_0() -> Pubkey {
Pubkey::from_str(&spl_token_v2_0::id().to_string()).unwrap()
}
// A helper function to convert spl_token_v1_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
// A helper function to convert spl_token_v2_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_v1_0_native_mint() -> Pubkey {
Pubkey::from_str(&spl_token_v1_0::native_mint::id().to_string()).unwrap()
pub fn spl_token_v2_0_native_mint() -> Pubkey {
Pubkey::from_str(&spl_token_v2_0::native_mint::id().to_string()).unwrap()
}
pub fn parse_token(data: &[u8]) -> Result<TokenAccountType, ParseAccountError> {
let mut data = data.to_vec();
if data.len() == size_of::<Account>() {
let account: Account = *unpack(&mut data)
pub fn parse_token(
data: &[u8],
mint_decimals: Option<u8>,
) -> Result<TokenAccountType, ParseAccountError> {
if data.len() == Account::get_packed_len() {
let account = Account::unpack(data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
let decimals = mint_decimals.ok_or_else(|| {
ParseAccountError::AdditionalDataMissing(
"no mint_decimals provided to parse spl-token account".to_string(),
)
})?;
Ok(TokenAccountType::Account(UiTokenAccount {
mint: account.mint.to_string(),
owner: account.owner.to_string(),
amount: account.amount,
token_amount: token_amount_to_ui_amount(account.amount, decimals),
delegate: match account.delegate {
COption::Some(pubkey) => Some(pubkey.to_string()),
COption::None => None,
},
is_initialized: account.is_initialized,
is_native: account.is_native,
delegated_amount: account.delegated_amount,
}))
} else if data.len() == size_of::<Mint>() {
let mint: Mint = *unpack(&mut data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
Ok(TokenAccountType::Mint(UiMint {
owner: match mint.owner {
state: account.state.into(),
is_native: account.is_native(),
rent_exempt_reserve: match account.is_native {
COption::Some(reserve) => Some(token_amount_to_ui_amount(reserve, decimals)),
COption::None => None,
},
delegated_amount: if account.delegate.is_none() {
None
} else {
Some(token_amount_to_ui_amount(
account.delegated_amount,
decimals,
))
},
close_authority: match account.close_authority {
COption::Some(pubkey) => Some(pubkey.to_string()),
COption::None => None,
},
}))
} else if data.len() == Mint::get_packed_len() {
let mint = Mint::unpack(data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
Ok(TokenAccountType::Mint(UiMint {
mint_authority: match mint.mint_authority {
COption::Some(pubkey) => Some(pubkey.to_string()),
COption::None => None,
},
supply: mint.supply.to_string(),
decimals: mint.decimals,
is_initialized: mint.is_initialized,
freeze_authority: match mint.freeze_authority {
COption::Some(pubkey) => Some(pubkey.to_string()),
COption::None => None,
},
}))
} else if data.len() == size_of::<Multisig>() {
let multisig: Multisig = *unpack(&mut data)
} else if data.len() == Multisig::get_packed_len() {
let multisig = Multisig::unpack(data)
.map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
Ok(TokenAccountType::Multisig(UiMultisig {
num_required_signers: multisig.m,
@@ -86,19 +117,63 @@ pub enum TokenAccountType {
pub struct UiTokenAccount {
pub mint: String,
pub owner: String,
pub amount: u64,
pub token_amount: UiTokenAmount,
#[serde(skip_serializing_if = "Option::is_none")]
pub delegate: Option<String>,
pub is_initialized: bool,
pub state: UiAccountState,
pub is_native: bool,
pub delegated_amount: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub rent_exempt_reserve: Option<UiTokenAmount>,
#[serde(skip_serializing_if = "Option::is_none")]
pub delegated_amount: Option<UiTokenAmount>,
#[serde(skip_serializing_if = "Option::is_none")]
pub close_authority: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountState {
Uninitialized,
Initialized,
Frozen,
}
impl From<AccountState> for UiAccountState {
fn from(state: AccountState) -> Self {
match state {
AccountState::Uninitialized => UiAccountState::Uninitialized,
AccountState::Initialized => UiAccountState::Initialized,
AccountState::Frozen => UiAccountState::Frozen,
}
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiTokenAmount {
pub ui_amount: f64,
pub decimals: u8,
pub amount: StringAmount,
}
pub fn token_amount_to_ui_amount(amount: u64, decimals: u8) -> UiTokenAmount {
// Use `amount_to_ui_amount()` once spl_token is bumped to a version that supports it: https://github.com/solana-labs/solana-program-library/pull/211
let amount_decimals = amount as f64 / 10_usize.pow(decimals as u32) as f64;
UiTokenAmount {
ui_amount: amount_decimals,
decimals,
amount: amount.to_string(),
}
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiMint {
pub owner: Option<String>,
pub mint_authority: Option<String>,
pub supply: StringAmount,
pub decimals: u8,
pub is_initialized: bool,
pub freeze_authority: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, PartialEq)]
@@ -110,63 +185,94 @@ pub struct UiMultisig {
pub signers: Vec<String>,
}
pub fn get_token_account_mint(data: &[u8]) -> Option<Pubkey> {
if data.len() == Account::get_packed_len() {
Some(Pubkey::new(&data[0..32]))
} else {
None
}
}
#[cfg(test)]
mod test {
use super::*;
use spl_token_v1_0::state::unpack_unchecked;
#[test]
fn test_parse_token() {
let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
let owner_pubkey = SplTokenPubkey::new(&[3; 32]);
let mut account_data = [0; size_of::<Account>()];
let mut account: &mut Account = unpack_unchecked(&mut account_data).unwrap();
account.mint = mint_pubkey;
account.owner = owner_pubkey;
account.amount = 42;
account.is_initialized = true;
let mut account_data = vec![0; Account::get_packed_len()];
Account::unpack_unchecked_mut(&mut account_data, &mut |account: &mut Account| {
account.mint = mint_pubkey;
account.owner = owner_pubkey;
account.amount = 42;
account.state = AccountState::Initialized;
account.is_native = COption::None;
account.close_authority = COption::Some(owner_pubkey);
Ok(())
})
.unwrap();
assert!(parse_token(&account_data, None).is_err());
assert_eq!(
parse_token(&account_data).unwrap(),
parse_token(&account_data, Some(2)).unwrap(),
TokenAccountType::Account(UiTokenAccount {
mint: mint_pubkey.to_string(),
owner: owner_pubkey.to_string(),
amount: 42,
token_amount: UiTokenAmount {
ui_amount: 0.42,
decimals: 2,
amount: "42".to_string()
},
delegate: None,
is_initialized: true,
state: UiAccountState::Initialized,
is_native: false,
delegated_amount: 0,
rent_exempt_reserve: None,
delegated_amount: None,
close_authority: Some(owner_pubkey.to_string()),
}),
);
let mut mint_data = [0; size_of::<Mint>()];
let mut mint: &mut Mint = unpack_unchecked(&mut mint_data).unwrap();
mint.owner = COption::Some(owner_pubkey);
mint.decimals = 3;
mint.is_initialized = true;
let mut mint_data = vec![0; Mint::get_packed_len()];
Mint::unpack_unchecked_mut(&mut mint_data, &mut |mint: &mut Mint| {
mint.mint_authority = COption::Some(owner_pubkey);
mint.supply = 42;
mint.decimals = 3;
mint.is_initialized = true;
mint.freeze_authority = COption::Some(owner_pubkey);
Ok(())
})
.unwrap();
assert_eq!(
parse_token(&mint_data).unwrap(),
parse_token(&mint_data, None).unwrap(),
TokenAccountType::Mint(UiMint {
owner: Some(owner_pubkey.to_string()),
mint_authority: Some(owner_pubkey.to_string()),
supply: 42.to_string(),
decimals: 3,
is_initialized: true,
freeze_authority: Some(owner_pubkey.to_string()),
}),
);
let signer1 = SplTokenPubkey::new(&[1; 32]);
let signer2 = SplTokenPubkey::new(&[2; 32]);
let signer3 = SplTokenPubkey::new(&[3; 32]);
let mut multisig_data = [0; size_of::<Multisig>()];
let mut multisig: &mut Multisig = unpack_unchecked(&mut multisig_data).unwrap();
let mut multisig_data = vec![0; Multisig::get_packed_len()];
let mut signers = [SplTokenPubkey::default(); 11];
signers[0] = signer1;
signers[1] = signer2;
signers[2] = signer3;
multisig.m = 2;
multisig.n = 3;
multisig.is_initialized = true;
multisig.signers = signers;
Multisig::unpack_unchecked_mut(&mut multisig_data, &mut |multisig: &mut Multisig| {
multisig.m = 2;
multisig.n = 3;
multisig.is_initialized = true;
multisig.signers = signers;
Ok(())
})
.unwrap();
assert_eq!(
parse_token(&multisig_data).unwrap(),
parse_token(&multisig_data, None).unwrap(),
TokenAccountType::Multisig(UiMultisig {
num_required_signers: 2,
num_valid_signers: 3,
@@ -180,6 +286,23 @@ mod test {
);
let bad_data = vec![0; 4];
assert!(parse_token(&bad_data).is_err());
assert!(parse_token(&bad_data, None).is_err());
}
#[test]
fn test_get_token_account_mint() {
let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
let mut account_data = vec![0; Account::get_packed_len()];
Account::unpack_unchecked_mut(&mut account_data, &mut |account: &mut Account| {
account.mint = mint_pubkey;
Ok(())
})
.unwrap();
let expected_mint_pubkey = Pubkey::new(&[2; 32]);
assert_eq!(
get_token_account_mint(&account_data),
Some(expected_mint_pubkey)
);
}
}
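
To make the `UiTokenAmount` conversion above concrete, a small worked check (the amounts are arbitrary):

use solana_account_decoder::parse_token::token_amount_to_ui_amount;

fn main() {
    // 1_500_000 base units of a 6-decimal mint renders as 1.5 tokens.
    let ui = token_amount_to_ui_amount(1_500_000, 6);
    assert_eq!(ui.ui_amount, 1.5);
    assert_eq!(ui.decimals, 6);
    assert_eq!(ui.amount, "1500000");
}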

View File

@@ -1,4 +1,4 @@
use crate::parse_account_data::ParseAccountError;
use crate::{parse_account_data::ParseAccountError, StringAmount};
use solana_sdk::{
clock::{Epoch, Slot},
pubkey::Pubkey,
@@ -12,8 +12,8 @@ pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
.iter()
.map(|(epoch, credits, previous_credits)| UiEpochCredits {
epoch: *epoch,
credits: *credits,
previous_credits: *previous_credits,
credits: credits.to_string(),
previous_credits: previous_credits.to_string(),
})
.collect();
let votes = vote_state
@@ -115,8 +115,8 @@ struct UiPriorVoters {
#[serde(rename_all = "camelCase")]
struct UiEpochCredits {
epoch: Epoch,
credits: u64,
previous_credits: u64,
credits: StringAmount,
previous_credits: StringAmount,
}
#[cfg(test)]

View File

@@ -0,0 +1,18 @@
use solana_config_program::ConfigState;
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
pub const MAX_VALIDATOR_INFO: u64 = 576;
solana_sdk::declare_id!("Va1idator1nfo111111111111111111111111111111");
#[derive(Debug, Deserialize, PartialEq, Serialize, Default)]
pub struct ValidatorInfo {
pub info: String,
}
impl ConfigState for ValidatorInfo {
fn max_space() -> u64 {
MAX_VALIDATOR_INFO
}
}

View File

@@ -2,18 +2,19 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.3.0"
version = "1.3.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.1"
solana-logger = { path = "../logger", version = "1.3.0" }
solana-runtime = { path = "../runtime", version = "1.3.0" }
solana-measure = { path = "../measure", version = "1.3.0" }
solana-sdk = { path = "../sdk", version = "1.3.0" }
rayon = "1.4.0"
solana-logger = { path = "../logger", version = "1.3.10" }
solana-runtime = { path = "../runtime", version = "1.3.10" }
solana-measure = { path = "../measure", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
solana-version = { path = "../version", version = "1.3.10" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"

View File

@@ -1,20 +1,21 @@
use clap::{value_t, App, Arg};
use clap::{crate_description, crate_name, value_t, App, Arg};
use rayon::prelude::*;
use solana_measure::measure::Measure;
use solana_runtime::{
accounts::{create_test_accounts, update_accounts, Accounts},
accounts_index::Ancestors,
};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey};
use std::env;
use std::fs;
use std::path::PathBuf;
fn main() {
solana_logger::setup();
let matches = App::new("crate")
.about("about")
.version("version")
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_version::version!())
.arg(
Arg::with_name("num_slots")
.long("num_slots")
@@ -50,11 +51,12 @@ fn main() {
let clean = matches.is_present("clean");
println!("clean: {:?}", clean);
let path = PathBuf::from("farf/accounts-bench");
let path = PathBuf::from(env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_owned()))
.join("accounts-bench");
if fs::remove_dir_all(path.clone()).is_err() {
println!("Warning: Couldn't remove {:?}", path);
}
let accounts = Accounts::new(vec![path]);
let accounts = Accounts::new(vec![path], &ClusterType::Testnet);
println!("Creating {} accounts", num_accounts);
let mut create_time = Measure::start("create accounts");
let pubkeys: Vec<_> = (0..num_slots)
@@ -96,7 +98,7 @@ fn main() {
} else {
let mut pubkeys: Vec<Pubkey> = vec![];
let mut time = Measure::start("hash");
let hash = accounts.accounts_db.update_accounts_hash(0, &ancestors);
let hash = accounts.accounts_db.update_accounts_hash(0, &ancestors).0;
time.stop();
println!("hash: {} {}", hash, time);
create_test_accounts(&accounts, &mut pubkeys, 1, 0);

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.3.0"
version = "1.3.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,17 +12,17 @@ clap = "2.33.1"
crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.3.1"
solana-core = { path = "../core", version = "1.3.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.0" }
solana-streamer = { path = "../streamer", version = "1.3.0" }
solana-perf = { path = "../perf", version = "1.3.0" }
solana-ledger = { path = "../ledger", version = "1.3.0" }
solana-logger = { path = "../logger", version = "1.3.0" }
solana-runtime = { path = "../runtime", version = "1.3.0" }
solana-measure = { path = "../measure", version = "1.3.0" }
solana-sdk = { path = "../sdk", version = "1.3.0" }
solana-version = { path = "../version", version = "1.3.0" }
rayon = "1.4.0"
solana-core = { path = "../core", version = "1.3.10" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.10" }
solana-streamer = { path = "../streamer", version = "1.3.10" }
solana-perf = { path = "../perf", version = "1.3.10" }
solana-ledger = { path = "../ledger", version = "1.3.10" }
solana-logger = { path = "../logger", version = "1.3.10" }
solana-runtime = { path = "../runtime", version = "1.3.10" }
solana-measure = { path = "../measure", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
solana-version = { path = "../version", version = "1.3.10" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -167,6 +167,7 @@ fn main() {
let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let bank0 = Bank::new(&genesis_config);
let mut bank_forks = BankForks::new(bank0);
let mut bank = bank_forks.working_bank();
@@ -224,6 +225,7 @@ fn main() {
verified_receiver,
vote_receiver,
None,
replay_vote_sender,
);
poh_recorder.lock().unwrap().set_bank(&bank);

banks-client/Cargo.toml Normal file

@@ -0,0 +1,30 @@
[package]
name = "solana-banks-client"
version = "1.3.10"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
async-trait = "0.1.36"
bincode = "1.3.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
tarpc = { version = "0.21.0", features = ["full"] }
tokio = "0.2"
tokio-serde = { version = "0.6", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "1.3.10" }
solana-banks-server = { path = "../banks-server", version = "1.3.10" }
[lib]
crate-type = ["lib"]
name = "solana_banks_client"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

banks-client/src/lib.rs Normal file

@@ -0,0 +1,283 @@
//! A client for the ledger state, from the perspective of an arbitrary validator.
//!
//! Use start_tcp_client() to create a client and then import BanksClientExt to
//! access its methods. Additional "*_with_context" methods are also available,
//! but they are undocumented, may change over time, and are generally more
//! cumbersome to use.
use async_trait::async_trait;
use futures::future::join_all;
pub use solana_banks_interface::{BanksClient, TransactionStatus};
use solana_banks_interface::{BanksRequest, BanksResponse};
use solana_sdk::{
account::Account, clock::Slot, commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, signature::Signature,
transaction::Transaction, transport,
};
use std::io::{self, Error, ErrorKind};
use tarpc::{
client, context,
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
serde_transport::tcp,
};
use tokio::{net::ToSocketAddrs, time::Duration};
use tokio_serde::formats::Bincode;
#[async_trait]
pub trait BanksClientExt {
/// Send a transaction and return immediately. The server will resend the
/// transaction until either it is accepted by the cluster or the transaction's
/// blockhash expires.
async fn send_transaction(&mut self, transaction: Transaction) -> io::Result<()>;
/// Return a recent, rooted blockhash from the server. The cluster will only accept
/// transactions with a blockhash that has not yet expired. Use the `get_fees`
/// method to get both a blockhash and the blockhash's last valid slot.
async fn get_recent_blockhash(&mut self) -> io::Result<Hash>;
/// Return the fee parameters associated with a recent, rooted blockhash. The cluster
/// will use the transaction's blockhash to look up these same fee parameters and
/// use them to calculate the transaction fee.
async fn get_fees(&mut self) -> io::Result<(FeeCalculator, Hash, Slot)>;
/// Send a transaction and return after the transaction has been rejected or
/// reached the given level of commitment.
async fn process_transaction_with_commitment(
&mut self,
transaction: Transaction,
commitment: CommitmentLevel,
) -> transport::Result<()>;
/// Send a transaction and return after the transaction has been finalized or rejected.
async fn process_transaction(&mut self, transaction: Transaction) -> transport::Result<()>;
/// Return the status of a transaction with a signature matching the transaction's first
/// signature. Return None if the transaction is not found, which may be because the
/// blockhash was expired or the fee-paying account had insufficient funds to pay the
/// transaction fee. Note that servers rarely store the full transaction history. This
/// method may return None if the transaction status has been discarded.
async fn get_transaction_status(
&mut self,
signature: Signature,
) -> io::Result<Option<TransactionStatus>>;
/// Same as get_transaction_status, but for multiple transactions.
async fn get_transaction_statuses(
&mut self,
signatures: Vec<Signature>,
) -> io::Result<Vec<Option<TransactionStatus>>>;
/// Return the most recent rooted slot height. All transactions at or below this height
/// are said to be finalized. The cluster will not fork to a higher slot height.
async fn get_root_slot(&mut self) -> io::Result<Slot>;
/// Return the account at the given address at the time of the most recent root slot.
/// If the account is not found, None is returned.
async fn get_account(&mut self, address: Pubkey) -> io::Result<Option<Account>>;
/// Return the balance in lamports of an account at the given address at the slot
/// corresponding to the given commitment level.
async fn get_balance_with_commitment(
&mut self,
address: Pubkey,
commitment: CommitmentLevel,
) -> io::Result<u64>;
/// Return the balance in lamports of an account at the given address at the time
/// of the most recent root slot.
async fn get_balance(&mut self, address: Pubkey) -> io::Result<u64>;
}
#[async_trait]
impl BanksClientExt for BanksClient {
async fn send_transaction(&mut self, transaction: Transaction) -> io::Result<()> {
self.send_transaction_with_context(context::current(), transaction)
.await
}
async fn get_fees(&mut self) -> io::Result<(FeeCalculator, Hash, Slot)> {
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::Root)
.await
}
async fn get_recent_blockhash(&mut self) -> io::Result<Hash> {
Ok(self.get_fees().await?.1)
}
async fn process_transaction_with_commitment(
&mut self,
transaction: Transaction,
commitment: CommitmentLevel,
) -> transport::Result<()> {
let mut ctx = context::current();
ctx.deadline += Duration::from_secs(50);
let result = self
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.await?;
match result {
None => Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into()),
Some(transaction_result) => Ok(transaction_result?),
}
}
async fn process_transaction(&mut self, transaction: Transaction) -> transport::Result<()> {
self.process_transaction_with_commitment(transaction, CommitmentLevel::default())
.await
}
async fn get_root_slot(&mut self) -> io::Result<Slot> {
self.get_slot_with_context(context::current(), CommitmentLevel::Root)
.await
}
async fn get_account(&mut self, address: Pubkey) -> io::Result<Option<Account>> {
self.get_account_with_commitment_and_context(
context::current(),
address,
CommitmentLevel::default(),
)
.await
}
async fn get_balance_with_commitment(
&mut self,
address: Pubkey,
commitment: CommitmentLevel,
) -> io::Result<u64> {
let account = self
.get_account_with_commitment_and_context(context::current(), address, commitment)
.await?;
Ok(account.map(|x| x.lamports).unwrap_or(0))
}
async fn get_balance(&mut self, address: Pubkey) -> io::Result<u64> {
self.get_balance_with_commitment(address, CommitmentLevel::default())
.await
}
async fn get_transaction_status(
&mut self,
signature: Signature,
) -> io::Result<Option<TransactionStatus>> {
self.get_transaction_status_with_context(context::current(), signature)
.await
}
async fn get_transaction_statuses(
&mut self,
signatures: Vec<Signature>,
) -> io::Result<Vec<Option<TransactionStatus>>> {
// tarpc futures oddly hold a mutable reference back to the client so clone the client upfront
let mut clients_and_signatures: Vec<_> = signatures
.into_iter()
.map(|signature| (self.clone(), signature))
.collect();
let futs = clients_and_signatures
.iter_mut()
.map(|(client, signature)| client.get_transaction_status(*signature));
let statuses = join_all(futs).await;
// Convert Vec<Result<_, _>> to Result<Vec<_>>
statuses.into_iter().collect()
}
}
pub async fn start_client(
transport: UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>>,
) -> io::Result<BanksClient> {
BanksClient::new(client::Config::default(), transport).spawn()
}
pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClient> {
let transport = tcp::connect(addr, Bincode::default()).await?;
BanksClient::new(client::Config::default(), transport).spawn()
}
#[cfg(test)]
mod tests {
use super::*;
use solana_banks_server::banks_server::start_local_server;
use solana_runtime::{bank::Bank, bank_forks::BankForks, genesis_utils::create_genesis_config};
use solana_sdk::{message::Message, pubkey::Pubkey, signature::Signer, system_instruction};
use std::sync::{Arc, RwLock};
use tarpc::transport;
use tokio::{runtime::Runtime, time::delay_for};
#[test]
fn test_banks_client_new() {
let (client_transport, _server_transport) = transport::channel::unbounded();
BanksClient::new(client::Config::default(), client_transport);
}
#[test]
fn test_banks_server_transfer_via_server() -> io::Result<()> {
// This test shows the preferred way to interact with BanksServer.
// It creates a runtime explicitly (no globals via tokio macros) and calls
// `runtime.block_on()` just once, to run all the async code.
let genesis = create_genesis_config(10);
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::new(
&genesis.genesis_config,
))));
let bob_pubkey = Pubkey::new_rand();
let mint_pubkey = genesis.mint_keypair.pubkey();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
Runtime::new()?.block_on(async {
let client_transport = start_local_server(&bank_forks).await;
let mut banks_client =
BanksClient::new(client::Config::default(), client_transport).spawn()?;
let recent_blockhash = banks_client.get_recent_blockhash().await?;
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
banks_client.process_transaction(transaction).await.unwrap();
assert_eq!(banks_client.get_balance(bob_pubkey).await?, 1);
Ok(())
})
}
#[test]
fn test_banks_server_transfer_via_client() -> io::Result<()> {
// The caller may not want to hold the connection open until the transaction
// is processed (or blockhash expires). In this test, we verify the
// server-side functionality is available to the client.
let genesis = create_genesis_config(10);
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::new(
&genesis.genesis_config,
))));
let mint_pubkey = &genesis.mint_keypair.pubkey();
let bob_pubkey = Pubkey::new_rand();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
Runtime::new()?.block_on(async {
let client_transport = start_local_server(&bank_forks).await;
let mut banks_client =
BanksClient::new(client::Config::default(), client_transport).spawn()?;
let (_, recent_blockhash, last_valid_slot) = banks_client.get_fees().await?;
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
let signature = transaction.signatures[0];
banks_client.send_transaction(transaction).await?;
let mut status = banks_client.get_transaction_status(signature).await?;
while status.is_none() {
let root_slot = banks_client.get_root_slot().await?;
if root_slot > last_valid_slot {
break;
}
delay_for(Duration::from_millis(100)).await;
status = banks_client.get_transaction_status(signature).await?;
}
assert!(status.unwrap().err.is_none());
assert_eq!(banks_client.get_balance(bob_pubkey).await?, 1);
Ok(())
})
}
}
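
A hedged sketch of how a downstream consumer might use this crate, following the module docs above: connect with `start_tcp_client`, bring `BanksClientExt` into scope, and submit a transfer. The socket address is a placeholder and the blocking-runtime wrapper mirrors the tests rather than any documented entry point.

use solana_banks_client::{start_tcp_client, BanksClientExt};
use solana_sdk::{
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    system_instruction,
    transaction::Transaction,
    transport,
};
use tokio::runtime::Runtime;

fn transfer_one_lamport(payer: &Keypair, to: &Pubkey) -> transport::Result<()> {
    Runtime::new()?.block_on(async {
        // Placeholder address; in practice this is a validator's banks port.
        let mut client = start_tcp_client("127.0.0.1:8901").await?;
        let instruction = system_instruction::transfer(&payer.pubkey(), to, 1);
        let message = Message::new(&[instruction], Some(&payer.pubkey()));
        let recent_blockhash = client.get_recent_blockhash().await?;
        let transaction = Transaction::new(&[payer], message, recent_blockhash);
        // Resolves once the transaction is finalized or rejected.
        client.process_transaction(transaction).await
    })
}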

View File

@@ -0,0 +1,21 @@
[package]
name = "solana-banks-interface"
version = "1.3.10"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
serde = { version = "1.0.112", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "1.3.10" }
tarpc = { version = "0.21.0", features = ["full"] }
[lib]
crate-type = ["lib"]
name = "solana_banks_interface"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -0,0 +1,49 @@
use serde::{Deserialize, Serialize};
use solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction, TransactionError},
};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TransactionStatus {
pub slot: Slot,
pub confirmations: Option<usize>, // None = rooted
pub err: Option<TransactionError>,
}
#[tarpc::service]
pub trait Banks {
async fn send_transaction_with_context(transaction: Transaction);
async fn get_fees_with_commitment_and_context(
commitment: CommitmentLevel,
) -> (FeeCalculator, Hash, Slot);
async fn get_transaction_status_with_context(signature: Signature)
-> Option<TransactionStatus>;
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
async fn process_transaction_with_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>>;
async fn get_account_with_commitment_and_context(
address: Pubkey,
commitment: CommitmentLevel,
) -> Option<Account>;
}
#[cfg(test)]
mod tests {
use super::*;
use tarpc::{client, transport};
#[test]
fn test_banks_client_new() {
let (client_transport, _server_transport) = transport::channel::unbounded();
BanksClient::new(client::Config::default(), client_transport);
}
}

banks-server/Cargo.toml Normal file

@@ -0,0 +1,28 @@
[package]
name = "solana-banks-server"
version = "1.3.10"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
bincode = "1.3.1"
futures = "0.3"
log = "0.4.8"
solana-banks-interface = { path = "../banks-interface", version = "1.3.10" }
solana-runtime = { path = "../runtime", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
solana-metrics = { path = "../metrics", version = "1.3.10" }
tarpc = { version = "0.21.0", features = ["full"] }
tokio = "0.2"
tokio-serde = { version = "0.6", features = ["bincode"] }
[lib]
crate-type = ["lib"]
name = "solana_banks_server"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -0,0 +1,272 @@
use crate::send_transaction_service::{SendTransactionService, TransactionInfo};
use bincode::{deserialize, serialize};
use futures::{
future,
prelude::stream::{self, StreamExt},
};
use solana_banks_interface::{Banks, BanksRequest, BanksResponse, TransactionStatus};
use solana_runtime::{
bank::Bank,
bank_forks::BankForks,
commitment::{BlockCommitmentCache, CommitmentSlots},
};
use solana_sdk::{
account::Account,
clock::Slot,
commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
};
use std::{
collections::HashMap,
io,
net::SocketAddr,
sync::{
atomic::AtomicBool,
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::Builder,
time::Duration,
};
use tarpc::{
context::Context,
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
serde_transport::tcp,
server::{self, Channel, Handler},
transport,
};
use tokio::time::delay_for;
use tokio_serde::formats::Bincode;
#[derive(Clone)]
struct BanksServer {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
transaction_sender: Sender<TransactionInfo>,
}
impl BanksServer {
/// Return a BanksServer that forwards transactions to the
/// given sender. If unit-testing, those transactions can go to
/// a bank in the given BankForks. Otherwise, the receiver should
/// forward them to a validator in the leader schedule.
fn new(
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
transaction_sender: Sender<TransactionInfo>,
) -> Self {
Self {
bank_forks,
block_commitment_cache,
transaction_sender,
}
}
fn run(bank: &Bank, transaction_receiver: Receiver<TransactionInfo>) {
while let Ok(info) = transaction_receiver.recv() {
let mut transaction_infos = vec![info];
while let Ok(info) = transaction_receiver.try_recv() {
transaction_infos.push(info);
}
let transactions: Vec<_> = transaction_infos
.into_iter()
.map(|info| deserialize(&info.wire_transaction).unwrap())
.collect();
let _ = bank.process_transactions(&transactions);
}
}
/// Useful for unit-testing
fn new_loopback(bank_forks: Arc<RwLock<BankForks>>) -> Self {
let (transaction_sender, transaction_receiver) = channel();
let bank = bank_forks.read().unwrap().working_bank();
let slot = bank.slot();
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(
HashMap::default(),
0,
CommitmentSlots {
slot,
root: 0,
highest_confirmed_slot: 0,
highest_confirmed_root: 0,
},
)));
Builder::new()
.name("solana-bank-forks-client".to_string())
.spawn(move || Self::run(&bank, transaction_receiver))
.unwrap();
Self::new(bank_forks, block_commitment_cache, transaction_sender)
}
fn slot(&self, commitment: CommitmentLevel) -> Slot {
self.block_commitment_cache
.read()
.unwrap()
.slot_with_commitment(commitment)
}
fn bank(&self, commitment: CommitmentLevel) -> Arc<Bank> {
self.bank_forks.read().unwrap()[self.slot(commitment)].clone()
}
async fn poll_signature_status(
self,
signature: Signature,
last_valid_slot: Slot,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
let mut status = self.bank(commitment).get_signature_status(&signature);
while status.is_none() {
delay_for(Duration::from_millis(200)).await;
let bank = self.bank(commitment);
if bank.slot() > last_valid_slot {
break;
}
status = bank.get_signature_status(&signature);
}
status
}
}
#[tarpc::server]
impl Banks for BanksServer {
async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
let blockhash = &transaction.message.recent_blockhash;
let last_valid_slot = self
.bank_forks
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_slot(&blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
self.transaction_sender.send(info).unwrap();
}
async fn get_fees_with_commitment_and_context(
self,
_: Context,
commitment: CommitmentLevel,
) -> (FeeCalculator, Hash, Slot) {
let bank = self.bank(commitment);
let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap();
(fee_calculator, blockhash, last_valid_slot)
}
async fn get_transaction_status_with_context(
self,
_: Context,
signature: Signature,
) -> Option<TransactionStatus> {
let bank = self.bank(CommitmentLevel::Recent);
let (slot, status) = bank.get_signature_status_slot(&signature)?;
let r_block_commitment_cache = self.block_commitment_cache.read().unwrap();
let confirmations = if r_block_commitment_cache.root() >= slot {
None
} else {
r_block_commitment_cache
.get_confirmation_count(slot)
.or(Some(0))
};
Some(TransactionStatus {
slot,
confirmations,
err: status.err(),
})
}
async fn get_slot_with_context(self, _: Context, commitment: CommitmentLevel) -> Slot {
self.slot(commitment)
}
async fn process_transaction_with_commitment_and_context(
self,
_: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
let blockhash = &transaction.message.recent_blockhash;
let last_valid_slot = self
.bank_forks
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_slot(&blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
self.transaction_sender.send(info).unwrap();
self.poll_signature_status(signature, last_valid_slot, commitment)
.await
}
async fn get_account_with_commitment_and_context(
self,
_: Context,
address: Pubkey,
commitment: CommitmentLevel,
) -> Option<Account> {
let bank = self.bank(commitment);
bank.get_account(&address)
}
}
pub async fn start_local_server(
bank_forks: &Arc<RwLock<BankForks>>,
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
let banks_server = BanksServer::new_loopback(bank_forks.clone());
let (client_transport, server_transport) = transport::channel::unbounded();
let server = server::new(server::Config::default())
.incoming(stream::once(future::ready(server_transport)))
.respond_with(banks_server.serve());
tokio::spawn(server);
client_transport
}
pub async fn start_tcp_server(
listen_addr: SocketAddr,
tpu_addr: SocketAddr,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) -> io::Result<()> {
// Note: These settings are copied straight from the tarpc example.
let server = tcp::listen(listen_addr, Bincode::default)
.await?
// Ignore accept errors.
.filter_map(|r| future::ready(r.ok()))
.map(server::BaseChannel::with_defaults)
// Limit channels to 1 per IP.
.max_channels_per_key(1, |t| t.as_ref().peer_addr().unwrap().ip())
// serve is generated by the service attribute. It takes as input any type implementing
// the generated Banks trait.
.map(move |chan| {
let (sender, receiver) = channel();
let exit_send_transaction_service = Arc::new(AtomicBool::new(false));
SendTransactionService::new(
tpu_addr,
&bank_forks,
&exit_send_transaction_service,
receiver,
);
let server =
BanksServer::new(bank_forks.clone(), block_commitment_cache.clone(), sender);
chan.respond_with(server.serve()).execute()
})
// Max 10 channels.
.buffer_unordered(10)
.for_each(|_| async {});
server.await;
Ok(())
}
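For reference, the client side of this TCP listener connects with the matching tarpc transport and codec. A minimal sketch, assuming tarpc 0.21's `serde_transport::tcp::connect` helper and the `BanksClient` generated from the solana-banks-interface service definition (the codec-argument form is an assumption; check the tarpc version pinned in Cargo.toml):

use std::{io, net::SocketAddr};
use solana_banks_interface::BanksClient;
use tarpc::{client, serde_transport::tcp};
use tokio_serde::formats::Bincode;

// Connect to a BanksServer started with start_tcp_server() above.
async fn connect_banks_client(addr: SocketAddr) -> io::Result<BanksClient> {
    // Bincode on the wire, matching the server's tcp::listen(listen_addr, Bincode::default).
    let transport = tcp::connect(addr, Bincode::default()).await?;
    BanksClient::new(client::Config::default(), transport).spawn()
}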

banks-server/src/lib.rs Normal file
View File

@@ -0,0 +1,6 @@
pub mod banks_server;
pub mod rpc_banks_service;
pub mod send_transaction_service;
#[macro_use]
extern crate solana_metrics;

View File

@@ -0,0 +1,116 @@
//! The `rpc_banks_service` module implements the Solana Banks RPC API.
use crate::banks_server::start_tcp_server;
use futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select};
use solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache};
use std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
};
use tokio::{
runtime::Runtime,
time::{self, Duration},
};
pub struct RpcBanksService {
thread_hdl: JoinHandle<()>,
}
/// Run the TCP service until `exit` is set to true
async fn start_abortable_tcp_server(
listen_addr: SocketAddr,
tpu_addr: SocketAddr,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
exit: Arc<AtomicBool>,
) {
let server = start_tcp_server(
listen_addr,
tpu_addr,
bank_forks.clone(),
block_commitment_cache.clone(),
)
.fuse();
let interval = time::interval(Duration::from_millis(100)).fuse();
pin_mut!(server, interval);
loop {
select! {
_ = server => {},
_ = interval.select_next_some() => {
if exit.load(Ordering::Relaxed) {
break;
}
}
}
}
}
impl RpcBanksService {
fn run(
listen_addr: SocketAddr,
tpu_addr: SocketAddr,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
exit: Arc<AtomicBool>,
) {
let server = start_abortable_tcp_server(
listen_addr,
tpu_addr,
bank_forks,
block_commitment_cache,
exit,
);
Runtime::new().unwrap().block_on(server);
}
pub fn new(
listen_addr: SocketAddr,
tpu_addr: SocketAddr,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
exit: &Arc<AtomicBool>,
) -> Self {
let bank_forks = bank_forks.clone();
let block_commitment_cache = block_commitment_cache.clone();
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("solana-rpc-banks".to_string())
.spawn(move || {
Self::run(
listen_addr,
tpu_addr,
bank_forks,
block_commitment_cache,
exit,
)
})
.unwrap();
Self { thread_hdl }
}
pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_runtime::bank::Bank;
#[test]
fn test_rpc_banks_server_exit() {
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::default())));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let exit = Arc::new(AtomicBool::new(false));
let addr = "127.0.0.1:0".parse().unwrap();
let service = RpcBanksService::new(addr, addr, &bank_forks, &block_commitment_cache, &exit);
exit.store(true, Ordering::Relaxed);
service.join().unwrap();
}
}
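Outside the test, the service is meant to be wired up the same way: construct it with the validator's listen and TPU addresses plus the shared bank state, then set `exit` and join on shutdown. A rough sketch using the RpcBanksService defined above (function and variable names are placeholders, not part of this change):

use std::{
    net::SocketAddr,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc, RwLock,
    },
};
use solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache};

fn run_banks_service(
    rpc_banks_addr: SocketAddr,
    tpu_addr: SocketAddr,
    bank_forks: Arc<RwLock<BankForks>>,
    block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) {
    let exit = Arc::new(AtomicBool::new(false));
    let service = RpcBanksService::new(
        rpc_banks_addr,
        tpu_addr,
        &bank_forks,
        &block_commitment_cache,
        &exit,
    );
    // ... run until shutdown is requested ...
    exit.store(true, Ordering::Relaxed); // stops the select! loop in start_abortable_tcp_server
    service.join().unwrap();
}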

View File

@@ -1,6 +1,6 @@
use crate::{bank::Bank, bank_forks::BankForks};
use log::*;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{clock::Slot, signature::Signature};
use std::{
collections::HashMap,
@@ -22,9 +22,9 @@ pub struct SendTransactionService {
}
pub struct TransactionInfo {
signature: Signature,
wire_transaction: Vec<u8>,
last_valid_slot: Slot,
pub signature: Signature,
pub wire_transaction: Vec<u8>,
pub last_valid_slot: Slot,
}
impl TransactionInfo {
@@ -205,8 +205,6 @@ mod test {
#[test]
fn process_transactions() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(4);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.3.0"
version = "1.3.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,24 +15,24 @@ log = "0.4.8"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.7.0"
rayon = "1.3.1"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.3.0" }
solana-core = { path = "../core", version = "1.3.0" }
solana-genesis = { path = "../genesis", version = "1.3.0" }
solana-client = { path = "../client", version = "1.3.0" }
solana-faucet = { path = "../faucet", version = "1.3.0" }
solana-exchange-program = { path = "../programs/exchange", version = "1.3.0" }
solana-logger = { path = "../logger", version = "1.3.0" }
solana-metrics = { path = "../metrics", version = "1.3.0" }
solana-net-utils = { path = "../net-utils", version = "1.3.0" }
solana-runtime = { path = "../runtime", version = "1.3.0" }
solana-sdk = { path = "../sdk", version = "1.3.0" }
solana-version = { path = "../version", version = "1.3.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.10" }
solana-core = { path = "../core", version = "1.3.10" }
solana-genesis = { path = "../genesis", version = "1.3.10" }
solana-client = { path = "../client", version = "1.3.10" }
solana-faucet = { path = "../faucet", version = "1.3.10" }
solana-exchange-program = { path = "../programs/exchange", version = "1.3.10" }
solana-logger = { path = "../logger", version = "1.3.10" }
solana-metrics = { path = "../metrics", version = "1.3.10" }
solana-net-utils = { path = "../net-utils", version = "1.3.10" }
solana-runtime = { path = "../runtime", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
solana-version = { path = "../version", version = "1.3.10" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.3.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.3.10" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -2,18 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.3.0"
version = "1.3.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.3.0" }
solana-streamer = { path = "../streamer", version = "1.3.0" }
solana-logger = { path = "../logger", version = "1.3.0" }
solana-net-utils = { path = "../net-utils", version = "1.3.0" }
solana-version = { path = "../version", version = "1.3.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.10" }
solana-streamer = { path = "../streamer", version = "1.3.10" }
solana-logger = { path = "../logger", version = "1.3.10" }
solana-net-utils = { path = "../net-utils", version = "1.3.10" }
solana-version = { path = "../version", version = "1.3.10" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.3.0"
version = "1.3.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,26 +11,26 @@ homepage = "https://solana.com/"
bincode = "1.3.1"
clap = "2.33.1"
log = "0.4.8"
rayon = "1.3.1"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.3.0" }
solana-core = { path = "../core", version = "1.3.0" }
solana-genesis = { path = "../genesis", version = "1.3.0" }
solana-client = { path = "../client", version = "1.3.0" }
solana-faucet = { path = "../faucet", version = "1.3.0" }
solana-logger = { path = "../logger", version = "1.3.0" }
solana-metrics = { path = "../metrics", version = "1.3.0" }
solana-measure = { path = "../measure", version = "1.3.0" }
solana-net-utils = { path = "../net-utils", version = "1.3.0" }
solana-runtime = { path = "../runtime", version = "1.3.0" }
solana-sdk = { path = "../sdk", version = "1.3.0" }
solana-version = { path = "../version", version = "1.3.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.10" }
solana-core = { path = "../core", version = "1.3.10" }
solana-genesis = { path = "../genesis", version = "1.3.10" }
solana-client = { path = "../client", version = "1.3.10" }
solana-faucet = { path = "../faucet", version = "1.3.10" }
solana-logger = { path = "../logger", version = "1.3.10" }
solana-metrics = { path = "../metrics", version = "1.3.10" }
solana-measure = { path = "../measure", version = "1.3.10" }
solana-net-utils = { path = "../net-utils", version = "1.3.10" }
solana-runtime = { path = "../runtime", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
solana-version = { path = "../version", version = "1.3.10" }
[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.3.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.3.10" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -89,11 +89,17 @@ BETA_CHANNEL_LATEST_TAG=${beta_tag:+v$beta_tag}
STABLE_CHANNEL_LATEST_TAG=${stable_tag:+v$stable_tag}
if [[ $CI_BRANCH = "$STABLE_CHANNEL" ]]; then
if [[ -n $CI_BASE_BRANCH ]]; then
BRANCH="$CI_BASE_BRANCH"
elif [[ -n $CI_BRANCH ]]; then
BRANCH="$CI_BRANCH"
fi
if [[ $BRANCH = "$STABLE_CHANNEL" ]]; then
CHANNEL=stable
elif [[ $CI_BRANCH = "$EDGE_CHANNEL" ]]; then
elif [[ $BRANCH = "$EDGE_CHANNEL" ]]; then
CHANNEL=edge
elif [[ $CI_BRANCH = "$BETA_CHANNEL" ]]; then
elif [[ $BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta
fi

View File

@@ -7,7 +7,7 @@ source multinode-demo/common.sh
rm -rf config/run/init-completed config/ledger config/snapshot-ledger
timeout 15 ./run.sh &
timeout 120 ./run.sh &
pid=$!
attempts=20

View File

@@ -76,7 +76,7 @@ RestartForceExitStatus=SIGPIPE
TimeoutStartSec=10
TimeoutStopSec=0
KillMode=process
LimitNOFILE=65536
LimitNOFILE=500000
[Install]
WantedBy=multi-user.target

View File

@@ -8,5 +8,5 @@ source "$HERE"/utils.sh
ensure_env || exit 1
# Allow more files to be opened by a user
sed -i 's/^\(# End of file\)/* soft nofile 65535\n\n\1/' /etc/security/limits.conf
echo "* - nofile 500000" > /etc/security/limits.d/90-solana-nofiles.conf

View File

@@ -41,13 +41,14 @@ if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
echo "$0: [tree (for outdated Cargo.lock sync)|check (for compilation error)|update -p foo --precise x.y.z (for your Cargo.toml update)] ..." >&2
exit "$check_status"
fi
# Ensure nightly and --benches
_ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets
else
echo "Note: cargo-for-all-lock-files.sh skipped because $CI_BASE_BRANCH != $EDGE_CHANNEL"
fi
# Ensure nightly and --benches
_ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets
_ ci/order-crates-for-publishing.py
_ cargo +"$rust_stable" fmt --all -- --check

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.3.0"
version = "1.3.10"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.0" }
solana-sdk = { path = "../sdk", version = "1.3.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
thiserror = "1.0.20"
tiny-bip39 = "0.7.0"
url = "2.1.0"

View File

@@ -8,6 +8,7 @@ use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
clock::UnixTimestamp,
commitment_config::CommitmentConfig,
genesis_config::ClusterType,
native_token::sol_to_lamports,
pubkey::Pubkey,
signature::{read_keypair_file, Keypair, Signature, Signer},
@@ -178,6 +179,10 @@ pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option<u64> {
value_of(matches, name).map(sol_to_lamports)
}
pub fn cluster_type_of(matches: &ArgMatches<'_>, name: &str) -> Option<ClusterType> {
value_of(matches, name)
}
pub fn commitment_of(matches: &ArgMatches<'_>, name: &str) -> Option<CommitmentConfig> {
matches.value_of(name).map(|value| match value {
"max" => CommitmentConfig::max(),

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.0"
version = "1.3.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

View File

@@ -76,6 +76,17 @@ impl Config {
ws_url.to_string()
}
pub fn compute_rpc_banks_url(json_rpc_url: &str) -> String {
let json_rpc_url: Option<Url> = json_rpc_url.parse().ok();
if json_rpc_url.is_none() {
return "".to_string();
}
let mut url = json_rpc_url.unwrap();
let port = url.port_or_known_default().unwrap_or(80);
url.set_port(Some(port + 2)).expect("unable to set port");
url.to_string()
}
pub fn import_address_labels<P>(&mut self, filename: P) -> Result<(), io::Error>
where
P: AsRef<Path>,
@@ -122,4 +133,28 @@ mod test {
assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
}
#[test]
fn compute_rpc_banks_url() {
assert_eq!(
Config::compute_rpc_banks_url(&"http://devnet.solana.com"),
"http://devnet.solana.com:82/".to_string()
);
assert_eq!(
Config::compute_rpc_banks_url(&"https://devnet.solana.com"),
"https://devnet.solana.com:445/".to_string()
);
assert_eq!(
Config::compute_rpc_banks_url(&"http://example.com:8899"),
"http://example.com:8901/".to_string()
);
assert_eq!(
Config::compute_rpc_banks_url(&"https://example.com:1234"),
"https://example.com:1236/".to_string()
);
assert_eq!(Config::compute_rpc_banks_url(&"garbage"), String::new());
}
}
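The convention encoded here is that the banks RPC port is the JSON RPC port plus two, as the tests above spell out (8899 becomes 8901, the implicit https port 443 becomes 445). A small usage sketch, assuming the existing `json_rpc_url` field on `Config`:

use solana_cli_config::Config;

// Derive the banks RPC endpoint from a loaded CLI config.
fn banks_url_for(config: &Config) -> String {
    // e.g. "http://example.com:8899" -> "http://example.com:8901/"
    Config::compute_rpc_banks_url(&config.json_rpc_url)
}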

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.0"
version = "1.3.10"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,29 +27,29 @@ reqwest = { version = "0.10.6", default-features = false, features = ["blocking"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.3.0" }
solana-budget-program = { path = "../programs/budget", version = "1.3.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.0" }
solana-cli-config = { path = "../cli-config", version = "1.3.0" }
solana-client = { path = "../client", version = "1.3.0" }
solana-config-program = { path = "../programs/config", version = "1.3.0" }
solana-faucet = { path = "../faucet", version = "1.3.0" }
solana-logger = { path = "../logger", version = "1.3.0" }
solana-net-utils = { path = "../net-utils", version = "1.3.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.0" }
solana-runtime = { path = "../runtime", version = "1.3.0" }
solana-sdk = { path = "../sdk", version = "1.3.0" }
solana-stake-program = { path = "../programs/stake", version = "1.3.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.0" }
solana-version = { path = "../version", version = "1.3.0" }
solana-vote-program = { path = "../programs/vote", version = "1.3.0" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.0" }
solana-account-decoder = { path = "../account-decoder", version = "1.3.10" }
solana-budget-program = { path = "../programs/budget", version = "1.3.10" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.10" }
solana-cli-config = { path = "../cli-config", version = "1.3.10" }
solana-client = { path = "../client", version = "1.3.10" }
solana-config-program = { path = "../programs/config", version = "1.3.10" }
solana-faucet = { path = "../faucet", version = "1.3.10" }
solana-logger = { path = "../logger", version = "1.3.10" }
solana-net-utils = { path = "../net-utils", version = "1.3.10" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.10" }
solana-runtime = { path = "../runtime", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
solana-stake-program = { path = "../programs/stake", version = "1.3.10" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.10" }
solana-version = { path = "../version", version = "1.3.10" }
solana-vote-program = { path = "../programs/vote", version = "1.3.10" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.10" }
thiserror = "1.0.20"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.3.0" }
solana-budget-program = { path = "../programs/budget", version = "1.3.0" }
solana-core = { path = "../core", version = "1.3.10" }
solana-budget-program = { path = "../programs/budget", version = "1.3.10" }
tempfile = "3.1.0"
[[bin]]

View File

@@ -11,7 +11,7 @@ use crate::{
vote::*,
};
use chrono::prelude::*;
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use log::*;
use num_traits::FromPrimitive;
use serde_json::{self, json, Value};
@@ -25,7 +25,7 @@ use solana_client::{
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
rpc_client::RpcClient,
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
rpc_response::RpcKeyedAccount,
rpc_response::{Response, RpcKeyedAccount},
};
#[cfg(not(test))]
use solana_faucet::faucet::request_airdrop_transaction;
@@ -33,7 +33,7 @@ use solana_faucet::faucet::request_airdrop_transaction;
use solana_faucet::faucet_mock::request_airdrop_transaction;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
bpf_loader,
bpf_loader, bpf_loader_deprecated,
clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND},
commitment_config::CommitmentConfig,
decode_error::DecodeError,
@@ -193,6 +193,10 @@ pub enum CliCommand {
program_id: Pubkey,
},
Fees,
FirstAvailableBlock,
GetBlock {
slot: Slot,
},
GetBlockTime {
slot: Option<Slot>,
},
@@ -231,8 +235,10 @@ pub enum CliCommand {
TotalSupply,
TransactionHistory {
address: Pubkey,
end_slot: Option<Slot>, // None == latest slot
slot_limit: Option<u64>, // None == search full history
before: Option<Signature>,
until: Option<Signature>,
limit: usize,
show_transactions: bool,
},
// Nonce commands
AuthorizeNonceAccount {
@@ -265,6 +271,7 @@ pub enum CliCommand {
Deploy {
program_location: String,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
},
// Stake Commands
CreateStakeAccount {
@@ -585,6 +592,7 @@ impl CliConfig<'_> {
config.commitment = CommitmentConfig::recent();
config.send_transaction_config = RpcSendTransactionConfig {
skip_preflight: true,
..RpcSendTransactionConfig::default()
};
config
}
@@ -635,6 +643,11 @@ pub fn parse_command(
command: CliCommand::Fees,
signers: vec![],
}),
("first-available-block", Some(_matches)) => Ok(CliCommandInfo {
command: CliCommand::FirstAvailableBlock,
signers: vec![],
}),
("block", Some(matches)) => parse_get_block(matches),
("block-time", Some(matches)) => parse_get_block_time(matches),
("epoch-info", Some(matches)) => parse_get_epoch_info(matches),
("genesis-hash", Some(_matches)) => Ok(CliCommandInfo {
@@ -695,11 +708,13 @@ pub fn parse_command(
signers.push(signer);
1
});
let use_deprecated_loader = matches.is_present("use_deprecated_loader");
Ok(CliCommandInfo {
command: CliCommand::Deploy {
program_location: matches.value_of("program_location").unwrap().to_string(),
address,
use_deprecated_loader,
},
signers,
})
@@ -850,9 +865,14 @@ pub fn parse_command(
_ => Err(CliError::BadParameter("Invalid signature".to_string())),
},
("decode-transaction", Some(matches)) => {
let encoded_transaction = EncodedTransaction::Binary(
matches.value_of("base58_transaction").unwrap().to_string(),
);
let blob = value_t_or_exit!(matches, "transaction", String);
let encoding = match matches.value_of("encoding").unwrap() {
"base58" => UiTransactionEncoding::Binary,
"base64" => UiTransactionEncoding::Base64,
_ => unreachable!(),
};
let encoded_transaction = EncodedTransaction::Binary(blob, encoding);
if let Some(transaction) = encoded_transaction.decode() {
Ok(CliCommandInfo {
command: CliCommand::DecodeTransaction(transaction),
@@ -1182,7 +1202,7 @@ fn process_confirm(
if let Some(transaction_status) = status {
if config.verbose {
match rpc_client
.get_confirmed_transaction(signature, UiTransactionEncoding::Binary)
.get_confirmed_transaction(signature, UiTransactionEncoding::Base64)
{
Ok(confirmed_transaction) => {
println!(
@@ -1235,7 +1255,13 @@ fn process_show_account(
let cli_account = CliAccount {
keyed_account: RpcKeyedAccount {
pubkey: account_pubkey.to_string(),
account: UiAccount::encode(account, UiAccountEncoding::Binary),
account: UiAccount::encode(
account_pubkey,
account,
UiAccountEncoding::Base64,
None,
None,
),
},
use_lamports_unit,
};
@@ -1283,6 +1309,7 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
&transaction,
RpcSendTransactionConfig {
skip_preflight: true,
..RpcSendTransactionConfig::default()
},
)
.ok();
@@ -1313,23 +1340,16 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
transactions_signatures = transactions_signatures
.into_iter()
.filter(|(_transaction, signature)| {
if let Some(signature) = signature {
if let Ok(status) = rpc_client.get_signature_status(&signature) {
if rpc_client
.get_num_blocks_since_signature_confirmation(&signature)
.unwrap_or(0)
> 1
{
return false;
} else {
return match status {
None => true,
Some(result) => result.is_err(),
};
signature
.and_then(|signature| rpc_client.get_signature_statuses(&[signature]).ok())
.map(|Response { context: _, value }| match &value[0] {
None => true,
Some(transaction_status) => {
!(transaction_status.confirmations.is_none()
|| transaction_status.confirmations.unwrap() > 1)
}
}
}
true
})
.unwrap_or(true)
})
.collect();
@@ -1359,6 +1379,7 @@ fn process_deploy(
config: &CliConfig,
program_location: &str,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
) -> ProcessResult {
let new_keypair = Keypair::new(); // Create ephemeral keypair to use for program address, if not provided
let program_id = if let Some(i) = address {
@@ -1374,6 +1395,12 @@ fn process_deploy(
CliError::DynamicProgramError(format!("Unable to read program file: {}", err))
})?;
let loader_id = if use_deprecated_loader {
bpf_loader_deprecated::id()
} else {
bpf_loader::id()
};
// Build transactions to calculate fees
let mut messages: Vec<&Message> = Vec::new();
let (blockhash, fee_calculator, _) = rpc_client
@@ -1385,35 +1412,33 @@ fn process_deploy(
&program_id.pubkey(),
minimum_balance.max(1),
program_data.len() as u64,
&bpf_loader::id(),
&loader_id,
);
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
let mut create_account_tx = Transaction::new_unsigned(message);
let signers = [config.signers[0], program_id];
create_account_tx.try_sign(&signers, blockhash)?;
messages.push(&create_account_tx.message);
let mut write_transactions = vec![];
let mut write_messages = vec![];
for (chunk, i) in program_data.chunks(DATA_CHUNK_SIZE).zip(0..) {
let instruction = loader_instruction::write(
&program_id.pubkey(),
&bpf_loader::id(),
&loader_id,
(i * DATA_CHUNK_SIZE) as u32,
chunk.to_vec(),
);
let message = Message::new(&[instruction], Some(&signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&signers, blockhash)?;
write_transactions.push(tx);
write_messages.push(message);
}
for transaction in write_transactions.iter() {
messages.push(&transaction.message);
let mut write_message_refs = vec![];
for message in write_messages.iter() {
write_message_refs.push(message);
}
messages.append(&mut write_message_refs);
let instruction = loader_instruction::finalize(&program_id.pubkey(), &bpf_loader::id());
let message = Message::new(&[instruction], Some(&signers[0].pubkey()));
let mut finalize_tx = Transaction::new_unsigned(message);
finalize_tx.try_sign(&signers, blockhash)?;
messages.push(&finalize_tx.message);
let instruction = loader_instruction::finalize(&program_id.pubkey(), &loader_id);
let finalize_message = Message::new(&[instruction], Some(&signers[0].pubkey()));
messages.push(&finalize_message);
check_account_for_multiple_fees_with_commitment(
rpc_client,
@@ -1433,11 +1458,28 @@ fn process_deploy(
CliError::DynamicProgramError("Program account allocation failed".to_string())
})?;
let (blockhash, _, _) = rpc_client
.get_recent_blockhash_with_commitment(config.commitment)?
.value;
let mut write_transactions = vec![];
for message in write_messages.into_iter() {
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&signers, blockhash)?;
write_transactions.push(tx);
}
trace!("Writing program data");
send_and_confirm_transactions_with_spinner(&rpc_client, write_transactions, &signers).map_err(
|_| CliError::DynamicProgramError("Data writes to program account failed".to_string()),
)?;
let (blockhash, _, _) = rpc_client
.get_recent_blockhash_with_commitment(config.commitment)?
.value;
let mut finalize_tx = Transaction::new_unsigned(finalize_message);
finalize_tx.try_sign(&signers, blockhash)?;
trace!("Finalizing program account");
rpc_client
.send_and_confirm_transaction_with_spinner_and_config(
@@ -1445,6 +1487,7 @@ fn process_deploy(
config.commitment,
RpcSendTransactionConfig {
skip_preflight: true,
..RpcSendTransactionConfig::default()
},
)
.map_err(|e| {
@@ -1831,6 +1874,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
program_id,
} => process_create_address_with_seed(config, from_pubkey.as_ref(), &seed, &program_id),
CliCommand::Fees => process_fees(&rpc_client, config),
CliCommand::FirstAvailableBlock => process_first_available_block(&rpc_client),
CliCommand::GetBlock { slot } => process_get_block(&rpc_client, config, *slot),
CliCommand::GetBlockTime { slot } => process_get_block_time(&rpc_client, config, *slot),
CliCommand::GetEpoch => process_get_epoch(&rpc_client, config),
CliCommand::GetEpochInfo => process_get_epoch_info(&rpc_client, config),
@@ -1871,9 +1916,19 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::TotalSupply => process_total_supply(&rpc_client, config),
CliCommand::TransactionHistory {
address,
end_slot,
slot_limit,
} => process_transaction_history(&rpc_client, address, *end_slot, *slot_limit),
before,
until,
limit,
show_transactions,
} => process_transaction_history(
&rpc_client,
config,
address,
*before,
*until,
*limit,
*show_transactions,
),
// Nonce Commands
@@ -1943,7 +1998,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::Deploy {
program_location,
address,
} => process_deploy(&rpc_client, config, program_location, *address),
use_deprecated_loader,
} => process_deploy(
&rpc_client,
config,
program_location,
*address,
*use_deprecated_loader,
),
// Stake Commands
@@ -2564,12 +2626,22 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
SubCommand::with_name("decode-transaction")
.about("Decode a base-58 binary transaction")
.arg(
Arg::with_name("base58_transaction")
Arg::with_name("transaction")
.index(1)
.value_name("BASE58_TRANSACTION")
.value_name("TRANSACTION")
.takes_value(true)
.required(true)
.help("The transaction to decode"),
.help("transaction to decode"),
)
.arg(
Arg::with_name("encoding")
.index(2)
.value_name("ENCODING")
.possible_values(&["base58", "base64"]) // Subset of `UiTransactionEncoding` enum
.default_value("base58")
.takes_value(true)
.required(true)
.help("transaction encoding"),
),
)
.subcommand(
@@ -2620,6 +2692,13 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.takes_value(true)
.validator(is_valid_signer)
.help("The signer for the desired address of the program [default: new random address]")
)
.arg(
Arg::with_name("use_deprecated_loader")
.long("use-deprecated-loader")
.takes_value(false)
.hidden(true) // Don't document this argument to discourage its use
.help("Use the deprecated BPF loader")
),
)
.subcommand(
@@ -3064,6 +3143,7 @@ mod tests {
command: CliCommand::Deploy {
program_location: "/Users/test/program.o".to_string(),
address: None,
use_deprecated_loader: false,
},
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -3084,6 +3164,7 @@ mod tests {
command: CliCommand::Deploy {
program_location: "/Users/test/program.o".to_string(),
address: Some(1),
use_deprecated_loader: false,
},
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -3803,6 +3884,7 @@ mod tests {
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: None,
use_deprecated_loader: false,
};
let result = process_command(&config);
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
@@ -3820,6 +3902,7 @@ mod tests {
config.command = CliCommand::Deploy {
program_location: "bad/file/location.so".to_string(),
address: None,
use_deprecated_loader: false,
};
assert!(process_command(&config).is_err());
}

View File

@@ -1,7 +1,9 @@
use crate::{
cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
cli_output::*,
display::{format_labeled_address, new_spinner_progress_bar, println_name_value},
display::{
format_labeled_address, new_spinner_progress_bar, println_name_value, println_transaction,
},
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
};
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
@@ -10,10 +12,10 @@ use solana_clap_utils::{
commitment::commitment_arg, input_parsers::*, input_validators::*, keypair::signer_from_path,
};
use solana_client::{
pubsub_client::{PubsubClient, SlotInfoMessage},
rpc_client::RpcClient,
pubsub_client::PubsubClient,
rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient},
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
rpc_response::SlotInfo,
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
@@ -24,6 +26,7 @@ use solana_sdk::{
message::Message,
native_token::lamports_to_sol,
pubkey::{self, Pubkey},
signature::Signature,
system_instruction, system_program,
sysvar::{
self,
@@ -32,6 +35,7 @@ use solana_sdk::{
},
transaction::Transaction,
};
use solana_transaction_status::UiTransactionEncoding;
use std::{
collections::{BTreeMap, HashMap, VecDeque},
net::SocketAddr,
@@ -53,6 +57,19 @@ pub trait ClusterQuerySubCommands {
impl ClusterQuerySubCommands for App<'_, '_> {
fn cluster_query_subcommands(self) -> Self {
self.subcommand(
SubCommand::with_name("block")
.about("Get a confirmed block")
.arg(
Arg::with_name("slot")
.long("slot")
.validator(is_slot)
.value_name("SLOT")
.takes_value(true)
.index(1)
.required(true),
),
)
.subcommand(
SubCommand::with_name("catchup")
.about("Wait for a validator to catch up to the cluster")
.arg(
@@ -87,6 +104,10 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.about("Get the version of the cluster entrypoint"),
)
.subcommand(SubCommand::with_name("fees").about("Display current cluster fees"))
.subcommand(
SubCommand::with_name("first-available-block")
.about("Get the first available block in the storage"),
)
.subcommand(SubCommand::with_name("block-time")
.about("Get estimated production time of a block")
.alias("get-block-time")
@@ -257,9 +278,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
)
.subcommand(
SubCommand::with_name("transaction-history")
.about("Show historical transactions affecting the given address, \
ordered based on the slot in which they were confirmed in \
from lowest to highest slot")
.about("Show historical transactions affecting the given address \
from newest to oldest")
.arg(
pubkey!(Arg::with_name("address")
.index(1)
@@ -267,26 +287,28 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.required(true),
"Account address"),
)
.arg(
Arg::with_name("end_slot")
.takes_value(false)
.value_name("SLOT")
.index(2)
.validator(is_slot)
.help(
"Slot to start from [default: latest slot at maximum commitment]"
),
)
.arg(
Arg::with_name("limit")
.long("limit")
.takes_value(true)
.value_name("NUMBER OF SLOTS")
.value_name("LIMIT")
.validator(is_slot)
.help(
"Limit the search to this many slots"
),
),
.default_value("1000")
.help("Maximum number of transaction signatures to return"),
)
.arg(
Arg::with_name("before")
.long("before")
.value_name("TRANSACTION_SIGNATURE")
.takes_value(true)
.help("Start with the first signature older than this one"),
)
.arg(
Arg::with_name("show_transactions")
.long("show-transactions")
.takes_value(false)
.help("Display the full transactions"),
)
)
}
}
@@ -337,6 +359,14 @@ pub fn parse_cluster_ping(
})
}
pub fn parse_get_block(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let slot = value_t_or_exit!(matches, "slot", Slot);
Ok(CliCommandInfo {
command: CliCommand::GetBlock { slot },
signers: vec![],
})
}
pub fn parse_get_block_time(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let slot = value_of(matches, "slot");
Ok(CliCommandInfo {
@@ -440,14 +470,33 @@ pub fn parse_transaction_history(
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
let end_slot = value_t!(matches, "end_slot", Slot).ok();
let slot_limit = value_t!(matches, "limit", u64).ok();
let before = match matches.value_of("before") {
Some(signature) => Some(
signature
.parse()
.map_err(|err| CliError::BadParameter(format!("Invalid signature: {}", err)))?,
),
None => None,
};
let until = match matches.value_of("until") {
Some(signature) => Some(
signature
.parse()
.map_err(|err| CliError::BadParameter(format!("Invalid signature: {}", err)))?,
),
None => None,
};
let limit = value_t_or_exit!(matches, "limit", usize);
let show_transactions = matches.is_present("show_transactions");
Ok(CliCommandInfo {
command: CliCommand::TransactionHistory {
address,
end_slot,
slot_limit,
before,
until,
limit,
show_transactions,
},
signers: vec![],
})
@@ -590,6 +639,11 @@ pub fn process_fees(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult
Ok(config.output_format.formatted_string(&fees))
}
pub fn process_first_available_block(rpc_client: &RpcClient) -> ProcessResult {
let first_available_block = rpc_client.get_first_available_block()?;
Ok(format!("{}", first_available_block))
}
pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info()?;
let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index;
@@ -625,6 +679,42 @@ pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
Ok("".to_string())
}
pub fn process_get_block(rpc_client: &RpcClient, _config: &CliConfig, slot: Slot) -> ProcessResult {
let block =
rpc_client.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Base64)?;
println!("Slot: {}", slot);
println!("Parent Slot: {}", block.parent_slot);
println!("Blockhash: {}", block.blockhash);
println!("Previous Blockhash: {}", block.previous_blockhash);
if block.block_time.is_some() {
println!("Block Time: {:?}", block.block_time);
}
if !block.rewards.is_empty() {
println!("Rewards:",);
for reward in block.rewards {
println!(
" {:<44}: {}",
reward.pubkey,
if reward.lamports > 0 {
format!("{}", lamports_to_sol(reward.lamports as u64))
} else {
format!("◎-{}", lamports_to_sol(reward.lamports.abs() as u64))
}
);
}
}
for (index, transaction_with_meta) in block.transactions.iter().enumerate() {
println!("Transaction {}:", index);
println_transaction(
&transaction_with_meta.transaction.decode().unwrap(),
&transaction_with_meta.meta,
" ",
);
}
Ok("".to_string())
}
pub fn process_get_block_time(
rpc_client: &RpcClient,
config: &CliConfig,
@@ -1021,7 +1111,7 @@ pub fn process_live_slots(url: &str) -> ProcessResult {
})?;
*/
let mut current: Option<SlotInfoMessage> = None;
let mut current: Option<SlotInfo> = None;
let mut message = "".to_string();
let slot_progress = new_spinner_progress_bar();
@@ -1305,41 +1395,63 @@ pub fn process_show_validators(
pub fn process_transaction_history(
rpc_client: &RpcClient,
config: &CliConfig,
address: &Pubkey,
end_slot: Option<Slot>, // None == use latest slot
slot_limit: Option<u64>,
before: Option<Signature>,
until: Option<Signature>,
limit: usize,
show_transactions: bool,
) -> ProcessResult {
let end_slot = {
if let Some(end_slot) = end_slot {
end_slot
let results = rpc_client.get_confirmed_signatures_for_address2_with_config(
address,
GetConfirmedSignaturesForAddress2Config {
before,
until,
limit: Some(limit),
},
)?;
let transactions_found = format!("{} transactions found", results.len());
for result in results {
if config.verbose {
println!(
"{} [slot={} status={}] {}",
result.signature,
result.slot,
match result.err {
None => "Confirmed".to_string(),
Some(err) => format!("Failed: {:?}", err),
},
result.memo.unwrap_or_else(|| "".to_string()),
);
} else {
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
println!("{}", result.signature);
}
};
let mut start_slot = match slot_limit {
Some(slot_limit) => end_slot.saturating_sub(slot_limit),
None => rpc_client.minimum_ledger_slot()?,
};
println!(
"Transactions affecting {} within slots [{},{}]",
address, start_slot, end_slot
);
let mut transaction_count = 0;
while start_slot < end_slot {
let signatures = rpc_client.get_confirmed_signatures_for_address(
address,
start_slot,
(start_slot + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE).min(end_slot),
)?;
for signature in &signatures {
println!("{}", signature);
if show_transactions {
if let Ok(signature) = result.signature.parse::<Signature>() {
match rpc_client
.get_confirmed_transaction(&signature, UiTransactionEncoding::Base64)
{
Ok(confirmed_transaction) => {
println_transaction(
&confirmed_transaction
.transaction
.transaction
.decode()
.expect("Successful decode"),
&confirmed_transaction.transaction.meta,
" ",
);
}
Err(err) => println!(" Unable to get confirmed transaction details: {}", err),
}
}
println!();
}
transaction_count += signatures.len();
start_slot += MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;
}
Ok(format!("{} transactions found", transaction_count))
Ok(transactions_found)
}
#[cfg(test)]

View File

@@ -162,7 +162,7 @@ pub fn write_transaction<W: io::Write>(
)?;
writeln!(
w,
"{} Fee: {} SOL",
"{} Fee: {}",
prefix,
lamports_to_sol(transaction_status.fee)
)?;
@@ -179,7 +179,7 @@ pub fn write_transaction<W: io::Write>(
if pre == post {
writeln!(
w,
"{} Account {} balance: {} SOL",
"{} Account {} balance: {}",
prefix,
i,
lamports_to_sol(*pre)
@@ -187,7 +187,7 @@ pub fn write_transaction<W: io::Write>(
} else {
writeln!(
w,
"{} Account {} balance: {} SOL -> {} SOL",
"{} Account {} balance: {} -> {}",
prefix,
i,
lamports_to_sol(*pre),

View File

@@ -350,7 +350,13 @@ mod tests {
)
.unwrap();
let nonce_pubkey = Pubkey::new(&[4u8; 32]);
let rpc_nonce_account = UiAccount::encode(nonce_account, UiAccountEncoding::Binary);
let rpc_nonce_account = UiAccount::encode(
&nonce_pubkey,
nonce_account,
UiAccountEncoding::Base64,
None,
None,
);
let get_account_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!(Some(rpc_nonce_account)),

View File

@@ -6,9 +6,10 @@ use crate::{
use bincode::deserialize;
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
use reqwest::blocking::Client;
use serde_derive::{Deserialize, Serialize};
use serde_json::{Map, Value};
use solana_account_decoder::validator_info::{
self, ValidatorInfo, MAX_LONG_FIELD_LENGTH, MAX_SHORT_FIELD_LENGTH,
};
use solana_clap_utils::{
input_parsers::pubkey_of,
input_validators::{is_pubkey, is_url},
@@ -27,23 +28,6 @@ use solana_sdk::{
};
use std::{error, sync::Arc};
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
pub const MAX_VALIDATOR_INFO: u64 = 576;
solana_sdk::declare_id!("Va1idator1nfo111111111111111111111111111111");
#[derive(Debug, Deserialize, PartialEq, Serialize, Default)]
pub struct ValidatorInfo {
info: String,
}
impl ConfigState for ValidatorInfo {
fn max_space() -> u64 {
MAX_VALIDATOR_INFO
}
}
// Return an error if a validator details are longer than the max length.
pub fn check_details_length(string: String) -> Result<(), String> {
if string.len() > MAX_LONG_FIELD_LENGTH {
@@ -289,7 +273,7 @@ pub fn process_set_validator_info(
.iter()
.filter(|(_, account)| {
let key_list: ConfigKeys = deserialize(&account.data).map_err(|_| false).unwrap();
key_list.keys.contains(&(id(), false))
key_list.keys.contains(&(validator_info::id(), false))
})
.find(|(pubkey, account)| {
let (validator_pubkey, _) = parse_validator_info(&pubkey, &account).unwrap();
@@ -328,7 +312,10 @@ pub fn process_set_validator_info(
};
let build_message = |lamports| {
let keys = vec![(id(), false), (config.signers[0].pubkey(), true)];
let keys = vec![
(validator_info::id(), false),
(config.signers[0].pubkey(), true),
];
if balance == 0 {
println!(
"Publishing info for Validator {:?}",
@@ -401,7 +388,7 @@ pub fn process_get_validator_info(
let key_list: ConfigKeys = deserialize(&validator_info_account.data)
.map_err(|_| false)
.unwrap();
key_list.keys.contains(&(id(), false))
key_list.keys.contains(&(validator_info::id(), false))
})
.collect()
};
@@ -503,7 +490,7 @@ mod tests {
#[test]
fn test_parse_validator_info() {
let pubkey = Pubkey::new_rand();
let keys = vec![(id(), false), (pubkey, true)];
let keys = vec![(validator_info::id(), false), (pubkey, true)];
let config = ConfigKeys { keys };
let mut info = Map::new();

View File

@@ -63,6 +63,7 @@ fn test_cli_deploy_program() {
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: None,
use_deprecated_loader: false,
};
let response = process_command(&config);
@@ -96,6 +97,7 @@ fn test_cli_deploy_program() {
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1),
use_deprecated_loader: false,
};
process_command(&config).unwrap();
let account1 = rpc_client

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.3.0"
version = "1.3.10"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,16 +14,16 @@ bs58 = "0.3.1"
indicatif = "0.15.0"
jsonrpc-core = "14.2.0"
log = "0.4.8"
rayon = "1.3.1"
rayon = "1.4.0"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.3.0" }
solana-net-utils = { path = "../net-utils", version = "1.3.0" }
solana-sdk = { path = "../sdk", version = "1.3.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.0" }
solana-vote-program = { path = "../programs/vote", version = "1.3.0" }
solana-account-decoder = { path = "../account-decoder", version = "1.3.10" }
solana-net-utils = { path = "../net-utils", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.10" }
solana-vote-program = { path = "../programs/vote", version = "1.3.10" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -32,7 +32,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.2.0"
jsonrpc-http-server = "14.2.0"
solana-logger = { path = "../logger", version = "1.3.0" }
solana-logger = { path = "../logger", version = "1.3.10" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -1,10 +1,12 @@
use crate::rpc_response::{Response as RpcResponse, RpcSignatureResult, SlotInfo};
use log::*;
use serde::{de::DeserializeOwned, Deserialize};
use serde::de::DeserializeOwned;
use serde_json::{
json,
value::Value::{Number, Object},
Map, Value,
};
use solana_sdk::signature::Signature;
use std::{
marker::PhantomData,
sync::{
@@ -18,6 +20,8 @@ use thiserror::Error;
use tungstenite::{client::AutoStream, connect, Message, WebSocket};
use url::{ParseError, Url};
type PubsubSignatureResponse = PubsubClientSubscription<RpcResponse<RpcSignatureResult>>;
#[derive(Debug, Error)]
pub enum PubsubClientError {
#[error("url parse error")]
@@ -33,13 +37,6 @@ pub enum PubsubClientError {
UnexpectedMessageError,
}
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct SlotInfoMessage {
pub parent: u64,
pub root: u64,
pub slot: u64,
}
pub struct PubsubClientSubscription<T>
where
T: DeserializeOwned,
@@ -73,18 +70,12 @@ where
{
fn send_subscribe(
writable_socket: &Arc<RwLock<WebSocket<AutoStream>>>,
operation: &str,
body: String,
) -> Result<u64, PubsubClientError> {
let method = format!("{}Subscribe", operation);
writable_socket
.write()
.unwrap()
.write_message(Message::Text(
json!({
"jsonrpc":"2.0","id":1,"method":method,"params":[]
})
.to_string(),
))?;
.write_message(Message::Text(body))?;
let message = writable_socket.write().unwrap().read_message()?;
Self::extract_subscription_id(message)
}
@@ -148,30 +139,28 @@ where
}
const SLOT_OPERATION: &str = "slot";
const SIGNATURE_OPERATION: &str = "signature";
pub struct PubsubClient {}
impl PubsubClient {
pub fn slot_subscribe(
url: &str,
) -> Result<
(
PubsubClientSubscription<SlotInfoMessage>,
Receiver<SlotInfoMessage>,
),
PubsubClientError,
> {
) -> Result<(PubsubClientSubscription<SlotInfo>, Receiver<SlotInfo>), PubsubClientError> {
let url = Url::parse(url)?;
let (socket, _response) = connect(url)?;
let (sender, receiver) = channel::<SlotInfoMessage>();
let (sender, receiver) = channel::<SlotInfo>();
let socket = Arc::new(RwLock::new(socket));
let socket_clone = socket.clone();
let exit = Arc::new(AtomicBool::new(false));
let exit_clone = exit.clone();
let subscription_id = PubsubClientSubscription::<SlotInfoMessage>::send_subscribe(
let subscription_id = PubsubClientSubscription::<SlotInfo>::send_subscribe(
&socket_clone,
SLOT_OPERATION,
json!({
"jsonrpc":"2.0","id":1,"method":format!("{}Subscribe", SLOT_OPERATION),"params":[]
})
.to_string(),
)
.unwrap();
@@ -181,7 +170,80 @@ impl PubsubClient {
break;
}
let message: Result<SlotInfoMessage, PubsubClientError> =
let message: Result<SlotInfo, PubsubClientError> =
PubsubClientSubscription::read_message(&socket_clone);
if let Ok(msg) = message {
match sender.send(msg) {
Ok(_) => (),
Err(err) => {
info!("receive error: {:?}", err);
break;
}
}
} else {
info!("receive error: {:?}", message);
break;
}
}
info!("websocket - exited receive loop");
});
let result: PubsubClientSubscription<SlotInfo> = PubsubClientSubscription {
message_type: PhantomData,
operation: SLOT_OPERATION,
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};
Ok((result, receiver))
}
pub fn signature_subscribe(
url: &str,
signature: &Signature,
) -> Result<
(
PubsubSignatureResponse,
Receiver<RpcResponse<RpcSignatureResult>>,
),
PubsubClientError,
> {
let url = Url::parse(url)?;
let (socket, _response) = connect(url)?;
let (sender, receiver) = channel::<RpcResponse<RpcSignatureResult>>();
let socket = Arc::new(RwLock::new(socket));
let socket_clone = socket.clone();
let exit = Arc::new(AtomicBool::new(false));
let exit_clone = exit.clone();
let body = json!({
"jsonrpc":"2.0",
"id":1,
"method":format!("{}Subscribe", SIGNATURE_OPERATION),
"params":[
signature.to_string(),
{"enableReceivedNotification": true }
]
})
.to_string();
let subscription_id =
PubsubClientSubscription::<RpcResponse<RpcSignatureResult>>::send_subscribe(
&socket_clone,
body,
)
.unwrap();
let t_cleanup = std::thread::spawn(move || {
loop {
if exit_clone.load(Ordering::Relaxed) {
break;
}
let message: Result<RpcResponse<RpcSignatureResult>, PubsubClientError> =
PubsubClientSubscription::read_message(&socket_clone);
if let Ok(msg) = message {
@@ -201,14 +263,15 @@ impl PubsubClient {
info!("websocket - exited receive loop");
});
let result: PubsubClientSubscription<SlotInfoMessage> = PubsubClientSubscription {
message_type: PhantomData,
operation: SLOT_OPERATION,
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};
let result: PubsubClientSubscription<RpcResponse<RpcSignatureResult>> =
PubsubClientSubscription {
message_type: PhantomData,
operation: SIGNATURE_OPERATION,
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};
Ok((result, receiver))
}
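Both subscriptions returned here are consumed the same way: the `Receiver` yields deserialized notifications until the subscription is torn down. A minimal sketch for the slot subscription (the websocket URL is a placeholder, and the `shutdown()` teardown call is assumed from the existing subscription type):

use solana_client::pubsub_client::PubsubClient;

fn watch_slots() -> Result<(), Box<dyn std::error::Error>> {
    let (mut subscription, receiver) = PubsubClient::slot_subscribe("ws://127.0.0.1:8900")?;
    for _ in 0..5 {
        // rpc_response::SlotInfo { slot, parent, root }
        let slot_info = receiver.recv()?;
        println!("slot {} (parent {}, root {})", slot_info.slot, slot_info.parent, slot_info.root);
    }
    // Assumed teardown helper; use whatever unsubscribe/shutdown method the
    // subscription type provides.
    subscription.shutdown().map_err(|_| "pubsub shutdown failed")?;
    Ok(())
}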

View File

@@ -2,7 +2,11 @@ use crate::{
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
http_sender::HttpSender,
mock_sender::{MockSender, Mocks},
rpc_config::{RpcLargestAccountsConfig, RpcSendTransactionConfig, RpcTokenAccountsFilter},
rpc_config::RpcAccountInfoConfig,
rpc_config::{
RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig,
RpcSendTransactionConfig, RpcTokenAccountsFilter,
},
rpc_request::{RpcError, RpcRequest, TokenAccountsFilter},
rpc_response::*,
rpc_sender::RpcSender,
@@ -11,10 +15,7 @@ use bincode::serialize;
use indicatif::{ProgressBar, ProgressStyle};
use log::*;
use serde_json::{json, Value};
use solana_account_decoder::{
parse_token::{parse_token, TokenAccountType, UiMint, UiMultisig, UiTokenAccount},
UiAccount,
};
use solana_account_decoder::{parse_token::UiTokenAmount, UiAccount, UiAccountEncoding};
use solana_sdk::{
account::Account,
clock::{
@@ -289,6 +290,35 @@ impl RpcClient {
Ok(signatures)
}
pub fn get_confirmed_signatures_for_address2(
&self,
address: &Pubkey,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
self.get_confirmed_signatures_for_address2_with_config(
address,
GetConfirmedSignaturesForAddress2Config::default(),
)
}
pub fn get_confirmed_signatures_for_address2_with_config(
&self,
address: &Pubkey,
config: GetConfirmedSignaturesForAddress2Config,
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
let config = RpcGetConfirmedSignaturesForAddress2Config {
before: config.before.map(|signature| signature.to_string()),
until: config.until.map(|signature| signature.to_string()),
limit: config.limit,
};
let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
RpcRequest::GetConfirmedSignaturesForAddress2,
json!([address.to_string(), config]),
)?;
Ok(result)
}
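A short sketch of calling the new pagination helper (illustrative only; it assumes an RpcClient already pointed at a node): limit is capped server-side by MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT.
use solana_client::rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient};
use solana_sdk::pubkey::Pubkey;
fn recent_signatures(client: &RpcClient, address: &Pubkey) {
    let config = GetConfirmedSignaturesForAddress2Config {
        limit: Some(10),
        ..GetConfirmedSignaturesForAddress2Config::default()
    };
    let statuses = client
        .get_confirmed_signatures_for_address2_with_config(address, config)
        .expect("getConfirmedSignaturesForAddress2");
    for status in statuses {
        println!("{} slot={} err={:?}", status.signature, status.slot, status.err);
    }
}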
pub fn get_confirmed_transaction(
&self,
signature: &Signature,
@@ -433,9 +463,14 @@ impl RpcClient {
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<Option<Account>> {
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
commitment: Some(commitment_config),
data_slice: None,
};
let response = self.sender.send(
RpcRequest::GetAccountInfo,
json!([pubkey.to_string(), commitment_config]),
json!([pubkey.to_string(), config]),
);
response
@@ -464,6 +499,38 @@ impl RpcClient {
})?
}
pub fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> ClientResult<Vec<Option<Account>>> {
Ok(self
.get_multiple_accounts_with_commitment(pubkeys, CommitmentConfig::default())?
.value)
}
pub fn get_multiple_accounts_with_commitment(
&self,
pubkeys: &[Pubkey],
commitment_config: CommitmentConfig,
) -> RpcResult<Vec<Option<Account>>> {
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
commitment: Some(commitment_config),
data_slice: None,
};
let pubkeys: Vec<_> = pubkeys.iter().map(|pubkey| pubkey.to_string()).collect();
let response = self.send(RpcRequest::GetMultipleAccounts, json!([[pubkeys], config]))?;
let Response {
context,
value: accounts,
} = serde_json::from_value::<Response<Option<UiAccount>>>(response)?;
let accounts: Vec<Option<Account>> = accounts
.iter()
.map(|rpc_account| rpc_account.decode())
.collect();
Ok(Response {
context,
value: accounts,
})
}
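A minimal sketch of the new batched account fetch (not from these commits; it assumes an existing RpcClient). Recall the server caps a single request at MAX_MULTIPLE_ACCOUNTS keys.
use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;
fn show_lamports(client: &RpcClient, keys: &[Pubkey]) {
    // One round trip returns Vec<Option<Account>> in the same order as `keys`.
    let accounts = client.get_multiple_accounts(keys).expect("getMultipleAccounts");
    for (key, account) in keys.iter().zip(accounts) {
        match account {
            Some(account) => println!("{}: {} lamports", key, account.lamports),
            None => println!("{}: not found", key),
        }
    }
}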
pub fn get_account_data(&self, pubkey: &Pubkey) -> ClientResult<Vec<u8>> {
Ok(self.get_account(pubkey)?.data)
}
@@ -642,6 +709,10 @@ impl RpcClient {
.into())
}
pub fn get_first_available_block(&self) -> ClientResult<Slot> {
self.send(RpcRequest::GetFirstAvailableBlock, Value::Null)
}
pub fn get_genesis_hash(&self) -> ClientResult<Hash> {
let hash_str: String = self.send(RpcRequest::GetGenesisHash, Value::Null)?;
let hash = hash_str.parse().map_err(|_| {
@@ -653,88 +724,7 @@ impl RpcClient {
Ok(hash)
}
pub fn get_token_account(&self, pubkey: &Pubkey) -> ClientResult<Option<UiTokenAccount>> {
Ok(self
.get_token_account_with_commitment(pubkey, CommitmentConfig::default())?
.value)
}
pub fn get_token_account_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<Option<UiTokenAccount>> {
let Response {
context,
value: account,
} = self.get_account_with_commitment(pubkey, commitment_config)?;
Ok(Response {
context,
value: account
.map(|account| match parse_token(&account.data) {
Ok(TokenAccountType::Account(ui_token_account)) => Some(ui_token_account),
_ => None,
})
.flatten(),
})
}
pub fn get_token_mint(&self, pubkey: &Pubkey) -> ClientResult<Option<UiMint>> {
Ok(self
.get_token_mint_with_commitment(pubkey, CommitmentConfig::default())?
.value)
}
pub fn get_token_mint_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<Option<UiMint>> {
let Response {
context,
value: account,
} = self.get_account_with_commitment(pubkey, commitment_config)?;
Ok(Response {
context,
value: account
.map(|account| match parse_token(&account.data) {
Ok(TokenAccountType::Mint(ui_token_mint)) => Some(ui_token_mint),
_ => None,
})
.flatten(),
})
}
pub fn get_token_multisig(&self, pubkey: &Pubkey) -> ClientResult<Option<UiMultisig>> {
Ok(self
.get_token_multisig_with_commitment(pubkey, CommitmentConfig::default())?
.value)
}
pub fn get_token_multisig_with_commitment(
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<Option<UiMultisig>> {
let Response {
context,
value: account,
} = self.get_account_with_commitment(pubkey, commitment_config)?;
Ok(Response {
context,
value: account
.map(|account| match parse_token(&account.data) {
Ok(TokenAccountType::Multisig(ui_token_multisig)) => Some(ui_token_multisig),
_ => None,
})
.flatten(),
})
}
pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<RpcTokenAmount> {
pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<UiTokenAmount> {
Ok(self
.get_token_account_balance_with_commitment(pubkey, CommitmentConfig::default())?
.value)
@@ -744,7 +734,7 @@ impl RpcClient {
&self,
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<RpcTokenAmount> {
) -> RpcResult<UiTokenAmount> {
self.send(
RpcRequest::GetTokenAccountBalance,
json!([pubkey.to_string(), commitment_config]),
@@ -755,7 +745,7 @@ impl RpcClient {
&self,
delegate: &Pubkey,
token_account_filter: TokenAccountsFilter,
) -> ClientResult<Vec<(Pubkey, UiTokenAccount)>> {
) -> ClientResult<Vec<RpcKeyedAccount>> {
Ok(self
.get_token_accounts_by_delegate_with_commitment(
delegate,
@@ -770,39 +760,31 @@ impl RpcClient {
delegate: &Pubkey,
token_account_filter: TokenAccountsFilter,
commitment_config: CommitmentConfig,
) -> RpcResult<Vec<(Pubkey, UiTokenAccount)>> {
) -> RpcResult<Vec<RpcKeyedAccount>> {
let token_account_filter = match token_account_filter {
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
TokenAccountsFilter::ProgramId(program_id) => {
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
}
};
let Response {
context,
value: accounts,
} = self.send(
RpcRequest::GetTokenAccountsByDelegate,
json!([
delegate.to_string(),
token_account_filter,
commitment_config
]),
)?;
let pubkey_accounts = accounts_to_token_accounts(parse_keyed_accounts(
accounts,
RpcRequest::GetTokenAccountsByDelegate,
)?);
Ok(Response {
context,
value: pubkey_accounts,
})
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::JsonParsed),
commitment: Some(commitment_config),
data_slice: None,
};
self.send(
RpcRequest::GetTokenAccountsByOwner,
json!([delegate.to_string(), token_account_filter, config]),
)
}
pub fn get_token_accounts_by_owner(
&self,
owner: &Pubkey,
token_account_filter: TokenAccountsFilter,
) -> ClientResult<Vec<(Pubkey, UiTokenAccount)>> {
) -> ClientResult<Vec<RpcKeyedAccount>> {
Ok(self
.get_token_accounts_by_owner_with_commitment(
owner,
@@ -817,31 +799,27 @@ impl RpcClient {
owner: &Pubkey,
token_account_filter: TokenAccountsFilter,
commitment_config: CommitmentConfig,
) -> RpcResult<Vec<(Pubkey, UiTokenAccount)>> {
) -> RpcResult<Vec<RpcKeyedAccount>> {
let token_account_filter = match token_account_filter {
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
TokenAccountsFilter::ProgramId(program_id) => {
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
}
};
let Response {
context,
value: accounts,
} = self.send(
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::JsonParsed),
commitment: Some(commitment_config),
data_slice: None,
};
self.send(
RpcRequest::GetTokenAccountsByOwner,
json!([owner.to_string(), token_account_filter, commitment_config]),
)?;
let pubkey_accounts = accounts_to_token_accounts(parse_keyed_accounts(
accounts,
RpcRequest::GetTokenAccountsByDelegate,
)?);
Ok(Response {
context,
value: pubkey_accounts,
})
json!([owner.to_string(), token_account_filter, config]),
)
}
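An illustrative call into the reshaped token-account queries (not part of the diff; it assumes RpcKeyedAccount exposes pubkey and account as in rpc_response.rs): the result is now a list of jsonParsed keyed accounts rather than decoded UiTokenAccount values.
use solana_client::{rpc_client::RpcClient, rpc_request::TokenAccountsFilter};
use solana_sdk::pubkey::Pubkey;
fn list_token_accounts(client: &RpcClient, owner: &Pubkey, mint: &Pubkey) {
    let keyed_accounts = client
        .get_token_accounts_by_owner(owner, TokenAccountsFilter::Mint(*mint))
        .expect("getTokenAccountsByOwner");
    for keyed in keyed_accounts {
        // `keyed.account` is a UiAccount in jsonParsed encoding; inspect or decode as needed.
        println!("token account: {}", keyed.pubkey);
    }
}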
pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<RpcTokenAmount> {
pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
Ok(self
.get_token_supply_with_commitment(mint, CommitmentConfig::default())?
.value)
@@ -851,7 +829,7 @@ impl RpcClient {
&self,
mint: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResult<RpcTokenAmount> {
) -> RpcResult<UiTokenAmount> {
self.send(
RpcRequest::GetTokenSupply,
json!([mint.to_string(), commitment_config]),
@@ -1181,6 +1159,13 @@ impl RpcClient {
}
}
#[derive(Debug, Default)]
pub struct GetConfirmedSignaturesForAddress2Config {
pub before: Option<Signature>,
pub until: Option<Signature>,
pub limit: Option<usize>,
}
fn new_spinner_progress_bar() -> ProgressBar {
let progress_bar = ProgressBar::new(42);
progress_bar
@@ -1222,18 +1207,6 @@ fn parse_keyed_accounts(
Ok(pubkey_accounts)
}
fn accounts_to_token_accounts(
pubkey_accounts: Vec<(Pubkey, Account)>,
) -> Vec<(Pubkey, UiTokenAccount)> {
pubkey_accounts
.into_iter()
.filter_map(|(pubkey, account)| match parse_token(&account.data) {
Ok(TokenAccountType::Account(ui_token_account)) => Some((pubkey, ui_token_account)),
_ => None,
})
.collect()
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -1,6 +1,9 @@
use crate::rpc_filter::RpcFilterType;
use solana_account_decoder::UiAccountEncoding;
use solana_sdk::{clock::Epoch, commitment_config::CommitmentConfig};
use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig};
use solana_sdk::{
clock::Epoch,
commitment_config::{CommitmentConfig, CommitmentLevel},
};
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@@ -11,13 +14,18 @@ pub struct RpcSignatureStatusConfig {
#[derive(Debug, Default, Clone, Copy, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSendTransactionConfig {
#[serde(default)]
pub skip_preflight: bool,
pub preflight_commitment: Option<CommitmentLevel>,
}
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionConfig {
#[serde(default)]
pub sig_verify: bool,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
@@ -47,6 +55,7 @@ pub struct RpcStakeConfig {
#[serde(rename_all = "camelCase")]
pub struct RpcAccountInfoConfig {
pub encoding: Option<UiAccountEncoding>,
pub data_slice: Option<UiDataSliceConfig>,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
}
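A small sketch of how the extended RpcAccountInfoConfig serializes (illustrative; assumes serde_json is available and that UiDataSliceConfig carries offset/length fields): the flattened commitment and camelCase renames produce the wire shape the RPC server expects.
use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig};
use solana_client::rpc_config::RpcAccountInfoConfig;
use solana_sdk::commitment_config::CommitmentConfig;
fn main() {
    let config = RpcAccountInfoConfig {
        encoding: Some(UiAccountEncoding::Base64),
        data_slice: Some(UiDataSliceConfig { offset: 0, length: 32 }),
        commitment: Some(CommitmentConfig::recent()),
    };
    // Roughly: {"encoding":"base64","dataSlice":{"offset":0,"length":32},"commitment":"recent"}
    println!("{}", serde_json::to_string(&config).unwrap());
}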
@@ -65,3 +74,19 @@ pub enum RpcTokenAccountsFilter {
Mint(String),
ProgramId(String),
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureSubscribeConfig {
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub enable_received_notification: Option<bool>,
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcGetConfirmedSignaturesForAddress2Config {
pub before: Option<String>, // Signature as base-58 string
pub until: Option<String>, // Signature as base-58 string
pub limit: Option<usize>,
}

View File

@@ -14,12 +14,14 @@ pub enum RpcRequest {
GetConfirmedBlock,
GetConfirmedBlocks,
GetConfirmedSignaturesForAddress,
GetConfirmedSignaturesForAddress2,
GetConfirmedTransaction,
GetEpochInfo,
GetEpochSchedule,
GetFeeCalculatorForBlockhash,
GetFeeRateGovernor,
GetFees,
GetFirstAvailableBlock,
GetGenesisHash,
GetIdentity,
GetInflationGovernor,
@@ -27,6 +29,7 @@ pub enum RpcRequest {
GetLargestAccounts,
GetLeaderSchedule,
GetMinimumBalanceForRentExemption,
GetMultipleAccounts,
GetProgramAccounts,
GetRecentBlockhash,
GetSignatureStatuses,
@@ -65,12 +68,14 @@ impl fmt::Display for RpcRequest {
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
RpcRequest::GetConfirmedSignaturesForAddress2 => "getConfirmedSignaturesForAddress2",
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
RpcRequest::GetEpochInfo => "getEpochInfo",
RpcRequest::GetEpochSchedule => "getEpochSchedule",
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
RpcRequest::GetFees => "getFees",
RpcRequest::GetFirstAvailableBlock => "getFirstAvailableBlock",
RpcRequest::GetGenesisHash => "getGenesisHash",
RpcRequest::GetIdentity => "getIdentity",
RpcRequest::GetInflationGovernor => "getInflationGovernor",
@@ -78,6 +83,7 @@ impl fmt::Display for RpcRequest {
RpcRequest::GetLargestAccounts => "getLargestAccounts",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::GetMultipleAccounts => "getMultipleAccounts",
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
@@ -108,10 +114,12 @@ impl fmt::Display for RpcRequest {
}
}
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
pub const MAX_GET_CONFIRMED_BLOCKS_RANGE: u64 = 500_000;
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT: usize = 1_000;
pub const MAX_MULTIPLE_ACCOUNTS: usize = 100;
pub const NUM_LARGEST_ACCOUNTS: usize = 20;
// Validators that are this number of slots behind are considered delinquent
pub const DELINQUENT_VALIDATOR_SLOT_DISTANCE: u64 = 128;
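For reference, the Display impl above maps the new request variants straight to their JSON-RPC method names; a tiny check (not from these commits):
use solana_client::rpc_request::RpcRequest;
fn main() {
    assert_eq!(RpcRequest::GetMultipleAccounts.to_string(), "getMultipleAccounts");
    assert_eq!(RpcRequest::GetFirstAvailableBlock.to_string(), "getFirstAvailableBlock");
    assert_eq!(
        RpcRequest::GetConfirmedSignaturesForAddress2.to_string(),
        "getConfirmedSignaturesForAddress2"
    );
}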

View File

@@ -1,15 +1,15 @@
use crate::client_error;
use solana_account_decoder::UiAccount;
use solana_account_decoder::{parse_token::UiTokenAmount, UiAccount};
use solana_sdk::{
clock::{Epoch, Slot},
fee_calculator::{FeeCalculator, FeeRateGovernor},
inflation::Inflation,
transaction::{Result, TransactionError},
};
use solana_transaction_status::ConfirmedTransactionStatusWithSignature;
use std::{collections::HashMap, net::SocketAddr};
pub type RpcResult<T> = client_error::Result<Response<T>>;
pub type RpcAmount = String;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RpcResponseContext {
@@ -94,12 +94,32 @@ pub struct RpcKeyedAccount {
pub account: UiAccount,
}
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
pub struct SlotInfo {
pub slot: Slot,
pub parent: Slot,
pub root: Slot,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase", untagged)]
pub enum RpcSignatureResult {
ProcessedSignature(ProcessedSignatureResult),
ReceivedSignature(ReceivedSignatureResult),
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureResult {
pub struct ProcessedSignatureResult {
pub err: Option<TransactionError>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub enum ReceivedSignatureResult {
ReceivedSignature,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcContactInfo {
/// Pubkey of the node as a base-58 string
@@ -221,18 +241,36 @@ pub struct RpcStakeActivation {
pub inactive: u64,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct RpcTokenAmount {
pub ui_amount: f64,
pub decimals: u8,
pub amount: RpcAmount,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct RpcTokenAccountBalance {
pub address: String,
#[serde(flatten)]
pub amount: RpcTokenAmount,
pub amount: UiTokenAmount,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedTransactionStatusWithSignature {
pub signature: String,
pub slot: Slot,
pub err: Option<TransactionError>,
pub memo: Option<String>,
}
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
let ConfirmedTransactionStatusWithSignature {
signature,
slot,
err,
memo,
} = value;
Self {
signature: signature.to_string(),
slot,
err,
memo,
}
}
}

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.0"
version = "1.3.10"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -38,46 +38,49 @@ num-traits = "0.2"
rand = "0.7.0"
rand_chacha = "0.2.2"
raptorq = "1.4.2"
rayon = "1.3.1"
rayon = "1.4.0"
regex = "1.3.9"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.3.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.3.0" }
solana-budget-program = { path = "../programs/budget", version = "1.3.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.0" }
solana-client = { path = "../client", version = "1.3.0" }
solana-faucet = { path = "../faucet", version = "1.3.0" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.3.0" }
solana-ledger = { path = "../ledger", version = "1.3.0" }
solana-logger = { path = "../logger", version = "1.3.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.3.0" }
solana-metrics = { path = "../metrics", version = "1.3.0" }
solana-measure = { path = "../measure", version = "1.3.0" }
solana-net-utils = { path = "../net-utils", version = "1.3.0" }
solana-perf = { path = "../perf", version = "1.3.0" }
solana-runtime = { path = "../runtime", version = "1.3.0" }
solana-sdk = { path = "../sdk", version = "1.3.0" }
solana-sdk-macro-frozen-abi = { path = "../sdk/macro-frozen-abi", version = "1.3.0" }
solana-stake-program = { path = "../programs/stake", version = "1.3.0" }
solana-streamer = { path = "../streamer", version = "1.3.0" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.3.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.0" }
solana-version = { path = "../version", version = "1.3.0" }
solana-vote-program = { path = "../programs/vote", version = "1.3.0" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.0" }
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
solana-account-decoder = { path = "../account-decoder", version = "1.3.10" }
solana-banks-server = { path = "../banks-server", version = "1.3.10" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.3.10" }
solana-budget-program = { path = "../programs/budget", version = "1.3.10" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.10" }
solana-client = { path = "../client", version = "1.3.10" }
solana-faucet = { path = "../faucet", version = "1.3.10" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.3.10" }
solana-ledger = { path = "../ledger", version = "1.3.10" }
solana-logger = { path = "../logger", version = "1.3.10" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.3.10" }
solana-metrics = { path = "../metrics", version = "1.3.10" }
solana-measure = { path = "../measure", version = "1.3.10" }
solana-net-utils = { path = "../net-utils", version = "1.3.10" }
solana-perf = { path = "../perf", version = "1.3.10" }
solana-runtime = { path = "../runtime", version = "1.3.10" }
solana-sdk = { path = "../sdk", version = "1.3.10" }
solana-sdk-macro-frozen-abi = { path = "../sdk/macro-frozen-abi", version = "1.3.10" }
solana-stake-program = { path = "../programs/stake", version = "1.3.10" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.3.10" }
solana-streamer = { path = "../streamer", version = "1.3.10" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.3.10" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.10" }
solana-version = { path = "../version", version = "1.3.10" }
solana-vote-program = { path = "../programs/vote", version = "1.3.10" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.10" }
spl-token-v2-0 = { package = "spl-token", version = "2.0.3", features = ["skip-no-mangle"] }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.3.0" }
tokio_01 = { version = "0.1", package = "tokio" }
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
tokio_io_01 = { version = "0.1", package = "tokio-io" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.3.10" }
tokio = { version = "0.2.22", features = ["full"] }
trees = "0.2.1"
[dev-dependencies]
base64 = "0.12.3"
matches = "0.1.6"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serial_test = "0.4.0"
@@ -93,6 +96,9 @@ name = "banking_stage"
[[bench]]
name = "blockstore"
[[bench]]
name = "crds_gossip_pull"
[[bench]]
name = "gen_keys"

View File

@@ -73,6 +73,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let batch_len = batch.packets.len();
packets.push((batch, vec![0usize; batch_len]));
}
let (s, _r) = unbounded();
// This tests the performance of buffering packets.
// If the packet buffers are copied, performance will be poor.
bencher.iter(move || {
@@ -82,6 +83,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
&mut packets,
10_000,
None,
&s,
);
});
@@ -190,12 +192,14 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (s, _r) = unbounded();
let _banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
vote_receiver,
None,
s,
);
poh_recorder.lock().unwrap().set_bank(&bank);

View File

@@ -0,0 +1,26 @@
#![feature(test)]
extern crate test;
use rand::{thread_rng, Rng};
use solana_core::crds_gossip_pull::CrdsFilter;
use solana_sdk::hash::{Hash, HASH_BYTES};
use test::Bencher;
#[bench]
fn bench_hash_as_u64(bencher: &mut Bencher) {
let mut rng = thread_rng();
let hashes: Vec<_> = (0..1000)
.map(|_| {
let mut buf = [0u8; HASH_BYTES];
rng.fill(&mut buf);
Hash::new(&buf)
})
.collect();
bencher.iter(|| {
hashes
.iter()
.map(CrdsFilter::hash_as_u64)
.collect::<Vec<_>>()
});
}

View File

@@ -24,12 +24,14 @@ use solana_perf::{
use solana_runtime::{
accounts_db::ErrorCounters,
bank::{Bank, TransactionBalancesSet, TransactionProcessResult},
bank_utils,
transaction_batch::TransactionBatch,
vote_sender_types::ReplayVoteSender,
};
use solana_sdk::{
clock::{
Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE,
MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU,
Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY,
MAX_TRANSACTION_FORWARDING_DELAY_GPU,
},
poh_config::PohConfig,
pubkey::Pubkey,
@@ -81,6 +83,7 @@ impl BankingStage {
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
) -> Self {
Self::new_num_threads(
cluster_info,
@@ -89,6 +92,7 @@ impl BankingStage {
verified_vote_receiver,
Self::num_threads(),
transaction_status_sender,
gossip_vote_sender,
)
}
@@ -99,6 +103,7 @@ impl BankingStage {
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
num_threads: u32,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
) -> Self {
let batch_limit = TOTAL_BUFFERED_PACKETS / ((num_threads - 1) as usize * PACKETS_PER_BATCH);
// Single thread to generate entries from many banks.
@@ -119,6 +124,7 @@ impl BankingStage {
let cluster_info = cluster_info.clone();
let mut recv_start = Instant::now();
let transaction_status_sender = transaction_status_sender.clone();
let gossip_vote_sender = gossip_vote_sender.clone();
Builder::new()
.name("solana-banking-stage-tx".to_string())
.spawn(move || {
@@ -132,7 +138,8 @@ impl BankingStage {
enable_forwarding,
i,
batch_limit,
transaction_status_sender.clone(),
transaction_status_sender,
gossip_vote_sender,
);
})
.unwrap()
@@ -168,6 +175,7 @@ impl BankingStage {
buffered_packets: &mut Vec<PacketsAndOffsets>,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> UnprocessedPackets {
let mut unprocessed_packets = vec![];
let mut rebuffered_packets = 0;
@@ -199,6 +207,7 @@ impl BankingStage {
&msgs,
unprocessed_indexes.to_owned(),
transaction_status_sender.clone(),
gossip_vote_sender,
);
new_tx_count += processed;
@@ -283,6 +292,7 @@ impl BankingStage {
)
}
#[allow(clippy::too_many_arguments)]
fn process_buffered_packets(
my_pubkey: &Pubkey,
socket: &std::net::UdpSocket,
@@ -292,6 +302,7 @@ impl BankingStage {
enable_forwarding: bool,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> BufferedPacketsDecision {
let (leader_at_slot_offset, poh_has_bank, would_be_leader) = {
let poh = poh_recorder.lock().unwrap();
@@ -319,6 +330,7 @@ impl BankingStage {
buffered_packets,
batch_limit,
transaction_status_sender,
gossip_vote_sender,
);
buffered_packets.append(&mut unprocessed);
}
@@ -352,6 +364,7 @@ impl BankingStage {
decision
}
#[allow(clippy::too_many_arguments)]
pub fn process_loop(
my_pubkey: Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
@@ -362,6 +375,7 @@ impl BankingStage {
id: u32,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
) {
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut buffered_packets = vec![];
@@ -376,6 +390,7 @@ impl BankingStage {
enable_forwarding,
batch_limit,
transaction_status_sender.clone(),
&gossip_vote_sender,
);
if decision == BufferedPacketsDecision::Hold {
// If we are waiting on a new bank,
@@ -403,6 +418,7 @@ impl BankingStage {
id,
batch_limit,
transaction_status_sender.clone(),
&gossip_vote_sender,
) {
Err(RecvTimeoutError::Timeout) => (),
Err(RecvTimeoutError::Disconnected) => break,
@@ -501,6 +517,7 @@ impl BankingStage {
poh: &Arc<Mutex<PohRecorder>>,
batch: &TransactionBatch,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut load_execute_time = Measure::start("load_execute_time");
// Use a shorter maximum age when adding transactions into the pipeline. This will reduce
@@ -533,24 +550,23 @@ impl BankingStage {
let num_to_commit = num_to_commit.unwrap();
if num_to_commit != 0 {
let transaction_statuses = bank
.commit_transactions(
txs,
None,
&mut loaded_accounts,
&results,
tx_count,
signature_count,
)
.processing_results;
let tx_results = bank.commit_transactions(
txs,
None,
&mut loaded_accounts,
&results,
tx_count,
signature_count,
);
bank_utils::find_and_send_votes(txs, &tx_results, Some(gossip_vote_sender));
if let Some(sender) = transaction_status_sender {
let post_balances = bank.collect_balances(batch);
send_transaction_status_batch(
bank.clone(),
batch.transactions(),
batch.iteration_order_vec(),
transaction_statuses,
tx_results.processing_results,
TransactionBalancesSet::new(pre_balances, post_balances),
sender,
);
@@ -578,6 +594,7 @@ impl BankingStage {
poh: &Arc<Mutex<PohRecorder>>,
chunk_offset: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut lock_time = Measure::start("lock_time");
// Once accounts are locked, other threads cannot encode transactions that will modify the
@@ -590,6 +607,7 @@ impl BankingStage {
poh,
&batch,
transaction_status_sender,
gossip_vote_sender,
);
retryable_txs.iter_mut().for_each(|x| *x += chunk_offset);
@@ -618,6 +636,7 @@ impl BankingStage {
transactions: &[Transaction],
poh: &Arc<Mutex<PohRecorder>>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (usize, Vec<usize>) {
let mut chunk_start = 0;
let mut unprocessed_txs = vec![];
@@ -633,6 +652,7 @@ impl BankingStage {
poh,
chunk_start,
transaction_status_sender.clone(),
gossip_vote_sender,
);
trace!("process_transactions result: {:?}", result);
@@ -748,10 +768,7 @@ impl BankingStage {
&filter,
(MAX_PROCESSING_AGE)
.saturating_sub(max_tx_fwd_delay)
.saturating_sub(
(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET * bank.ticks_per_slot()
/ DEFAULT_TICKS_PER_SECOND) as usize,
),
.saturating_sub(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET as usize),
&mut error_counters,
);
@@ -764,6 +781,7 @@ impl BankingStage {
msgs: &Packets,
packet_indexes: Vec<usize>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (usize, usize, Vec<usize>) {
let (transactions, transaction_to_packet_indexes) =
Self::transactions_from_packets(msgs, &packet_indexes);
@@ -775,8 +793,13 @@ impl BankingStage {
let tx_len = transactions.len();
let (processed, unprocessed_tx_indexes) =
Self::process_transactions(bank, &transactions, poh, transaction_status_sender);
let (processed, unprocessed_tx_indexes) = Self::process_transactions(
bank,
&transactions,
poh,
transaction_status_sender,
gossip_vote_sender,
);
let unprocessed_tx_count = unprocessed_tx_indexes.len();
@@ -846,6 +869,7 @@ impl BankingStage {
.collect()
}
#[allow(clippy::too_many_arguments)]
/// Process the incoming packets
pub fn process_packets(
my_pubkey: &Pubkey,
@@ -856,6 +880,7 @@ impl BankingStage {
id: u32,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> Result<UnprocessedPackets, RecvTimeoutError> {
let mut recv_time = Measure::start("process_packets_recv");
let mms = verified_receiver.recv_timeout(recv_timeout)?;
@@ -898,6 +923,7 @@ impl BankingStage {
&msgs,
packet_indexes,
transaction_status_sender.clone(),
gossip_vote_sender,
);
new_tx_count += processed;
@@ -1044,6 +1070,7 @@ mod tests {
let bank = Arc::new(Bank::new(&genesis_config));
let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
@@ -1060,6 +1087,7 @@ mod tests {
verified_receiver,
vote_receiver,
None,
gossip_vote_sender,
);
drop(verified_sender);
drop(vote_sender);
@@ -1094,12 +1122,15 @@ mod tests {
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
vote_receiver,
None,
gossip_vote_sender,
);
trace!("sending bank");
drop(verified_sender);
@@ -1157,12 +1188,15 @@ mod tests {
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
vote_receiver,
None,
gossip_vote_sender,
);
// fund another account so we can send 2 good transactions in a single batch.
@@ -1283,6 +1317,8 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let entry_receiver = {
// start a banking_stage to eat verified receiver
let bank = Arc::new(Bank::new(&genesis_config));
@@ -1305,6 +1341,7 @@ mod tests {
vote_receiver,
2,
None,
gossip_vote_sender,
);
// wait for banking_stage to eat the packets
@@ -1682,6 +1719,7 @@ mod tests {
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
BankingStage::process_and_record_transactions(
&bank,
@@ -1689,6 +1727,7 @@ mod tests {
&poh_recorder,
0,
None,
&gossip_vote_sender,
)
.0
.unwrap();
@@ -1725,6 +1764,7 @@ mod tests {
&poh_recorder,
0,
None,
&gossip_vote_sender,
)
.0,
Err(PohRecorderError::MaxHeightReached)
@@ -1776,12 +1816,15 @@ mod tests {
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let (result, unprocessed) = BankingStage::process_and_record_transactions(
&bank,
&transactions,
&poh_recorder,
0,
None,
&gossip_vote_sender,
);
assert!(result.is_ok());
@@ -1865,8 +1908,16 @@ mod tests {
// record
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let (processed_transactions_count, mut retryable_txs) =
BankingStage::process_transactions(&bank, &transactions, &poh_recorder, None);
BankingStage::process_transactions(
&bank,
&transactions,
&poh_recorder,
None,
&gossip_vote_sender,
);
assert_eq!(processed_transactions_count, 0,);
@@ -1943,12 +1994,15 @@ mod tests {
&Arc::new(AtomicBool::new(false)),
);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let _ = BankingStage::process_and_record_transactions(
&bank,
&transactions,
&poh_recorder,
0,
Some(transaction_status_sender),
&gossip_vote_sender,
);
transaction_status_service.join().unwrap();

View File

@@ -0,0 +1,93 @@
use solana_ledger::blockstore::Blockstore;
use solana_runtime::commitment::BlockCommitmentCache;
use std::{
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
};
use tokio::runtime;
// Delay uploading the largest confirmed root for this many slots. This is done in an attempt to
// ensure that the `CacheBlockTimeService` has had enough time to add the block time for the root
// before it's uploaded to BigTable.
//
// A more direct connection between CacheBlockTimeService and BigTableUploadService would be
// preferable...
const LARGEST_CONFIRMED_ROOT_UPLOAD_DELAY: usize = 100;
pub struct BigTableUploadService {
thread: JoinHandle<()>,
}
impl BigTableUploadService {
pub fn new(
runtime_handle: runtime::Handle,
bigtable_ledger_storage: solana_storage_bigtable::LedgerStorage,
blockstore: Arc<Blockstore>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
exit: Arc<AtomicBool>,
) -> Self {
info!("Starting BigTable upload service");
let thread = Builder::new()
.name("bigtable-upload".to_string())
.spawn(move || {
Self::run(
runtime_handle,
bigtable_ledger_storage,
blockstore,
block_commitment_cache,
exit,
)
})
.unwrap();
Self { thread }
}
fn run(
runtime: runtime::Handle,
bigtable_ledger_storage: solana_storage_bigtable::LedgerStorage,
blockstore: Arc<Blockstore>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
exit: Arc<AtomicBool>,
) {
let mut start_slot = 0;
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let end_slot = block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root()
.saturating_sub(LARGEST_CONFIRMED_ROOT_UPLOAD_DELAY as u64);
if end_slot <= start_slot {
std::thread::sleep(std::time::Duration::from_secs(1));
continue;
}
let result = runtime.block_on(solana_ledger::bigtable_upload::upload_confirmed_blocks(
blockstore.clone(),
bigtable_ledger_storage.clone(),
start_slot,
Some(end_slot),
true,
exit.clone(),
));
match result {
Ok(()) => start_slot = end_slot,
Err(err) => {
warn!("bigtable: upload_confirmed_blocks: {}", err);
std::thread::sleep(std::time::Duration::from_secs(2));
}
}
}
}
pub fn join(self) -> thread::Result<()> {
self.thread.join()
}
}

View File

@@ -242,6 +242,7 @@ struct GossipStats {
push_message_count: Counter,
push_message_value_count: Counter,
push_response_count: Counter,
pull_requests_count: Counter,
}
pub struct ClusterInfo {
@@ -357,7 +358,7 @@ pub fn make_accounts_hashes_message(
}
// TODO These messages should go through the gpu pipeline for spam filtering
#[frozen_abi(digest = "6qRS1ZwydpdSqzeyRdDvn5uwfDdFYkuUz4K4jSkd1oFW")]
#[frozen_abi(digest = "CnN1gW2K2TRydGc84eYnQJwdTADPjQf6LJLZ4RP1QeoH")]
#[derive(Serialize, Deserialize, Debug, AbiEnumVisitor, AbiExample)]
#[allow(clippy::large_enum_variant)]
enum Protocol {
@@ -420,7 +421,7 @@ impl ClusterInfo {
gossip.set_shred_version(me.my_shred_version());
}
me.insert_self();
me.push_self(&HashMap::new());
me.push_self(&HashMap::new(), None);
me
}
@@ -452,13 +453,17 @@ impl ClusterInfo {
self.insert_self()
}
fn push_self(&self, stakes: &HashMap<Pubkey, u64>) {
fn push_self(
&self,
stakes: &HashMap<Pubkey, u64>,
gossip_validators: Option<&HashSet<Pubkey>>,
) {
let now = timestamp();
self.my_contact_info.write().unwrap().wallclock = now;
let entry =
CrdsValue::new_signed(CrdsData::ContactInfo(self.my_contact_info()), &self.keypair);
let mut w_gossip = self.gossip.write().unwrap();
w_gossip.refresh_push_active_set(stakes);
w_gossip.refresh_push_active_set(stakes, gossip_validators);
w_gossip.process_push_message(&self.id(), vec![entry], now);
}
@@ -490,6 +495,21 @@ impl ClusterInfo {
.map(map)
}
pub fn lookup_contact_info_by_gossip_addr(
&self,
gossip_addr: &SocketAddr,
) -> Option<ContactInfo> {
for versioned_value in self.gossip.read().unwrap().crds.table.values() {
if let Some(contact_info) = CrdsValue::contact_info(&versioned_value.value) {
if contact_info.gossip == *gossip_addr {
return Some(contact_info.clone());
}
}
}
None
}
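A brief sketch of how the new gossip-address lookup might be used (the helper below is illustrative, not from the diff): resolve a peer's ContactInfo from the gossip socket address it advertises.
use solana_core::cluster_info::ClusterInfo;
use std::net::SocketAddr;
fn is_known_gossip_peer(cluster_info: &ClusterInfo, addr: SocketAddr) -> bool {
    cluster_info
        .lookup_contact_info_by_gossip_addr(&addr)
        .map(|contact_info| contact_info.gossip == addr)
        .unwrap_or(false)
}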
pub fn my_contact_info(&self) -> ContactInfo {
self.my_contact_info.read().unwrap().clone()
}
@@ -542,7 +562,7 @@ impl ClusterInfo {
}
let ip_addr = node.gossip.ip();
Some(format!(
"{:15} {:2}| {:5} | {:44} |{:^15}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
"{:15} {:2}| {:5} | {:44} |{:^15}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
ip_addr.to_string()
} else {
@@ -565,6 +585,7 @@ impl ClusterInfo {
addr_to_string(&ip_addr, &node.serve_repair),
addr_to_string(&ip_addr, &node.rpc),
addr_to_string(&ip_addr, &node.rpc_pubsub),
addr_to_string(&ip_addr, &node.rpc_banks),
node.shred_version,
))
}
@@ -1346,13 +1367,17 @@ impl ClusterInfo {
messages
}
fn new_pull_requests(&self, stakes: &HashMap<Pubkey, u64>) -> Vec<(SocketAddr, Protocol)> {
fn new_pull_requests(
&self,
gossip_validators: Option<&HashSet<Pubkey>>,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<(SocketAddr, Protocol)> {
let now = timestamp();
let mut pulls: Vec<_> = {
let r_gossip =
self.time_gossip_read_lock("new_pull_reqs", &self.stats.new_pull_requests);
r_gossip
.new_pull_request(now, stakes, MAX_BLOOM_SIZE)
.new_pull_request(now, gossip_validators, stakes, MAX_BLOOM_SIZE)
.ok()
.into_iter()
.filter_map(|(peer, filters, me)| {
@@ -1413,27 +1438,32 @@ impl ClusterInfo {
// Generate new push and pull requests
fn generate_new_gossip_requests(
&self,
gossip_validators: Option<&HashSet<Pubkey>>,
stakes: &HashMap<Pubkey, u64>,
generate_pull_requests: bool,
) -> Vec<(SocketAddr, Protocol)> {
let pulls: Vec<_> = if generate_pull_requests {
self.new_pull_requests(stakes)
let mut pulls: Vec<_> = if generate_pull_requests {
self.new_pull_requests(gossip_validators, stakes)
} else {
vec![]
};
let pushes: Vec<_> = self.new_push_requests();
vec![pulls, pushes].into_iter().flatten().collect()
let mut pushes: Vec<_> = self.new_push_requests();
pulls.append(&mut pushes);
pulls
}
/// At random pick a node and try to get updated changes from them
fn run_gossip(
&self,
gossip_validators: Option<&HashSet<Pubkey>>,
recycler: &PacketsRecycler,
stakes: &HashMap<Pubkey, u64>,
sender: &PacketSender,
generate_pull_requests: bool,
) -> Result<()> {
let reqs = self.generate_new_gossip_requests(&stakes, generate_pull_requests);
let reqs =
self.generate_new_gossip_requests(gossip_validators, &stakes, generate_pull_requests);
if !reqs.is_empty() {
let packets = to_packets_with_destination(recycler.clone(), &reqs);
sender.send(packets)?;
@@ -1444,11 +1474,13 @@ impl ClusterInfo {
fn handle_adopt_shred_version(self: &Arc<Self>, adopt_shred_version: &mut bool) {
// Adopt the entrypoint's `shred_version` if ours is unset
if *adopt_shred_version {
// If gossip was given an entrypoint, lookup its id
let entrypoint_id = self.entrypoint.read().unwrap().as_ref().map(|e| e.id);
if let Some(entrypoint_id) = entrypoint_id {
// If gossip was given an entrypoint, look up the ContactInfo by the given
// entrypoint gossip address
let gossip_addr = self.entrypoint.read().unwrap().as_ref().map(|e| e.gossip);
if let Some(gossip_addr) = gossip_addr {
// If a pull from the entrypoint was successful, it should exist in the crds table
let entrypoint = self.lookup_contact_info(&entrypoint_id, |ci| ci.clone());
let entrypoint = self.lookup_contact_info_by_gossip_addr(&gossip_addr);
if let Some(entrypoint) = entrypoint {
if entrypoint.shred_version == 0 {
info!("Unable to adopt entrypoint's shred version");
@@ -1464,6 +1496,7 @@ impl ClusterInfo {
.unwrap()
.set_shred_version(entrypoint.shred_version);
self.insert_self();
*self.entrypoint.write().unwrap() = Some(entrypoint);
*adopt_shred_version = false;
}
}
@@ -1492,12 +1525,6 @@ impl ClusterInfo {
.time_gossip_write_lock("purge", &self.stats.purge)
.purge(timestamp(), &timeouts);
inc_new_counter_info!("cluster_info-purge-count", num_purged);
let table_size = self.gossip.read().unwrap().crds.table.len();
datapoint_debug!(
"cluster_info-purge",
("table_size", table_size as i64, i64),
("purge_stake_timeout", timeout as i64, i64)
);
}
/// randomly pick a node and ask them for updates asynchronously
@@ -1505,6 +1532,7 @@ impl ClusterInfo {
self: Arc<Self>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
sender: PacketSender,
gossip_validators: Option<HashSet<Pubkey>>,
exit: &Arc<AtomicBool>,
) -> JoinHandle<()> {
let exit = exit.clone();
@@ -1535,7 +1563,13 @@ impl ClusterInfo {
None => HashMap::new(),
};
let _ = self.run_gossip(&recycler, &stakes, &sender, generate_pull_requests);
let _ = self.run_gossip(
gossip_validators.as_ref(),
&recycler,
&stakes,
&sender,
generate_pull_requests,
);
if exit.load(Ordering::Relaxed) {
return;
}
@@ -1547,7 +1581,7 @@ impl ClusterInfo {
//TODO: possibly tune this parameter
//we saw a deadlock passing a self.read().unwrap().timeout into sleep
if start - last_push > CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2 {
self.push_self(&stakes);
self.push_self(&stakes, gossip_validators.as_ref());
last_push = timestamp();
}
let elapsed = timestamp() - start;
@@ -1694,9 +1728,14 @@ impl ClusterInfo {
}
// process the collected pulls together
let rsp = self.handle_pull_requests(recycler, gossip_pull_data, stakes);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
if !gossip_pull_data.is_empty() {
self.stats
.pull_requests_count
.add_relaxed(gossip_pull_data.len() as u64);
let rsp = self.handle_pull_requests(recycler, gossip_pull_data, stakes);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
}
}
}
@@ -1745,7 +1784,7 @@ impl ClusterInfo {
"generate_pull_responses",
&self.stats.generate_pull_responses,
)
.generate_pull_responses(&caller_and_filters);
.generate_pull_responses(&caller_and_filters, now);
self.time_gossip_write_lock("process_pull_reqs", &self.stats.process_pull_requests)
.process_pull_requests(caller_and_filters, now);
@@ -1853,15 +1892,15 @@ impl ClusterInfo {
) -> (usize, usize, usize) {
let len = crds_values.len();
trace!("PullResponse me: {} from: {} len={}", self.id, from, len);
if let Some(shred_version) = self.lookup_contact_info(from, |ci| ci.shred_version) {
Self::filter_by_shred_version(
from,
&mut crds_values,
shred_version,
self.my_shred_version(),
);
}
let shred_version = self
.lookup_contact_info(from, |ci| ci.shred_version)
.unwrap_or(0);
Self::filter_by_shred_version(
from,
&mut crds_values,
shred_version,
self.my_shred_version(),
);
let filtered_len = crds_values.len();
let mut pull_stats = ProcessPullStats::default();
@@ -1913,7 +1952,8 @@ impl ClusterInfo {
shred_version: u16,
my_shred_version: u16,
) {
if my_shred_version != 0 && shred_version != 0 && shred_version != my_shred_version {
// Always run filter on spies
if my_shred_version != 0 && shred_version != my_shred_version {
// Allow someone to update their own ContactInfo so they
// can change shred versions if needed.
crds_values.retain(|crds_value| match &crds_value.data {
@@ -1934,14 +1974,15 @@ impl ClusterInfo {
self.stats.push_message_count.add_relaxed(1);
let len = crds_values.len();
if let Some(shred_version) = self.lookup_contact_info(from, |ci| ci.shred_version) {
Self::filter_by_shred_version(
from,
&mut crds_values,
shred_version,
self.my_shred_version(),
);
}
let shred_version = self
.lookup_contact_info(from, |ci| ci.shred_version)
.unwrap_or(0);
Self::filter_by_shred_version(
from,
&mut crds_values,
shred_version,
self.my_shred_version(),
);
let filtered_len = crds_values.len();
self.stats
.push_message_value_count
@@ -2086,6 +2127,10 @@ impl ClusterInfo {
fn print_reset_stats(&self, last_print: &mut Instant) {
if last_print.elapsed().as_millis() > 2000 {
let (table_size, purged_values_size) = {
let r_gossip = self.gossip.read().unwrap();
(r_gossip.crds.table.len(), r_gossip.pull.purged_values.len())
};
datapoint_info!(
"cluster_info_stats",
("entrypoint", self.stats.entrypoint.clear(), i64),
@@ -2109,6 +2154,8 @@ impl ClusterInfo {
self.stats.new_push_requests_num.clear(),
i64
),
("table_size", table_size as i64, i64),
("purged_values_size", purged_values_size as i64, i64),
);
datapoint_info!(
"cluster_info_stats2",
@@ -2255,6 +2302,14 @@ impl ClusterInfo {
i64
),
);
datapoint_info!(
"cluster_info_stats5",
(
"pull_requests_count",
self.stats.pull_requests_count.clear(),
i64
),
);
*last_print = Instant::now();
}
@@ -2419,10 +2474,12 @@ impl Node {
let rpc_pubsub_port = find_available_port_in_range(bind_ip_addr, (1024, 65535)).unwrap();
let rpc_pubsub_addr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_pubsub_port);
let rpc_banks_port = find_available_port_in_range(bind_ip_addr, (1024, 65535)).unwrap();
let rpc_banks_addr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_banks_port);
let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()];
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let unused = UdpSocket::bind("0.0.0.0:0").unwrap();
let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap();
let info = ContactInfo {
id: *pubkey,
@@ -2432,7 +2489,7 @@ impl Node {
repair: repair.local_addr().unwrap(),
tpu: tpu.local_addr().unwrap(),
tpu_forwards: tpu_forwards.local_addr().unwrap(),
unused: unused.local_addr().unwrap(),
rpc_banks: rpc_banks_addr,
rpc: rpc_addr,
rpc_pubsub: rpc_pubsub_addr,
serve_repair: serve_repair.local_addr().unwrap(),
@@ -2513,7 +2570,7 @@ impl Node {
repair: SocketAddr::new(gossip_addr.ip(), repair_port),
tpu: SocketAddr::new(gossip_addr.ip(), tpu_port),
tpu_forwards: SocketAddr::new(gossip_addr.ip(), tpu_forwards_port),
unused: socketaddr_any!(),
rpc_banks: socketaddr_any!(),
rpc: socketaddr_any!(),
rpc_pubsub: socketaddr_any!(),
serve_repair: SocketAddr::new(gossip_addr.ip(), serve_repair_port),
@@ -2666,8 +2723,8 @@ mod tests {
.gossip
.write()
.unwrap()
.refresh_push_active_set(&HashMap::new());
let reqs = cluster_info.generate_new_gossip_requests(&HashMap::new(), true);
.refresh_push_active_set(&HashMap::new(), None);
let reqs = cluster_info.generate_new_gossip_requests(None, &HashMap::new(), true);
//assert none of the addrs are invalid.
reqs.iter().all(|(addr, _)| {
let res = ContactInfo::is_valid_address(addr);
@@ -2805,7 +2862,7 @@ mod tests {
.gossip
.write()
.unwrap()
.refresh_push_active_set(&HashMap::new());
.refresh_push_active_set(&HashMap::new(), None);
//check that all types of gossip messages are signed correctly
let (_, push_messages) = cluster_info
.gossip
@@ -2822,7 +2879,7 @@ mod tests {
.gossip
.write()
.unwrap()
.new_pull_request(timestamp(), &HashMap::new(), MAX_BLOOM_SIZE)
.new_pull_request(timestamp(), None, &HashMap::new(), MAX_BLOOM_SIZE)
.ok()
.unwrap();
assert!(val.verify());
@@ -3041,7 +3098,7 @@ mod tests {
let entrypoint_pubkey = Pubkey::new_rand();
let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
cluster_info.set_entrypoint(entrypoint.clone());
let pulls = cluster_info.new_pull_requests(&HashMap::new());
let pulls = cluster_info.new_pull_requests(None, &HashMap::new());
assert_eq!(1, pulls.len() as u64);
match pulls.get(0) {
Some((addr, msg)) => {
@@ -3068,7 +3125,7 @@ mod tests {
vec![entrypoint_crdsvalue],
&timeouts,
);
let pulls = cluster_info.new_pull_requests(&HashMap::new());
let pulls = cluster_info.new_pull_requests(None, &HashMap::new());
assert_eq!(1, pulls.len() as u64);
assert_eq!(*cluster_info.entrypoint.read().unwrap(), Some(entrypoint));
}
@@ -3211,7 +3268,7 @@ mod tests {
// Pull request 1: `other_node` is present but `entrypoint` was just added (so it has a
// fresh timestamp). There should only be one pull request to `other_node`
let pulls = cluster_info.new_pull_requests(&stakes);
let pulls = cluster_info.new_pull_requests(None, &stakes);
assert_eq!(1, pulls.len() as u64);
assert_eq!(pulls.get(0).unwrap().0, other_node.gossip);
@@ -3224,14 +3281,14 @@ mod tests {
.as_mut()
.unwrap()
.wallclock = 0;
let pulls = cluster_info.new_pull_requests(&stakes);
let pulls = cluster_info.new_pull_requests(None, &stakes);
assert_eq!(2, pulls.len() as u64);
assert_eq!(pulls.get(0).unwrap().0, other_node.gossip);
assert_eq!(pulls.get(1).unwrap().0, entrypoint.gossip);
// Pull request 3: `other_node` is present and `entrypoint` was just pulled from. There should
// only be one pull request to `other_node`
let pulls = cluster_info.new_pull_requests(&stakes);
let pulls = cluster_info.new_pull_requests(None, &stakes);
assert_eq!(1, pulls.len() as u64);
assert_eq!(pulls.get(0).unwrap().0, other_node.gossip);
}
@@ -3375,4 +3432,36 @@ mod tests {
let vote = CrdsValue::new_signed(CrdsData::Vote(1, vote), &Keypair::new());
assert!(bincode::serialized_size(&vote).unwrap() <= MAX_PROTOCOL_PAYLOAD_SIZE);
}
#[test]
fn test_handle_adopt_shred_version() {
let node_keypair = Arc::new(Keypair::new());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
));
// Simulating starting up with default entrypoint, no known id, only a gossip
// address
let entrypoint_gossip_addr = socketaddr!("127.0.0.2:1234");
let mut entrypoint = ContactInfo::new_localhost(&Pubkey::default(), timestamp());
entrypoint.gossip = entrypoint_gossip_addr;
assert_eq!(entrypoint.shred_version, 0);
cluster_info.set_entrypoint(entrypoint);
// Simulate getting entrypoint ContactInfo from gossip
let mut gossiped_entrypoint_info =
ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
gossiped_entrypoint_info.gossip = entrypoint_gossip_addr;
gossiped_entrypoint_info.shred_version = 1;
cluster_info.insert_info(gossiped_entrypoint_info.clone());
// Adopt the entrypoint's gossiped contact info and verify
ClusterInfo::handle_adopt_shred_version(&cluster_info, &mut true);
assert_eq!(
cluster_info.entrypoint.read().unwrap().as_ref().unwrap(),
&gossiped_entrypoint_info
);
assert_eq!(cluster_info.my_shred_version(), 1);
}
}

View File

@@ -15,10 +15,7 @@ use crossbeam_channel::{
};
use itertools::izip;
use log::*;
use solana_ledger::{
blockstore::Blockstore,
blockstore_processor::{ReplayVotesReceiver, ReplayedVote},
};
use solana_ledger::blockstore::Blockstore;
use solana_metrics::inc_new_counter_debug;
use solana_perf::packet::{self, Packets};
use solana_runtime::{
@@ -26,6 +23,7 @@ use solana_runtime::{
bank_forks::BankForks,
epoch_stakes::{EpochAuthorizedVoters, EpochStakes},
stakes::Stakes,
vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
};
use solana_sdk::{
clock::{Epoch, Slot},
@@ -248,7 +246,7 @@ impl ClusterInfoVoteListener {
bank_forks: Arc<RwLock<BankForks>>,
subscriptions: Arc<RpcSubscriptions>,
verified_vote_sender: VerifiedVoteSender,
replay_votes_receiver: ReplayVotesReceiver,
replay_votes_receiver: ReplayVoteReceiver,
blockstore: Arc<Blockstore>,
) -> Self {
let exit_ = exit.clone();
@@ -420,7 +418,7 @@ impl ClusterInfoVoteListener {
bank_forks: Arc<RwLock<BankForks>>,
subscriptions: Arc<RpcSubscriptions>,
verified_vote_sender: VerifiedVoteSender,
replay_votes_receiver: ReplayVotesReceiver,
replay_votes_receiver: ReplayVoteReceiver,
blockstore: Arc<Blockstore>,
) -> Result<()> {
let mut optimistic_confirmation_verifier =
@@ -478,7 +476,7 @@ impl ClusterInfoVoteListener {
root_bank: &Bank,
subscriptions: &RpcSubscriptions,
verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVotesReceiver,
replay_votes_receiver: &ReplayVoteReceiver,
) -> Result<Vec<(Slot, Hash)>> {
Self::get_and_process_votes(
gossip_vote_txs_receiver,
@@ -496,7 +494,7 @@ impl ClusterInfoVoteListener {
root_bank: &Bank,
subscriptions: &RpcSubscriptions,
verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVotesReceiver,
replay_votes_receiver: &ReplayVoteReceiver,
) -> Result<Vec<(Slot, Hash)>> {
let mut sel = Select::new();
sel.recv(gossip_vote_txs_receiver);
@@ -643,11 +641,11 @@ impl ClusterInfoVoteListener {
let mut new_optimistic_confirmed_slots = vec![];
// Process votes from gossip and ReplayStage
for (i, (vote_pubkey, vote, _)) in gossip_vote_txs
for (is_gossip, (vote_pubkey, vote, _)) in gossip_vote_txs
.iter()
.filter_map(|gossip_tx| {
vote_transaction::parse_vote_transaction(gossip_tx).filter(
|(vote_pubkey, vote, _)| {
vote_transaction::parse_vote_transaction(gossip_tx)
.filter(|(vote_pubkey, vote, _)| {
if vote.slots.is_empty() {
return false;
}
@@ -673,11 +671,10 @@ impl ClusterInfoVoteListener {
}
true
},
)
})
.map(|v| (true, v))
})
.chain(replayed_votes)
.enumerate()
.chain(replayed_votes.into_iter().map(|v| (false, v)))
{
Self::update_new_votes(
vote,
@@ -688,7 +685,7 @@ impl ClusterInfoVoteListener {
verified_vote_sender,
&mut diff,
&mut new_optimistic_confirmed_slots,
i < gossip_vote_txs.len(),
is_gossip,
);
}
@@ -772,12 +769,12 @@ impl ClusterInfoVoteListener {
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::blockstore_processor::ReplayVotesSender;
use solana_perf::packet;
use solana_runtime::{
bank::Bank,
commitment::BlockCommitmentCache,
genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs},
vote_sender_types::ReplayVoteSender,
};
use solana_sdk::{
hash::Hash,
@@ -1040,7 +1037,7 @@ mod tests {
validator_voting_keypairs: &[ValidatorVoteKeypairs],
switch_proof_hash: Option<Hash>,
votes_sender: &VerifiedVoteTransactionsSender,
replay_votes_sender: &ReplayVotesSender,
replay_votes_sender: &ReplayVoteSender,
) {
validator_voting_keypairs.iter().for_each(|keypairs| {
let node_keypair = &keypairs.node_keypair;

View File

@@ -0,0 +1,191 @@
use crate::{cluster_info::ClusterInfo, cluster_slots::ClusterSlots};
use solana_ledger::blockstore::{Blockstore, CompletedSlotsReceiver};
use solana_measure::measure::Measure;
use solana_runtime::bank_forks::BankForks;
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use std::{
sync::{
atomic::{AtomicBool, Ordering},
{Arc, RwLock},
},
thread::sleep,
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
#[derive(Default, Debug)]
struct ClusterSlotsServiceTiming {
pub lowest_slot_elapsed: u64,
pub update_completed_slots_elapsed: u64,
}
impl ClusterSlotsServiceTiming {
fn update(&mut self, lowest_slot_elapsed: u64, update_completed_slots_elapsed: u64) {
self.lowest_slot_elapsed += lowest_slot_elapsed;
self.update_completed_slots_elapsed += update_completed_slots_elapsed;
}
}
pub struct ClusterSlotsService {
t_cluster_slots_service: JoinHandle<()>,
}
impl ClusterSlotsService {
pub fn new(
blockstore: Arc<Blockstore>,
cluster_slots: Arc<ClusterSlots>,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<ClusterInfo>,
completed_slots_receiver: CompletedSlotsReceiver,
exit: Arc<AtomicBool>,
) -> Self {
let id = cluster_info.id();
Self::initialize_lowest_slot(id, &blockstore, &cluster_info);
Self::initialize_epoch_slots(&blockstore, &cluster_info, &completed_slots_receiver);
let t_cluster_slots_service = Builder::new()
.name("solana-cluster-slots-service".to_string())
.spawn(move || {
Self::run(
blockstore,
cluster_slots,
bank_forks,
cluster_info,
completed_slots_receiver,
exit,
)
})
.unwrap();
ClusterSlotsService {
t_cluster_slots_service,
}
}
pub fn join(self) -> thread::Result<()> {
self.t_cluster_slots_service.join()
}
fn run(
blockstore: Arc<Blockstore>,
cluster_slots: Arc<ClusterSlots>,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<ClusterInfo>,
completed_slots_receiver: CompletedSlotsReceiver,
exit: Arc<AtomicBool>,
) {
let mut cluster_slots_service_timing = ClusterSlotsServiceTiming::default();
let mut last_stats = Instant::now();
loop {
if exit.load(Ordering::Relaxed) {
break;
}
let new_root = bank_forks.read().unwrap().root();
let id = cluster_info.id();
let mut lowest_slot_elapsed = Measure::start("lowest_slot_elapsed");
let lowest_slot = blockstore.lowest_slot();
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
lowest_slot_elapsed.stop();
let mut update_completed_slots_elapsed =
Measure::start("update_completed_slots_elapsed");
Self::update_completed_slots(&completed_slots_receiver, &cluster_info);
cluster_slots.update(new_root, &cluster_info, &bank_forks);
update_completed_slots_elapsed.stop();
cluster_slots_service_timing.update(
lowest_slot_elapsed.as_us(),
update_completed_slots_elapsed.as_us(),
);
if last_stats.elapsed().as_secs() > 2 {
datapoint_info!(
"cluster_slots_service-timing",
(
"lowest_slot_elapsed",
cluster_slots_service_timing.lowest_slot_elapsed,
i64
),
(
"update_completed_slots_elapsed",
cluster_slots_service_timing.update_completed_slots_elapsed,
i64
),
);
cluster_slots_service_timing = ClusterSlotsServiceTiming::default();
last_stats = Instant::now();
}
sleep(Duration::from_millis(200));
}
}
fn update_completed_slots(
completed_slots_receiver: &CompletedSlotsReceiver,
cluster_info: &ClusterInfo,
) {
let mut slots: Vec<Slot> = vec![];
while let Ok(mut more) = completed_slots_receiver.try_recv() {
slots.append(&mut more);
}
slots.sort();
if !slots.is_empty() {
cluster_info.push_epoch_slots(&slots);
}
}
fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) {
// Safe to set into gossip because by this time, the leader schedule cache should
// also be updated with the latest root (done in blockstore_processor) and thus
// will provide a schedule to window_service for any incoming shreds up to the
// last_confirmed_epoch.
cluster_info.push_lowest_slot(id, blockstore.lowest_slot());
}
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &ClusterInfo) {
cluster_info.push_lowest_slot(*id, lowest_slot);
}
fn initialize_epoch_slots(
blockstore: &Blockstore,
cluster_info: &ClusterInfo,
completed_slots_receiver: &CompletedSlotsReceiver,
) {
let root = blockstore.last_root();
let mut slots: Vec<_> = blockstore
.live_slots_iterator(root)
.filter_map(|(slot, slot_meta)| {
if slot_meta.is_full() {
Some(slot)
} else {
None
}
})
.collect();
while let Ok(mut more) = completed_slots_receiver.try_recv() {
slots.append(&mut more);
}
slots.sort();
slots.dedup();
if !slots.is_empty() {
cluster_info.push_epoch_slots(&slots);
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::cluster_info::Node;
#[test]
pub fn test_update_lowest_slot() {
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
ClusterSlotsService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
let lowest = cluster_info
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
lowest_slot.clone()
})
.unwrap();
assert_eq!(lowest.lowest, 5);
}
}
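// A minimal wiring sketch for the service above, assuming the blockstore,
// cluster-slots tracker, bank forks, cluster info and completed-slots receiver
// are already constructed elsewhere (as they are in the validator); only the
// constructor signature shown above is relied upon.
fn spawn_cluster_slots_service(
    blockstore: Arc<Blockstore>,
    cluster_slots: Arc<ClusterSlots>,
    bank_forks: Arc<RwLock<BankForks>>,
    cluster_info: Arc<ClusterInfo>,
    completed_slots_receiver: CompletedSlotsReceiver,
) -> (ClusterSlotsService, Arc<AtomicBool>) {
    let exit = Arc::new(AtomicBool::new(false));
    let service = ClusterSlotsService::new(
        blockstore,
        cluster_slots,
        bank_forks,
        cluster_info,
        completed_slots_receiver,
        exit.clone(),
    );
    // Set `exit` to true and call `service.join()` during shutdown.
    (service, exit)
}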

View File

@@ -8,6 +8,7 @@ use solana_runtime::{
use solana_sdk::clock::Slot;
use solana_vote_program::vote_state::VoteState;
use std::{
cmp::max,
collections::HashMap,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender},
@@ -103,28 +104,8 @@ impl AggregateCommitmentService {
}
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
let (block_commitment, rooted_stake) =
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
let highest_confirmed_root =
get_highest_confirmed_root(rooted_stake, aggregation_data.total_stake);
let mut new_block_commitment = BlockCommitmentCache::new(
block_commitment,
aggregation_data.total_stake,
CommitmentSlots {
slot: aggregation_data.bank.slot(),
root: aggregation_data.root,
highest_confirmed_slot: aggregation_data.root,
highest_confirmed_root,
},
);
let highest_confirmed_slot = new_block_commitment.calculate_highest_confirmed_slot();
new_block_commitment.set_highest_confirmed_slot(highest_confirmed_slot);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
let update_commitment_slots =
Self::update_commitment_cache(block_commitment_cache, aggregation_data, ancestors);
aggregate_commitment_time.stop();
datapoint_info!(
"block-commitment-cache",
@@ -138,10 +119,46 @@ impl AggregateCommitmentService {
// Triggers rpc_subscription notifications as soon as new commitment data is available,
// sending just the commitment cache slot information that the notifications thread
// needs
subscriptions.notify_subscribers(w_block_commitment_cache.commitment_slots());
subscriptions.notify_subscribers(update_commitment_slots);
}
}
fn update_commitment_cache(
block_commitment_cache: &RwLock<BlockCommitmentCache>,
aggregation_data: CommitmentAggregationData,
ancestors: Vec<u64>,
) -> CommitmentSlots {
let (block_commitment, rooted_stake) =
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
let highest_confirmed_root =
get_highest_confirmed_root(rooted_stake, aggregation_data.total_stake);
let mut new_block_commitment = BlockCommitmentCache::new(
block_commitment,
aggregation_data.total_stake,
CommitmentSlots {
slot: aggregation_data.bank.slot(),
root: aggregation_data.root,
highest_confirmed_slot: aggregation_data.root,
highest_confirmed_root,
},
);
let highest_confirmed_slot = new_block_commitment.calculate_highest_confirmed_slot();
new_block_commitment.set_highest_confirmed_slot(highest_confirmed_slot);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
let highest_confirmed_root = max(
new_block_commitment.highest_confirmed_root(),
w_block_commitment_cache.highest_confirmed_root(),
);
new_block_commitment.set_highest_confirmed_root(highest_confirmed_root);
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
w_block_commitment_cache.commitment_slots()
}
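    // The property the swap above preserves: highest_confirmed_root never moves
    // backwards, because the freshly computed value is clamped with `max` against
    // the value already held in the cache. A minimal restatement of that rule,
    // illustrative only:
    fn monotonic_highest_confirmed_root(newly_computed: Slot, cached: Slot) -> Slot {
        std::cmp::max(newly_computed, cached)
    }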
pub fn aggregate_commitment(
ancestors: &[Slot],
bank: &Bank,
@@ -225,9 +242,16 @@ impl AggregateCommitmentService {
mod tests {
use super::*;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_sdk::pubkey::Pubkey;
use solana_runtime::{
bank_forks::BankForks,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
};
use solana_sdk::{pubkey::Pubkey, signature::Signer};
use solana_stake_program::stake_state;
use solana_vote_program::vote_state::{self, VoteStateVersions};
use solana_vote_program::{
vote_state::{self, VoteStateVersions},
vote_transaction,
};
#[test]
fn test_get_highest_confirmed_root() {
@@ -450,4 +474,160 @@ mod tests {
assert_eq!(rooted_stake.len(), 2);
assert_eq!(get_highest_confirmed_root(rooted_stake, 100), 1)
}
#[test]
fn test_highest_confirmed_root_advance() {
fn get_vote_account_root_slot(vote_pubkey: Pubkey, bank: &Arc<Bank>) -> Slot {
let account = &bank.vote_accounts()[&vote_pubkey].1;
let vote_state = VoteState::from(account).unwrap();
vote_state.root_slot.unwrap()
}
let block_commitment_cache = RwLock::new(BlockCommitmentCache::new_for_tests());
let validator_vote_keypairs = ValidatorVoteKeypairs::new_rand();
let validator_keypairs = vec![&validator_vote_keypairs];
let GenesisConfigInfo {
genesis_config,
mint_keypair: _,
voting_keypair: _,
} = create_genesis_config_with_vote_accounts(
1_000_000_000,
&validator_keypairs,
vec![100; 1],
);
let bank0 = Bank::new(&genesis_config);
let mut bank_forks = BankForks::new(bank0);
// Fill bank_forks with banks with votes landing in the next slot
// Create enough banks such that vote account will root slots 0 and 1
for x in 0..33 {
let previous_bank = bank_forks.get(x).unwrap();
let bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), x + 1);
let vote = vote_transaction::new_vote_transaction(
vec![x],
previous_bank.hash(),
previous_bank.last_blockhash(),
&validator_vote_keypairs.node_keypair,
&validator_vote_keypairs.vote_keypair,
&validator_vote_keypairs.vote_keypair,
None,
);
bank.process_transaction(&vote).unwrap();
bank_forks.insert(bank);
}
let working_bank = bank_forks.working_bank();
let root = get_vote_account_root_slot(
validator_vote_keypairs.vote_keypair.pubkey(),
&working_bank,
);
for x in 0..root {
bank_forks.set_root(x, &None, None);
}
// Add an additional bank/vote that will root slot 2
let bank33 = bank_forks.get(33).unwrap();
let bank34 = Bank::new_from_parent(bank33, &Pubkey::default(), 34);
let vote33 = vote_transaction::new_vote_transaction(
vec![33],
bank33.hash(),
bank33.last_blockhash(),
&validator_vote_keypairs.node_keypair,
&validator_vote_keypairs.vote_keypair,
&validator_vote_keypairs.vote_keypair,
None,
);
bank34.process_transaction(&vote33).unwrap();
bank_forks.insert(bank34);
let working_bank = bank_forks.working_bank();
let root = get_vote_account_root_slot(
validator_vote_keypairs.vote_keypair.pubkey(),
&working_bank,
);
let ancestors = working_bank.status_cache_ancestors();
let _ = AggregateCommitmentService::update_commitment_cache(
&block_commitment_cache,
CommitmentAggregationData {
bank: working_bank,
root: 0,
total_stake: 100,
},
ancestors,
);
let highest_confirmed_root = block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root();
bank_forks.set_root(root, &None, Some(highest_confirmed_root));
let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root);
assert!(highest_confirmed_root_bank.is_some());
// Add a forked bank. Because the vote for slot 33 landed in bank 34, which is not an
// ancestor of this new fork, the vote account's root (and thus the highest_confirmed_root)
// rolls back to slot 1
let bank33 = bank_forks.get(33).unwrap();
let bank35 = Bank::new_from_parent(bank33, &Pubkey::default(), 35);
bank_forks.insert(bank35);
let working_bank = bank_forks.working_bank();
let ancestors = working_bank.status_cache_ancestors();
let _ = AggregateCommitmentService::update_commitment_cache(
&block_commitment_cache,
CommitmentAggregationData {
bank: working_bank,
root: 1,
total_stake: 100,
},
ancestors,
);
let highest_confirmed_root = block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root();
let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root);
assert!(highest_confirmed_root_bank.is_some());
// Add additional banks beyond lockout built on the new fork to ensure that behavior
// continues normally
for x in 35..=37 {
let previous_bank = bank_forks.get(x).unwrap();
let bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), x + 1);
let vote = vote_transaction::new_vote_transaction(
vec![x],
previous_bank.hash(),
previous_bank.last_blockhash(),
&validator_vote_keypairs.node_keypair,
&validator_vote_keypairs.vote_keypair,
&validator_vote_keypairs.vote_keypair,
None,
);
bank.process_transaction(&vote).unwrap();
bank_forks.insert(bank);
}
let working_bank = bank_forks.working_bank();
let root = get_vote_account_root_slot(
validator_vote_keypairs.vote_keypair.pubkey(),
&working_bank,
);
let ancestors = working_bank.status_cache_ancestors();
let _ = AggregateCommitmentService::update_commitment_cache(
&block_commitment_cache,
CommitmentAggregationData {
bank: working_bank,
root: 0,
total_stake: 100,
},
ancestors,
);
let highest_confirmed_root = block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root();
bank_forks.set_root(root, &None, Some(highest_confirmed_root));
let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root);
assert!(highest_confirmed_root_bank.is_some());
}
}

View File

@@ -0,0 +1,82 @@
use crate::rpc_subscriptions::RpcSubscriptions;
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_ledger::blockstore::{Blockstore, CompletedDataSetInfo};
use solana_sdk::signature::Signature;
use std::{
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{self, Builder, JoinHandle},
time::Duration,
};
pub type CompletedDataSetsReceiver = Receiver<Vec<CompletedDataSetInfo>>;
pub type CompletedDataSetsSender = Sender<Vec<CompletedDataSetInfo>>;
pub struct CompletedDataSetsService {
thread_hdl: JoinHandle<()>,
}
impl CompletedDataSetsService {
pub fn new(
completed_sets_receiver: CompletedDataSetsReceiver,
blockstore: Arc<Blockstore>,
rpc_subscriptions: Arc<RpcSubscriptions>,
exit: &Arc<AtomicBool>,
) -> Self {
let exit = exit.clone();
let thread_hdl = Builder::new()
.name("completed-data-set-service".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
if let Err(RecvTimeoutError::Disconnected) = Self::recv_completed_data_sets(
&completed_sets_receiver,
&blockstore,
&rpc_subscriptions,
) {
break;
}
})
.unwrap();
Self { thread_hdl }
}
fn recv_completed_data_sets(
completed_sets_receiver: &CompletedDataSetsReceiver,
blockstore: &Blockstore,
rpc_subscriptions: &RpcSubscriptions,
) -> Result<(), RecvTimeoutError> {
let completed_data_sets = completed_sets_receiver.recv_timeout(Duration::from_secs(1))?;
for completed_set_info in std::iter::once(completed_data_sets)
.chain(completed_sets_receiver.try_iter())
.flatten()
{
let CompletedDataSetInfo {
slot,
start_index,
end_index,
} = completed_set_info;
match blockstore.get_entries_in_data_block(slot, start_index, end_index, None) {
Ok(entries) => {
let transactions = entries
.into_iter()
.flat_map(|e| e.transactions.into_iter().map(|t| t.signatures[0]))
.collect::<Vec<Signature>>();
if !transactions.is_empty() {
rpc_subscriptions.notify_signatures_received((slot, transactions));
}
}
Err(e) => warn!("completed-data-set-service deserialize error: {:?}", e),
}
}
Ok(())
}
pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}
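// A minimal wiring sketch, assuming the blockstore and RPC subscriptions are
// already available: the sender side goes to whatever component completes data
// sets, the receiver side drives this service. Only the channel types and the
// constructor shown above are relied upon.
fn spawn_completed_data_sets_service(
    blockstore: Arc<Blockstore>,
    rpc_subscriptions: Arc<RpcSubscriptions>,
    exit: &Arc<AtomicBool>,
) -> (CompletedDataSetsSender, CompletedDataSetsService) {
    let (sender, receiver): (CompletedDataSetsSender, CompletedDataSetsReceiver) =
        crossbeam_channel::unbounded();
    let service = CompletedDataSetsService::new(receiver, blockstore, rpc_subscriptions, exit);
    (sender, service)
}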

View File

@@ -18,7 +18,7 @@ use solana_vote_program::{
},
};
use std::{
collections::{BTreeMap, HashMap, HashSet},
collections::{HashMap, HashSet},
ops::Bound::{Included, Unbounded},
sync::{Arc, RwLock},
};
@@ -128,9 +128,9 @@ impl Tower {
pub(crate) fn collect_vote_lockouts<F>(
node_pubkey: &Pubkey,
bank_slot: u64,
bank_slot: Slot,
vote_accounts: F,
ancestors: &HashMap<Slot, HashSet<u64>>,
ancestors: &HashMap<Slot, HashSet<Slot>>,
all_pubkeys: &mut PubkeyReferences,
) -> ComputedBankState
where
@@ -141,13 +141,13 @@ impl Tower {
let mut bank_weight = 0;
// Tree of intervals of lockouts of the form [slot, slot + slot.lockout],
// keyed by end of the range
let mut lockout_intervals = BTreeMap::new();
let mut lockout_intervals = LockoutIntervals::new();
let mut pubkey_votes = vec![];
for (key, (lamports, account)) in vote_accounts {
if lamports == 0 {
for (key, (voted_stake, account)) in vote_accounts {
if voted_stake == 0 {
continue;
}
trace!("{} {} with stake {}", node_pubkey, key, lamports);
trace!("{} {} with stake {}", node_pubkey, key, voted_stake);
let vote_state = VoteState::from(&account);
if vote_state.is_none() {
datapoint_warn!(
@@ -197,7 +197,7 @@ impl Tower {
vote_state.process_slot_vote_unchecked(bank_slot);
for vote in &vote_state.votes {
bank_weight += vote.lockout() as u128 * lamports as u128;
bank_weight += vote.lockout() as u128 * voted_stake as u128;
Self::populate_ancestor_voted_stakes(&mut voted_stakes, &vote, ancestors);
}
@@ -208,7 +208,7 @@ impl Tower {
slot: root,
};
trace!("ROOT: {}", vote.slot);
bank_weight += vote.lockout() as u128 * lamports as u128;
bank_weight += vote.lockout() as u128 * voted_stake as u128;
Self::populate_ancestor_voted_stakes(&mut voted_stakes, &vote, ancestors);
}
}
@@ -217,7 +217,7 @@ impl Tower {
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
slot: root,
};
bank_weight += vote.lockout() as u128 * lamports as u128;
bank_weight += vote.lockout() as u128 * voted_stake as u128;
Self::populate_ancestor_voted_stakes(&mut voted_stakes, &vote, ancestors);
}
@@ -239,11 +239,11 @@ impl Tower {
Self::update_ancestor_voted_stakes(
&mut voted_stakes,
vote.slot,
lamports,
voted_stake,
ancestors,
);
}
total_stake += lamports;
total_stake += voted_stake;
}
ComputedBankState {
@@ -473,8 +473,12 @@ impl Tower {
.lockout_intervals;
// Find any locked out intervals in this bank with endpoint >= last_vote,
// implies they are locked out at last_vote
for (_lockout_interval_end, value) in lockout_intervals.range((Included(last_voted_slot), Unbounded)) {
for (lockout_interval_start, vote_account_pubkey) in value {
for (_lockout_interval_end, intervals_keyed_by_end) in lockout_intervals.range((Included(last_voted_slot), Unbounded)) {
for (lockout_interval_start, vote_account_pubkey) in intervals_keyed_by_end {
if locked_out_vote_accounts.contains(vote_account_pubkey) {
continue;
}
// Only count lockouts on slots that are:
// 1) Not ancestors of `last_vote`
// 2) Not from before the current root as we can't determine if
@@ -485,7 +489,6 @@ impl Tower {
// is an ancestor of the current root, because `candidate_slot` is a
// descendant of the current root
&& *lockout_interval_start > root
&& !locked_out_vote_accounts.contains(vote_account_pubkey)
{
let stake = epoch_vote_accounts
.get(vote_account_pubkey)
@@ -587,21 +590,21 @@ impl Tower {
/// Note, stake is the same for all the ancestors.
fn update_ancestor_voted_stakes(
voted_stakes: &mut VotedStakes,
slot: Slot,
lamports: u64,
voted_slot: Slot,
voted_stake: u64,
ancestors: &HashMap<Slot, HashSet<Slot>>,
) {
// If there are no ancestors, that means this slot must be from
// before the current root, so ignore this slot
let vote_slot_ancestors = ancestors.get(&slot);
let vote_slot_ancestors = ancestors.get(&voted_slot);
if vote_slot_ancestors.is_none() {
return;
}
let mut slot_with_ancestors = vec![slot];
let mut slot_with_ancestors = vec![voted_slot];
slot_with_ancestors.extend(vote_slot_ancestors.unwrap());
for slot in slot_with_ancestors {
let current = voted_stakes.entry(slot).or_default();
*current += lamports;
*current += voted_stake;
}
}
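    // A worked example of the propagation above, assuming slot 10 has recorded
    // ancestors {8, 9}: a vote on slot 10 carrying voted_stake 100 adds 100 to the
    // entries for slots 8, 9 and 10, while a slot with no recorded ancestors
    // (i.e. from before the root) is skipped entirely. Illustrative only.
    #[allow(dead_code)]
    fn ancestor_stake_example() {
        use std::collections::{HashMap, HashSet};
        let mut ancestors: HashMap<Slot, HashSet<Slot>> = HashMap::new();
        ancestors.insert(10, [8, 9].iter().copied().collect());
        let mut voted_stakes: HashMap<Slot, u64> = HashMap::new();
        if let Some(slot_ancestors) = ancestors.get(&10) {
            for slot in std::iter::once(10).chain(slot_ancestors.iter().copied()) {
                *voted_stakes.entry(slot).or_default() += 100;
            }
        }
        assert_eq!(voted_stakes[&8], 100);
        assert_eq!(voted_stakes[&9], 100);
        assert_eq!(voted_stakes[&10], 100);
    }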

View File

@@ -25,8 +25,8 @@ pub struct ContactInfo {
pub tpu: SocketAddr,
/// address to forward unprocessed transactions to
pub tpu_forwards: SocketAddr,
/// unused address
pub unused: SocketAddr,
/// address to which to send bank state requests
pub rpc_banks: SocketAddr,
/// address to which to send JSON-RPC requests
pub rpc: SocketAddr,
/// websocket for JSON-RPC push notifications
@@ -95,7 +95,7 @@ impl Default for ContactInfo {
repair: socketaddr_any!(),
tpu: socketaddr_any!(),
tpu_forwards: socketaddr_any!(),
unused: socketaddr_any!(),
rpc_banks: socketaddr_any!(),
rpc: socketaddr_any!(),
rpc_pubsub: socketaddr_any!(),
serve_repair: socketaddr_any!(),
@@ -115,7 +115,7 @@ impl ContactInfo {
repair: socketaddr!("127.0.0.1:1237"),
tpu: socketaddr!("127.0.0.1:1238"),
tpu_forwards: socketaddr!("127.0.0.1:1239"),
unused: socketaddr!("127.0.0.1:1240"),
rpc_banks: socketaddr!("127.0.0.1:1240"),
rpc: socketaddr!("127.0.0.1:1241"),
rpc_pubsub: socketaddr!("127.0.0.1:1242"),
serve_repair: socketaddr!("127.0.0.1:1243"),
@@ -137,7 +137,7 @@ impl ContactInfo {
repair: addr,
tpu: addr,
tpu_forwards: addr,
unused: addr,
rpc_banks: addr,
rpc: addr,
rpc_pubsub: addr,
serve_repair: addr,
@@ -162,6 +162,7 @@ impl ContactInfo {
let repair = next_port(&bind_addr, 5);
let rpc = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
let rpc_pubsub = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
let rpc_banks = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_BANKS_PORT);
let serve_repair = next_port(&bind_addr, 6);
Self {
id: *pubkey,
@@ -171,7 +172,7 @@ impl ContactInfo {
repair,
tpu,
tpu_forwards,
unused: "0.0.0.0:0".parse().unwrap(),
rpc_banks,
rpc,
rpc_pubsub,
serve_repair,
@@ -248,7 +249,7 @@ mod tests {
assert!(ci.rpc.ip().is_unspecified());
assert!(ci.rpc_pubsub.ip().is_unspecified());
assert!(ci.tpu.ip().is_unspecified());
assert!(ci.unused.ip().is_unspecified());
assert!(ci.rpc_banks.ip().is_unspecified());
assert!(ci.serve_repair.ip().is_unspecified());
}
#[test]
@@ -260,7 +261,7 @@ mod tests {
assert!(ci.rpc.ip().is_multicast());
assert!(ci.rpc_pubsub.ip().is_multicast());
assert!(ci.tpu.ip().is_multicast());
assert!(ci.unused.ip().is_multicast());
assert!(ci.rpc_banks.ip().is_multicast());
assert!(ci.serve_repair.ip().is_multicast());
}
#[test]
@@ -273,7 +274,7 @@ mod tests {
assert!(ci.rpc.ip().is_unspecified());
assert!(ci.rpc_pubsub.ip().is_unspecified());
assert!(ci.tpu.ip().is_unspecified());
assert!(ci.unused.ip().is_unspecified());
assert!(ci.rpc_banks.ip().is_unspecified());
assert!(ci.serve_repair.ip().is_unspecified());
}
#[test]
@@ -286,7 +287,7 @@ mod tests {
assert_eq!(ci.tpu_forwards.port(), 13);
assert_eq!(ci.rpc.port(), rpc_port::DEFAULT_RPC_PORT);
assert_eq!(ci.rpc_pubsub.port(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
assert!(ci.unused.ip().is_unspecified());
assert_eq!(ci.rpc_banks.port(), rpc_port::DEFAULT_RPC_BANKS_PORT);
assert_eq!(ci.serve_repair.port(), 16);
}
@@ -310,6 +311,10 @@ mod tests {
d1.rpc_pubsub,
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PUBSUB_PORT))
);
assert_eq!(
d1.rpc_banks,
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_BANKS_PORT))
);
assert_eq!(d1.tvu_forwards, socketaddr!("127.0.0.1:1238"));
assert_eq!(d1.repair, socketaddr!("127.0.0.1:1239"));
assert_eq!(d1.serve_repair, socketaddr!("127.0.0.1:1240"));

View File

@@ -24,6 +24,7 @@
//! A value is updated to a new version if the labels match, and the value
//! wallclock is later, or the value hash is greater.
use crate::crds_gossip_pull::CrdsFilter;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
use bincode::serialize;
use indexmap::map::IndexMap;
@@ -37,6 +38,8 @@ pub struct Crds {
/// Stores the map of labels and values
pub table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
pub num_inserts: usize,
pub masks: IndexMap<CrdsValueLabel, u64>,
}
#[derive(PartialEq, Debug)]
@@ -86,6 +89,7 @@ impl Default for Crds {
Crds {
table: IndexMap::new(),
num_inserts: 0,
masks: IndexMap::new(),
}
}
}
@@ -126,6 +130,10 @@ impl Crds {
.map(|current| new_value > *current)
.unwrap_or(true);
if do_insert {
self.masks.insert(
label.clone(),
CrdsFilter::hash_as_u64(&new_value.value_hash),
);
let old = self.table.insert(label, new_value);
self.num_inserts += 1;
Ok(old)
@@ -193,6 +201,7 @@ impl Crds {
pub fn remove(&mut self, key: &CrdsValueLabel) {
self.table.swap_remove(key);
self.masks.swap_remove(key);
}
}
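// The bookkeeping above aims to keep `masks` as a mirror of `table`: every
// stored label maps to the 64-bit prefix of its value hash, so pull-request
// handling can pre-filter by mask without touching the full values. A small
// sketch of that intended invariant, illustrative only:
#[allow(dead_code)]
fn masks_mirror_table(crds: &Crds) -> bool {
    crds.table.len() == crds.masks.len()
        && crds.table.iter().all(|(label, versioned)| {
            crds.masks.get(label).copied()
                == Some(CrdsFilter::hash_as_u64(&versioned.value_hash))
        })
}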

View File

@@ -115,10 +115,15 @@ impl CrdsGossip {
/// refresh the push active set
/// * ratio - number of actives to rotate
pub fn refresh_push_active_set(&mut self, stakes: &HashMap<Pubkey, u64>) {
pub fn refresh_push_active_set(
&mut self,
stakes: &HashMap<Pubkey, u64>,
gossip_validators: Option<&HashSet<Pubkey>>,
) {
self.push.refresh_push_active_set(
&self.crds,
stakes,
gossip_validators,
&self.id,
self.shred_version,
self.pull.pull_request_time.len(),
@@ -130,6 +135,7 @@ impl CrdsGossip {
pub fn new_pull_request(
&self,
now: u64,
gossip_validators: Option<&HashSet<Pubkey>>,
stakes: &HashMap<Pubkey, u64>,
bloom_size: usize,
) -> Result<(Pubkey, Vec<CrdsFilter>, CrdsValue), CrdsGossipError> {
@@ -138,6 +144,7 @@ impl CrdsGossip {
&self.id,
self.shred_version,
now,
gossip_validators,
stakes,
bloom_size,
)
@@ -159,8 +166,9 @@ impl CrdsGossip {
pub fn generate_pull_responses(
&self,
filters: &[(CrdsValue, CrdsFilter)],
now: u64,
) -> Vec<Vec<CrdsValue>> {
self.pull.generate_pull_responses(&self.crds, filters)
self.pull.generate_pull_responses(&self.crds, filters, now)
}
pub fn filter_pull_responses(
@@ -270,7 +278,7 @@ mod test {
0,
)
.unwrap();
crds_gossip.refresh_push_active_set(&HashMap::new());
crds_gossip.refresh_push_active_set(&HashMap::new(), None);
let now = timestamp();
//incorrect dest
let mut res = crds_gossip.process_prune_msg(

View File

@@ -22,6 +22,7 @@ use solana_sdk::pubkey::Pubkey;
use std::cmp;
use std::collections::VecDeque;
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
// The maximum age of a value received over pull responses
@@ -29,13 +30,23 @@ pub const CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS: u64 = 60000;
pub const FALSE_RATE: f64 = 0.1f64;
pub const KEYS: f64 = 8f64;
#[derive(Serialize, Deserialize, Default, Clone, Debug, PartialEq, AbiExample)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, AbiExample)]
pub struct CrdsFilter {
pub filter: Bloom<Hash>,
mask: u64,
mask_bits: u32,
}
impl Default for CrdsFilter {
fn default() -> Self {
CrdsFilter {
filter: Bloom::default(),
mask: !0u64,
mask_bits: 0u32,
}
}
}
impl solana_sdk::sanitize::Sanitize for CrdsFilter {
fn sanitize(&self) -> std::result::Result<(), solana_sdk::sanitize::SanitizeError> {
self.filter.sanitize()?;
@@ -57,25 +68,7 @@ impl CrdsFilter {
mask_bits,
}
}
// generates a vec of filters that together hold a complete set of Hashes
pub fn new_complete_set(num_items: usize, max_bytes: usize) -> Vec<Self> {
let max_bits = (max_bytes * 8) as f64;
let max_items = Self::max_items(max_bits, FALSE_RATE, KEYS);
let mask_bits = Self::mask_bits(num_items as f64, max_items as f64);
// for each possible mask combination, generate a new filter.
let mut filters = vec![];
for seed in 0..2u64.pow(mask_bits) {
let filter = Bloom::random(max_items as usize, FALSE_RATE, max_bits as usize);
let mask = Self::compute_mask(seed, mask_bits);
let filter = CrdsFilter {
filter,
mask,
mask_bits,
};
filters.push(filter)
}
filters
}
fn compute_mask(seed: u64, mask_bits: u32) -> u64 {
assert!(seed <= 2u64.pow(mask_bits));
let seed: u64 = seed.checked_shl(64 - mask_bits).unwrap_or(0x0);
@@ -91,13 +84,13 @@ impl CrdsFilter {
// for small ratios this can result in a negative number, ensure it returns 0 instead
((num_items / max_items).log2().ceil()).max(0.0) as u32
}
fn hash_as_u64(item: &Hash) -> u64 {
let arr = item.as_ref();
let mut accum = 0;
for (i, val) in arr.iter().enumerate().take(8) {
accum |= (u64::from(*val)) << (i * 8) as u64;
}
accum
pub fn hash_as_u64(item: &Hash) -> u64 {
let buf = item.as_ref()[..8].try_into().unwrap();
u64::from_le_bytes(buf)
}
pub fn test_mask_u64(&self, item: u64, ones: u64) -> bool {
let bits = item | ones;
bits == self.mask
}
pub fn test_mask(&self, item: &Hash) -> bool {
// only consider the highest mask_bits bits from the hash and set the rest to 1.
@@ -116,6 +109,42 @@ impl CrdsFilter {
}
self.filter.contains(item)
}
pub fn filter_contains(&self, item: &Hash) -> bool {
self.filter.contains(item)
}
}
/// A vector of crds filters that together hold a complete set of Hashes.
struct CrdsFilterSet(Vec<CrdsFilter>);
impl CrdsFilterSet {
fn new(num_items: usize, max_bytes: usize) -> Self {
let max_bits = (max_bytes * 8) as f64;
let max_items = CrdsFilter::max_items(max_bits, FALSE_RATE, KEYS);
let mask_bits = CrdsFilter::mask_bits(num_items as f64, max_items as f64);
// For each possible mask combination, generate a new filter.
let seeds = 0..2u64.pow(mask_bits);
let filters = seeds.map(|seed| {
let filter = Bloom::random(max_items as usize, FALSE_RATE, max_bits as usize);
let mask = CrdsFilter::compute_mask(seed, mask_bits);
CrdsFilter {
filter,
mask,
mask_bits,
}
});
Self(filters.collect())
}
// Returns the filter within the vector of crds filters which corresponds
// to the given hash value.
fn get(&mut self, hash_value: &Hash) -> Option<&mut CrdsFilter> {
let shift = 64 - self.0.first()?.mask_bits.min(64);
let index = CrdsFilter::hash_as_u64(hash_value)
.checked_shr(shift)
.unwrap_or(0u64);
self.0.get_mut(index as usize)
}
}
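// A small sketch of the indexing rule behind CrdsFilterSet::get above: with
// 2^mask_bits filters, the filter index for a hash is simply the top mask_bits
// bits of its 64-bit prefix (and 0 when mask_bits is 0). Illustrative only; it
// mirrors the logic already shown in get().
#[allow(dead_code)]
fn filter_index(hash_value: &Hash, mask_bits: u32) -> u64 {
    CrdsFilter::hash_as_u64(hash_value)
        .checked_shr(64 - mask_bits.min(64))
        .unwrap_or(0)
}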
#[derive(Default)]
@@ -131,7 +160,7 @@ pub struct CrdsGossipPull {
/// timestamp of last request
pub pull_request_time: HashMap<Pubkey, u64>,
/// hash and insert time
purged_values: VecDeque<(Hash, u64)>,
pub purged_values: VecDeque<(Hash, u64)>,
pub crds_timeout: u64,
pub msg_timeout: u64,
pub num_pulls: usize,
@@ -156,10 +185,18 @@ impl CrdsGossipPull {
self_id: &Pubkey,
self_shred_version: u16,
now: u64,
gossip_validators: Option<&HashSet<Pubkey>>,
stakes: &HashMap<Pubkey, u64>,
bloom_size: usize,
) -> Result<(Pubkey, Vec<CrdsFilter>, CrdsValue), CrdsGossipError> {
let options = self.pull_options(crds, &self_id, self_shred_version, now, stakes);
let options = self.pull_options(
crds,
&self_id,
self_shred_version,
now,
gossip_validators,
stakes,
);
if options.is_empty() {
return Err(CrdsGossipError::NoPeers);
}
@@ -178,6 +215,7 @@ impl CrdsGossipPull {
self_id: &Pubkey,
self_shred_version: u16,
now: u64,
gossip_validators: Option<&HashSet<Pubkey>>,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<(f32, &'a ContactInfo)> {
crds.table
@@ -186,9 +224,9 @@ impl CrdsGossipPull {
.filter(|v| {
v.id != *self_id
&& ContactInfo::is_valid_address(&v.gossip)
&& (self_shred_version == 0
|| v.shred_version == 0
|| self_shred_version == v.shred_version)
&& (self_shred_version == 0 || self_shred_version == v.shred_version)
&& gossip_validators
.map_or(true, |gossip_validators| gossip_validators.contains(&v.id))
})
.map(|item| {
let max_weight = f32::from(u16::max_value()) - 1.0;
@@ -237,8 +275,9 @@ impl CrdsGossipPull {
&self,
crds: &Crds,
requests: &[(CrdsValue, CrdsFilter)],
now: u64,
) -> Vec<Vec<CrdsValue>> {
self.filter_crds_values(crds, requests)
self.filter_crds_values(crds, requests, now)
}
// Checks if responses should be inserted and
@@ -355,31 +394,77 @@ impl CrdsGossipPull {
CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS,
crds.table.values().count() + self.purged_values.len(),
);
let mut filters = CrdsFilter::new_complete_set(num, bloom_size);
let mut filters = CrdsFilterSet::new(num, bloom_size);
let mut add_value_hash = |value_hash| {
if let Some(filter) = filters.get(value_hash) {
debug_assert!(filter.test_mask(value_hash));
filter.filter.add(value_hash);
}
};
for v in crds.table.values() {
filters
.iter_mut()
.for_each(|filter| filter.add(&v.value_hash));
add_value_hash(&v.value_hash);
}
for (value_hash, _insert_timestamp) in &self.purged_values {
filters.iter_mut().for_each(|filter| filter.add(value_hash));
add_value_hash(&value_hash);
}
filters
filters.0
}
/// filter values that fail the bloom filter up to max_bytes
fn filter_crds_values(
&self,
crds: &Crds,
filters: &[(CrdsValue, CrdsFilter)],
now: u64,
) -> Vec<Vec<CrdsValue>> {
let mut ret = vec![vec![]; filters.len()];
for v in crds.table.values() {
filters.iter().enumerate().for_each(|(i, (_, filter))| {
if !filter.contains(&v.value_hash) {
ret[i].push(v.value.clone());
}
});
let msg_timeout = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
let jitter = rand::thread_rng().gen_range(0, msg_timeout / 4);
let start = filters.len();
//skip filters from callers that are too old
let future = now.saturating_add(msg_timeout);
let past = now.saturating_sub(msg_timeout);
let recent: Vec<_> = filters
.iter()
.enumerate()
.filter(|(_, (caller, _))| caller.wallclock() < future && caller.wallclock() >= past)
.collect();
inc_new_counter_info!(
"gossip_filter_crds_values-dropped_requests",
start - recent.len()
);
if recent.is_empty() {
return ret;
}
let mut total_skipped = 0;
let mask_ones: Vec<_> = recent
.iter()
.map(|(_i, (_caller, filter))| (!0u64).checked_shr(filter.mask_bits).unwrap_or(!0u64))
.collect();
for (label, mask) in crds.masks.iter() {
recent
.iter()
.zip(mask_ones.iter())
.for_each(|((i, (caller, filter)), mask_ones)| {
if filter.test_mask_u64(*mask, *mask_ones) {
let item = crds.table.get(label).unwrap();
//skip values that are too new
if item.value.wallclock()
> caller.wallclock().checked_add(jitter).unwrap_or_else(|| 0)
{
total_skipped += 1;
return;
}
if !filter.filter_contains(&item.value_hash) {
ret[*i].push(item.value.clone());
}
}
});
}
inc_new_counter_info!("gossip_filter_crds_values-dropped_values", total_skipped);
ret
}
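    // A small sketch of the mask pre-check used above: a stored value is only a
    // candidate for a given request filter when the top mask_bits bits of its
    // 64-bit hash prefix equal the filter's mask; the low bits are forced to 1 on
    // both sides before comparing. Illustrative only, mirroring test_mask_u64.
    #[allow(dead_code)]
    fn mask_matches(filter_mask: u64, filter_mask_bits: u32, value_mask: u64) -> bool {
        let ones = (!0u64).checked_shr(filter_mask_bits).unwrap_or(!0u64);
        (value_mask | ones) == (filter_mask | ones)
    }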
pub fn make_timeouts_def(
@@ -471,10 +556,50 @@ mod test {
use crate::contact_info::ContactInfo;
use crate::crds_value::{CrdsData, Vote};
use itertools::Itertools;
use rand::{thread_rng, RngCore};
use solana_perf::test_tx::test_tx;
use solana_sdk::hash::hash;
use solana_sdk::hash::{hash, HASH_BYTES};
use solana_sdk::packet::PACKET_DATA_SIZE;
#[test]
fn test_hash_as_u64() {
let arr: Vec<u8> = (0..HASH_BYTES).map(|i| i as u8 + 1).collect();
let hash = Hash::new(&arr);
assert_eq!(CrdsFilter::hash_as_u64(&hash), 0x807060504030201);
}
#[test]
fn test_hash_as_u64_random() {
fn hash_as_u64_bitops(hash: &Hash) -> u64 {
let mut out = 0;
for (i, val) in hash.as_ref().iter().enumerate().take(8) {
out |= (u64::from(*val)) << (i * 8) as u64;
}
out
}
let mut rng = thread_rng();
for _ in 0..100 {
let mut buf = [0u8; HASH_BYTES];
rng.fill(&mut buf);
let hash = Hash::new(&buf);
assert_eq!(CrdsFilter::hash_as_u64(&hash), hash_as_u64_bitops(&hash));
}
}
#[test]
fn test_crds_filter_default() {
let filter = CrdsFilter::default();
let mask = CrdsFilter::compute_mask(0, filter.mask_bits);
assert_eq!(filter.mask, mask);
let mut rng = thread_rng();
for _ in 0..10 {
let mut buf = [0u8; HASH_BYTES];
rng.fill(&mut buf);
let hash = Hash::new(&buf);
assert!(filter.test_mask(&hash));
}
}
#[test]
fn test_new_pull_with_stakes() {
let mut crds = Crds::default();
@@ -495,7 +620,7 @@ mod test {
stakes.insert(id, i * 100);
}
let now = 1024;
let mut options = node.pull_options(&crds, &me.label().pubkey(), 0, now, &stakes);
let mut options = node.pull_options(&crds, &me.label().pubkey(), 0, now, None, &stakes);
assert!(!options.is_empty());
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
// check that the highest stake holder is also the heaviest weighted.
@@ -543,19 +668,19 @@ mod test {
crds.insert(node_123.clone(), 0).unwrap();
crds.insert(node_456.clone(), 0).unwrap();
// shred version 123 should ignore 456 nodes
// shred version 123 should ignore nodes with versions 0 and 456
let options = node
.pull_options(&crds, &me.label().pubkey(), 123, 0, &stakes)
.pull_options(&crds, &me.label().pubkey(), 123, 0, None, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 2);
assert!(options.contains(&spy.pubkey()));
assert_eq!(options.len(), 1);
assert!(!options.contains(&spy.pubkey()));
assert!(options.contains(&node_123.pubkey()));
// spy nodes will see all
let options = node
.pull_options(&crds, &spy.label().pubkey(), 0, 0, &stakes)
.pull_options(&crds, &spy.label().pubkey(), 0, 0, None, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
@@ -565,6 +690,109 @@ mod test {
assert!(options.contains(&node_456.pubkey()));
}
#[test]
fn test_pulls_only_from_allowed() {
let mut crds = Crds::default();
let stakes = HashMap::new();
let node = CrdsGossipPull::default();
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
crds.insert(me.clone(), 0).unwrap();
crds.insert(node_123.clone(), 0).unwrap();
// Empty gossip_validators -- will pull from nobody
let mut gossip_validators = HashSet::new();
let options = node.pull_options(
&crds,
&me.label().pubkey(),
0,
0,
Some(&gossip_validators),
&stakes,
);
assert!(options.is_empty());
// Unknown pubkey in gossip_validators -- will pull from nobody
gossip_validators.insert(Pubkey::new_rand());
let options = node.pull_options(
&crds,
&me.label().pubkey(),
0,
0,
Some(&gossip_validators),
&stakes,
);
assert!(options.is_empty());
// node_123 pubkey in gossip_validators -- will pull from it
gossip_validators.insert(node_123.pubkey());
let options = node.pull_options(
&crds,
&me.label().pubkey(),
0,
0,
Some(&gossip_validators),
&stakes,
);
assert_eq!(options.len(), 1);
assert_eq!(options[0].1.id, node_123.pubkey());
}
#[test]
fn test_crds_filter_set_get() {
let mut crds_filter_set =
CrdsFilterSet::new(/*num_items=*/ 9672788, /*max_bytes=*/ 8196);
assert_eq!(crds_filter_set.0.len(), 1024);
let mut rng = thread_rng();
for _ in 0..100 {
let mut bytes = [0u8; HASH_BYTES];
rng.fill_bytes(&mut bytes);
let hash_value = Hash::new(&bytes);
let filter = crds_filter_set.get(&hash_value).unwrap().clone();
assert!(filter.test_mask(&hash_value));
// Validate that the returned filter is the *unique* filter which
// corresponds to the hash value (i.e. test_mask returns true).
let mut num_hits = 0;
for f in &crds_filter_set.0 {
if *f == filter {
num_hits += 1;
assert!(f.test_mask(&hash_value));
} else {
assert!(!f.test_mask(&hash_value));
}
}
assert_eq!(num_hits, 1);
}
}
#[test]
fn test_crds_filter_set_new() {
// Validates invariants required by CrdsFilterSet::get in the
// vector of filters generated by CrdsFilterSet::new.
let filters = CrdsFilterSet::new(/*num_items=*/ 55345017, /*max_bytes=*/ 4098).0;
assert_eq!(filters.len(), 16384);
let mask_bits = filters[0].mask_bits;
let right_shift = 64 - mask_bits;
let ones = !0u64 >> mask_bits;
for (i, filter) in filters.iter().enumerate() {
// Check that all mask_bits are equal.
assert_eq!(mask_bits, filter.mask_bits);
assert_eq!(i as u64, filter.mask >> right_shift);
assert_eq!(ones, ones & filter.mask);
}
}
#[test]
fn test_new_pull_request() {
let mut crds = Crds::default();
@@ -575,13 +803,13 @@ mod test {
let id = entry.label().pubkey();
let node = CrdsGossipPull::default();
assert_eq!(
node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE),
node.new_pull_request(&crds, &id, 0, 0, None, &HashMap::new(), PACKET_DATA_SIZE),
Err(CrdsGossipError::NoPeers)
);
crds.insert(entry.clone(), 0).unwrap();
assert_eq!(
node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE),
node.new_pull_request(&crds, &id, 0, 0, None, &HashMap::new(), PACKET_DATA_SIZE),
Err(CrdsGossipError::NoPeers)
);
@@ -590,7 +818,7 @@ mod test {
0,
)));
crds.insert(new.clone(), 0).unwrap();
let req = node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE);
let req = node.new_pull_request(&crds, &id, 0, 0, None, &HashMap::new(), PACKET_DATA_SIZE);
let (to, _, self_info) = req.unwrap();
assert_eq!(to, new.label().pubkey());
assert_eq!(self_info, entry);
@@ -627,6 +855,7 @@ mod test {
&node_pubkey,
0,
u64::max_value(),
None,
&HashMap::new(),
PACKET_DATA_SIZE,
);
@@ -636,6 +865,67 @@ mod test {
}
}
#[test]
fn test_generate_pull_responses() {
let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let node_pubkey = entry.label().pubkey();
let node = CrdsGossipPull::default();
node_crds.insert(entry, 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
node_crds.insert(new, 0).unwrap();
let req = node.new_pull_request(
&node_crds,
&node_pubkey,
0,
0,
None,
&HashMap::new(),
PACKET_DATA_SIZE,
);
let mut dest_crds = Crds::default();
let dest = CrdsGossipPull::default();
let (_, filters, caller) = req.unwrap();
let mut filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
assert_eq!(rsp[0].len(), 0);
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
)));
dest_crds
.insert(new, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS)
.unwrap();
//should skip new value since caller is too old
let rsp =
dest.generate_pull_responses(&dest_crds, &filters, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS);
assert_eq!(rsp[0].len(), 0);
assert_eq!(filters.len(), 1);
filters.push(filters[0].clone());
//should return new value since caller is new
filters[1].0 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1,
)));
let rsp =
dest.generate_pull_responses(&dest_crds, &filters, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS);
assert_eq!(rsp.len(), 2);
assert_eq!(rsp[0].len(), 0);
assert_eq!(rsp[1].len(), 1); // Orders are also preserved.
}
#[test]
fn test_process_pull_request() {
let mut node_crds = Crds::default();
@@ -656,6 +946,7 @@ mod test {
&node_pubkey,
0,
0,
None,
&HashMap::new(),
PACKET_DATA_SIZE,
);
@@ -664,7 +955,7 @@ mod test {
let mut dest = CrdsGossipPull::default();
let (_, filters, caller) = req.unwrap();
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.generate_pull_responses(&dest_crds, &filters);
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
dest.process_pull_requests(&mut dest_crds, filters, 1);
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
assert!(dest_crds.lookup(&caller.label()).is_some());
@@ -688,7 +979,7 @@ mod test {
let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
1,
)));
let node_pubkey = entry.label().pubkey();
let mut node = CrdsGossipPull::default();
@@ -696,7 +987,7 @@ mod test {
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
1,
)));
node_crds.insert(new, 0).unwrap();
@@ -730,12 +1021,13 @@ mod test {
&node_pubkey,
0,
0,
None,
&HashMap::new(),
PACKET_DATA_SIZE,
);
let (_, filters, caller) = req.unwrap();
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters);
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
dest.process_pull_requests(&mut dest_crds, filters, 0);
// if there is a false positive this is empty
// prob should be around 0.1 per iteration
@@ -855,7 +1147,7 @@ mod test {
}
#[test]
fn test_crds_filter_complete_set_add_mask() {
let mut filters = CrdsFilter::new_complete_set(1000, 10);
let mut filters = CrdsFilterSet::new(1000, 10).0;
assert!(filters.iter().all(|f| f.mask_bits > 0));
let mut h: Hash = Hash::default();
// rev to make the hash::default() miss on the first few test_masks

View File

@@ -280,6 +280,7 @@ impl CrdsGossipPush {
&mut self,
crds: &Crds,
stakes: &HashMap<Pubkey, u64>,
gossip_validators: Option<&HashSet<Pubkey>>,
self_id: &Pubkey,
self_shred_version: u16,
network_size: usize,
@@ -288,7 +289,13 @@ impl CrdsGossipPush {
let need = Self::compute_need(self.num_active, self.active_set.len(), ratio);
let mut new_items = HashMap::new();
let options: Vec<_> = self.push_options(crds, &self_id, self_shred_version, stakes);
let options: Vec<_> = self.push_options(
crds,
&self_id,
self_shred_version,
stakes,
gossip_validators,
);
if options.is_empty() {
return;
}
@@ -336,6 +343,7 @@ impl CrdsGossipPush {
self_id: &Pubkey,
self_shred_version: u16,
stakes: &HashMap<Pubkey, u64>,
gossip_validators: Option<&HashSet<Pubkey>>,
) -> Vec<(f32, &'a ContactInfo)> {
crds.table
.values()
@@ -344,9 +352,10 @@ impl CrdsGossipPush {
.filter(|(info, _)| {
info.id != *self_id
&& ContactInfo::is_valid_address(&info.gossip)
&& (self_shred_version == 0
|| info.shred_version == 0
|| self_shred_version == info.shred_version)
&& self_shred_version == info.shred_version
&& gossip_validators.map_or(true, |gossip_validators| {
gossip_validators.contains(&info.id)
})
})
.map(|(info, value)| {
let max_weight = f32::from(u16::max_value()) - 1.0;
@@ -554,7 +563,7 @@ mod test {
)));
assert_eq!(crds.insert(value1.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
assert!(push.active_set.get(&value1.label().pubkey()).is_some());
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@@ -564,7 +573,7 @@ mod test {
assert!(push.active_set.get(&value2.label().pubkey()).is_none());
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
for _ in 0..30 {
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
if push.active_set.get(&value2.label().pubkey()).is_some() {
break;
}
@@ -577,7 +586,7 @@ mod test {
));
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
}
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
assert_eq!(push.active_set.len(), push.num_active);
}
#[test]
@@ -595,7 +604,7 @@ mod test {
crds.insert(peer.clone(), time).unwrap();
stakes.insert(id, i * 100);
}
let mut options = push.push_options(&crds, &Pubkey::default(), 0, &stakes);
let mut options = push.push_options(&crds, &Pubkey::default(), 0, &stakes, None);
assert!(!options.is_empty());
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
// check that the highest stake holder is also the heaviest weighted.
@@ -641,29 +650,85 @@ mod test {
crds.insert(me.clone(), 0).unwrap();
crds.insert(spy.clone(), 0).unwrap();
crds.insert(node_123.clone(), 0).unwrap();
crds.insert(node_456.clone(), 0).unwrap();
crds.insert(node_456, 0).unwrap();
// shred version 123 should ignore 456 nodes
// shred version 123 should ignore nodes with versions 0 and 456
let options = node
.push_options(&crds, &me.label().pubkey(), 123, &stakes)
.push_options(&crds, &me.label().pubkey(), 123, &stakes, None)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 2);
assert!(options.contains(&spy.pubkey()));
assert_eq!(options.len(), 1);
assert!(!options.contains(&spy.pubkey()));
assert!(options.contains(&node_123.pubkey()));
// spy nodes will see all
// spy nodes should not push to people on different shred versions
let options = node
.push_options(&crds, &spy.label().pubkey(), 0, &stakes)
.push_options(&crds, &spy.label().pubkey(), 0, &stakes, None)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 3);
assert!(options.contains(&me.pubkey()));
assert!(options.contains(&node_123.pubkey()));
assert!(options.contains(&node_456.pubkey()));
assert!(options.is_empty());
}
#[test]
fn test_pushes_only_to_allowed() {
let mut crds = Crds::default();
let stakes = HashMap::new();
let node = CrdsGossipPush::default();
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
crds.insert(me.clone(), 0).unwrap();
crds.insert(node_123.clone(), 0).unwrap();
// Empty gossip_validators -- will push to nobody
let mut gossip_validators = HashSet::new();
let options = node.push_options(
&crds,
&me.label().pubkey(),
0,
&stakes,
Some(&gossip_validators),
);
assert!(options.is_empty());
// Unknown pubkey in gossip_validators -- will push to nobody
gossip_validators.insert(Pubkey::new_rand());
let options = node.push_options(
&crds,
&me.label().pubkey(),
0,
&stakes,
Some(&gossip_validators),
);
assert!(options.is_empty());
// node_123 pubkey in gossip_validators -- will push to it
gossip_validators.insert(node_123.pubkey());
let options = node.push_options(
&crds,
&me.label().pubkey(),
0,
&stakes,
Some(&gossip_validators),
);
assert_eq!(options.len(), 1);
assert_eq!(options[0].1.id, node_123.pubkey());
}
#[test]
fn test_new_push_messages() {
let mut crds = Crds::default();
@@ -673,7 +738,7 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
@@ -710,7 +775,7 @@ mod test {
push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), 0),
Ok(None)
);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
// push 3's contact info to 1 and 2 and 3
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@@ -733,7 +798,7 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
@@ -760,7 +825,7 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer, 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 1;

View File

@@ -6,15 +6,22 @@ use rand::{thread_rng, Rng};
use solana_client::thin_client::{create_client, ThinClient};
use solana_perf::recycler::Recycler;
use solana_runtime::bank_forks::BankForks;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::{
pubkey::Pubkey,
signature::{Keypair, Signer},
};
use solana_streamer::streamer;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::{self, sleep, JoinHandle};
use std::time::{Duration, Instant};
use std::{
collections::HashSet,
net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket},
sync::{
atomic::{AtomicBool, Ordering},
mpsc::channel,
{Arc, RwLock},
},
thread::{self, sleep, JoinHandle},
time::{Duration, Instant},
};
pub struct GossipService {
thread_hdls: Vec<JoinHandle<()>>,
@@ -25,6 +32,7 @@ impl GossipService {
cluster_info: &Arc<ClusterInfo>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
gossip_socket: UdpSocket,
gossip_validators: Option<HashSet<Pubkey>>,
exit: &Arc<AtomicBool>,
) -> Self {
let (request_sender, request_receiver) = channel();
@@ -50,7 +58,13 @@ impl GossipService {
response_sender.clone(),
exit,
);
let t_gossip = ClusterInfo::gossip(cluster_info.clone(), bank_forks, response_sender, exit);
let t_gossip = ClusterInfo::gossip(
cluster_info.clone(),
bank_forks,
response_sender,
gossip_validators,
exit,
);
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
Self { thread_hdls }
}
@@ -69,6 +83,7 @@ pub fn discover_cluster(
num_nodes: usize,
) -> std::io::Result<Vec<ContactInfo>> {
discover(
None,
Some(entrypoint),
Some(num_nodes),
Some(30),
@@ -81,6 +96,7 @@ pub fn discover_cluster(
}
pub fn discover(
keypair: Option<Arc<Keypair>>,
entrypoint: Option<&SocketAddr>,
num_nodes: Option<usize>, // num_nodes only counts validators, excludes spy nodes
timeout: Option<u64>,
@@ -89,9 +105,11 @@ pub fn discover(
my_gossip_addr: Option<&SocketAddr>,
my_shred_version: u16,
) -> std::io::Result<(Vec<ContactInfo>, Vec<ContactInfo>)> {
let keypair = keypair.unwrap_or_else(|| Arc::new(Keypair::new()));
let exit = Arc::new(AtomicBool::new(false));
let (gossip_service, ip_echo, spy_ref) =
make_gossip_node(entrypoint, &exit, my_gossip_addr, my_shred_version);
make_gossip_node(keypair, entrypoint, &exit, my_gossip_addr, my_shred_version);
let id = spy_ref.id();
info!("Entrypoint: {:?}", entrypoint);
@@ -245,12 +263,12 @@ fn spy(
/// Makes a spy or gossip node based on whether or not a gossip_addr was passed in
/// Pass in a gossip addr to fully participate in gossip instead of relying on just pulls
fn make_gossip_node(
keypair: Arc<Keypair>,
entrypoint: Option<&SocketAddr>,
exit: &Arc<AtomicBool>,
gossip_addr: Option<&SocketAddr>,
shred_version: u16,
) -> (GossipService, Option<TcpListener>, Arc<ClusterInfo>) {
let keypair = Arc::new(Keypair::new());
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr, shred_version)
} else {
@@ -261,7 +279,7 @@ fn make_gossip_node(
cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
}
let cluster_info = Arc::new(cluster_info);
let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, &exit);
let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, None, &exit);
(gossip_service, ip_echo, cluster_info)
}
@@ -280,7 +298,7 @@ mod tests {
let tn = Node::new_localhost();
let cluster_info = ClusterInfo::new_with_invalid_keypair(tn.info.clone());
let c = Arc::new(cluster_info);
let d = GossipService::new(&c, None, tn.sockets.gossip, &exit);
let d = GossipService::new(&c, None, tn.sockets.gossip, None, &exit);
exit.store(true, Ordering::Relaxed);
d.join().unwrap();
}

View File

@@ -372,15 +372,8 @@ impl HeaviestSubtreeForkChoice {
stake_voted_subtree = fork_info.stake_voted_at;
let mut best_child_stake_voted_subtree = 0;
let mut best_child_slot = slot;
let should_print = fork_info.children.len() > 1;
for &child in &fork_info.children {
let child_stake_voted_subtree = self.stake_voted_subtree(child).unwrap();
if should_print {
info!(
"child: {} of slot: {} has weight: {}",
child, slot, child_stake_voted_subtree
);
}
stake_voted_subtree += child_stake_voted_subtree;
if best_child_slot == slot ||
child_stake_voted_subtree > best_child_stake_voted_subtree ||

View File

@@ -9,9 +9,11 @@
pub mod accounts_background_service;
pub mod accounts_hash_verifier;
pub mod banking_stage;
pub mod bigtable_upload_service;
pub mod broadcast_stage;
pub mod cluster_info_vote_listener;
pub mod commitment_service;
pub mod completed_data_sets_service;
mod deprecated;
pub mod shred_fetch_stage;
#[macro_use]
@@ -19,6 +21,7 @@ pub mod contact_info;
pub mod bank_weight_fork_choice;
pub mod cluster_info;
pub mod cluster_slots;
pub mod cluster_slots_service;
pub mod consensus;
pub mod crds;
pub mod crds_gossip;
@@ -55,6 +58,7 @@ pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;
pub mod rpc_subscriptions;
pub mod send_transaction_service;
pub mod serve_repair;
pub mod serve_repair_service;
pub mod sigverify;
@@ -71,9 +75,6 @@ pub mod vote_stake_tracker;
pub mod weighted_shuffle;
pub mod window_service;
#[macro_use]
extern crate solana_bpf_loader_program;
#[macro_use]
extern crate solana_budget_program;

View File

@@ -18,7 +18,7 @@ pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSuppl
let withdraw_authority_list = withdraw_authority();
let clock = bank.clock();
let stake_accounts = bank.get_program_accounts(Some(&solana_stake_program::id()));
let stake_accounts = bank.get_program_accounts(&solana_stake_program::id());
for (pubkey, account) in stake_accounts.iter() {
let stake_account = StakeState::from(&account).unwrap_or_default();
match stake_account {
@@ -78,6 +78,7 @@ solana_sdk::pubkeys!(
"3o6xgkJ9sTmDeQWyfj3sxwon18fXJB9PV5LDc8sfgR4a",
"GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
"AzVV9ZZDxTgW4wWfJmsG6ytaHpQGSe1yz76Nyy84VbQF",
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
]
);
@@ -96,7 +97,9 @@ solana_sdk::pubkeys!(
mod tests {
use super::*;
use solana_sdk::{
account::Account, epoch_schedule::EpochSchedule, genesis_config::GenesisConfig,
account::Account,
epoch_schedule::EpochSchedule,
genesis_config::{ClusterType, GenesisConfig},
};
use solana_stake_program::stake_state::{Authorized, Lockup, Meta, StakeState};
use std::{collections::BTreeMap, sync::Arc};
@@ -147,6 +150,7 @@ mod tests {
let genesis_config = GenesisConfig {
accounts,
epoch_schedule: EpochSchedule::new(slots_per_epoch),
cluster_type: ClusterType::MainnetBeta,
..GenesisConfig::default()
};
let mut bank = Arc::new(Bank::new(&genesis_config));

View File

@@ -137,18 +137,13 @@ impl PohRecorder {
self.ticks_per_slot
}
fn received_any_previous_leader_data(&self, slot: Slot) -> bool {
(slot.saturating_sub(NUM_CONSECUTIVE_LEADER_SLOTS)..slot).any(|i| {
// Check if we have received any data in previous leader's slots
if let Ok(slot_meta) = self.blockstore.meta(i as Slot) {
if let Some(slot_meta) = slot_meta {
slot_meta.received > 0
} else {
false
}
} else {
false
}
fn is_same_fork_as_previous_leader(&self, slot: Slot) -> bool {
(slot.saturating_sub(NUM_CONSECUTIVE_LEADER_SLOTS)..slot).any(|slot| {
// Check if the last slot PoH reset to was any of the
// previous leader's slots.
// If so, PoH is currently building on the previous leader's blocks.
// If not, PoH is building on a different fork.
slot == self.start_slot
})
}
@@ -167,13 +162,13 @@ impl PohRecorder {
let target_tick_height = leader_first_tick_height.saturating_sub(1);
let ideal_target_tick_height = target_tick_height.saturating_sub(self.grace_ticks);
let current_slot = self.tick_height / self.ticks_per_slot;
// we've approached target_tick_height OR poh was reset to run immediately
// We've approached target_tick_height OR poh was reset to run immediately
// Or, PoH is building on our own previous slot or on a different fork than the previous leader's, so ignore grace ticks
self.tick_height >= target_tick_height
|| self.start_tick_height + self.grace_ticks == leader_first_tick_height
|| (self.tick_height >= ideal_target_tick_height
&& (self.prev_slot_was_mine(current_slot)
|| !self.received_any_previous_leader_data(current_slot)))
|| !self.is_same_fork_as_previous_leader(current_slot)))
}
/// returns if leader slot has been reached, how many grace ticks were afforded,
@@ -1159,28 +1154,29 @@ mod tests {
}
poh_recorder.grace_ticks = grace_ticks;
// True, as previous leader did not transmit in its slots
assert_eq!(
poh_recorder.reached_leader_tick(new_tick_height + grace_ticks),
true
);
let mut parent_meta = SlotMeta::default();
parent_meta.received = 1;
poh_recorder
.blockstore
.put_meta_bytes(0, &serialize(&parent_meta).unwrap())
.unwrap();
// False, because the Poh was reset on slot 0, which
// is a block produced by the previous leader, so a grace
// period must be given
assert!(!poh_recorder.reached_leader_tick(new_tick_height + grace_ticks));
// False, as previous leader transmitted in one of its recent slots
// and grace ticks have not expired
assert_eq!(
poh_recorder.reached_leader_tick(new_tick_height + grace_ticks),
false
);
// Tick `NUM_CONSECUTIVE_LEADER_SLOTS` more times
let new_tick_height = 2 * NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot();
for _ in 0..new_tick_height {
poh_recorder.tick();
}
// True, because
// 1) the Poh was reset on slot 0
// 2) Our slot starts at 2 * NUM_CONSECUTIVE_LEADER_SLOTS, which means
// none of the previous leader's `NUM_CONSECUTIVE_LEADER_SLOTS` were slots
// this Poh built on (previous leader was on different fork). Thus, skip the
// grace period.
assert!(poh_recorder.reached_leader_tick(new_tick_height + grace_ticks));
// From the bootstrap validator's perspective, it should have reached
// the tick
// the tick because the previous slot was also its own slot (all slots
// belong to the bootstrap leader b/c it's the only staked node!), and
// validators don't give grace periods if previous slot was also their own.
poh_recorder.id = bootstrap_validator_id;
assert!(poh_recorder.reached_leader_tick(new_tick_height + grace_ticks));
}
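The rewrite above swaps the old heuristic ("did the previous leader transmit anything?") for a fork check ("was PoH last reset onto one of the previous leader's slots?"). A dependency-free sketch of the resulting reached_leader_tick decision, with the recorder state flattened into plain fields; the prev_slot_was_mine branch of the real condition is omitted here for brevity:

const NUM_CONSECUTIVE_LEADER_SLOTS: u64 = 4;

struct Recorder {
    tick_height: u64,
    ticks_per_slot: u64,
    start_tick_height: u64,
    start_slot: u64, // slot PoH was last reset onto
    grace_ticks: u64,
}

impl Recorder {
    // True if PoH was reset onto one of the previous leader's slots,
    // i.e. this node is extending the previous leader's fork.
    fn is_same_fork_as_previous_leader(&self, slot: u64) -> bool {
        (slot.saturating_sub(NUM_CONSECUTIVE_LEADER_SLOTS)..slot)
            .any(|s| s == self.start_slot)
    }

    fn reached_leader_tick(&self, leader_first_tick_height: u64) -> bool {
        let target_tick_height = leader_first_tick_height.saturating_sub(1);
        let ideal_target_tick_height = target_tick_height.saturating_sub(self.grace_ticks);
        let current_slot = self.tick_height / self.ticks_per_slot;
        // Produce once the target tick is reached, PoH was reset to run
        // immediately, or PoH is on a different fork than the previous
        // leader, in which case grace ticks are skipped.
        self.tick_height >= target_tick_height
            || self.start_tick_height + self.grace_ticks == leader_first_tick_height
            || (self.tick_height >= ideal_target_tick_height
                && !self.is_same_fork_as_previous_leader(current_slot))
    }
}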


@@ -168,7 +168,7 @@ impl ForkProgress {
num_dropped_blocks_on_fork: u64,
) -> Self {
let validator_fork_info = {
if bank.collector_id() == my_pubkey && bank.slot() > 0 {
if bank.collector_id() == my_pubkey {
let stake = bank.epoch_vote_account_stake(voting_pubkey);
Some(ValidatorStakeInfo::new(
*voting_pubkey,


@@ -11,14 +11,14 @@ use crate::{
};
use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender};
use solana_ledger::{
blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta},
blockstore::{Blockstore, SlotMeta},
shred::Nonce,
};
use solana_measure::measure::Measure;
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE};
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp};
use std::{
collections::HashMap,
collections::{HashMap, HashSet},
iter::Iterator,
net::SocketAddr,
net::UdpSocket,
@@ -78,8 +78,6 @@ pub struct RepairTiming {
pub set_root_elapsed: u64,
pub get_votes_elapsed: u64,
pub add_votes_elapsed: u64,
pub lowest_slot_elapsed: u64,
pub update_completed_slots_elapsed: u64,
pub get_best_orphans_elapsed: u64,
pub get_best_shreds_elapsed: u64,
pub send_repairs_elapsed: u64,
@@ -91,15 +89,11 @@ impl RepairTiming {
set_root_elapsed: u64,
get_votes_elapsed: u64,
add_votes_elapsed: u64,
lowest_slot_elapsed: u64,
update_completed_slots_elapsed: u64,
send_repairs_elapsed: u64,
) {
self.set_root_elapsed += set_root_elapsed;
self.get_votes_elapsed += get_votes_elapsed;
self.add_votes_elapsed += add_votes_elapsed;
self.lowest_slot_elapsed += lowest_slot_elapsed;
self.update_completed_slots_elapsed += update_completed_slots_elapsed;
self.send_repairs_elapsed += send_repairs_elapsed;
}
}
@@ -112,9 +106,9 @@ pub const MAX_ORPHANS: usize = 5;
pub struct RepairInfo {
pub bank_forks: Arc<RwLock<BankForks>>,
pub completed_slots_receiver: CompletedSlotsReceiver,
pub epoch_schedule: EpochSchedule,
pub duplicate_slots_reset_sender: DuplicateSlotsResetSender,
pub repair_validators: Option<HashSet<Pubkey>>,
}
pub struct RepairSlotRange {
@@ -181,18 +175,12 @@ impl RepairService {
let mut repair_weight = RepairWeight::new(repair_info.bank_forks.read().unwrap().root());
let serve_repair = ServeRepair::new(cluster_info.clone());
let id = cluster_info.id();
Self::initialize_lowest_slot(id, blockstore, &cluster_info);
let mut repair_stats = RepairStats::default();
let mut repair_timing = RepairTiming::default();
let mut last_stats = Instant::now();
let duplicate_slot_repair_statuses: HashMap<Slot, DuplicateSlotRepairStatus> =
HashMap::new();
Self::initialize_epoch_slots(
blockstore,
&cluster_info,
&repair_info.completed_slots_receiver,
);
loop {
if exit.load(Ordering::Relaxed) {
break;
@@ -201,8 +189,6 @@ impl RepairService {
let mut set_root_elapsed;
let mut get_votes_elapsed;
let mut add_votes_elapsed;
let mut lowest_slot_elapsed;
let mut update_completed_slots_elapsed;
let repairs = {
let root_bank = repair_info.bank_forks.read().unwrap().root_bank().clone();
let new_root = root_bank.slot();
@@ -235,15 +221,6 @@ impl RepairService {
root_bank.epoch_schedule(),
);
add_votes_elapsed.stop();
lowest_slot_elapsed = Measure::start("lowest_slot_elapsed");
let lowest_slot = blockstore.lowest_slot();
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
lowest_slot_elapsed.stop();
update_completed_slots_elapsed = Measure::start("update_completed_slots_elapsed");
Self::update_completed_slots(&repair_info.completed_slots_receiver, &cluster_info);
cluster_slots.update(new_root, &cluster_info, &repair_info.bank_forks);
update_completed_slots_elapsed.stop();
/*let new_duplicate_slots = Self::find_new_duplicate_slots(
&duplicate_slot_repair_statuses,
blockstore,
@@ -258,6 +235,7 @@ impl RepairService {
blockstore,
&serve_repair,
&repair_info.duplicate_slots_reset_sender,
&repair_info.repair_validators,
);
Self::generate_and_send_duplicate_repairs(
&mut duplicate_slot_repair_statuses,
@@ -266,6 +244,7 @@ impl RepairService {
&serve_repair,
&mut repair_stats,
&repair_socket,
&repair_info.repair_validators,
);*/
repair_weight.get_best_weighted_repairs(
@@ -287,6 +266,7 @@ impl RepairService {
repair_request,
&mut cache,
&mut repair_stats,
&repair_info.repair_validators,
) {
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
info!("{} repair req send_to({}) error {:?}", id, to, e);
@@ -299,8 +279,6 @@ impl RepairService {
set_root_elapsed.as_us(),
get_votes_elapsed.as_us(),
add_votes_elapsed.as_us(),
lowest_slot_elapsed.as_us(),
update_completed_slots_elapsed.as_us(),
send_repairs_elapsed.as_us(),
);
@@ -335,16 +313,6 @@ impl RepairService {
repair_timing.get_best_shreds_elapsed,
i64
),
(
"lowest-slot-elapsed",
repair_timing.lowest_slot_elapsed,
i64
),
(
"update-completed-slots-elapsed",
repair_timing.update_completed_slots_elapsed,
i64
),
(
"send-repairs-elapsed",
repair_timing.send_repairs_elapsed,
@@ -480,9 +448,16 @@ impl RepairService {
serve_repair: &ServeRepair,
repair_stats: &mut RepairStats,
repair_socket: &UdpSocket,
repair_validators: &Option<HashSet<Pubkey>>,
) {
duplicate_slot_repair_statuses.retain(|slot, status| {
Self::update_duplicate_slot_repair_addr(*slot, status, cluster_slots, serve_repair);
Self::update_duplicate_slot_repair_addr(
*slot,
status,
cluster_slots,
serve_repair,
repair_validators,
);
if let Some((repair_pubkey, repair_addr)) = status.repair_pubkey_and_addr {
let repairs = Self::generate_duplicate_repairs_for_slot(&blockstore, *slot);
@@ -535,13 +510,17 @@ impl RepairService {
status: &mut DuplicateSlotRepairStatus,
cluster_slots: &ClusterSlots,
serve_repair: &ServeRepair,
repair_validators: &Option<HashSet<Pubkey>>,
) {
let now = timestamp();
if status.repair_pubkey_and_addr.is_none()
|| now.saturating_sub(status.start) >= MAX_DUPLICATE_WAIT_MS as u64
{
let repair_pubkey_and_addr =
serve_repair.repair_request_duplicate_compute_best_peer(slot, cluster_slots);
let repair_pubkey_and_addr = serve_repair.repair_request_duplicate_compute_best_peer(
slot,
cluster_slots,
repair_validators,
);
status.repair_pubkey_and_addr = repair_pubkey_and_addr.ok();
status.start = timestamp();
}
@@ -556,6 +535,7 @@ impl RepairService {
blockstore: &Blockstore,
serve_repair: &ServeRepair,
duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
repair_validators: &Option<HashSet<Pubkey>>,
) {
for slot in new_duplicate_slots {
warn!(
@@ -581,7 +561,7 @@ impl RepairService {
// Mark this slot as special repair, try to download from single
// validator to avoid corruption
let repair_pubkey_and_addr = serve_repair
.repair_request_duplicate_compute_best_peer(*slot, cluster_slots)
.repair_request_duplicate_compute_best_peer(*slot, cluster_slots, repair_validators)
.ok();
let new_duplicate_slot_repair_status = DuplicateSlotRepairStatus {
start: timestamp(),
@@ -650,59 +630,6 @@ impl RepairService {
.collect()
}
fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) {
// Safe to set into gossip because by this time, the leader schedule cache should
// also be updated with the latest root (done in blockstore_processor) and thus
// will provide a schedule to window_service for any incoming shreds up to the
// last_confirmed_epoch.
cluster_info.push_lowest_slot(id, blockstore.lowest_slot());
}
fn update_completed_slots(
completed_slots_receiver: &CompletedSlotsReceiver,
cluster_info: &ClusterInfo,
) {
let mut slots: Vec<Slot> = vec![];
while let Ok(mut more) = completed_slots_receiver.try_recv() {
slots.append(&mut more);
}
slots.sort();
if !slots.is_empty() {
cluster_info.push_epoch_slots(&slots);
}
}
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &ClusterInfo) {
cluster_info.push_lowest_slot(*id, lowest_slot);
}
fn initialize_epoch_slots(
blockstore: &Blockstore,
cluster_info: &ClusterInfo,
completed_slots_receiver: &CompletedSlotsReceiver,
) {
let root = blockstore.last_root();
let mut slots: Vec<_> = blockstore
.live_slots_iterator(root)
.filter_map(|(slot, slot_meta)| {
if slot_meta.is_full() {
Some(slot)
} else {
None
}
})
.collect();
while let Ok(mut more) = completed_slots_receiver.try_recv() {
slots.append(&mut more);
}
slots.sort();
slots.dedup();
if !slots.is_empty() {
cluster_info.push_epoch_slots(&slots);
}
}
pub fn join(self) -> thread::Result<()> {
self.t_repair.join()
}
@@ -980,19 +907,6 @@ mod test {
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_update_lowest_slot() {
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
RepairService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
let lowest = cluster_info
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
lowest_slot.clone()
})
.unwrap();
assert_eq!(lowest.lowest, 5);
}
#[test]
pub fn test_generate_duplicate_repairs_for_slot() {
let blockstore_path = get_tmp_ledger_path!();
@@ -1055,6 +969,7 @@ mod test {
&serve_repair,
&mut RepairStats::default(),
&UdpSocket::bind("0.0.0.0:0").unwrap(),
&None,
);
assert!(duplicate_slot_repair_statuses
.get(&dead_slot)
@@ -1078,6 +993,7 @@ mod test {
&serve_repair,
&mut RepairStats::default(),
&UdpSocket::bind("0.0.0.0:0").unwrap(),
&None,
);
assert_eq!(duplicate_slot_repair_statuses.len(), 1);
assert!(duplicate_slot_repair_statuses.get(&dead_slot).is_some());
@@ -1094,6 +1010,7 @@ mod test {
&serve_repair,
&mut RepairStats::default(),
&UdpSocket::bind("0.0.0.0:0").unwrap(),
&None,
);
assert!(duplicate_slot_repair_statuses.is_empty());
}
@@ -1128,6 +1045,7 @@ mod test {
&mut duplicate_status,
&cluster_slots,
&serve_repair,
&None,
);
assert_eq!(duplicate_status.repair_pubkey_and_addr, dummy_addr);
@@ -1141,6 +1059,7 @@ mod test {
&mut duplicate_status,
&cluster_slots,
&serve_repair,
&None,
);
assert!(duplicate_status.repair_pubkey_and_addr.is_some());
@@ -1154,6 +1073,7 @@ mod test {
&mut duplicate_status,
&cluster_slots,
&serve_repair,
&None,
);
assert_ne!(duplicate_status.repair_pubkey_and_addr, dummy_addr);
}
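Most of this file's changes thread an optional repair_validators allow-list down to every place a repair peer is chosen. A small sketch of that selection pattern using only std types (the peer struct and key type are simplified placeholders). When the option is None the behavior is unchanged; when it is Some, only the listed identities are ever asked for repairs:

use std::collections::HashSet;
use std::net::SocketAddr;

type Pubkey = [u8; 32];

#[derive(Clone)]
struct RepairPeer {
    id: Pubkey,
    addr: SocketAddr,
}

/// Keep every peer when no allow-list is configured; otherwise keep only
/// peers whose identity is in the configured set.
fn eligible_repair_peers(
    peers: &[RepairPeer],
    repair_validators: &Option<HashSet<Pubkey>>,
) -> Vec<RepairPeer> {
    peers
        .iter()
        .filter(|peer| {
            repair_validators
                .as_ref()
                .map_or(true, |allowed| allowed.contains(&peer.id))
        })
        .cloned()
        .collect()
}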
@@ -1210,6 +1130,7 @@ mod test {
&blockstore,
&serve_repair,
&reset_sender,
&None,
);
// Blockstore should have been cleared


@@ -170,11 +170,20 @@ impl RepairWeight {
if new_root == self.root {
return;
}
// Root slot of the tree that contains `new_root`, if one exists
let new_root_tree_root = self.slot_to_tree.get(&new_root).cloned();
// Purge outdated trees from `self.trees`
let subtrees_to_purge: Vec<_> = self
.trees
.keys()
.filter(|subtree_root| **subtree_root < new_root && **subtree_root != self.root)
.filter(|subtree_root| {
**subtree_root < new_root
&& new_root_tree_root
.map(|new_root_tree_root| **subtree_root != new_root_tree_root)
.unwrap_or(true)
})
.cloned()
.collect();
for subtree_root in subtrees_to_purge {
@@ -188,25 +197,26 @@ impl RepairWeight {
);
}
let mut root_tree = self.trees.remove(&self.root).expect("root tree must exist");
if let Some(new_root_tree_root) = new_root_tree_root {
let mut new_root_tree = self
.trees
.remove(&new_root_tree_root)
.expect("Found slot root earlier in self.slot_to_tree, tree must exist");
// Find all descendants of `self.root` that are not reachable from `new_root`.
// These are exactly the unrooted slots, which can be purged and added to
// `self.unrooted_slots`.
let unrooted_slots = new_root_tree.subtree_diff(new_root_tree_root, new_root);
self.remove_tree_slots(unrooted_slots.iter(), new_root);
// Find all descendants of `self.root` that are not reachable from `new_root`.
// These are exactly the unrooted slots, which can be purged and added to
// `self.unrooted_slots`.
let unrooted_slots = root_tree.subtree_diff(self.root, new_root);
self.remove_tree_slots(unrooted_slots.iter(), new_root);
new_root_tree.set_root(new_root);
if !root_tree.contains_slot(new_root) {
// If the current `root_tree` does not contain the new root, we can
// just make a new tree for the new root
self.insert_new_tree(new_root);
} else {
root_tree.set_root(new_root);
// Update `self.slot_to_tree` to reflect new root
self.rename_tree_root(&root_tree, new_root);
self.rename_tree_root(&new_root_tree, new_root);
// Insert the tree for the new root
self.trees.insert(new_root, root_tree);
self.trees.insert(new_root, new_root_tree);
} else {
self.insert_new_tree(new_root);
}
// Purge `self.unrooted_slots` of slots less than `new_root` as we know any
@@ -501,6 +511,7 @@ mod test {
use super::*;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_runtime::{bank::Bank, bank_utils};
use solana_sdk::hash::Hash;
use trees::tr;
#[test]
@@ -648,7 +659,7 @@ mod test {
assert_eq!(repair_weight.trees.get(&8).unwrap().ancestors(10), vec![8]);
// Connect orphan back to main fork
blockstore.add_tree(tr(6) / (tr(8)), true, true);
blockstore.add_tree(tr(6) / (tr(8)), true, true, 2, Hash::default());
assert_eq!(
AncestorIterator::new(8, &blockstore).collect::<Vec<_>>(),
vec![6, 5, 3, 1, 0]
@@ -732,8 +743,8 @@ mod test {
assert!(repair_weight.trees.contains_key(&20));
// Resolve orphans in blockstore
blockstore.add_tree(tr(6) / (tr(8)), true, true);
blockstore.add_tree(tr(11) / (tr(20)), true, true);
blockstore.add_tree(tr(6) / (tr(8)), true, true, 2, Hash::default());
blockstore.add_tree(tr(11) / (tr(20)), true, true, 2, Hash::default());
// Call `update_orphan_ancestors` to resolve orphan
repair_weight.update_orphan_ancestors(
&blockstore,
@@ -843,8 +854,8 @@ mod test {
// Resolve the orphans, should now return no
// orphans
repairs = vec![];
blockstore.add_tree(tr(6) / (tr(8)), true, true);
blockstore.add_tree(tr(11) / (tr(20)), true, true);
blockstore.add_tree(tr(6) / (tr(8)), true, true, 2, Hash::default());
blockstore.add_tree(tr(11) / (tr(20)), true, true, 2, Hash::default());
repair_weight.get_best_orphans(
&blockstore,
&mut repairs,
@@ -879,7 +890,7 @@ mod test {
// orphan in the `trees` map, we should search for
// exactly one more of the remaining two
let mut repairs = vec![];
blockstore.add_tree(tr(100) / (tr(101)), true, true);
blockstore.add_tree(tr(100) / (tr(101)), true, true, 2, Hash::default());
repair_weight.get_best_orphans(
&blockstore,
&mut repairs,
@@ -954,13 +965,34 @@ mod test {
}
}
#[test]
fn test_set_root_existing_non_root_tree() {
let (_, _, mut repair_weight) = setup_orphan_repair_weight();
// Set root in an existing orphan branch, slot 10
repair_weight.set_root(10);
check_old_root_purged_verify_new_root(0, 10, &repair_weight);
// Should purge old root tree [0, 6]
for slot in 0..6 {
assert!(!repair_weight.slot_to_tree.contains_key(&slot));
}
// Should purge orphan parent as well
assert!(!repair_weight.slot_to_tree.contains_key(&8));
// Other higher orphan branch rooted at slot `20` remains unchanged
assert_eq!(repair_weight.trees.get(&20).unwrap().root(), 20);
assert_eq!(*repair_weight.slot_to_tree.get(&20).unwrap(), 20);
}
#[test]
fn test_set_root_check_unrooted_slots() {
let (blockstore, bank, mut repair_weight) = setup_orphan_repair_weight();
// Chain orphan 8 back to the main fork, but don't
// touch orphan 20
blockstore.add_tree(tr(4) / (tr(8)), true, true);
blockstore.add_tree(tr(4) / (tr(8)), true, true, 2, Hash::default());
// Call `update_orphan_ancestors` to resolve orphan
repair_weight.update_orphan_ancestors(
@@ -1030,10 +1062,10 @@ mod test {
}
// Chain orphan 8 back to slot `old_parent`
blockstore.add_tree(tr(*old_parent) / (tr(8)), true, true);
blockstore.add_tree(tr(*old_parent) / (tr(8)), true, true, 2, Hash::default());
// Chain orphan 20 back to orphan 8
blockstore.add_tree(tr(8) / (tr(20)), true, true);
blockstore.add_tree(tr(8) / (tr(20)), true, true, 2, Hash::default());
// Call `update_orphan_ancestors` to resolve orphan
repair_weight.update_orphan_ancestors(
@@ -1058,7 +1090,13 @@ mod test {
// Add a vote that chains back to `old_parent`, should be purged
let new_vote_slot = 100;
blockstore.add_tree(tr(*old_parent) / tr(new_vote_slot), true, true);
blockstore.add_tree(
tr(*old_parent) / tr(new_vote_slot),
true,
true,
2,
Hash::default(),
);
repair_weight.add_votes(
&blockstore,
vec![(new_vote_slot, vec![Pubkey::default()])].into_iter(),
@@ -1106,7 +1144,7 @@ mod test {
);
// Ancestors of slot 31 are [30], with no existing subtree
blockstore.add_tree(tr(30) / (tr(31)), true, true);
blockstore.add_tree(tr(30) / (tr(31)), true, true, 2, Hash::default());
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 31),
(vec![30].into_iter().collect::<VecDeque<_>>(), None)
@@ -1124,7 +1162,7 @@ mod test {
// Chain orphan 8 back to slot 4 on a different fork, ancestor search
// should not return ancestors earlier than the root
blockstore.add_tree(tr(4) / (tr(8)), true, true);
blockstore.add_tree(tr(4) / (tr(8)), true, true, 2, Hash::default());
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 8),
(vec![4].into_iter().collect::<VecDeque<_>>(), None)
@@ -1211,8 +1249,8 @@ mod test {
*/
let blockstore = setup_forks();
blockstore.add_tree(tr(8) / (tr(10) / (tr(11))), true, true);
blockstore.add_tree(tr(20) / (tr(22) / (tr(23))), true, true);
blockstore.add_tree(tr(8) / (tr(10) / (tr(11))), true, true, 2, Hash::default());
blockstore.add_tree(tr(20) / (tr(22) / (tr(23))), true, true, 2, Hash::default());
assert!(blockstore.orphan(8).unwrap().is_some());
blockstore
}
@@ -1234,7 +1272,7 @@ mod test {
let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5) / (tr(6)))));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
blockstore.add_tree(forks, false, true);
blockstore.add_tree(forks, false, true, 2, Hash::default());
blockstore
}
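The new set_root path first resolves which tree contains new_root and purges only the other stale trees, rather than everything rooted below new_root. A dependency-free sketch of that purge rule over a map of subtree roots (tree contents are elided and the slot-to-tree lookup is a plain map here):

use std::collections::HashMap;

type Slot = u64;

/// Subtree roots to purge when re-rooting at `new_root`: every tree rooted
/// below `new_root`, except the tree that contains `new_root` itself.
fn subtrees_to_purge(
    trees: &HashMap<Slot, Vec<Slot>>,   // subtree root -> slots in that subtree
    slot_to_tree: &HashMap<Slot, Slot>, // slot -> root of the subtree containing it
    new_root: Slot,
) -> Vec<Slot> {
    let new_root_tree_root = slot_to_tree.get(&new_root).copied();
    trees
        .keys()
        .filter(|subtree_root| {
            **subtree_root < new_root
                && new_root_tree_root
                    .map(|root| **subtree_root != root)
                    .unwrap_or(true)
        })
        .copied()
        .collect()
}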
}


@@ -150,6 +150,7 @@ pub mod test {
use super::*;
use solana_ledger::{get_tmp_ledger_path, shred::Shred};
use solana_runtime::bank_utils;
use solana_sdk::hash::Hash;
use trees::tr;
#[test]
@@ -246,7 +247,13 @@ pub mod test {
repairs = vec![];
let best_overall_slot = heaviest_subtree_fork_choice.best_overall_slot();
assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 4);
blockstore.add_tree(tr(best_overall_slot) / (tr(6) / tr(7)), true, false);
blockstore.add_tree(
tr(best_overall_slot) / (tr(6) / tr(7)),
true,
false,
2,
Hash::default(),
);
get_best_repair_shreds(
&heaviest_subtree_fork_choice,
&blockstore,
@@ -300,7 +307,7 @@ pub mod test {
// Adding incomplete children with higher weighted parents, even if
// the parents are complete should still be repaired
repairs = vec![];
blockstore.add_tree(tr(2) / (tr(8)), true, false);
blockstore.add_tree(tr(2) / (tr(8)), true, false, 2, Hash::default());
get_best_repair_shreds(
&heaviest_subtree_fork_choice,
&blockstore,
@@ -322,7 +329,7 @@ pub mod test {
let (blockstore, heaviest_subtree_fork_choice) = setup_forks();
// Add a branch to slot 2, make sure it doesn't repair child
// 4 again when the Unvisited(2) event happens
blockstore.add_tree(tr(2) / (tr(6) / tr(7)), true, false);
blockstore.add_tree(tr(2) / (tr(6) / tr(7)), true, false, 2, Hash::default());
let mut repairs = vec![];
get_best_repair_shreds(
&heaviest_subtree_fork_choice,
@@ -368,7 +375,7 @@ pub mod test {
// Adding slot 2 to ignore should not remove its unexplored children from
// the repair set
repairs = vec![];
blockstore.add_tree(tr(2) / (tr(6) / tr(7)), true, false);
blockstore.add_tree(tr(2) / (tr(6) / tr(7)), true, false, 2, Hash::default());
ignore_set.insert(2);
get_best_repair_shreds(
&heaviest_subtree_fork_choice,
@@ -420,7 +427,7 @@ pub mod test {
let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5))));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
blockstore.add_tree(forks.clone(), false, false);
blockstore.add_tree(forks.clone(), false, false, 2, Hash::default());
(blockstore, HeaviestSubtreeForkChoice::new_from_tree(forks))
}


@@ -21,9 +21,7 @@ use crate::{
use solana_ledger::{
block_error::BlockError,
blockstore::Blockstore,
blockstore_processor::{
self, BlockstoreProcessorError, ReplayVotesSender, TransactionStatusSender,
},
blockstore_processor::{self, BlockstoreProcessorError, TransactionStatusSender},
entry::VerifyRecyclers,
leader_schedule_cache::LeaderScheduleCache,
};
@@ -31,11 +29,11 @@ use solana_measure::{measure::Measure, thread_mem_usage};
use solana_metrics::inc_new_counter_info;
use solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
snapshot_package::AccountsPackageSender,
snapshot_package::AccountsPackageSender, vote_sender_types::ReplayVoteSender,
};
use solana_sdk::{
clock::{Slot, NUM_CONSECUTIVE_LEADER_SLOTS},
genesis_config::OperatingMode,
genesis_config::ClusterType,
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
@@ -223,7 +221,7 @@ impl ReplayStage {
cluster_slots: Arc<ClusterSlots>,
retransmit_slots_sender: RetransmitSlotsSender,
duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver,
replay_votes_sender: ReplayVotesSender,
replay_vote_sender: ReplayVoteSender,
) -> Self {
let ReplayStageConfig {
my_pubkey,
@@ -283,7 +281,7 @@ impl ReplayStage {
let root_bank = bank_forks.read().unwrap().root_bank().clone();
let root = root_bank.slot();
let unlock_heaviest_subtree_fork_choice_slot =
Self::get_unlock_heaviest_subtree_fork_choice(root_bank.operating_mode());
Self::get_unlock_heaviest_subtree_fork_choice(root_bank.cluster_type());
let mut heaviest_subtree_fork_choice =
HeaviestSubtreeForkChoice::new_from_frozen_banks(root, &frozen_banks);
let mut bank_weight_fork_choice = BankWeightForkChoice::default();
@@ -344,7 +342,7 @@ impl ReplayStage {
&verify_recyclers,
&mut heaviest_subtree_fork_choice,
&subscriptions,
&replay_votes_sender,
&replay_vote_sender,
);
replay_active_banks_time.stop();
Self::report_memory(&allocated, "replay_active_banks", start);
@@ -940,7 +938,7 @@ impl ReplayStage {
blockstore: &Blockstore,
bank_progress: &mut ForkProgress,
transaction_status_sender: Option<TransactionStatusSender>,
replay_votes_sender: &ReplayVotesSender,
replay_vote_sender: &ReplayVoteSender,
verify_recyclers: &VerifyRecyclers,
) -> result::Result<usize, BlockstoreProcessorError> {
let tx_count_before = bank_progress.replay_progress.num_txs;
@@ -951,7 +949,7 @@ impl ReplayStage {
&mut bank_progress.replay_progress,
false,
transaction_status_sender,
Some(replay_votes_sender),
Some(replay_vote_sender),
None,
verify_recyclers,
);
@@ -1131,7 +1129,7 @@ impl ReplayStage {
let node_keypair = cluster_info.keypair.clone();
// Send our last few votes along with the new one
let vote_ix = if bank.slot() > Self::get_unlock_switch_vote_slot(bank.operating_mode()) {
let vote_ix = if bank.slot() > Self::get_unlock_switch_vote_slot(bank.cluster_type()) {
switch_fork_decision
.to_vote_instruction(
vote,
@@ -1214,7 +1212,7 @@ impl ReplayStage {
verify_recyclers: &VerifyRecyclers,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
subscriptions: &Arc<RpcSubscriptions>,
replay_votes_sender: &ReplayVotesSender,
replay_vote_sender: &ReplayVoteSender,
) -> bool {
let mut did_complete_bank = false;
let mut tx_count = 0;
@@ -1260,7 +1258,7 @@ impl ReplayStage {
&blockstore,
bank_progress,
transaction_status_sender.clone(),
replay_votes_sender,
replay_vote_sender,
verify_recyclers,
);
match replay_result {
@@ -1585,13 +1583,13 @@ impl ReplayStage {
loop {
// These cases mean confirmation of propagation on any earlier
// leader blocks must have been reached
if current_leader_slot == None || current_leader_slot.unwrap() <= root {
if current_leader_slot == None || current_leader_slot.unwrap() < root {
break;
}
let leader_propagated_stats = progress
.get_propagated_stats_mut(current_leader_slot.unwrap())
.expect("current_leader_slot > root, so must exist in the progress map");
.expect("current_leader_slot >= root, so must exist in the progress map");
// If a descendant has reached propagation threshold, then
// all its ancestor banks have also reached propagation
@@ -1857,23 +1855,25 @@ impl ReplayStage {
}
}
pub fn get_unlock_switch_vote_slot(operating_mode: OperatingMode) -> Slot {
match operating_mode {
OperatingMode::Development => 0,
// 400_000 slots into epoch 61
OperatingMode::Stable => 26_752_000,
pub fn get_unlock_switch_vote_slot(cluster_type: ClusterType) -> Slot {
match cluster_type {
ClusterType::Development => 0,
ClusterType::Devnet => 0,
// Epoch 63
OperatingMode::Preview => 21_692_256,
ClusterType::Testnet => 21_692_256,
// 400_000 slots into epoch 61
ClusterType::MainnetBeta => 26_752_000,
}
}
pub fn get_unlock_heaviest_subtree_fork_choice(operating_mode: OperatingMode) -> Slot {
match operating_mode {
OperatingMode::Development => 0,
// 400_000 slots into epoch 61
OperatingMode::Stable => 26_752_000,
pub fn get_unlock_heaviest_subtree_fork_choice(cluster_type: ClusterType) -> Slot {
match cluster_type {
ClusterType::Development => 0,
ClusterType::Devnet => 0,
// Epoch 63
OperatingMode::Preview => 21_692_256,
ClusterType::Testnet => 21_692_256,
// 400_000 slots into epoch 61
ClusterType::MainnetBeta => 26_752_000,
}
}
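The OperatingMode to ClusterType rename keeps the underlying pattern intact: each behavior change is pinned to a fixed activation slot per cluster, and callers compare the current slot against it. A self-contained sketch of that gating; the enum variants and slot numbers mirror the diff, while the helper names are illustrative:

type Slot = u64;

#[derive(Clone, Copy)]
enum ClusterType {
    Development,
    Devnet,
    Testnet,
    MainnetBeta,
}

fn unlock_switch_vote_slot(cluster_type: ClusterType) -> Slot {
    match cluster_type {
        ClusterType::Development | ClusterType::Devnet => 0,
        // Epoch 63
        ClusterType::Testnet => 21_692_256,
        // 400_000 slots into epoch 61
        ClusterType::MainnetBeta => 26_752_000,
    }
}

// Callers gate the new behavior on the current slot having passed the
// cluster-specific activation slot.
fn switch_vote_enabled(current_slot: Slot, cluster_type: ClusterType) -> bool {
    current_slot > unlock_switch_vote_slot(cluster_type)
}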
@@ -2417,7 +2417,7 @@ pub(crate) mod tests {
F: Fn(&Keypair, Arc<Bank>) -> Vec<Shred>,
{
let ledger_path = get_tmp_ledger_path!();
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let res = {
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
@@ -2442,8 +2442,8 @@ pub(crate) mod tests {
&blockstore,
&mut bank0_progress,
None,
&replay_votes_sender,
&VerifyRecyclers::default(),
&replay_vote_sender,
&&VerifyRecyclers::default(),
);
// Check that the erroring bank was marked as dead in the progress map
@@ -2606,7 +2606,7 @@ pub(crate) mod tests {
blockstore.set_roots(&[slot]).unwrap();
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver,
blockstore,
@@ -2620,7 +2620,7 @@ pub(crate) mod tests {
&entries,
true,
Some(transaction_status_sender),
Some(&replay_votes_sender),
Some(&replay_vote_sender),
);
transaction_status_service.join().unwrap();
@@ -3278,6 +3278,10 @@ pub(crate) mod tests {
let stake_per_validator = 10_000;
let (mut bank_forks, mut progress_map, _) =
initialize_state(&keypairs, stake_per_validator);
progress_map
.get_propagated_stats_mut(0)
.unwrap()
.is_leader_slot = true;
bank_forks.set_root(0, &None, None);
let total_epoch_stake = bank_forks.root_bank().total_epoch_stake();
@@ -3355,6 +3359,10 @@ pub(crate) mod tests {
let stake_per_validator = 10_000;
let (mut bank_forks, mut progress_map, _) =
initialize_state(&keypairs, stake_per_validator);
progress_map
.get_propagated_stats_mut(0)
.unwrap()
.is_leader_slot = true;
bank_forks.set_root(0, &None, None);
let total_epoch_stake = num_validators as u64 * stake_per_validator;
@@ -3687,6 +3695,70 @@ pub(crate) mod tests {
}
}
#[test]
fn test_leader_snapshot_restart_propagation() {
let ReplayBlockstoreComponents {
validator_voting_keys,
mut progress,
bank_forks,
leader_schedule_cache,
..
} = replay_blockstore_components();
let root_bank = bank_forks.read().unwrap().root_bank().clone();
let my_pubkey = leader_schedule_cache
.slot_leader_at(root_bank.slot(), Some(&root_bank))
.unwrap();
// Check that we are the leader of the root bank
assert!(
progress
.get_propagated_stats(root_bank.slot())
.unwrap()
.is_leader_slot
);
let ancestors = bank_forks.read().unwrap().ancestors();
// Freeze bank so it shows up in frozen banks
root_bank.freeze();
let mut frozen_banks: Vec<_> = bank_forks
.read()
.unwrap()
.frozen_banks()
.values()
.cloned()
.collect();
// Compute bank stats, make sure vote is propagated back to starting root bank
let vote_tracker = VoteTracker::default();
// Add votes
for vote_key in validator_voting_keys.values() {
vote_tracker.insert_vote(root_bank.slot(), Arc::new(*vote_key));
}
assert!(!progress.is_propagated(root_bank.slot()));
// Update propagation status
let tower = Tower::new_for_tests(0, 0.67);
ReplayStage::compute_bank_stats(
&my_pubkey,
&ancestors,
&mut frozen_banks,
&tower,
&mut progress,
&vote_tracker,
&ClusterSlots::default(),
&bank_forks,
&mut PubkeyReferences::default(),
&mut HeaviestSubtreeForkChoice::new_from_bank_forks(&bank_forks.read().unwrap()),
&mut BankWeightForkChoice::default(),
);
// Check status is true
assert!(progress.is_propagated(root_bank.slot()));
}
fn setup_forks() -> (RwLock<BankForks>, ProgressMap) {
/*
Build fork structure:


@@ -20,6 +20,7 @@ pub enum Error {
ReadyTimeoutError,
RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
CrossbeamSendError,
TryCrossbeamSendError,
TryRecvError(std::sync::mpsc::TryRecvError),
Serialize(std::boxed::Box<bincode::ErrorKind>),
TransactionError(transaction::TransactionError),
@@ -87,6 +88,11 @@ impl<T> std::convert::From<crossbeam_channel::SendError<T>> for Error {
Error::CrossbeamSendError
}
}
impl<T> std::convert::From<crossbeam_channel::TrySendError<T>> for Error {
fn from(_e: crossbeam_channel::TrySendError<T>) -> Error {
Error::TryCrossbeamSendError
}
}
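The new TryCrossbeamSendError variant exists so the ? operator can be used on crossbeam's try_send, mirroring the existing channel-error conversions. A small sketch of that pattern against crossbeam_channel, with the error enum trimmed down to the one variant of interest:

use crossbeam_channel::{bounded, Sender, TrySendError};

#[derive(Debug)]
enum Error {
    TryCrossbeamSendError,
}

impl<T> From<TrySendError<T>> for Error {
    fn from(_e: TrySendError<T>) -> Self {
        Error::TryCrossbeamSendError
    }
}

fn forward(value: u64, sender: &Sender<u64>) -> Result<(), Error> {
    // try_send fails fast when the bounded channel is full; ? maps the
    // TrySendError into our error type via the From impl above.
    sender.try_send(value)?;
    Ok(())
}

fn main() {
    let (sender, _receiver) = bounded(1);
    forward(42, &sender).unwrap();
}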
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
Error::SendError


@@ -4,6 +4,8 @@ use crate::{
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
cluster_info_vote_listener::VerifiedVoteReceiver,
cluster_slots::ClusterSlots,
cluster_slots_service::ClusterSlotsService,
completed_data_sets_service::CompletedDataSetsSender,
contact_info::ContactInfo,
repair_service::DuplicateSlotsResetSender,
repair_service::RepairInfo,
@@ -27,6 +29,7 @@ use solana_sdk::timing::timestamp;
use solana_streamer::streamer::PacketReceiver;
use std::{
cmp,
collections::hash_set::HashSet,
collections::{BTreeMap, HashMap},
net::UdpSocket,
sync::atomic::{AtomicBool, AtomicU64, Ordering},
@@ -394,6 +397,7 @@ pub fn retransmitter(
pub struct RetransmitStage {
thread_hdls: Vec<JoinHandle<()>>,
window_service: WindowService,
cluster_slots_service: ClusterSlotsService,
}
impl RetransmitStage {
@@ -415,6 +419,8 @@ impl RetransmitStage {
cluster_slots: Arc<ClusterSlots>,
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
verified_vote_receiver: VerifiedVoteReceiver,
repair_validators: Option<HashSet<Pubkey>>,
completed_data_sets_sender: CompletedDataSetsSender,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();
@@ -427,13 +433,21 @@ impl RetransmitStage {
retransmit_receiver,
);
let leader_schedule_cache_clone = leader_schedule_cache.clone();
let cluster_slots_service = ClusterSlotsService::new(
blockstore.clone(),
cluster_slots.clone(),
bank_forks.clone(),
cluster_info.clone(),
completed_slots_receiver,
exit.clone(),
);
let repair_info = RepairInfo {
bank_forks,
completed_slots_receiver,
epoch_schedule,
duplicate_slots_reset_sender,
repair_validators,
};
let leader_schedule_cache_clone = leader_schedule_cache.clone();
let window_service = WindowService::new(
blockstore,
cluster_info.clone(),
@@ -460,12 +474,14 @@ impl RetransmitStage {
},
cluster_slots,
verified_vote_receiver,
completed_data_sets_sender,
);
let thread_hdls = t_retransmit;
Self {
thread_hdls,
window_service,
cluster_slots_service,
}
}
@@ -474,6 +490,7 @@ impl RetransmitStage {
thread_hdl.join()?;
}
self.window_service.join()?;
self.cluster_slots_service.join()?;
Ok(())
}
}
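ClusterSlotsService follows the same shape as the other services wired up here: it owns a thread that loops until a shared exit flag flips, and join() hands the thread result back to the caller. A minimal std-only sketch of that service pattern (the name and loop body are placeholders, not the service's real logic):

use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use std::thread::{self, JoinHandle};
use std::time::Duration;

pub struct ExampleService {
    t_worker: JoinHandle<()>,
}

impl ExampleService {
    pub fn new(exit: Arc<AtomicBool>) -> Self {
        let t_worker = thread::Builder::new()
            .name("example-service".to_string())
            .spawn(move || loop {
                if exit.load(Ordering::Relaxed) {
                    break;
                }
                // A real service would drain its channels and push updates here.
                thread::sleep(Duration::from_millis(200));
            })
            .unwrap();
        Self { t_worker }
    }

    pub fn join(self) -> thread::Result<()> {
        self.t_worker.join()
    }
}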

File diff suppressed because it is too large


@@ -4,6 +4,7 @@ use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;
const JSON_RPC_SERVER_ERROR_3: i64 = -32003;
const JSON_RPC_SERVER_ERROR_4: i64 = -32004;
pub enum RpcCustomError {
BlockCleanedUp {
@@ -14,6 +15,9 @@ pub enum RpcCustomError {
message: String,
},
SendTransactionIsNotSigned,
BlockNotAvailable {
slot: Slot,
},
}
impl From<RpcCustomError> for Error {
@@ -40,6 +44,11 @@ impl From<RpcCustomError> for Error {
message: "Transaction is not signed".to_string(),
data: None,
},
RpcCustomError::BlockNotAvailable { slot } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_4),
message: format!("Block not available for slot {}", slot,),
data: None,
},
}
}
}
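The new BlockNotAvailable variant follows the existing recipe: reserve the next code in the -32000 server-error range and render a message carrying the offending slot. A compact sketch of that mapping, assuming a reduced code/message error struct in place of the jsonrpc_core type:

type Slot = u64;

const JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE: i64 = -32004;

enum RpcCustomError {
    BlockNotAvailable { slot: Slot },
}

struct RpcError {
    code: i64,
    message: String,
}

impl From<RpcCustomError> for RpcError {
    fn from(e: RpcCustomError) -> Self {
        match e {
            RpcCustomError::BlockNotAvailable { slot } => Self {
                code: JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE,
                message: format!("Block not available for slot {}", slot),
            },
        }
    }
}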


@@ -1,19 +1,17 @@
//! The `pubsub` module implements a threaded subscription service on client RPC request
use crate::rpc_subscriptions::{RpcSubscriptions, RpcVote, SlotInfo};
use crate::rpc_subscriptions::{RpcSubscriptions, RpcVote};
use jsonrpc_core::{Error, ErrorCode, Result};
use jsonrpc_derive::rpc;
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
use solana_account_decoder::UiAccount;
use solana_client::{
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
rpc_response::{Response as RpcResponse, RpcKeyedAccount, RpcSignatureResult},
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig},
rpc_response::{Response as RpcResponse, RpcKeyedAccount, RpcSignatureResult, SlotInfo},
};
#[cfg(test)]
use solana_runtime::bank_forks::BankForks;
use solana_sdk::{
clock::Slot, commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Signature,
};
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
#[cfg(test)]
use std::sync::RwLock;
use std::{
@@ -89,7 +87,7 @@ pub trait RpcSolPubSub {
meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
commitment: Option<CommitmentConfig>,
config: Option<RpcSignatureSubscribeConfig>,
);
// Unsubscribe from signature notification subscription.
@@ -248,7 +246,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
_meta: Self::Metadata,
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
signature_str: String,
commitment: Option<CommitmentConfig>,
signature_subscribe_config: Option<RpcSignatureSubscribeConfig>,
) {
info!("signature_subscribe");
match param::<Signature>(&signature_str, "signature") {
@@ -259,8 +257,12 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
"signature_subscribe: signature={:?} id={:?}",
signature, sub_id
);
self.subscriptions
.add_signature_subscription(signature, commitment, sub_id, subscriber);
self.subscriptions.add_signature_subscription(
signature,
signature_subscribe_config,
sub_id,
subscriber,
);
}
Err(e) => subscriber.reject(e).unwrap(),
}
@@ -360,6 +362,7 @@ mod tests {
use serial_test_derive::serial;
use solana_account_decoder::{parse_account_data::parse_account_data, UiAccountEncoding};
use solana_budget_program::{self, budget_instruction};
use solana_client::rpc_response::{ProcessedSignatureResult, ReceivedSignatureResult};
use solana_runtime::{
bank::Bank,
bank_forks::BankForks,
@@ -370,6 +373,7 @@ mod tests {
},
};
use solana_sdk::{
commitment_config::CommitmentConfig,
hash::Hash,
message::Message,
pubkey::Pubkey,
@@ -439,7 +443,8 @@ mod tests {
// Test signature confirmation notification
let (response, _) = robust_poll_or_panic(receiver);
let expected_res = RpcSignatureResult { err: None };
let expected_res =
RpcSignatureResult::ProcessedSignature(ProcessedSignatureResult { err: None });
let expected = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
@@ -451,6 +456,39 @@ mod tests {
"subscription": 0,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
// Test "received"
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("signatureNotification");
rpc.signature_subscribe(
session,
subscriber,
tx.signatures[0].to_string(),
Some(RpcSignatureSubscribeConfig {
commitment: None,
enable_received_notification: Some(true),
}),
);
let received_slot = 1;
rpc.subscriptions
.notify_signatures_received((received_slot, vec![tx.signatures[0]]));
// Test signature confirmation notification
let (response, _) = robust_poll_or_panic(receiver);
let expected_res =
RpcSignatureResult::ReceivedSignature(ReceivedSignatureResult::ReceivedSignature);
let expected = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": received_slot },
"value": expected_res,
},
"subscription": 1,
}
});
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
}
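This test exercises the new opt-in behavior: a subscription created with enable_received_notification set gets a ReceivedSignature event as soon as the signature shows up in a slot, while ordinary subscriptions only hear about processed results. A stripped-down sketch of that dispatch, with std maps standing in for the subscription registry and placeholder types throughout:

use std::collections::HashMap;

type Signature = String;

#[derive(Debug, PartialEq)]
enum SignatureResult {
    Received,
    Processed { err: Option<String> },
}

struct Subscription {
    enable_received_notification: bool,
}

/// Only subscriptions that opted in are told that a signature was received;
/// processed results are delivered elsewhere, to every subscription.
fn notify_signatures_received(
    slot: u64,
    signatures: &[Signature],
    subscriptions: &HashMap<Signature, Vec<Subscription>>,
    mut notify: impl FnMut(u64, &Signature, SignatureResult),
) {
    for signature in signatures {
        if let Some(subs) = subscriptions.get(signature) {
            for sub in subs {
                if sub.enable_received_notification {
                    notify(slot, signature, SignatureResult::Received);
                }
            }
        }
    }
}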
@@ -544,6 +582,7 @@ mod tests {
Some(RpcAccountInfoConfig {
commitment: Some(CommitmentConfig::recent()),
encoding: None,
data_slice: None,
}),
);
@@ -583,7 +622,7 @@ mod tests {
"lamports": 51,
"data": bs58::encode(expected_data).into_string(),
"executable": false,
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 0,
@@ -653,6 +692,7 @@ mod tests {
Some(RpcAccountInfoConfig {
commitment: Some(CommitmentConfig::recent()),
encoding: Some(UiAccountEncoding::JsonParsed),
data_slice: None,
}),
);
@@ -676,7 +716,13 @@ mod tests {
.get_account(&nonce_account.pubkey())
.unwrap()
.data;
let expected_data = parse_account_data(&system_program::id(), &expected_data).unwrap();
let expected_data = parse_account_data(
&nonce_account.pubkey(),
&system_program::id(),
&expected_data,
None,
)
.unwrap();
let expected = json!({
"jsonrpc": "2.0",
"method": "accountNotification",
@@ -688,7 +734,7 @@ mod tests {
"lamports": 100,
"data": expected_data,
"executable": false,
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 0,
@@ -767,6 +813,7 @@ mod tests {
Some(RpcAccountInfoConfig {
commitment: Some(CommitmentConfig::root()),
encoding: None,
data_slice: None,
}),
);
@@ -816,6 +863,7 @@ mod tests {
Some(RpcAccountInfoConfig {
commitment: Some(CommitmentConfig::root()),
encoding: None,
data_slice: None,
}),
);
@@ -849,7 +897,7 @@ mod tests {
"lamports": 100,
"data": "",
"executable": false,
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 0,


@@ -1,6 +1,14 @@
//! The `rpc_service` module implements the Solana JSON RPC service.
use crate::{cluster_info::ClusterInfo, rpc::*, rpc_health::*, validator::ValidatorExit};
use crate::{
bigtable_upload_service::BigTableUploadService,
cluster_info::ClusterInfo,
poh_recorder::PohRecorder,
rpc::*,
rpc_health::*,
send_transaction_service::{LeaderInfo, SendTransactionService},
validator::ValidatorExit,
};
use jsonrpc_core::MetaIoHandler;
use jsonrpc_http_server::{
hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, RequestMiddleware,
@@ -11,7 +19,6 @@ use solana_ledger::blockstore::Blockstore;
use solana_runtime::{
bank_forks::{BankForks, SnapshotConfig},
commitment::BlockCommitmentCache,
send_transaction_service::SendTransactionService,
snapshot_utils,
};
use solana_sdk::{hash::Hash, native_token::lamports_to_sol, pubkey::Pubkey};
@@ -20,10 +27,10 @@ use std::{
net::SocketAddr,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, Ordering},
sync::{mpsc::channel, Arc, RwLock},
sync::{mpsc::channel, Arc, Mutex, RwLock},
thread::{self, Builder, JoinHandle},
};
use tokio::prelude::Future;
use tokio::runtime;
pub struct JsonRpcService {
thread_hdl: JoinHandle<()>,
@@ -32,6 +39,7 @@ pub struct JsonRpcService {
pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()...
close_handle: Option<CloseHandle>,
runtime: runtime::Runtime,
}
struct RpcRequestMiddleware {
@@ -97,6 +105,9 @@ impl RpcRequestMiddleware {
}
fn process_file_get(&self, path: &str) -> RequestMiddlewareAction {
// Stuck on tokio 0.1 until the jsonrpc-http-server crate upgrades to tokio 0.2
use tokio_01::prelude::*;
let stem = path.split_at(1).1; // Drop leading '/' from path
let filename = {
match path {
@@ -115,10 +126,10 @@ impl RpcRequestMiddleware {
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
response: Box::new(
tokio_fs::file::File::open(filename)
tokio_fs_01::file::File::open(filename)
.and_then(|file| {
let buf: Vec<u8> = Vec::new();
tokio_io::io::read_to_end(file, buf)
tokio_io_01::io::read_to_end(file, buf)
.and_then(|item| Ok(hyper::Response::new(item.1.into())))
.or_else(|_| Ok(RpcRequestMiddleware::internal_server_error()))
})
@@ -232,6 +243,7 @@ impl JsonRpcService {
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
blockstore: Arc<Blockstore>,
cluster_info: Arc<ClusterInfo>,
poh_recorder: Option<Arc<Mutex<PohRecorder>>>,
genesis_hash: Hash,
ledger_path: &Path,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
@@ -249,6 +261,45 @@ impl JsonRpcService {
));
let tpu_address = cluster_info.my_contact_info().tpu;
let mut runtime = runtime::Builder::new()
.threaded_scheduler()
.thread_name("rpc-runtime")
.enable_all()
.build()
.expect("Runtime");
let exit_bigtable_ledger_upload_service = Arc::new(AtomicBool::new(false));
let (bigtable_ledger_storage, _bigtable_ledger_upload_service) =
if config.enable_bigtable_ledger_storage || config.enable_bigtable_ledger_upload {
runtime
.block_on(solana_storage_bigtable::LedgerStorage::new(
!config.enable_bigtable_ledger_upload,
))
.map(|bigtable_ledger_storage| {
info!("BigTable ledger storage initialized");
let bigtable_ledger_upload_service = Arc::new(BigTableUploadService::new(
runtime.handle().clone(),
bigtable_ledger_storage.clone(),
blockstore.clone(),
block_commitment_cache.clone(),
exit_bigtable_ledger_upload_service.clone(),
));
(
Some(bigtable_ledger_storage),
Some(bigtable_ledger_upload_service),
)
})
.unwrap_or_else(|err| {
error!("Failed to initialize BigTable ledger storage: {:?}", err);
(None, None)
})
} else {
(None, None)
};
let (request_processor, receiver) = JsonRpcRequestProcessor::new(
config,
bank_forks.clone(),
@@ -256,14 +307,19 @@ impl JsonRpcService {
blockstore,
validator_exit.clone(),
health.clone(),
cluster_info,
cluster_info.clone(),
genesis_hash,
&runtime,
bigtable_ledger_storage,
);
let exit_send_transaction_service = Arc::new(AtomicBool::new(false));
let leader_info =
poh_recorder.map(|recorder| LeaderInfo::new(cluster_info.clone(), recorder));
let _send_transaction_service = Arc::new(SendTransactionService::new(
tpu_address,
&bank_forks,
leader_info,
&exit_send_transaction_service,
receiver,
));
@@ -313,6 +369,7 @@ impl JsonRpcService {
close_handle_sender.send(server.close_handle()).unwrap();
server.wait();
exit_send_transaction_service.store(true, Ordering::Relaxed);
exit_bigtable_ledger_upload_service.store(true, Ordering::Relaxed);
})
.unwrap();
@@ -325,6 +382,7 @@ impl JsonRpcService {
.register_exit(Box::new(move || close_handle_.close()));
Self {
thread_hdl,
runtime,
#[cfg(test)]
request_processor: test_request_processor,
close_handle: Some(close_handle),
@@ -338,6 +396,7 @@ impl JsonRpcService {
}
pub fn join(self) -> thread::Result<()> {
self.runtime.shutdown_background();
self.thread_hdl.join()
}
}
@@ -356,7 +415,7 @@ mod tests {
use solana_runtime::{
bank::Bank, bank_forks::CompressionType, snapshot_utils::SnapshotVersion,
};
use solana_sdk::signature::Signer;
use solana_sdk::{genesis_config::ClusterType, signature::Signer};
use std::net::{IpAddr, Ipv4Addr};
#[test]
@@ -387,6 +446,7 @@ mod tests {
block_commitment_cache,
blockstore,
cluster_info,
None,
Hash::default(),
&PathBuf::from("farf"),
validator_exit,
@@ -408,7 +468,10 @@ mod tests {
}
fn create_bank_forks() -> Arc<RwLock<BankForks>> {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(10_000);
genesis_config.cluster_type = ClusterType::MainnetBeta;
let bank = Bank::new(&genesis_config);
Arc::new(RwLock::new(BankForks::new(bank)))
}
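The BigTable wiring above illustrates a pattern this file now relies on: an optional backend is initialized only when configuration asks for it, and a failure degrades to None instead of aborting startup. A condensed std-only sketch of that gating, with placeholder types in place of the BigTable storage and upload service:

struct LedgerStorage; // placeholder for the remote ledger backend
struct UploadService; // placeholder for the background uploader

fn connect_storage(read_only: bool) -> Result<LedgerStorage, String> {
    // A real implementation would open the remote connection here.
    let _ = read_only;
    Ok(LedgerStorage)
}

fn init_optional_storage(
    enable_storage: bool,
    enable_upload: bool,
) -> (Option<LedgerStorage>, Option<UploadService>) {
    if !(enable_storage || enable_upload) {
        return (None, None);
    }
    match connect_storage(!enable_upload) {
        Ok(storage) => {
            // Spawn the uploader only when uploading was requested.
            let upload_service = if enable_upload { Some(UploadService) } else { None };
            (Some(storage), upload_service)
        }
        Err(err) => {
            eprintln!("Failed to initialize ledger storage: {:?}", err);
            (None, None)
        }
    }
}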


@@ -1,5 +1,6 @@
//! The `pubsub` module implements a threaded subscription service on client RPC request
use crate::rpc::{get_parsed_token_account, get_parsed_token_accounts};
use core::hash::Hash;
use jsonrpc_core::futures::Future;
use jsonrpc_pubsub::{
@@ -7,11 +8,14 @@ use jsonrpc_pubsub::{
SubscriptionId,
};
use serde::Serialize;
use solana_account_decoder::{UiAccount, UiAccountEncoding};
use solana_account_decoder::{parse_token::spl_token_id_v2_0, UiAccount, UiAccountEncoding};
use solana_client::{
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig},
rpc_filter::RpcFilterType,
rpc_response::{Response, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult},
rpc_response::{
ProcessedSignatureResult, ReceivedSignatureResult, Response, RpcKeyedAccount,
RpcResponseContext, RpcSignatureResult, SlotInfo,
},
};
use solana_runtime::{
bank::Bank,
@@ -38,17 +42,12 @@ use std::{
iter,
sync::{Arc, Mutex, RwLock},
};
use tokio::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor};
// Stuck on tokio 0.1 until the jsonrpc-pubsub crate upgrades to tokio 0.2
use tokio_01::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor};
const RECEIVE_DELAY_MILLIS: u64 = 100;
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
pub struct SlotInfo {
pub slot: Slot,
pub parent: Slot,
pub root: Slot,
}
// A more human-friendly version of Vote, with the bank state signature base58 encoded.
#[derive(Serialize, Deserialize, Debug)]
pub struct RpcVote {
@@ -64,6 +63,7 @@ enum NotificationEntry {
Frozen(Slot),
Bank(CommitmentSlots),
Gossip(Slot),
SignaturesReceived((Slot, Vec<Signature>)),
}
impl std::fmt::Debug for NotificationEntry {
@@ -76,6 +76,9 @@ impl std::fmt::Debug for NotificationEntry {
NotificationEntry::Bank(commitment_slots) => {
write!(f, "Bank({{slot: {:?}}})", commitment_slots.slot)
}
NotificationEntry::SignaturesReceived(slot_signatures) => {
write!(f, "SignaturesReceived({:?})", slot_signatures)
}
NotificationEntry::Gossip(slot) => write!(f, "Gossip({:?})", slot),
}
}
@@ -105,7 +108,10 @@ type RpcProgramSubscriptions = RwLock<
>,
>;
type RpcSignatureSubscriptions = RwLock<
HashMap<Signature, HashMap<SubscriptionId, SubscriptionData<Response<RpcSignatureResult>, ()>>>,
HashMap<
Signature,
HashMap<SubscriptionId, SubscriptionData<Response<RpcSignatureResult>, bool>>,
>,
>;
type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>;
type RpcVoteSubscriptions = RwLock<HashMap<SubscriptionId, Sink<RpcVote>>>;
@@ -131,13 +137,11 @@ fn add_subscription<K, S, T>(
last_notified_slot: RwLock::new(last_notified_slot),
config,
};
if let Some(current_hashmap) = subscriptions.get_mut(&hashmap_key) {
current_hashmap.insert(sub_id, subscription_data);
return;
}
let mut hashmap = HashMap::new();
hashmap.insert(sub_id, subscription_data);
subscriptions.insert(hashmap_key, hashmap);
subscriptions
.entry(hashmap_key)
.or_default()
.insert(sub_id, subscription_data);
}
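The add_subscription cleanup replaces the manual get_mut/insert dance with the entry API, which is the idiomatic way to populate a nested map. A tiny illustration with arbitrary std types:

use std::collections::HashMap;

fn add_subscription(
    subscriptions: &mut HashMap<String, HashMap<u64, String>>,
    key: String,
    sub_id: u64,
    data: String,
) {
    // entry() creates the inner map on first use, so the "first subscription
    // for this key" and "additional subscription" cases share one code path.
    subscriptions.entry(key).or_default().insert(sub_id, data);
}

fn main() {
    let mut subs = HashMap::new();
    add_subscription(&mut subs, "alice".to_string(), 1, "config-a".to_string());
    add_subscription(&mut subs, "alice".to_string(), 2, "config-b".to_string());
    assert_eq!(subs["alice"].len(), 2);
}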
fn remove_subscription<K, S, T>(
@@ -176,7 +180,7 @@ where
K: Eq + Hash + Clone + Copy,
S: Clone + Serialize,
B: Fn(&Bank, &K) -> X,
F: Fn(X, Slot, Option<T>) -> (Box<dyn Iterator<Item = S>>, Slot),
F: Fn(X, &K, Slot, Option<T>, Option<Arc<Bank>>) -> (Box<dyn Iterator<Item = S>>, Slot),
X: Clone + Serialize + Default,
T: Clone,
{
@@ -200,16 +204,19 @@ where
commitment_slots.highest_confirmed_slot
}
};
let results = {
let bank_forks = bank_forks.read().unwrap();
bank_forks
.get(slot)
.map(|desired_bank| bank_method(&desired_bank, hashmap_key))
.unwrap_or_default()
};
let bank = bank_forks.read().unwrap().get(slot).cloned();
let results = bank
.clone()
.map(|desired_bank| bank_method(&desired_bank, hashmap_key))
.unwrap_or_default();
let mut w_last_notified_slot = last_notified_slot.write().unwrap();
let (filter_results, result_slot) =
filter_results(results, *w_last_notified_slot, config.as_ref().cloned());
let (filter_results, result_slot) = filter_results(
results,
hashmap_key,
*w_last_notified_slot,
config.as_ref().cloned(),
bank,
);
for result in filter_results {
notifier.notify(
Response {
@@ -240,18 +247,30 @@ impl RpcNotifier {
fn filter_account_result(
result: Option<(Account, Slot)>,
pubkey: &Pubkey,
last_notified_slot: Slot,
encoding: Option<UiAccountEncoding>,
bank: Option<Arc<Bank>>,
) -> (Box<dyn Iterator<Item = UiAccount>>, Slot) {
if let Some((account, fork)) = result {
// If fork < last_notified_slot this means that we last notified for a fork
// and should notify that the account state has been reverted.
if fork != last_notified_slot {
let encoding = encoding.unwrap_or(UiAccountEncoding::Binary);
return (
Box::new(iter::once(UiAccount::encode(account, encoding))),
fork,
);
if account.owner == spl_token_id_v2_0() && encoding == UiAccountEncoding::JsonParsed {
let bank = bank.unwrap(); // If result.is_some(), bank must also be Some
return (
Box::new(iter::once(get_parsed_token_account(bank, pubkey, account))),
fork,
);
} else {
return (
Box::new(iter::once(UiAccount::encode(
pubkey, account, encoding, None, None,
))),
fork,
);
}
}
}
(Box::new(iter::empty()), last_notified_slot)
@@ -259,44 +278,51 @@ fn filter_account_result(
fn filter_signature_result(
result: Option<transaction::Result<()>>,
_signature: &Signature,
last_notified_slot: Slot,
_config: Option<()>,
_config: Option<bool>,
_bank: Option<Arc<Bank>>,
) -> (Box<dyn Iterator<Item = RpcSignatureResult>>, Slot) {
(
Box::new(
result
.into_iter()
.map(|result| RpcSignatureResult { err: result.err() }),
),
Box::new(result.into_iter().map(|result| {
RpcSignatureResult::ProcessedSignature(ProcessedSignatureResult { err: result.err() })
})),
last_notified_slot,
)
}
fn filter_program_results(
accounts: Vec<(Pubkey, Account)>,
program_id: &Pubkey,
last_notified_slot: Slot,
config: Option<ProgramConfig>,
bank: Option<Arc<Bank>>,
) -> (Box<dyn Iterator<Item = RpcKeyedAccount>>, Slot) {
let config = config.unwrap_or_default();
let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary);
let filters = config.filters;
(
let accounts_is_empty = accounts.is_empty();
let keyed_accounts = accounts.into_iter().filter(move |(_, account)| {
filters.iter().all(|filter_type| match filter_type {
RpcFilterType::DataSize(size) => account.data.len() as u64 == *size,
RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data),
})
});
let accounts: Box<dyn Iterator<Item = RpcKeyedAccount>> = if program_id == &spl_token_id_v2_0()
&& encoding == UiAccountEncoding::JsonParsed
&& !accounts_is_empty
{
let bank = bank.unwrap(); // If !accounts_is_empty, bank must be Some
Box::new(get_parsed_token_accounts(bank, keyed_accounts))
} else {
Box::new(
accounts
.into_iter()
.filter(move |(_, account)| {
filters.iter().all(|filter_type| match filter_type {
RpcFilterType::DataSize(size) => account.data.len() as u64 == *size,
RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data),
})
})
.map(move |(pubkey, account)| RpcKeyedAccount {
pubkey: pubkey.to_string(),
account: UiAccount::encode(account, encoding.clone()),
}),
),
last_notified_slot,
)
keyed_accounts.map(move |(pubkey, account)| RpcKeyedAccount {
pubkey: pubkey.to_string(),
account: UiAccount::encode(&pubkey, account, encoding.clone(), None, None),
}),
)
};
(accounts, last_notified_slot)
}
#[derive(Clone)]
@@ -605,13 +631,18 @@ impl RpcSubscriptions {
pub fn add_signature_subscription(
&self,
signature: Signature,
commitment: Option<CommitmentConfig>,
signature_subscribe_config: Option<RpcSignatureSubscribeConfig>,
sub_id: SubscriptionId,
subscriber: Subscriber<Response<RpcSignatureResult>>,
) {
let (commitment, enable_received_notification) = signature_subscribe_config
.map(|config| (config.commitment, config.enable_received_notification))
.unwrap_or_default();
let commitment_level = commitment
.unwrap_or_else(CommitmentConfig::recent)
.commitment;
let mut subscriptions = if commitment_level == CommitmentLevel::SingleGossip {
self.subscriptions
.gossip_signature_subscriptions
@@ -627,7 +658,7 @@ impl RpcSubscriptions {
sub_id,
subscriber,
0, // last_notified_slot is not utilized for signature subscriptions
None,
enable_received_notification,
);
}
@@ -672,6 +703,10 @@ impl RpcSubscriptions {
self.enqueue_notification(NotificationEntry::Slot(SlotInfo { slot, parent, root }));
}
pub fn notify_signatures_received(&self, slot_signatures: (Slot, Vec<Signature>)) {
self.enqueue_notification(NotificationEntry::SignaturesReceived(slot_signatures));
}
pub fn add_vote_subscription(&self, sub_id: SubscriptionId, subscriber: Subscriber<RpcVote>) {
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let mut subscriptions = self.subscriptions.vote_subscriptions.write().unwrap();
@@ -816,6 +851,13 @@ impl RpcSubscriptions {
);
}
}
NotificationEntry::SignaturesReceived(slot_signatures) => {
RpcSubscriptions::process_signatures_received(
&slot_signatures,
&subscriptions.signature_subscriptions,
&notifier,
)
}
},
Err(RecvTimeoutError::Timeout) => {
// not a problem - try reading again
@@ -858,7 +900,7 @@ impl RpcSubscriptions {
&subscriptions.gossip_account_subscriptions,
&subscriptions.gossip_program_subscriptions,
&subscriptions.gossip_signature_subscriptions,
&bank_forks,
bank_forks,
&commitment_slots,
&notifier,
);
@@ -879,7 +921,7 @@ impl RpcSubscriptions {
for pubkey in &pubkeys {
Self::check_account(
pubkey,
&bank_forks,
bank_forks,
account_subscriptions.clone(),
&notifier,
&commitment_slots,
@@ -893,7 +935,7 @@ impl RpcSubscriptions {
for program_id in &programs {
Self::check_program(
program_id,
&bank_forks,
bank_forks,
program_subscriptions.clone(),
&notifier,
&commitment_slots,
@@ -907,7 +949,7 @@ impl RpcSubscriptions {
for signature in &signatures {
Self::check_signature(
signature,
&bank_forks,
bank_forks,
signature_subscriptions.clone(),
&notifier,
&commitment_slots,
@@ -915,6 +957,40 @@ impl RpcSubscriptions {
}
}
fn process_signatures_received(
(received_slot, signatures): &(Slot, Vec<Signature>),
signature_subscriptions: &Arc<RpcSignatureSubscriptions>,
notifier: &RpcNotifier,
) {
for signature in signatures {
if let Some(hashmap) = signature_subscriptions.read().unwrap().get(signature) {
for (
_,
SubscriptionData {
sink,
config: is_received_notification_enabled,
..
},
) in hashmap.iter()
{
if is_received_notification_enabled.unwrap_or_default() {
notifier.notify(
Response {
context: RpcResponseContext {
slot: *received_slot,
},
value: RpcSignatureResult::ReceivedSignature(
ReceivedSignatureResult::ReceivedSignature,
),
},
&sink,
);
}
}
}
}
}
fn shutdown(&mut self) -> std::thread::Result<()> {
if let Some(runtime) = self.notifier_runtime.take() {
info!("RPC Notifier runtime - shutting down");
@@ -950,7 +1026,7 @@ pub(crate) mod tests {
system_transaction,
};
use std::{fmt::Debug, sync::mpsc::channel, time::Instant};
use tokio::{prelude::FutureExt, runtime::Runtime, timer::Delay};
use tokio_01::{prelude::FutureExt, runtime::Runtime, timer::Delay};
pub(crate) fn robust_poll_or_panic<T: Debug + Send + 'static>(
receiver: futures::sync::mpsc::Receiver<T>,
@@ -1009,6 +1085,7 @@ pub(crate) mod tests {
Some(RpcAccountInfoConfig {
commitment: Some(CommitmentConfig::recent()),
encoding: None,
data_slice: None,
}),
sub_id.clone(),
subscriber,
@@ -1051,7 +1128,7 @@ pub(crate) mod tests {
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 0,
@@ -1133,7 +1210,7 @@ pub(crate) mod tests {
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
"rentEpoch": 0,
},
"pubkey": alice.pubkey().to_string(),
},
@@ -1222,31 +1299,55 @@ pub(crate) mod tests {
Subscriber::new_test("signatureNotification");
let (processed_sub, _id_receiver, processed_recv) =
Subscriber::new_test("signatureNotification");
let (processed_sub3, _id_receiver, processed_recv3) =
Subscriber::new_test("signatureNotification");
subscriptions.add_signature_subscription(
past_bank_tx.signatures[0],
Some(CommitmentConfig::recent()),
Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::recent()),
enable_received_notification: Some(false),
}),
SubscriptionId::Number(1 as u64),
past_bank_sub1,
);
subscriptions.add_signature_subscription(
past_bank_tx.signatures[0],
Some(CommitmentConfig::root()),
Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::root()),
enable_received_notification: Some(false),
}),
SubscriptionId::Number(2 as u64),
past_bank_sub2,
);
subscriptions.add_signature_subscription(
processed_tx.signatures[0],
Some(CommitmentConfig::recent()),
Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::recent()),
enable_received_notification: Some(false),
}),
SubscriptionId::Number(3 as u64),
processed_sub,
);
subscriptions.add_signature_subscription(
unprocessed_tx.signatures[0],
Some(CommitmentConfig::recent()),
Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::recent()),
enable_received_notification: Some(false),
}),
SubscriptionId::Number(4 as u64),
Subscriber::new_test("signatureNotification").0,
);
// Add a subscription that gets `received` notifications
subscriptions.add_signature_subscription(
unprocessed_tx.signatures[0],
Some(RpcSignatureSubscribeConfig {
commitment: Some(CommitmentConfig::recent()),
enable_received_notification: Some(true),
}),
SubscriptionId::Number(5 as u64),
processed_sub3,
);
{
let sig_subs = subscriptions
@@ -1259,46 +1360,63 @@ pub(crate) mod tests {
assert!(sig_subs.contains_key(&processed_tx.signatures[0]));
}
let mut commitment_slots = CommitmentSlots::default();
commitment_slots.slot = 1;
let received_slot = 1;
commitment_slots.slot = received_slot;
subscriptions
.notify_signatures_received((received_slot, vec![unprocessed_tx.signatures[0]]));
subscriptions.notify_subscribers(commitment_slots);
let expected_res = RpcSignatureResult { err: None };
let expected_res =
RpcSignatureResult::ProcessedSignature(ProcessedSignatureResult { err: None });
let received_expected_res =
RpcSignatureResult::ReceivedSignature(ReceivedSignatureResult::ReceivedSignature);
struct Notification {
slot: Slot,
id: u64,
}
let expected_notification = |exp: Notification| -> String {
let json = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": exp.slot },
"value": &expected_res,
},
"subscription": exp.id,
}
});
serde_json::to_string(&json).unwrap()
};
let expected_notification =
|exp: Notification, expected_res: &RpcSignatureResult| -> String {
let json = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": exp.slot },
"value": expected_res,
},
"subscription": exp.id,
}
});
serde_json::to_string(&json).unwrap()
};
// Expect to receive a notification from bank 1 because this subscription is
// looking for 0 confirmations and so checks the current bank
let expected = expected_notification(Notification { slot: 1, id: 1 });
let expected = expected_notification(Notification { slot: 1, id: 1 }, &expected_res);
let (response, _) = robust_poll_or_panic(past_bank_recv1);
assert_eq!(expected, response);
// Expect to receive a notification from bank 0 because this subscription is
// looking for 1 confirmation and so checks the past bank
let expected = expected_notification(Notification { slot: 0, id: 2 });
let expected = expected_notification(Notification { slot: 0, id: 2 }, &expected_res);
let (response, _) = robust_poll_or_panic(past_bank_recv2);
assert_eq!(expected, response);
let expected = expected_notification(Notification { slot: 1, id: 3 });
let expected = expected_notification(Notification { slot: 1, id: 3 }, &expected_res);
let (response, _) = robust_poll_or_panic(processed_recv);
assert_eq!(expected, response);
// Expect a "received" notification
let expected = expected_notification(
Notification {
slot: received_slot,
id: 5,
},
&received_expected_res,
);
let (response, _) = robust_poll_or_panic(processed_recv3);
assert_eq!(expected, response);
// Subscription should be automatically removed after notification
let sig_subs = subscriptions
.subscriptions
@@ -1311,7 +1429,7 @@ pub(crate) mod tests {
// Unprocessed signature subscription should not be removed
assert_eq!(
sig_subs.get(&unprocessed_tx.signatures[0]).unwrap().len(),
1
2
);
}
@@ -1493,6 +1611,7 @@ pub(crate) mod tests {
Some(RpcAccountInfoConfig {
commitment: Some(CommitmentConfig::single_gossip()),
encoding: None,
data_slice: None,
}),
sub_id0.clone(),
subscriber0,
@@ -1546,7 +1665,7 @@ pub(crate) mod tests {
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 0,
@@ -1561,6 +1680,7 @@ pub(crate) mod tests {
Some(RpcAccountInfoConfig {
commitment: Some(CommitmentConfig::single_gossip()),
encoding: None,
data_slice: None,
}),
sub_id1.clone(),
subscriber1,
@@ -1579,7 +1699,7 @@ pub(crate) mod tests {
"executable": false,
"lamports": 1,
"owner": "Budget1111111111111111111111111111111111111",
"rentEpoch": 1,
"rentEpoch": 0,
},
},
"subscription": 1,


@@ -0,0 +1,413 @@
use crate::cluster_info::ClusterInfo;
use crate::poh_recorder::PohRecorder;
use log::*;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
use std::sync::Mutex;
use std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, Ordering},
mpsc::Receiver,
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
/// Maximum size of the transaction queue
const MAX_TRANSACTION_QUEUE_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day
pub struct SendTransactionService {
thread: JoinHandle<()>,
}
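// A transaction queued for retry, identified by its signature and kept until it is rooted,
// fails, or expires past `last_valid_slot`.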
pub struct TransactionInfo {
pub signature: Signature,
pub wire_transaction: Vec<u8>,
pub last_valid_slot: Slot,
}
impl TransactionInfo {
pub fn new(signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) -> Self {
Self {
signature,
wire_transaction,
last_valid_slot,
}
}
}
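// Caches the TPU addresses of recent gossip peers so outgoing transactions can be aimed at the
// upcoming leader instead of a fixed address.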
pub struct LeaderInfo {
cluster_info: Arc<ClusterInfo>,
poh_recorder: Arc<Mutex<PohRecorder>>,
recent_peers: HashMap<Pubkey, SocketAddr>,
}
impl LeaderInfo {
pub fn new(cluster_info: Arc<ClusterInfo>, poh_recorder: Arc<Mutex<PohRecorder>>) -> Self {
Self {
cluster_info,
poh_recorder,
recent_peers: HashMap::new(),
}
}
pub fn refresh_recent_peers(&mut self) {
self.recent_peers = self
.cluster_info
.tpu_peers()
.into_iter()
.map(|ci| (ci.id, ci.tpu))
.collect();
}
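// Returns the TPU address of the upcoming leader, if it is among the recently refreshed peers.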
pub fn get_leader_tpu(&self) -> Option<&SocketAddr> {
self.poh_recorder
.lock()
.unwrap()
.leader_after_n_slots(0)
.and_then(|leader| self.recent_peers.get(&leader))
}
}
#[derive(Default, Debug, PartialEq)]
struct ProcessTransactionsResult {
rooted: u64,
expired: u64,
retried: u64,
failed: u64,
retained: u64,
}
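// Background service that forwards wire transactions to the leader's TPU over UDP and retries
// them until they are rooted, dropped as failed, or expired.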
impl SendTransactionService {
pub fn new(
tpu_address: SocketAddr,
bank_forks: &Arc<RwLock<BankForks>>,
leader_info: Option<LeaderInfo>,
exit: &Arc<AtomicBool>,
receiver: Receiver<TransactionInfo>,
) -> Self {
let thread = Self::retry_thread(
tpu_address,
receiver,
bank_forks.clone(),
leader_info,
exit.clone(),
);
Self { thread }
}
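// Service loop: each transaction received on the channel is sent straight to the current leader's
// TPU (falling back to `tpu_address`), then queued; roughly every 5 seconds the queue is
// reconciled against the root and working banks and the peer cache is refreshed.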
fn retry_thread(
tpu_address: SocketAddr,
receiver: Receiver<TransactionInfo>,
bank_forks: Arc<RwLock<BankForks>>,
mut leader_info: Option<LeaderInfo>,
exit: Arc<AtomicBool>,
) -> JoinHandle<()> {
let mut last_status_check = Instant::now();
let mut transactions = HashMap::new();
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
if let Some(leader_info) = leader_info.as_mut() {
leader_info.refresh_recent_peers();
}
Builder::new()
.name("send-tx-svc".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
if let Ok(transaction_info) = receiver.recv_timeout(Duration::from_secs(1)) {
let address = leader_info
.as_ref()
.and_then(|leader_info| leader_info.get_leader_tpu())
.unwrap_or(&tpu_address);
Self::send_transaction(
&send_socket,
address,
&transaction_info.wire_transaction,
);
if transactions.len() < MAX_TRANSACTION_QUEUE_SIZE {
transactions.insert(transaction_info.signature, transaction_info);
} else {
datapoint_warn!("send_transaction_service-queue-overflow");
}
}
if Instant::now().duration_since(last_status_check).as_secs() >= 5 {
if !transactions.is_empty() {
datapoint_info!(
"send_transaction_service-queue-size",
("len", transactions.len(), i64)
);
let bank_forks = bank_forks.read().unwrap();
let root_bank = bank_forks.root_bank();
let working_bank = bank_forks.working_bank();
let _result = Self::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
&leader_info,
);
}
last_status_check = Instant::now();
if let Some(leader_info) = leader_info.as_mut() {
leader_info.refresh_recent_peers();
}
}
})
.unwrap()
}
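// Reconcile the retry queue against the banks: drop transactions that are rooted, expired, or
// failed; re-send transactions the working bank has not seen; retain the rest.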
fn process_transactions(
working_bank: &Arc<Bank>,
root_bank: &Arc<Bank>,
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
transactions: &mut HashMap<Signature, TransactionInfo>,
leader_info: &Option<LeaderInfo>,
) -> ProcessTransactionsResult {
let mut result = ProcessTransactionsResult::default();
transactions.retain(|signature, transaction_info| {
if root_bank.has_signature(signature) {
info!("Transaction is rooted: {}", signature);
result.rooted += 1;
inc_new_counter_info!("send_transaction_service-rooted", 1);
false
} else if transaction_info.last_valid_slot < root_bank.slot() {
info!("Dropping expired transaction: {}", signature);
result.expired += 1;
inc_new_counter_info!("send_transaction_service-expired", 1);
false
} else {
match working_bank.get_signature_status_slot(signature) {
None => {
// Transaction is unknown to the working bank; it might have been
// dropped or have landed in another fork. Re-send it.
info!("Retrying transaction: {}", signature);
result.retried += 1;
inc_new_counter_info!("send_transaction_service-retry", 1);
Self::send_transaction(
&send_socket,
leader_info
.as_ref()
.and_then(|leader_info| leader_info.get_leader_tpu())
.unwrap_or(&tpu_address),
&transaction_info.wire_transaction,
);
true
}
Some((_slot, status)) => {
if status.is_err() {
info!("Dropping failed transaction: {}", signature);
result.failed += 1;
inc_new_counter_info!("send_transaction_service-failed", 1);
false
} else {
result.retained += 1;
true
}
}
}
}
});
result
}
fn send_transaction(
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
wire_transaction: &[u8],
) {
if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
}
}
pub fn join(self) -> thread::Result<()> {
self.thread.join()
}
}
#[cfg(test)]
mod test {
use super::*;
use solana_sdk::{
genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
system_transaction,
};
use std::sync::mpsc::channel;
#[test]
fn service_exit() {
let tpu_address = "127.0.0.1:0".parse().unwrap();
let bank = Bank::default();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let exit = Arc::new(AtomicBool::new(false));
let (_sender, receiver) = channel();
let send_transaction_service =
SendTransactionService::new(tpu_address, &bank_forks, None, &exit, receiver);
exit.store(true, Ordering::Relaxed);
send_transaction_service.join().unwrap();
}
#[test]
fn process_transactions() {
solana_logger::setup();
let (genesis_config, mint_keypair) = create_genesis_config(4);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let tpu_address = "127.0.0.1:0".parse().unwrap();
let root_bank = Arc::new(Bank::new_from_parent(
&bank_forks.read().unwrap().working_bank(),
&Pubkey::default(),
1,
));
let rooted_signature = root_bank
.transfer(1, &mint_keypair, &mint_keypair.pubkey())
.unwrap();
let working_bank = Arc::new(Bank::new_from_parent(&root_bank, &Pubkey::default(), 2));
let non_rooted_signature = working_bank
.transfer(2, &mint_keypair, &mint_keypair.pubkey())
.unwrap();
let failed_signature = {
let blockhash = working_bank.last_blockhash();
let transaction =
system_transaction::transfer(&mint_keypair, &Pubkey::default(), 1, blockhash);
let signature = transaction.signatures[0];
working_bank.process_transaction(&transaction).unwrap_err();
signature
};
let mut transactions = HashMap::new();
info!("Expired transactions are dropped..");
transactions.insert(
Signature::default(),
TransactionInfo::new(Signature::default(), vec![], root_bank.slot() - 1),
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
&None,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
expired: 1,
..ProcessTransactionsResult::default()
}
);
info!("Rooted transactions are dropped...");
transactions.insert(
rooted_signature,
TransactionInfo::new(rooted_signature, vec![], working_bank.slot()),
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
&None,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
rooted: 1,
..ProcessTransactionsResult::default()
}
);
info!("Failed transactions are dropped...");
transactions.insert(
failed_signature,
TransactionInfo::new(failed_signature, vec![], working_bank.slot()),
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
&None,
);
assert!(transactions.is_empty());
assert_eq!(
result,
ProcessTransactionsResult {
failed: 1,
..ProcessTransactionsResult::default()
}
);
info!("Non-rooted transactions are kept...");
transactions.insert(
non_rooted_signature,
TransactionInfo::new(non_rooted_signature, vec![], working_bank.slot()),
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
&None,
);
assert_eq!(transactions.len(), 1);
assert_eq!(
result,
ProcessTransactionsResult {
retained: 1,
..ProcessTransactionsResult::default()
}
);
transactions.clear();
info!("Unknown transactions are retried...");
transactions.insert(
Signature::default(),
TransactionInfo::new(Signature::default(), vec![], working_bank.slot()),
);
let result = SendTransactionService::process_transactions(
&working_bank,
&root_bank,
&send_socket,
&tpu_address,
&mut transactions,
&None,
);
assert_eq!(transactions.len(), 1);
assert_eq!(
result,
ProcessTransactionsResult {
retried: 1,
..ProcessTransactionsResult::default()
}
);
}
}
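For orientation, a minimal usage sketch of the new service, based only on the constructor and `TransactionInfo` shown above; `tpu_address`, `bank_forks`, `signature`, `wire_transaction`, and `last_valid_slot` are assumed to be supplied by the caller:
use std::sync::{
    atomic::{AtomicBool, Ordering},
    mpsc::channel,
    Arc,
};
// Channel on which callers hand transactions to the service.
let (transaction_sender, transaction_receiver) = channel();
let exit = Arc::new(AtomicBool::new(false));
let service =
    SendTransactionService::new(tpu_address, &bank_forks, None, &exit, transaction_receiver);
// Queue one transaction for delivery; it is retried until rooted, failed, or expired.
transaction_sender
    .send(TransactionInfo::new(signature, wire_transaction, last_valid_slot))
    .unwrap();
// ...
// Signal shutdown and wait for the retry thread to exit.
exit.store(true, Ordering::Relaxed);
service.join().unwrap();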


@@ -21,7 +21,7 @@ use solana_sdk::{
};
use solana_streamer::streamer::{PacketReceiver, PacketSender};
use std::{
collections::HashMap,
collections::{HashMap, HashSet},
net::SocketAddr,
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
@@ -382,12 +382,13 @@ impl ServeRepair {
repair_request: RepairType,
cache: &mut RepairCache,
repair_stats: &mut RepairStats,
repair_validators: &Option<HashSet<Pubkey>>,
) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication and has the desired slot, as indicated
// by a valid tvu port location
let slot = repair_request.slot();
if cache.get(&slot).is_none() {
let repair_peers: Vec<_> = self.cluster_info.repair_peers(slot);
let repair_peers = self.repair_peers(&repair_validators, slot);
if repair_peers.is_empty() {
return Err(ClusterInfoError::NoPeers.into());
}
@@ -411,8 +412,9 @@ impl ServeRepair {
&self,
slot: Slot,
cluster_slots: &ClusterSlots,
repair_validators: &Option<HashSet<Pubkey>>,
) -> Result<(Pubkey, SocketAddr)> {
let repair_peers: Vec<_> = self.cluster_info.repair_peers(slot);
let repair_peers: Vec<_> = self.repair_peers(repair_validators, slot);
if repair_peers.is_empty() {
return Err(ClusterInfoError::NoPeers.into());
}
@@ -448,6 +450,27 @@ impl ServeRepair {
}
}
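// When an explicit repair validator set is configured, restrict repair peers to those validators
// (excluding ourselves); otherwise fall back to all repair peers known via gossip for the slot.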
fn repair_peers(
&self,
repair_validators: &Option<HashSet<Pubkey>>,
slot: Slot,
) -> Vec<ContactInfo> {
if let Some(repair_validators) = repair_validators {
repair_validators
.iter()
.filter_map(|key| {
if *key != self.my_info.id {
self.cluster_info.lookup_contact_info(key, |ci| ci.clone())
} else {
None
}
})
.collect()
} else {
self.cluster_info.repair_peers(slot)
}
}
fn run_window_request(
recycler: &PacketsRecycler,
from: &ContactInfo,
@@ -662,7 +685,7 @@ mod tests {
repair: socketaddr!("127.0.0.1:1237"),
tpu: socketaddr!("127.0.0.1:1238"),
tpu_forwards: socketaddr!("127.0.0.1:1239"),
unused: socketaddr!("127.0.0.1:1240"),
rpc_banks: socketaddr!("127.0.0.1:1240"),
rpc: socketaddr!("127.0.0.1:1241"),
rpc_pubsub: socketaddr!("127.0.0.1:1242"),
serve_repair: socketaddr!("127.0.0.1:1243"),
@@ -733,6 +756,7 @@ mod tests {
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&None,
);
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
@@ -745,7 +769,7 @@ mod tests {
repair: socketaddr!([127, 0, 0, 1], 1237),
tpu: socketaddr!([127, 0, 0, 1], 1238),
tpu_forwards: socketaddr!([127, 0, 0, 1], 1239),
unused: socketaddr!([127, 0, 0, 1], 1240),
rpc_banks: socketaddr!([127, 0, 0, 1], 1240),
rpc: socketaddr!([127, 0, 0, 1], 1241),
rpc_pubsub: socketaddr!([127, 0, 0, 1], 1242),
serve_repair: serve_repair_addr,
@@ -759,6 +783,7 @@ mod tests {
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&None,
)
.unwrap();
assert_eq!(nxt.serve_repair, serve_repair_addr);
@@ -773,7 +798,7 @@ mod tests {
repair: socketaddr!([127, 0, 0, 1], 1237),
tpu: socketaddr!([127, 0, 0, 1], 1238),
tpu_forwards: socketaddr!([127, 0, 0, 1], 1239),
unused: socketaddr!([127, 0, 0, 1], 1240),
rpc_banks: socketaddr!([127, 0, 0, 1], 1240),
rpc: socketaddr!([127, 0, 0, 1], 1241),
rpc_pubsub: socketaddr!([127, 0, 0, 1], 1242),
serve_repair: serve_repair_addr2,
@@ -791,6 +816,7 @@ mod tests {
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&None,
)
.unwrap();
if rv.0 == serve_repair_addr {
@@ -937,4 +963,71 @@ mod tests {
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn test_repair_with_repair_validators() {
let cluster_slots = ClusterSlots::default();
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me.clone()));
// Insert two peers on the network
let contact_info2 = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let contact_info3 = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
cluster_info.insert_info(contact_info2.clone());
cluster_info.insert_info(contact_info3.clone());
let serve_repair = ServeRepair::new(cluster_info);
// If:
// 1) repair validator set doesn't exist in gossip
// 2) repair validator set only includes our own id
// then no repairs should be generated
for pubkey in &[Pubkey::new_rand(), me.id] {
let trusted_validators = Some(vec![*pubkey].into_iter().collect());
assert!(serve_repair.repair_peers(&trusted_validators, 1).is_empty());
assert!(serve_repair
.repair_request(
&cluster_slots,
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&trusted_validators,
)
.is_err());
}
// If the trusted validator exists in gossip, the repair request should succeed
let trusted_validators = Some(vec![contact_info2.id].into_iter().collect());
let repair_peers = serve_repair.repair_peers(&trusted_validators, 1);
assert_eq!(repair_peers.len(), 1);
assert_eq!(repair_peers[0].id, contact_info2.id);
assert!(serve_repair
.repair_request(
&cluster_slots,
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&trusted_validators,
)
.is_ok());
// Using no trusted validators should default to all
// validators available in gossip, excluding myself
let repair_peers: HashSet<Pubkey> = serve_repair
.repair_peers(&None, 1)
.into_iter()
.map(|c| c.id)
.collect();
assert_eq!(repair_peers.len(), 2);
assert!(repair_peers.contains(&contact_info2.id));
assert!(repair_peers.contains(&contact_info3.id));
assert!(serve_repair
.repair_request(
&cluster_slots,
RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
&None,
)
.is_ok());
}
}


@@ -13,11 +13,11 @@ use crate::{
sigverify_stage::SigVerifyStage,
};
use crossbeam_channel::unbounded;
use solana_ledger::{
blockstore::Blockstore,
blockstore_processor::{ReplayVotesReceiver, TransactionStatusSender},
use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusSender};
use solana_runtime::{
bank_forks::BankForks,
vote_sender_types::{ReplayVoteReceiver, ReplayVoteSender},
};
use solana_runtime::bank_forks::BankForks;
use std::{
net::UdpSocket,
sync::{
@@ -55,7 +55,8 @@ impl Tpu {
vote_tracker: Arc<VoteTracker>,
bank_forks: Arc<RwLock<BankForks>>,
verified_vote_sender: VerifiedVoteSender,
replay_votes_receiver: ReplayVotesReceiver,
replay_vote_receiver: ReplayVoteReceiver,
replay_vote_sender: ReplayVoteSender,
) -> Self {
let (packet_sender, packet_receiver) = channel();
let fetch_stage = FetchStage::new_with_sender(
@@ -82,7 +83,7 @@ impl Tpu {
bank_forks,
subscriptions.clone(),
verified_vote_sender,
replay_votes_receiver,
replay_vote_receiver,
blockstore.clone(),
);
@@ -92,6 +93,7 @@ impl Tpu {
verified_receiver,
verified_vote_packets_receiver,
transaction_status_sender,
replay_vote_sender,
);
let broadcast_stage = broadcast_type.new_broadcast_stage(


@@ -57,7 +57,7 @@ impl TransactionStatusService {
} = write_transaction_status_receiver.recv_timeout(Duration::from_secs(1))?;
let slot = bank.slot();
for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in
for ((((_, transaction), (status, hash_age_kind)), pre_balances), post_balances) in
OrderedIterator::new(&transactions, iteration_order.as_deref())
.zip(statuses)
.zip(balances.pre_balances)


@@ -8,6 +8,7 @@ use crate::{
cluster_info::ClusterInfo,
cluster_info_vote_listener::{VerifiedVoteReceiver, VoteTracker},
cluster_slots::ClusterSlots,
completed_data_sets_service::CompletedDataSetsSender,
ledger_cleanup_service::LedgerCleanupService,
poh_recorder::PohRecorder,
replay_stage::{ReplayStage, ReplayStageConfig},
@@ -21,12 +22,12 @@ use crate::{
use crossbeam_channel::unbounded;
use solana_ledger::{
blockstore::{Blockstore, CompletedSlotsReceiver},
blockstore_processor::{ReplayVotesSender, TransactionStatusSender},
blockstore_processor::TransactionStatusSender,
leader_schedule_cache::LeaderScheduleCache,
};
use solana_runtime::{
bank_forks::BankForks, commitment::BlockCommitmentCache,
snapshot_package::AccountsPackageSender,
snapshot_package::AccountsPackageSender, vote_sender_types::ReplayVoteSender,
};
use solana_sdk::{
pubkey::Pubkey,
@@ -66,6 +67,7 @@ pub struct TvuConfig {
pub shred_version: u16,
pub halt_on_trusted_validators_accounts_hash_mismatch: bool,
pub trusted_validators: Option<HashSet<Pubkey>>,
pub repair_validators: Option<HashSet<Pubkey>>,
pub accounts_hash_fault_injection_slots: u64,
}
@@ -98,7 +100,8 @@ impl Tvu {
vote_tracker: Arc<VoteTracker>,
retransmit_slots_sender: RetransmitSlotsSender,
verified_vote_receiver: VerifiedVoteReceiver,
replay_votes_sender: ReplayVotesSender,
replay_vote_sender: ReplayVoteSender,
completed_data_sets_sender: CompletedDataSetsSender,
tvu_config: TvuConfig,
) -> Self {
let keypair: Arc<Keypair> = cluster_info.keypair.clone();
@@ -150,6 +153,8 @@ impl Tvu {
cluster_slots.clone(),
duplicate_slots_reset_sender,
verified_vote_receiver,
tvu_config.repair_validators,
completed_data_sets_sender,
);
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();
@@ -199,7 +204,7 @@ impl Tvu {
cluster_slots,
retransmit_slots_sender,
duplicate_slots_reset_receiver,
replay_votes_sender,
replay_vote_sender,
);
let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| {
@@ -247,6 +252,7 @@ pub mod tests {
};
use serial_test_derive::serial;
use solana_ledger::{
blockstore::BlockstoreSignals,
create_new_tmp_ledger,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
};
@@ -273,9 +279,13 @@ pub mod tests {
let cref1 = Arc::new(cluster_info1);
let (blockstore_path, _) = create_new_tmp_ledger!(&genesis_config);
let (blockstore, l_receiver, completed_slots_receiver) =
Blockstore::open_with_signal(&blockstore_path, None)
.expect("Expected to successfully open ledger");
let BlockstoreSignals {
blockstore,
ledger_signal_receiver,
completed_slots_receiver,
..
} = Blockstore::open_with_signal(&blockstore_path, None)
.expect("Expected to successfully open ledger");
let blockstore = Arc::new(blockstore);
let bank = bank_forks.working_bank();
let (exit, poh_recorder, poh_service, _entry_receiver) =
@@ -285,7 +295,8 @@ pub mod tests {
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let (retransmit_slots_sender, _retransmit_slots_receiver) = unbounded();
let (_verified_vote_sender, verified_vote_receiver) = unbounded();
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let (completed_data_sets_sender, _completed_data_sets_receiver) = unbounded();
let bank_forks = Arc::new(RwLock::new(bank_forks));
let tvu = Tvu::new(
&vote_keypair.pubkey(),
@@ -301,7 +312,7 @@ pub mod tests {
}
},
blockstore,
l_receiver,
ledger_signal_receiver,
&Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
@@ -319,7 +330,8 @@ pub mod tests {
Arc::new(VoteTracker::new(&bank)),
retransmit_slots_sender,
verified_vote_receiver,
replay_votes_sender,
replay_vote_sender,
completed_data_sets_sender,
TvuConfig::default(),
);
exit.store(true, Ordering::Relaxed);


@@ -4,6 +4,7 @@ use crate::{
broadcast_stage::BroadcastStageType,
cluster_info::{ClusterInfo, Node},
cluster_info_vote_listener::VoteTracker,
completed_data_sets_service::CompletedDataSetsService,
contact_info::ContactInfo,
gossip_service::{discover_cluster, GossipService},
poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
@@ -21,13 +22,14 @@ use crate::{
transaction_status_service::TransactionStatusService,
tvu::{Sockets, Tvu, TvuConfig},
};
use crossbeam_channel::unbounded;
use crossbeam_channel::{bounded, unbounded};
use rand::{thread_rng, Rng};
use solana_banks_server::rpc_banks_service::RpcBanksService;
use solana_ledger::{
bank_forks_utils,
blockstore::{Blockstore, CompletedSlotsReceiver, PurgeType},
blockstore::{Blockstore, BlockstoreSignals, CompletedSlotsReceiver, PurgeType},
blockstore_db::BlockstoreRecoveryMode,
blockstore_processor::{self, ReplayVotesSender, TransactionStatusSender},
blockstore_processor::{self, TransactionStatusSender},
create_new_tmp_ledger,
leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache,
@@ -53,7 +55,7 @@ use solana_sdk::{
use solana_vote_program::vote_state::VoteState;
use std::{
collections::HashSet,
net::{IpAddr, Ipv4Addr, SocketAddr},
net::SocketAddr,
path::{Path, PathBuf},
process,
sync::atomic::{AtomicBool, Ordering},
@@ -63,6 +65,8 @@ use std::{
time::Duration,
};
pub const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000;
#[derive(Clone, Debug)]
pub struct ValidatorConfig {
pub dev_halt_at_slot: Option<Slot>,
@@ -72,7 +76,7 @@ pub struct ValidatorConfig {
pub voting_disabled: bool,
pub account_paths: Vec<PathBuf>,
pub rpc_config: JsonRpcConfig,
pub rpc_ports: Option<(u16, u16)>, // (API, PubSub)
pub rpc_addrs: Option<(SocketAddr, SocketAddr, SocketAddr)>, // (JsonRpc, JsonRpcPubSub, Banks)
pub snapshot_config: Option<SnapshotConfig>,
pub max_ledger_shreds: Option<u64>,
pub broadcast_stage_type: BroadcastStageType,
@@ -81,6 +85,8 @@ pub struct ValidatorConfig {
pub wait_for_supermajority: Option<Slot>,
pub new_hard_forks: Option<Vec<Slot>>,
pub trusted_validators: Option<HashSet<Pubkey>>, // None = trust all
pub repair_validators: Option<HashSet<Pubkey>>, // None = repair from all
pub gossip_validators: Option<HashSet<Pubkey>>, // None = gossip with all
pub halt_on_trusted_validators_accounts_hash_mismatch: bool,
pub accounts_hash_fault_injection_slots: u64, // 0 = no fault injection
pub frozen_accounts: Vec<Pubkey>,
@@ -101,7 +107,7 @@ impl Default for ValidatorConfig {
max_ledger_shreds: None,
account_paths: Vec::new(),
rpc_config: JsonRpcConfig::default(),
rpc_ports: None,
rpc_addrs: None,
snapshot_config: None,
broadcast_stage_type: BroadcastStageType::Standard,
enable_partition: None,
@@ -109,6 +115,8 @@ impl Default for ValidatorConfig {
wait_for_supermajority: None,
new_hard_forks: None,
trusted_validators: None,
repair_validators: None,
gossip_validators: None,
halt_on_trusted_validators_accounts_hash_mismatch: false,
accounts_hash_fault_injection_slots: 0,
frozen_accounts: vec![],
@@ -148,11 +156,12 @@ struct TransactionHistoryServices {
pub struct Validator {
pub id: Pubkey,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
rpc_service: Option<(JsonRpcService, PubSubService)>,
rpc_service: Option<(JsonRpcService, PubSubService, RpcBanksService)>,
transaction_status_service: Option<TransactionStatusService>,
rewards_recorder_service: Option<RewardsRecorderService>,
gossip_service: GossipService,
serve_repair_service: ServeRepairService,
completed_data_sets_service: CompletedDataSetsService,
snapshot_packager_service: Option<SnapshotPackagerService>,
poh_recorder: Arc<Mutex<PohRecorder>>,
poh_service: PohService,
@@ -223,7 +232,7 @@ impl Validator {
validator_exit.register_exit(Box::new(move || exit_.store(true, Ordering::Relaxed)));
let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));
let (replay_votes_sender, replay_votes_receiver) = unbounded();
let (replay_vote_sender, replay_vote_receiver) = unbounded();
let (
genesis_config,
bank_forks,
@@ -238,7 +247,7 @@ impl Validator {
rewards_recorder_sender,
rewards_recorder_service,
},
) = new_banks_from_ledger(config, ledger_path, poh_verify, &exit, &replay_votes_sender);
) = new_banks_from_ledger(config, ledger_path, poh_verify, &exit);
let leader_schedule_cache = Arc::new(leader_schedule_cache);
let bank = bank_forks.working_bank();
@@ -281,37 +290,14 @@ impl Validator {
block_commitment_cache.clone(),
));
let rpc_override_health_check = Arc::new(AtomicBool::new(false));
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
assert_eq!(rpc_port, node.info.rpc.port());
assert_eq!(rpc_pubsub_port, node.info.rpc_pubsub.port());
} else {
assert!(!ContactInfo::is_valid_address(&node.info.rpc_pubsub));
}
(
JsonRpcService::new(
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_port),
config.rpc_config.clone(),
config.snapshot_config.clone(),
bank_forks.clone(),
block_commitment_cache.clone(),
blockstore.clone(),
cluster_info.clone(),
genesis_config.hash(),
ledger_path,
validator_exit.clone(),
config.trusted_validators.clone(),
rpc_override_health_check.clone(),
),
PubSubService::new(
&subscriptions,
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_pubsub_port),
&exit,
),
)
});
let (completed_data_sets_sender, completed_data_sets_receiver) =
bounded(MAX_COMPLETED_DATA_SETS_IN_CHANNEL);
let completed_data_sets_service = CompletedDataSetsService::new(
completed_data_sets_receiver,
blockstore.clone(),
subscriptions.clone(),
&exit,
);
info!(
"Starting PoH: epoch={} slot={} tick_height={} blockhash={} leader={:?}",
@@ -335,7 +321,7 @@ impl Validator {
std::thread::park();
}
let poh_config = Arc::new(genesis_config.poh_config);
let poh_config = Arc::new(genesis_config.poh_config.clone());
let (mut poh_recorder, entry_receiver) = PohRecorder::new_with_clear_signal(
bank.tick_height(),
bank.last_blockhash(),
@@ -359,12 +345,53 @@ impl Validator {
}
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let rpc_override_health_check = Arc::new(AtomicBool::new(false));
let rpc_service = config
.rpc_addrs
.map(|(rpc_addr, rpc_pubsub_addr, rpc_banks_addr)| {
if ContactInfo::is_valid_address(&node.info.rpc) {
assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
assert_eq!(rpc_addr.port(), node.info.rpc.port());
assert_eq!(rpc_pubsub_addr.port(), node.info.rpc_pubsub.port());
assert_eq!(rpc_banks_addr.port(), node.info.rpc_banks.port());
} else {
assert!(!ContactInfo::is_valid_address(&node.info.rpc_pubsub));
}
let tpu_address = cluster_info.my_contact_info().tpu;
(
JsonRpcService::new(
rpc_addr,
config.rpc_config.clone(),
config.snapshot_config.clone(),
bank_forks.clone(),
block_commitment_cache.clone(),
blockstore.clone(),
cluster_info.clone(),
Some(poh_recorder.clone()),
genesis_config.hash(),
ledger_path,
validator_exit.clone(),
config.trusted_validators.clone(),
rpc_override_health_check.clone(),
),
PubSubService::new(&subscriptions, rpc_pubsub_addr, &exit),
RpcBanksService::new(
rpc_banks_addr,
tpu_address,
&bank_forks,
&block_commitment_cache,
&exit,
),
)
});
let ip_echo_server = solana_net_utils::ip_echo_server(node.sockets.ip_echo.unwrap());
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks.clone()),
node.sockets.gossip,
config.gossip_validators.clone(),
&exit,
);
@@ -453,13 +480,15 @@ impl Validator {
vote_tracker.clone(),
retransmit_slots_sender,
verified_vote_receiver,
replay_votes_sender,
replay_vote_sender.clone(),
completed_data_sets_sender,
TvuConfig {
max_ledger_shreds: config.max_ledger_shreds,
halt_on_trusted_validators_accounts_hash_mismatch: config
.halt_on_trusted_validators_accounts_hash_mismatch,
shred_version: node.info.shred_version,
trusted_validators: config.trusted_validators.clone(),
repair_validators: config.repair_validators.clone(),
accounts_hash_fault_injection_slots: config.accounts_hash_fault_injection_slots,
},
);
@@ -481,7 +510,8 @@ impl Validator {
vote_tracker,
bank_forks,
verified_vote_sender,
replay_votes_receiver,
replay_vote_receiver,
replay_vote_sender,
);
datapoint_info!("validator-new", ("id", id.to_string(), String));
@@ -493,6 +523,7 @@ impl Validator {
transaction_status_service,
rewards_recorder_service,
snapshot_packager_service,
completed_data_sets_service,
tpu,
tvu,
poh_service,
@@ -542,9 +573,10 @@ impl Validator {
pub fn join(self) -> Result<()> {
self.poh_service.join()?;
drop(self.poh_recorder);
if let Some((rpc_service, rpc_pubsub_service)) = self.rpc_service {
if let Some((rpc_service, rpc_pubsub_service, rpc_banks_service)) = self.rpc_service {
rpc_service.join()?;
rpc_pubsub_service.join()?;
rpc_banks_service.join()?;
}
if let Some(transaction_status_service) = self.transaction_status_service {
transaction_status_service.join()?;
@@ -562,6 +594,7 @@ impl Validator {
self.serve_repair_service.join()?;
self.tpu.join()?;
self.tvu.join()?;
self.completed_data_sets_service.join()?;
self.ip_echo_server.shutdown_now();
Ok(())
@@ -574,7 +607,6 @@ fn new_banks_from_ledger(
ledger_path: &Path,
poh_verify: bool,
exit: &Arc<AtomicBool>,
replay_votes_sender: &ReplayVotesSender,
) -> (
GenesisConfig,
BankForks,
@@ -606,9 +638,13 @@ fn new_banks_from_ledger(
}
}
let (mut blockstore, ledger_signal_receiver, completed_slots_receiver) =
Blockstore::open_with_signal(ledger_path, config.wal_recovery_mode.clone())
.expect("Failed to open ledger database");
let BlockstoreSignals {
mut blockstore,
ledger_signal_receiver,
completed_slots_receiver,
..
} = Blockstore::open_with_signal(ledger_path, config.wal_recovery_mode.clone())
.expect("Failed to open ledger database");
blockstore.set_no_compaction(config.no_rocksdb_compaction);
let process_options = blockstore_processor::ProcessOptions {
@@ -621,7 +657,7 @@ fn new_banks_from_ledger(
let blockstore = Arc::new(blockstore);
let transaction_history_services =
if config.rpc_ports.is_some() && config.rpc_config.enable_rpc_transaction_history {
if config.rpc_addrs.is_some() && config.rpc_config.enable_rpc_transaction_history {
initialize_rpc_transaction_history_services(blockstore.clone(), exit)
} else {
TransactionHistoryServices::default()
@@ -636,7 +672,6 @@ fn new_banks_from_ledger(
transaction_history_services
.transaction_status_sender
.clone(),
Some(&replay_votes_sender),
)
.unwrap_or_else(|err| {
error!("Failed to load ledger: {:?}", err);
@@ -851,18 +886,14 @@ impl TestValidator {
} = create_genesis_config_with_leader_ex(
mint_lamports,
&contact_info.id,
Arc::new(Keypair::new()),
Arc::new(Keypair::new()),
&Keypair::new(),
&Pubkey::new_rand(),
42,
bootstrap_validator_lamports,
);
genesis_config
.native_instruction_processors
.push(solana_budget_program!());
genesis_config
.native_instruction_processors
.push(solana_bpf_loader_program!());
genesis_config.rent.lamports_per_byte_year = 1;
genesis_config.rent.exemption_threshold = 1.0;
genesis_config.fee_rate_governor = FeeRateGovernor::new(fees, 0);
@@ -870,15 +901,16 @@ impl TestValidator {
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config);
let config = ValidatorConfig {
rpc_ports: Some((node.info.rpc.port(), node.info.rpc_pubsub.port())),
rpc_addrs: Some((node.info.rpc, node.info.rpc_pubsub, node.info.rpc_banks)),
..ValidatorConfig::default()
};
let vote_pubkey = voting_keypair.pubkey();
let node = Validator::new(
node,
&node_keypair,
&ledger_path,
&voting_keypair.pubkey(),
vec![voting_keypair.clone()],
vec![Arc::new(voting_keypair)],
None,
true,
&config,
@@ -890,7 +922,7 @@ impl TestValidator {
alice: mint_keypair,
ledger_path,
genesis_hash: blockhash,
vote_pubkey: voting_keypair.pubkey(),
vote_pubkey,
}
}
}
@@ -1035,9 +1067,10 @@ mod tests {
let voting_keypair = Arc::new(Keypair::new());
let config = ValidatorConfig {
rpc_ports: Some((
validator_node.info.rpc.port(),
validator_node.info.rpc_pubsub.port(),
rpc_addrs: Some((
validator_node.info.rpc,
validator_node.info.rpc_pubsub,
validator_node.info.rpc_banks,
)),
..ValidatorConfig::default()
};
@@ -1107,11 +1140,12 @@ mod tests {
.genesis_config;
let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
ledger_paths.push(validator_ledger_path.clone());
let vote_account_keypair = Arc::new(Keypair::new());
let vote_account_keypair = Keypair::new();
let config = ValidatorConfig {
rpc_ports: Some((
validator_node.info.rpc.port(),
validator_node.info.rpc_pubsub.port(),
rpc_addrs: Some((
validator_node.info.rpc,
validator_node.info.rpc_pubsub,
validator_node.info.rpc_banks,
)),
..ValidatorConfig::default()
};
@@ -1120,7 +1154,7 @@ mod tests {
&Arc::new(validator_keypair),
&validator_ledger_path,
&vote_account_keypair.pubkey(),
vec![vote_account_keypair.clone()],
vec![Arc::new(vote_account_keypair)],
Some(&leader_node.info),
true,
&config,


@@ -5,6 +5,7 @@ use crate::{
cluster_info::ClusterInfo,
cluster_info_vote_listener::VerifiedVoteReceiver,
cluster_slots::ClusterSlots,
completed_data_sets_service::CompletedDataSetsSender,
repair_response,
repair_service::{RepairInfo, RepairService},
result::{Error, Result},
@@ -123,6 +124,7 @@ fn run_insert<F>(
leader_schedule_cache: &Arc<LeaderScheduleCache>,
handle_duplicate: F,
metrics: &mut BlockstoreInsertionMetrics,
completed_data_sets_sender: &CompletedDataSetsSender,
) -> Result<()>
where
F: Fn(Shred),
@@ -138,13 +140,13 @@ where
let mut i = 0;
shreds.retain(|shred| (verify_repair(&shred, &repair_infos[i]), i += 1).0);
blockstore.insert_shreds_handle_duplicate(
completed_data_sets_sender.try_send(blockstore.insert_shreds_handle_duplicate(
shreds,
Some(leader_schedule_cache),
false,
&handle_duplicate,
metrics,
)?;
)?)?;
Ok(())
}
@@ -302,6 +304,7 @@ impl WindowService {
shred_filter: F,
cluster_slots: Arc<ClusterSlots>,
verified_vote_receiver: VerifiedVoteReceiver,
completed_data_sets_sender: CompletedDataSetsSender,
) -> WindowService
where
F: 'static
@@ -333,6 +336,7 @@ impl WindowService {
leader_schedule_cache,
insert_receiver,
duplicate_sender,
completed_data_sets_sender,
);
let t_window = Self::start_recv_window_thread(
@@ -387,6 +391,7 @@ impl WindowService {
leader_schedule_cache: &Arc<LeaderScheduleCache>,
insert_receiver: CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
duplicate_sender: CrossbeamSender<Shred>,
completed_data_sets_sender: CompletedDataSetsSender,
) -> JoinHandle<()> {
let exit = exit.clone();
let blockstore = blockstore.clone();
@@ -415,6 +420,7 @@ impl WindowService {
&leader_schedule_cache,
&handle_duplicate,
&mut metrics,
&completed_data_sets_sender,
) {
if Self::should_exit_on_error(e, &mut handle_timeout, &handle_error) {
break;


@@ -1,31 +1,32 @@
// Long-running bank_forks tests
macro_rules! DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS {
($x:ident) => {
($x:ident, $y:ident, $z:ident) => {
#[allow(non_snake_case)]
mod $x {
mod $z {
use super::*;
const SNAPSHOT_VERSION: SnapshotVersion = SnapshotVersion::$x;
const CLUSTER_TYPE: ClusterType = ClusterType::$y;
#[test]
fn test_bank_forks_status_cache_snapshot_n() {
run_test_bank_forks_status_cache_snapshot_n(SNAPSHOT_VERSION)
run_test_bank_forks_status_cache_snapshot_n(SNAPSHOT_VERSION, CLUSTER_TYPE)
}
#[test]
fn test_bank_forks_snapshot_n() {
run_test_bank_forks_snapshot_n(SNAPSHOT_VERSION)
run_test_bank_forks_snapshot_n(SNAPSHOT_VERSION, CLUSTER_TYPE)
}
#[test]
fn test_concurrent_snapshot_packaging() {
run_test_concurrent_snapshot_packaging(SNAPSHOT_VERSION)
run_test_concurrent_snapshot_packaging(SNAPSHOT_VERSION, CLUSTER_TYPE)
}
#[test]
fn test_slots_to_snapshot() {
run_test_slots_to_snapshot(SNAPSHOT_VERSION)
run_test_slots_to_snapshot(SNAPSHOT_VERSION, CLUSTER_TYPE)
}
}
};
@@ -49,7 +50,7 @@ mod tests {
};
use solana_sdk::{
clock::Slot,
genesis_config::GenesisConfig,
genesis_config::{ClusterType, GenesisConfig},
hash::hashv,
pubkey::Pubkey,
signature::{Keypair, Signer},
@@ -58,7 +59,10 @@ mod tests {
use std::{fs, path::PathBuf, sync::atomic::AtomicBool, sync::mpsc::channel, sync::Arc};
use tempfile::TempDir;
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0);
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Development, V1_2_0_Development);
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Devnet, V1_2_0_Devnet);
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Testnet, V1_2_0_Testnet);
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, MainnetBeta, V1_2_0_MainnetBeta);
struct SnapshotTestConfig {
accounts_dir: TempDir,
@@ -72,12 +76,14 @@ mod tests {
impl SnapshotTestConfig {
fn new(
snapshot_version: SnapshotVersion,
cluster_type: ClusterType,
snapshot_interval_slots: u64,
) -> SnapshotTestConfig {
let accounts_dir = TempDir::new().unwrap();
let snapshot_dir = TempDir::new().unwrap();
let snapshot_output_path = TempDir::new().unwrap();
let genesis_config_info = create_genesis_config(10_000);
let mut genesis_config_info = create_genesis_config(10_000);
genesis_config_info.genesis_config.cluster_type = cluster_type;
let bank0 = Bank::new_with_paths(
&genesis_config_info.genesis_config,
vec![accounts_dir.path().to_path_buf()],
@@ -158,6 +164,7 @@ mod tests {
// `last_slot` bank
fn run_bank_forks_snapshot_n<F>(
snapshot_version: SnapshotVersion,
cluster_type: ClusterType,
last_slot: Slot,
f: F,
set_root_interval: u64,
@@ -166,7 +173,7 @@ mod tests {
{
solana_logger::setup();
// Set up snapshotting config
let mut snapshot_test_config = SnapshotTestConfig::new(snapshot_version, 1);
let mut snapshot_test_config = SnapshotTestConfig::new(snapshot_version, cluster_type, 1);
let bank_forks = &mut snapshot_test_config.bank_forks;
let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair;
@@ -211,11 +218,15 @@ mod tests {
restore_from_snapshot(bank_forks, last_slot, genesis_config, account_paths);
}
fn run_test_bank_forks_snapshot_n(snapshot_version: SnapshotVersion) {
fn run_test_bank_forks_snapshot_n(
snapshot_version: SnapshotVersion,
cluster_type: ClusterType,
) {
// Create banks up to slot 4 and create 1 new account in each bank. Test that bank 4 snapshots
// and restores correctly
run_bank_forks_snapshot_n(
snapshot_version,
cluster_type,
4,
|bank, mint_keypair| {
let key1 = Keypair::new().pubkey();
@@ -246,11 +257,14 @@ mod tests {
}
}
fn run_test_concurrent_snapshot_packaging(snapshot_version: SnapshotVersion) {
fn run_test_concurrent_snapshot_packaging(
snapshot_version: SnapshotVersion,
cluster_type: ClusterType,
) {
solana_logger::setup();
// Set up snapshotting config
let mut snapshot_test_config = SnapshotTestConfig::new(snapshot_version, 1);
let mut snapshot_test_config = SnapshotTestConfig::new(snapshot_version, cluster_type, 1);
let bank_forks = &mut snapshot_test_config.bank_forks;
let accounts_dir = &snapshot_test_config.accounts_dir;
@@ -395,7 +409,7 @@ mod tests {
);
}
fn run_test_slots_to_snapshot(snapshot_version: SnapshotVersion) {
fn run_test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: ClusterType) {
solana_logger::setup();
let num_set_roots = MAX_CACHE_ENTRIES * 2;
@@ -404,6 +418,7 @@ mod tests {
// Make sure this test never clears bank.slots_since_snapshot
let mut snapshot_test_config = SnapshotTestConfig::new(
snapshot_version,
cluster_type,
(*add_root_interval * num_set_roots * 2) as u64,
);
let mut current_bank = snapshot_test_config.bank_forks[0].clone();
@@ -437,7 +452,10 @@ mod tests {
}
}
fn run_test_bank_forks_status_cache_snapshot_n(snapshot_version: SnapshotVersion) {
fn run_test_bank_forks_status_cache_snapshot_n(
snapshot_version: SnapshotVersion,
cluster_type: ClusterType,
) {
// Create banks up to slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time.
// This is done to ensure the AccountStorageEntries keep getting cleaned up as the root moves
// ahead. Also tests the status_cache purge and status cache snapshotting.
@@ -447,6 +465,7 @@ mod tests {
for set_root_interval in &[1, 4] {
run_bank_forks_snapshot_n(
snapshot_version,
cluster_type,
(MAX_CACHE_ENTRIES * 2 + 1) as u64,
|bank, mint_keypair| {
let tx = system_transaction::transfer(


@@ -1,7 +1,4 @@
use solana_client::{
pubsub_client::{PubsubClient, SlotInfoMessage},
rpc_client::RpcClient,
};
use solana_client::{pubsub_client::PubsubClient, rpc_client::RpcClient, rpc_response::SlotInfo};
use solana_core::{
rpc_pubsub_service::PubSubService, rpc_subscriptions::RpcSubscriptions,
validator::TestValidator,
@@ -105,7 +102,7 @@ fn test_slot_subscription() {
let (mut client, receiver) =
PubsubClient::slot_subscribe(&format!("ws://0.0.0.0:{}/", pubsub_addr.port())).unwrap();
let mut errors: Vec<(SlotInfoMessage, SlotInfoMessage)> = Vec::new();
let mut errors: Vec<(SlotInfo, SlotInfo)> = Vec::new();
for i in 0..3 {
subscriptions.notify_slot(i + 1, i, i);
@@ -114,7 +111,7 @@ fn test_slot_subscription() {
match maybe_actual {
Ok(actual) => {
let expected = SlotInfoMessage {
let expected = SlotInfo {
slot: i + 1,
parent: i,
root: i,


@@ -222,7 +222,7 @@ fn network_simulator(network: &mut Network, max_convergance: f64) {
network_values.par_iter().for_each(|node| {
node.lock()
.unwrap()
.refresh_push_active_set(&HashMap::new());
.refresh_push_active_set(&HashMap::new(), None);
});
let mut total_bytes = bytes_tx;
for second in 1..num {
@@ -361,7 +361,7 @@ fn network_run_push(network: &mut Network, start: usize, end: usize) -> (usize,
network_values.par_iter().for_each(|node| {
node.lock()
.unwrap()
.refresh_push_active_set(&HashMap::new());
.refresh_push_active_set(&HashMap::new(), None);
});
}
total = network_values
@@ -408,7 +408,7 @@ fn network_run_pull(
.filter_map(|from| {
from.lock()
.unwrap()
.new_pull_request(now, &HashMap::new(), cluster_info::MAX_BLOOM_SIZE)
.new_pull_request(now, None, &HashMap::new(), cluster_info::MAX_BLOOM_SIZE)
.ok()
})
.collect()
@@ -436,7 +436,7 @@ fn network_run_pull(
let rsp = node
.lock()
.unwrap()
.generate_pull_responses(&filters)
.generate_pull_responses(&filters, now)
.into_iter()
.flatten()
.collect();
@@ -581,7 +581,7 @@ fn test_prune_errors() {
0,
)
.unwrap();
crds_gossip.refresh_push_active_set(&HashMap::new());
crds_gossip.refresh_push_active_set(&HashMap::new(), None);
let now = timestamp();
//incorrect dest
let mut res = crds_gossip.process_prune_msg(


@@ -19,7 +19,8 @@ fn test_node(exit: &Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSoc
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(test_node.info.clone(), keypair));
let gossip_service = GossipService::new(&cluster_info, None, test_node.sockets.gossip, exit);
let gossip_service =
GossipService::new(&cluster_info, None, test_node.sockets.gossip, None, exit);
let _ = cluster_info.my_contact_info();
(
cluster_info,
@@ -39,6 +40,7 @@ fn test_node_with_bank(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
exit,
);
let _ = cluster_info.my_contact_info();
@@ -233,7 +235,9 @@ pub fn cluster_info_scale() {
let nodes: Vec<_> = vote_keypairs
.into_iter()
.map(|keypairs| test_node_with_bank(keypairs.node_keypair, &exit, bank_forks.clone()))
.map(|keypairs| {
test_node_with_bank(Arc::new(keypairs.node_keypair), &exit, bank_forks.clone())
})
.collect();
let ci0 = nodes[0].0.my_contact_info();
for node in &nodes[1..] {


@@ -26,7 +26,7 @@ use std::{
thread::sleep,
time::{Duration, Instant},
};
use tokio::runtime::Runtime;
use tokio_01::runtime::Runtime;
macro_rules! json_req {
($method: expr, $params: expr) => {{
@@ -100,6 +100,20 @@ fn test_rpc_send_tx() {
assert_eq!(confirmed_tx, true);
use solana_account_decoder::UiAccountEncoding;
use solana_client::rpc_config::RpcAccountInfoConfig;
let config = RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
commitment: None,
data_slice: None,
};
let req = json_req!(
"getAccountInfo",
json!([bs58::encode(bob_pubkey).into_string(), config])
);
let json: Value = post_rpc(req, &leader_data);
info!("{:?}", json["result"]["value"]);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
@@ -189,7 +203,7 @@ fn test_rpc_subscriptions() {
.and_then(move |client| {
for sig in signature_set {
let status_sender = status_sender.clone();
tokio::spawn(
tokio_01::spawn(
client
.signature_subscribe(sig.clone(), None)
.and_then(move |sig_stream| {
@@ -203,7 +217,7 @@ fn test_rpc_subscriptions() {
}),
);
}
tokio::spawn(
tokio_01::spawn(
client
.slot_subscribe()
.and_then(move |slot_stream| {
@@ -218,7 +232,7 @@ fn test_rpc_subscriptions() {
);
for pubkey in account_set {
let account_sender = account_sender.clone();
tokio::spawn(
tokio_01::spawn(
client
.account_subscribe(pubkey, None)
.and_then(move |account_stream| {
@@ -271,8 +285,12 @@ fn test_rpc_subscriptions() {
let timeout = deadline.saturating_duration_since(Instant::now());
match status_receiver.recv_timeout(timeout) {
Ok((sig, result)) => {
assert!(result.value.err.is_none());
assert!(signature_set.remove(&sig));
if let RpcSignatureResult::ProcessedSignature(result) = result.value {
assert!(result.err.is_none());
assert!(signature_set.remove(&sig));
} else {
panic!("Unexpected result");
}
}
Err(_err) => {
assert!(


@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.3.0"
version = "1.3.10"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"

Some files were not shown because too many files have changed in this diff.