Compare commits

...

265 Commits

Author SHA1 Message Date
ab235b8160 Add extra checks to verify_and_update (bp #13848) (#13849)
* Add extra checks to verify_and_update (#13848)

* Add extra checks to verify_and_update

* nudge

(cherry picked from commit ce4304cc9a)

# Conflicts:
#	runtime/src/message_processor.rs

* fix conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-11-29 11:27:30 +00:00
51e8872804 Fix avx check with newest nightly compiler (#13465) (#13809)
(cherry picked from commit c644b05c54)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-11-25 10:50:36 +00:00
5dbec42394 Bump version to 1.3.23 2020-11-25 03:02:13 -07:00
c48298128d Remove spl-token-cli from 1.3.22 build 2020-11-25 01:18:50 -07:00
db3f154b3f Fix nonces (#13800)
Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-24 23:54:02 -08:00
b0e5da40a9 Add stake calculation tests with inflation/slashing (#13605) (#13797)
* Add stake calculation tests with inflation/slashing

* Clean up the test

(cherry picked from commit 42421e77a9)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-25 04:59:20 +00:00
0c2433d796 Cap split stake at source stake when splitting entire balance (#13754) (#13765)
(cherry picked from commit f0f99ffc7e)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-23 18:23:58 +00:00
de03a5092d ledger-tool cap: output credits_observed (#13746) (#13747)
(cherry picked from commit 3bc7d85986)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-21 17:31:03 +00:00
fd39a09eae stake: Don't pay out rewards for epochs where inflation was not enabled (#13744)
(cherry picked from commit 13aa38d307)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-21 05:28:08 +00:00
0ad7b64961 sanitizes bloom filters to avoid division by zero (#13714) (#13717)
Pull requests received over the wire can cause a validator to panic
because of division by zero in bloom filters:
https://github.com/solana-labs/solana/blob/af08ba93e/runtime/src/bloom.rs#L86-L88

(cherry picked from commit a8c29505f0)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-11-20 00:53:23 +00:00
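The commit above sanitizes bloom filters received over the wire so that a filter with zero bits cannot trigger a division-by-zero panic. A minimal sketch of that kind of check; the `Bloom` struct and its fields here are illustrative stand-ins, not Solana's actual `runtime/src/bloom.rs` types:

```rust
// Illustrative bloom filter; field and method names are assumptions,
// not the real runtime/src/bloom.rs layout.
struct Bloom {
    bits: Vec<u64>,
    num_bits: u64,
}

impl Bloom {
    /// Reject filters that would cause a division by zero (or are otherwise
    /// inconsistent) before they are used to answer a pull request.
    fn sanitize(&self) -> Result<(), &'static str> {
        if self.num_bits == 0 {
            return Err("bloom filter with zero bits");
        }
        if (self.bits.len() as u64) * 64 < self.num_bits {
            return Err("bit vector shorter than num_bits");
        }
        Ok(())
    }

    /// The kind of index computation that panics when `num_bits == 0`.
    fn pos(&self, item: u64, key: u64) -> u64 {
        (item ^ key) % self.num_bits
    }
}

fn main() {
    let bad = Bloom { bits: vec![], num_bits: 0 };
    assert!(bad.sanitize().is_err()); // drop the packet instead of panicking

    let ok = Bloom { bits: vec![0; 4], num_bits: 256 };
    assert!(ok.sanitize().is_ok());
    assert!(ok.pos(42, 7) < ok.num_bits);
}
```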
c9d6fde7cf Allow GNUSparse for genesis.bin (#13704) (#13706)
(cherry picked from commit 397cf726fc)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-19 21:47:13 +00:00
4462eabd8c Check for overflow in rent partition calculation (#13569) (#13695)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit 110acd20dc)

Co-authored-by: carllin <wumu727@gmail.com>
2020-11-19 13:28:16 +00:00
e675ef85ce RPC: Demote missing block error to warning (#13684)
It frightens the tourists

(cherry picked from commit f2a1a0ac5c)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-19 06:57:08 +00:00
1f693c5925 audit: Ignore RUSTSEC-2020-0071, potential SEGV in time 2020-11-18 22:30:28 -07:00
fc20597cbc Update Initialized split rent-exempt value (#13646) (#13652)
(cherry picked from commit 39932d7664)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-17 20:34:07 +00:00
0aab403cbc Remove overflow opportunities (#13649) (#13650)
(cherry picked from commit a7bed62af0)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-17 20:15:43 +00:00
b0523dc236 Fix assertion failure (#13626) (#13630)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit afc1b59475)

Co-authored-by: carllin <wumu727@gmail.com>
2020-11-17 09:33:02 +00:00
02d36d0be0 Quiet notification logs when no subscriptions (#13629) (#13636)
(cherry picked from commit 3e4acba72f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-17 08:22:12 +00:00
b1e8e8a966 Improve TestValidator instantiation (bp #13627) (#13634)
* Improve TestValidator instantiation (#13627)

* Add TestValidator::new_with_fees constructor, and warning for low bootstrap_validator_lamports

* Add logging to solana-tokens integration test to help catch low bootstrap_validator_lamports in the future

* Reasonable TestValidator mint_lamports

(cherry picked from commit ef99689592)

# Conflicts:
#	tokens/Cargo.toml
#	tokens/tests/commands.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-11-17 01:08:36 -07:00
e12cb457fb Reject faked stake/vote accounts in stake mgmt. (bp #13615) (#13620)
* Reject faked stake/vote accounts in stake mgmt. (#13615)

* Reject faked stake/vote accounts in stake mgmt.

* Use clearer name

(cherry picked from commit 2b3faa1947)

# Conflicts:
#	programs/stake/src/stake_instruction.rs

* Fix conflict

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-16 22:05:31 +00:00
29ac8f5164 ledger-tool cap: delegation owner and stake v2 flag (bp #13602) (#13606)
* ledger-tool cap: delegation owner and stake v2 flag (#13602)

* Output delegation owner as well

* Add --enable-stake-program-v2

* Small cleanup and add sanity assertion

* Fix typo...

(cherry picked from commit bcd303a447)

* Fix compilation error

* rustfmt

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-16 01:33:04 +00:00
c6818f8faf Disable the PubSub vote subscription by default (#13599)
The --rpc-pubsub-enable-vote-subscription flag may be used to enable it.
The current vote subscription is problematic because it emits a
notification for *every* vote, so hundreds per second in a real cluster.
Critically, it's also missing information about *who* is voting,
rendering all those notifications practically useless.

Until these two issues can be resolved, the vote subscription is not
much more than a potential DoS vector.

(cherry picked from commit 5d72e52ad0)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-14 21:51:15 +00:00
965e6dfc9a Add counter metrics to rpc-subscriptions (#13596) (#13597)
(cherry picked from commit 88ae321d3f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-14 20:53:38 +00:00
41eab22117 Send pubsub metrics to metrics server (#13584) (#13585)
(cherry picked from commit 34bf80ba9c)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-13 20:44:59 +00:00
ea2daf0cfa Bump version to 1.3.22 2020-11-13 18:03:30 +00:00
5a61827702 Fix overflow in entry tick verification (bp #13572) (#13580)
* Fix overflow in entry hash count verification

(cherry picked from commit d611337394)

* clippy

(cherry picked from commit 01a4889b53)

Co-authored-by: Justin Starry <justin@solana.com>
2020-11-13 16:33:03 +00:00
a997c723b5 Small cleaning update_epoch_stakes (#13576) (#13577)
(cherry picked from commit c97a7d1105)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-13 12:28:51 +00:00
8b026ba829 ip-echo-server: Don't use framed decoder, it can't be read-limited (#13570)
(cherry picked from commit 6dc735e996)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-13 09:16:06 +00:00
05f2b64a6a --gossip-host may now be specified with --entrypoint (#13566)
(cherry picked from commit 328f59ebef)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-13 07:32:16 +00:00
e341a0b0f2 fix bpf lddw check (#13554) (#13557)
(cherry picked from commit 30ef53cb13)

Co-authored-by: Jack May <jack@solana.com>
2020-11-12 22:25:33 +00:00
51a48ae507 Bound ip-echo-server reply read (bp #13543) (#13545)
* ip-echo-server: Name the header length magic number

(cherry picked from commit aab5f24518)

* ip-echo-server: Add helper to compute reply length

(cherry picked from commit 7481ba5618)

* ip-echo-server: Limit socket read to expected reply length

(cherry picked from commit d2cfeb31b9)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-12 07:03:28 +00:00
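The three cherry-picked commits above bound how much of a reply is read back from the ip-echo-server socket. A minimal sketch of the general technique using `std::io::Read::take`; the length constants here are assumptions for illustration, not the real header/reply sizes:

```rust
use std::io::{Cursor, Read};

// Assumed sizes for illustration only.
const HEADER_LENGTH: usize = 4;
const MAX_REPLY_LENGTH: usize = HEADER_LENGTH + 23; // header + serialized reply

/// Read at most `MAX_REPLY_LENGTH` bytes from the peer, no matter how much
/// the other side tries to send.
fn read_reply<R: Read>(socket: R) -> std::io::Result<Vec<u8>> {
    let mut buf = Vec::new();
    socket.take(MAX_REPLY_LENGTH as u64).read_to_end(&mut buf)?;
    Ok(buf)
}

fn main() -> std::io::Result<()> {
    // A "malicious" peer that sends far more data than a reply should contain.
    let oversized = Cursor::new(vec![0u8; 10_000]);
    let reply = read_reply(oversized)?;
    assert_eq!(reply.len(), MAX_REPLY_LENGTH); // the read is capped
    Ok(())
}
```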
b34895db37 Fix slow/stuck unstaking due to toggling in epoch (bp #13501) (#13534)
* Fix slow/stuck unstaking due to toggling in epoch (#13501)

* Fix slow/stuck unstaking due to toggling in epoch

* nits

* nits

* Add stake_program_v2 feature status check to cli

Co-authored-by: Tyera Eulberg <tyera@solana.com>
(cherry picked from commit 89b474e192)

* Fix conflict

* PartialEq<Vec<T>> is not impl for &[T] on rust v1.45.1

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-11-12 01:20:52 +00:00
3e6398caec Validator: Periodically log what we're waiting for during --wait-for-supermajority (#13530)
(cherry picked from commit 38f15e41b5)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-11 21:18:00 +00:00
b9555116ec Increment version to v1.3.21 (#13528) 2020-11-11 19:30:46 +00:00
81a4769de7 Fix parsing CreateAccountWithSeed instructions (#13513) (#13516)
* Reduce required num_system_accounts and handle 2-account instructions properly

* Update CreateAccountWithSeed account docs to be correct

* Add CreateAccountWithSeed test

(cherry picked from commit 91f4e99b4c)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-11 01:01:38 +00:00
9ee741e021 watchtower: Fix all clear duration message (#13509)
(cherry picked from commit 2a96e722b4)

Co-authored-by: Justin Starry <justin@solana.com>
2020-11-10 19:01:13 +00:00
74c228a9d4 Bump SPL Token version fetched for localnet (bp #13490) (#13505)
* Bump token version fetched for localnet (#13490)

(cherry picked from commit 3282334741)

# Conflicts:
#	fetch-spl.sh

* Fix conflict, ie bump loader to be consistent with devnet/mainnet-beta

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-11-10 18:03:53 +00:00
320140fe8e Fix signature access (#13491) (#13502)
(cherry picked from commit 70c4626efe)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-11-10 18:03:10 +00:00
dfcd0c41ec Make testnet section less ambiguous (#13504) (#13507)
(cherry picked from commit 599dae8f09)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-10 17:12:52 +00:00
d217ea2afc Clean up Delegation::stake_activating_and_deactivating (#13471) (#13472)
(cherry picked from commit 5306eb93cc)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-09 09:52:34 +00:00
553209f64c Clean up Delegation::stake_and_activating (#13460) (#13468)
(cherry picked from commit 737d3e376d)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-08 09:31:14 +00:00
4fcf19e414 Output more inflation calc details in ledger-tool (bp #13345) (#13466)
* Output more inflation calc details in ledger-tool (#13345)

* Output more inflation calc details in ledger-tool

* Fix broken ci...

* Rename confusing variables

* Fix panic by wrapping PointValue with Option...

* Minor modifications

* Remove explicit needless flush; Drop already does

* Yet another csv field adjustments

* Add data_size and rename epochs to earned_epochs

* Introduce null_tracer

* Unwrap Option in new_from_parent_with_tracer

* Don't shorten identifiers

* Allow irrefutable_let_patterns temporarily

* More null_tracer

* More field adjustments

(cherry picked from commit a81e7e7749)

# Conflicts:
#	runtime/src/bank.rs

* Fix conflict

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-08 09:03:09 +00:00
e57e632870 Add builtin mem tests (bp #13429) (#13436)
* Add builtin mem tests (#13429)


(cherry picked from commit 84b139cc94)

* resolve crate version

* port to v1.3 conventions

* nudge

Co-authored-by: Jack May <jack@solana.com>
2020-11-07 01:23:14 +00:00
20678cf5ef Fix stake split rent-exempt adjustment (#13357) (#13452)
* Add failing tests

* Fix stake split

* Calculate split rent-exempt-reserve and use

* Add comment in rent.rs

* Add tests for edge cases when splitting to larger accounts, and reject overflow splits

* Reframe InsufficientFunds checks in terms of lamports var

* Test hardening review comments

(cherry picked from commit 4c5f345798)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-06 21:55:08 +00:00
0a05bbca2f Fix stake redelegate (bp #13358) (#13449)
* stake: Add redelegation failing test

(cherry picked from commit 491ad59d2e)

* stake: Consider withdraws when redelegating

(cherry picked from commit fe1e08b9ad)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-06 20:27:06 +00:00
ab7dff16a2 Feature-gate stake program (bp #13394) (#13438)
* Feature-gate stake program (#13394)

* Add legacy stake-program handling

* Strip out duplicative legacy code

* Add feature for stake-program-fix

* Feature-deploy new stake program

* Expand comment

(cherry picked from commit 1b1d9f6b0c)

# Conflicts:
#	runtime/src/builtins.rs
#	runtime/src/feature_set.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-11-06 14:00:37 +00:00
5e16e80993 CI: Check monorepo for consistent crate versions (bp #13431) (#13432)
* increment-cargo-version.sh: Add check subcommand

(cherry picked from commit 5d4015358a)

* CI: Check monorepo for consistent crate versions

(cherry picked from commit 7a4e293b3b)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-06 02:27:51 +00:00
a388b5a356 docs: Clarify the commitment levels based on questions (#13387) (#13425)
* Clarify the commitment levels based on questions

Many people have asked about what commitment levels mean, and which to
choose.  This update includes some of the language at
`sdk/src/commitment_config.rs` and a recommendation for different use
cases.

Additionally, the preflight commitment documentation was out of date,
specifying that "max" was always used, and this is no longer the case.

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Fix typo

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit ede891a6c6)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2020-11-05 19:38:59 +00:00
ef776c0a0e Allow feature builtins to overwrite existing builtins (bp #13403) (#13419)
* Allow feature builtins to overwrite existing builtins (#13403)

* Allow feature builtins to overwrite existing builtins

* Add feature_builtin ActivationType

* Correctly retain idempotent for replacing case

* Fix test

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
(cherry picked from commit bc62313c66)

# Conflicts:
#	ledger/src/builtins.rs
#	runtime/src/bank.rs

* Fix conflicts

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-11-05 17:53:03 +00:00
a899d75d2d Fix duplicate records of inner instructions (#13380) (#13412)
* Fix duplicate records of inner instructions

* fix tests

* fix clippy

* Remove bad_inner_instructions

(cherry picked from commit c24fbb6f8b)

Co-authored-by: Justin Starry <justin@solana.com>
2020-11-05 08:44:16 +00:00
27733bb4d7 Bump low end validator RAM requirement (#13406) 2020-11-05 15:24:26 +08:00
0b9c87b6ec Docs: Clarify validator disk requirements 2020-11-05 15:24:26 +08:00
df1e62f23f Update docs to latest processors (#11613) 2020-11-05 15:24:26 +08:00
f697a86d1e Comment Stakes::clone_with_epoch (#13388) (#13389)
(cherry picked from commit b0d1ae1d8b)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-04 13:49:48 +00:00
64c76c2f4b Revert "check sysvar id for AccountInfo (#13175) (#13351)"
This reverts commit 290d514051.
2020-11-02 12:14:33 -08:00
290d514051 check sysvar id for AccountInfo (#13175) (#13351)
(cherry picked from commit 322c667655)

# Conflicts:
#	sdk/program/src/sysvar/mod.rs

Co-authored-by: Jack May <jack@solana.com>
2020-11-02 18:18:07 +00:00
958c43f337 Small code cleanup and typo fixes (#13325) (#13340)
* Small code cleanup and typo fixes

* Clean up calculate_points_and_credits

(cherry picked from commit 0e4509c497)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-02 02:38:43 +00:00
4513128c75 check sysvar id for AccountInfo (#13175) 2020-11-01 06:58:02 +00:00
d01968ed21 cargo update -p futures-task / cargo update -p futures-util 2020-10-31 19:00:22 +00:00
f9ac24d1f2 Switch to dirs-next 2020-10-31 19:00:22 +00:00
fe5a09b50a Ignore stdweb 2020-10-31 19:00:22 +00:00
f59c70a836 Print the entry type as well when checking archive (#13312) (#13313)
(cherry picked from commit bc7133d752)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-30 17:46:02 +00:00
74bfb00959 Update AccountInfo comments (#13302)
(cherry picked from commit 72d41e5801)
2020-10-30 08:09:42 -07:00
4296bfc728 clarify comment (#13289) (#13291)
(cherry picked from commit b5c8b86e7c)

Co-authored-by: Jack May <jack@solana.com>
2020-10-29 22:28:09 +00:00
ccec111178 more portable install.sh (bp #13114) (#13219)
* more portable install.sh (#13114)

(cherry picked from commit 4e0d1b1d4a)

# Conflicts:
#	sdk/bpf/scripts/install.sh

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-29 21:10:14 +00:00
3a8c6f33a3 adds more parallel processing to gossip packets handling (#12988) (#13287)
(cherry picked from commit 3738611f5c)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-29 18:05:52 +00:00
06067dd823 implements ping-pong packets between nodes (bp #12794) (#13281)
* implements ping-pong packets between nodes (#12794)

https://hackerone.com/reports/991106

> It's possible to use the UDP gossip protocol to amplify DDoS attacks. An attacker
> can spoof the IP address in a UDP packet when sending a PullRequest to the node.
> There is no validation that the provided source IP address is not spoofed, and
> the node can send a much larger PullResponse to the victim's IP. As I checked,
> a PullRequest is about 290 bytes, while a PullResponse is about 10 kB, so the
> amplification is about 34x. This way an attacker can easily perform a DDoS
> attack both on a Solana node and on a third-party server.
>
> To prevent it, one needs, for example, to implement a ping-pong mechanism similar
> to the one in Ethereum: before accepting requests from a remote client, the node
> validates its IP. The local node sends a Ping packet to the remote node, which
> must reply with a Pong packet containing the hash of the matching Ping packet.
> The content of the Ping packet is unpredictable. If the hash from the Pong packet
> matches, the local node can record the IP the Ping packet was sent to as verified
> and allow further communication.
>
> More info:
> https://github.com/ethereum/devp2p/blob/master/discv4.md#endpoint-proof
> https://github.com/ethereum/devp2p/blob/master/discv4.md#wire-protocol

The commit adds a PingCache, which maintains records of remote nodes
which have returned a valid response to a ping message, and on-the-fly
ping messages pending a pong response from the remote node.

When handling pull-requests, those from addresses which have not passed
the ping-pong check are filtered out, and additionally ping packets are
added for addresses which need to be (re)verified.

(cherry picked from commit ae91270961)

# Conflicts:
#	Cargo.lock
#	core/src/cluster_info.rs

* resolves mergify merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-29 16:33:29 +00:00
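The commit above adds a ping/pong gate so that pull requests from unverified (possibly spoofed) addresses are dropped. Below is a highly simplified sketch of such a cache with made-up types, a made-up TTL, and no real networking, just to show the shape of the check; Solana's actual `PingCache` is more involved:

```rust
use std::collections::HashMap;
use std::net::SocketAddr;
use std::time::{Duration, Instant};

/// Token sent in a ping; the peer must echo it back in a pong.
type PingToken = u64;

/// TTL value is an assumption for the sketch.
const VERIFY_TTL: Duration = Duration::from_secs(1280);

#[derive(Default)]
struct PingCache {
    /// Addresses that answered a ping correctly, and when they did so.
    verified: HashMap<SocketAddr, Instant>,
    /// Pings we have sent and are still waiting on.
    pending: HashMap<SocketAddr, PingToken>,
}

impl PingCache {
    /// Returns true if `addr` has recently proven it can receive our traffic.
    /// Otherwise records that a ping should be (re)sent to it.
    fn check(&mut self, addr: SocketAddr, new_token: PingToken) -> bool {
        if let Some(when) = self.verified.get(&addr) {
            if when.elapsed() < VERIFY_TTL {
                return true;
            }
        }
        self.pending.entry(addr).or_insert(new_token);
        false
    }

    /// Called when a pong arrives; accept only if it matches the pending token.
    fn add_pong(&mut self, addr: SocketAddr, token: PingToken) {
        if self.pending.get(&addr) == Some(&token) {
            self.pending.remove(&addr);
            self.verified.insert(addr, Instant::now());
        }
    }
}

fn main() {
    let mut cache = PingCache::default();
    let peer: SocketAddr = "10.0.0.1:8001".parse().unwrap();

    // First pull request from an unverified address: filtered out, ping queued.
    assert!(!cache.check(peer, 42));
    // Peer echoes the token back.
    cache.add_pong(peer, 42);
    // Subsequent requests from that address are accepted.
    assert!(cache.check(peer, 43));
}
```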
9f58a0383c Disable eager rent collection for less noise (#13275) (#13279)
(cherry picked from commit 363c148dbe)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-29 15:38:56 +00:00
428cacff88 scans crds table in parallel for finding old labels (bp #13073) (#13277)
* scans crds table in parallel for finding old labels (#13073)

From runtime profiles, the majority of ClusterInfo::handle_purge's time
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/cluster_info.rs#L1605-L1626
is spent scanning the crds table for old labels:
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/crds.rs#L175-L197

This can be done in parallel given that gossip thread-pool:
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/cluster_info.rs#L1637-L1641
is idle when handle_purge is invoked:
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/cluster_info.rs#L1681

(cherry picked from commit 37c8842bcb)

# Conflicts:
#	core/tests/crds_gossip.rs

* resolves mergify merge conflict

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-29 15:33:59 +00:00
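The commit above parallelizes the scan that finds expired entries in the crds table. A toy version of that pattern is sketched below, assuming rayon as a dependency (which the repo already uses); the label and value types are placeholders, not the real crds types:

```rust
use rayon::prelude::*;
use std::collections::HashMap;

// Placeholder types standing in for the crds table and its labels.
type Label = u64;
struct Value {
    local_timestamp: u64,
}

/// Sequentially this is an O(n) scan under the gossip lock; doing the filter
/// in parallel shortens the time the lock is held.
fn find_old_labels(table: &HashMap<Label, Value>, now: u64, timeout: u64) -> Vec<Label> {
    table
        .par_iter()
        .filter(|(_, value)| now.saturating_sub(value.local_timestamp) > timeout)
        .map(|(label, _)| *label)
        .collect()
}

fn main() {
    let table: HashMap<Label, Value> = (0..1_000u64)
        .map(|i| (i, Value { local_timestamp: i }))
        .collect();
    let old = find_old_labels(&table, 1_000, 500);
    assert_eq!(old.len(), 500); // labels 0..=499 have aged out
}
```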
6a4f89b193 excludes origin from prune set (#13204) (#13278)
On the receiving end, prune messages are ignored if the origin points to
the node itself:
https://github.com/solana-labs/solana/blob/631f029fe/core/src/crds_gossip_push.rs#L285-L295
So to avoid sending these over the wire, the requester can exclude
origin from the prune set.

(cherry picked from commit be80f6d5c5)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-29 15:22:47 +00:00
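Since prune messages whose origin points at the receiving node are ignored anyway, the commit above simply leaves that origin out of the prune set before sending. Sketched generically below, with a stand-in `Pubkey` type rather than Solana's real one:

```rust
use std::collections::HashSet;

// Stand-in for solana_sdk::pubkey::Pubkey.
type Pubkey = [u8; 32];

/// Build the prune set to send to `peer`, leaving out any origin equal to the
/// peer itself, because the receiver would discard that entry anyway.
fn prune_set(origins_to_prune: &[Pubkey], peer: &Pubkey) -> HashSet<Pubkey> {
    origins_to_prune
        .iter()
        .filter(|origin| *origin != peer)
        .copied()
        .collect()
}

fn main() {
    let peer = [1u8; 32];
    let origins = vec![[1u8; 32], [2u8; 32], [3u8; 32]];
    let set = prune_set(&origins, &peer);
    assert_eq!(set.len(), 2); // the peer's own key is not sent over the wire
}
```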
65ad72cd64 improves threads' utilization in processing gossip packets (#12962) (#13251)
ClusterInfo::process_packets handles incoming packets in a thread_pool:
https://github.com/solana-labs/solana/blob/87311cce7/core/src/cluster_info.rs#L2118-L2134

However, profiling runtime shows that threads are not well utilized and
a lot of the processing is done sequentially.

This commit redistributes the work done in parallel. Testing on a gce
cluster shows 20%+ improvement in processing gossip packets with much
smaller variations.

(cherry picked from commit 75d62ca095)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-29 13:59:12 +00:00
0ddf684376 marks pull request creation time only once per peer (#13113) (#13252)
mark_pull_request_creation_time requires an exclusive lock on gossip:
https://github.com/solana-labs/solana/blob/16944e218/core/src/cluster_info.rs#L1547-L1548
The current code redundantly marks each peer once for each request.
There are at most only 2 unique peers, whereas there are hundreds of
requests per each, so the lock is acquired hundreds of times longer than
necessary.

(cherry picked from commit 4bfda3e766)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-29 13:54:39 +00:00
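The commit above stops taking the exclusive gossip lock once per request when the same couple of peers appear in hundreds of requests. A sketch of the de-duplication, with placeholder types and a hypothetical `Gossip` struct:

```rust
use std::collections::{HashMap, HashSet};
use std::sync::RwLock;

// Placeholder key type for the sketch.
type Pubkey = u64;

#[derive(Default)]
struct Gossip {
    pull_request_time: HashMap<Pubkey, u64>,
}

/// Mark the request-creation time once per *unique* peer, so the exclusive
/// lock is taken a handful of times instead of once per request.
fn mark_peers(gossip: &RwLock<Gossip>, requests: &[(Pubkey, Vec<u8>)], now: u64) {
    let unique: HashSet<Pubkey> = requests.iter().map(|(peer, _)| *peer).collect();
    let mut gossip = gossip.write().unwrap();
    for peer in unique {
        gossip.pull_request_time.insert(peer, now);
    }
}

fn main() {
    let gossip = RwLock::new(Gossip::default());
    // Hundreds of requests, but only two distinct peers.
    let requests: Vec<(Pubkey, Vec<u8>)> =
        (0..300u64).map(|i| (i % 2, vec![0u8; 8])).collect();
    mark_peers(&gossip, &requests, 42);
    assert_eq!(gossip.read().unwrap().pull_request_time.len(), 2);
}
```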
1376c325b6 Improve final report of ledger-tool capitalization (#13232) (#13235)
(cherry picked from commit 4698ee5e4a)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-28 18:28:23 +00:00
e29a49f107 Use pico inflation for ledger-tool capitalization --enable-inflation (#13215) (#13221)
* Use pico inflation for ledger-tool capitalization --enable-inflation

* rust fmt

(cherry picked from commit 7d2962135d)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-28 20:54:56 +09:00
4e7c096177 Use zstd for create-snapshot (#13214) (#13217)
(cherry picked from commit 6d4c69b7c3)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-28 06:37:28 +00:00
b18fa8deac Fix log (#13207) (#13210)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit f96ab5a818)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-28 03:11:21 +00:00
bba4f3006f Parse vote instructions (#13202) (#13208)
(cherry picked from commit c4962af9eb)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-28 01:53:13 +00:00
802a2cc985 passes through feature-set to gossip requests handling (#12878) (#13205)
* passes through feature-set down to gossip requests handling
* takes the feature-set from root_bank instead of working_bank

(cherry picked from commit 48283161c3)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-27 23:46:12 +00:00
ad5ef9cc48 Account for forward delay in transaction simulation (#13199) (#13200)
(cherry picked from commit 631f029fe9)

Co-authored-by: Justin Starry <justin@solana.com>
2020-10-27 18:34:41 +00:00
cf1c7c8c00 macos portable rust-bpf (#13176) (#13186)
(cherry picked from commit fc83a666fc)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 04:45:41 +00:00
a28fb586b0 Add SSH key for buildkite-agent on achille (#13182)
(cherry picked from commit ff4b34202c)

# Conflicts:
#	net/scripts/solana-user-authorized_keys.sh

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-27 04:25:53 +00:00
5737ea448e CLI: Surface deploy transaction errors (#13169)
(cherry picked from commit a82971879f)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-26 22:26:02 +00:00
e54a537015 update call depth docs (#13155) (#13161)
(cherry picked from commit 35f77ccc73)

Co-authored-by: Jack May <jack@solana.com>
2020-10-26 19:45:02 +00:00
39eae50024 implements DataBudget using atomics (#12856) (#13157)
(cherry picked from commit 05cf15a382)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-26 18:27:45 +00:00
736e5c3ec7 Fix BigTable reward type encoding (bp #13142) (#13146)
* Fix reward type encoding

(cherry picked from commit 0a89bb4d3c)

* Don't reuse BPF target build artifacts

(cherry picked from commit 41a56e14fc)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-26 08:18:05 +00:00
f8e310d99d Hide noisy specialization warnings for frozen abi (#13141) (#13143)
(cherry picked from commit 5caf81dbf8)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-26 05:11:12 +00:00
fd7bcfdb4d add precompile verification to simulate_transaction (#13080) (#13125)
(cherry picked from commit 766406fd23)

Co-authored-by: Josh <josh.hundley@gmail.com>
2020-10-24 04:57:12 +00:00
e553c8bb45 Clean up opt conf verifier and vote state tracker (#13081) (#13123)
* Clean up opt conf verifier and vote state tracker

* Update test to follow new message and some knob

* Rename

(cherry picked from commit 0264147d42)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-24 02:39:33 +00:00
1c5f8f51ee Shorten magic install URL (#13121)
(cherry picked from commit b5170b993e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-23 23:20:03 +00:00
5084871e27 Cli: deploy programs via TPU (#13090) (#13110)
* Deploy: send write transactions to leader tpu

* Less apparent stalling during confirmation

* Add EpochInfo mock

* Only get cluster nodes once

* Send deploy writes to next leader

(cherry picked from commit 16944e218f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-23 17:14:37 +00:00
184a56aae7 shrink debug (#13089) (#13108)
(cherry picked from commit 7d2729f6bd)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-23 16:14:09 +00:00
7d66cba7f4 Remove spammy invalid rpc log (#13100) (#13101)
(cherry picked from commit c95f6c4b83)

Co-authored-by: Justin Starry <justin@solana.com>
2020-10-23 08:21:33 +00:00
8f615278d2 Add deploy err if program-account balance is too high (#13091) (#13097)
* Add deploy err if program-account balance is too high

* Review comments

* Add system-program check

* Rename and unhide flag

(cherry picked from commit 4669fa0f98)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-23 05:50:21 +00:00
884d68ddcf Add Pubkey::new_unique()/Hash::new_unique() 2020-10-23 03:19:38 +00:00
b095a52027 Add programming-faq to sidebar (#12586) (#13087)
* Add programming-faq to sidebar

* nudge

* fix path

(cherry picked from commit 22d16c69b7)

Co-authored-by: Jack May <jack@solana.com>
2020-10-22 12:34:22 +00:00
1f5861e107 Add programming faq (#12545) (#13086)
* Add programming faq

* feedback and new content

* nudge

(cherry picked from commit b51c0f3095)

Co-authored-by: Jack May <jack@solana.com>
2020-10-22 12:33:29 +00:00
b90b46fee1 Allow nodes to advertise a different rpc address over gossip (#13053) (#13077)
* Allow nodes to advertise a different rpc address over gossip

* Feedback

(cherry picked from commit 8b0242a5d8)

Co-authored-by: Justin Starry <justin@solana.com>
2020-10-22 04:44:41 +00:00
bff820d549 Remove unused pubkey::Pubkey imports 2020-10-21 20:27:57 -07:00
5ad0ccdfe1 cargo fmt 2020-10-21 20:27:57 -07:00
d8c7d06737 Run codemod --extensions rs Hash::new_rand solana_sdk::hash::new_rand 2020-10-21 20:27:57 -07:00
422bb3c526 Run codemod --extensions rs Pubkey::new_rand solana_sdk::pubkey::new_rand 2020-10-21 20:27:57 -07:00
048a2b982c Add pubkey_new_rand(), mark Pubkey::new_rand() deprecated 2020-10-21 20:27:57 -07:00
76f0557462 Add hash_new_rand(), mark Hash::new_rand() as deprecated 2020-10-21 20:27:57 -07:00
6ebb933302 Bump version to 1.3.20 2020-10-22 00:08:28 +00:00
15a49d7508 RPC: Don't send base64 TXs to old clusters (#13062)
Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-21 21:07:08 +00:00
a86a781fd5 CLI: Print address ephemeral keypair seed phrase to stderr on deploy failure (bp #13046) (#13054)
* CLI: Print address ephemeral keypair seed phrase to stderr on deploy failure

(cherry picked from commit 2905ccc7ec)

# Conflicts:
#	cli/Cargo.toml
#	cli/src/cli.rs

* Fix conflicts

Co-authored-by: Trent Nelson <trent@solana.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-10-21 21:03:00 +00:00
57d8d0ab6e Add ledger-tool dead-slots and improve purge a lot (#13070) 2020-10-21 18:38:37 +00:00
63fe350900 Port various rent fixes to runtime feature (bp #12842) (#13067)
* Port various rent fixes to runtime feature (#12842)

* Port various rent fixes to runtime feature

* Fix CI

* Use more consistent naming...

(cherry picked from commit 608b81b412)

# Conflicts:
#	runtime/src/bank.rs

* Fix conflict

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-21 14:47:59 +00:00
b384ce9e03 Skip 'Stake by Feature Set' output when showing status of a single feature (#13051)
(cherry picked from commit ad65d4785e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-21 07:01:20 +00:00
d81d4ad8a1 Improve vote-account "Recent Timestamp" output (#12970)
(cherry picked from commit 2cc3d7511a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-21 05:09:58 +00:00
9ed1d2337b Parse stake and system instructions (bp #13035) (#13044)
* Parse stake and system instructions (#13035)

* Fix token account check

* Add helper to check num accounts

* Add parse_stake

* Add parse_system

* Fix AuthorizeNonce docs

* Remove jsonParsed unstable markers

* Clippy

(cherry picked from commit 46d0019955)

* Fix for older clippy

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-10-21 04:20:02 +00:00
14aaae3485 Support Debug Bank (#13017) (#13042)
(cherry picked from commit c0675968b1)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-21 02:50:12 +00:00
caac786a38 validator: Activate RPC before halting on slot (#13001)
(cherry picked from commit 3b3f7341fa)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-20 23:50:11 +00:00
218a76ed1b Force unset CARGO to use correct version of cargo (#13027) (#13033)
(cherry picked from commit 81d0c8ae7f)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2020-10-20 22:52:06 +00:00
964f05afa8 Fix secp256k1 instruction indexing and add tests (#13026) (#13031)
(cherry picked from commit 83c53ae4b5)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-20 22:24:20 +00:00
a0e2f49263 Remove unsupported metrics tarball from release artifacts
(cherry picked from commit 62f20bc170)
2020-10-20 13:14:46 -07:00
25078d46ba filters out inactive nodes from push options (#12674) (#13022)
* filters out inactive nodes from push options

https://github.com/solana-labs/solana/pull/12620
patched the DDoS issue with nodes which go offline:
https://github.com/solana-labs/solana/issues/12409

However, offline nodes still see a (much smaller) traffic spike, likely
because no origins are pruned from their bloom filter in active set:
https://github.com/solana-labs/solana/blob/aaf3790d8/core/src/crds_gossip_push.rs#L276-L286
and so multiple nodes push redundant duplicate messages to them
simultaneously:
https://github.com/solana-labs/solana/blob/aaf3790d8/core/src/crds_gossip_push.rs#L254-L255

This commit will filter out inactive peers from potential push targets
entirely. To mitigate eclipse attacks, staked nodes are retried
periodically.

* uses current timestamp in test/crds_gossip

(cherry picked from commit a5c6a78f6d)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-20 19:44:52 +00:00
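The commit above stops pushing gossip to peers that have gone quiet, while still retrying staked peers now and then so a node cannot be eclipsed. A rough sketch of that filter; the field names and the periodic-retry rule here are simplifications invented for the example:

```rust
use std::collections::HashMap;

type Pubkey = u64;

struct Peer {
    last_active_ms: u64,
    stake: u64,
}

/// Keep peers seen recently; staked peers that look inactive are still
/// retried once in a while (here crudely keyed off the current time) to
/// mitigate eclipse attacks.
fn push_targets(peers: &HashMap<Pubkey, Peer>, now_ms: u64, active_timeout_ms: u64) -> Vec<Pubkey> {
    peers
        .iter()
        .filter(|(_, peer)| {
            let active = now_ms.saturating_sub(peer.last_active_ms) <= active_timeout_ms;
            let retry_staked = peer.stake > 0 && now_ms % 16 == 0; // periodic retry
            active || retry_staked
        })
        .map(|(pubkey, _)| *pubkey)
        .collect()
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert(1, Peer { last_active_ms: 990, stake: 0 }); // active, unstaked
    peers.insert(2, Peer { last_active_ms: 100, stake: 0 }); // inactive, unstaked
    peers.insert(3, Peer { last_active_ms: 100, stake: 50 }); // inactive, staked
    let targets = push_targets(&peers, 1_000, 60);
    assert!(targets.contains(&1));
    assert!(!targets.contains(&2)); // inactive unstaked peers are filtered out
}
```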
840ea326db Remove errant print 2020-10-20 08:58:54 -06:00
b9fc31ec95 Parse bpf loader instructions (#12998) (#13004)
* Add parsing for BpfLoader2 instructions

* Skip info if null

* Return account address in info map

(cherry picked from commit 942e4273ba)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-20 06:21:47 +00:00
9481ee79a8 Ignore more paths in increment-cargo-version.sh (#12996)
(cherry picked from commit c1c69ecc34)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-19 23:57:11 +00:00
dddd0b76f1 removes invalid/outdated pending push messages early (#12555) (#12992)
In CrdsGossipPush::new_push_messages:
https://github.com/solana-labs/solana/blob/972619edb/core/src/crds_gossip_push.rs#L211-L228
we have already paid the cost of looking up the label in the crds table and
checking the hash value and wallclock, only to find out that in some
cases the value is invalid or outdated. So we might as well remove the
value here rather than wait for the next call to
purge_old_pending_push_messages:
https://github.com/solana-labs/solana/blob/972619edb/core/src/crds_gossip_push.rs#L372

(cherry picked from commit b5faa11f73)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-19 22:02:18 +00:00
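Because the lookup already pays for the hash and wallclock checks, the commit above drops invalid or outdated pending push messages at that point instead of waiting for the periodic purge. A simplified sketch using `HashMap::retain`; the `Label`/`Value` types are placeholders:

```rust
use std::collections::HashMap;

type Label = u64;

#[derive(Clone)]
struct Value {
    wallclock: u64,
    hash: u64,
}

/// Build the next batch of push messages, removing entries that are already
/// stale (or whose hash no longer matches the crds table) as we go, instead
/// of leaving them for a later purge pass.
fn new_push_messages(
    pending: &mut HashMap<Label, Value>,
    crds: &HashMap<Label, Value>,
    now: u64,
    timeout: u64,
) -> Vec<(Label, Value)> {
    let mut out = Vec::new();
    pending.retain(|label, value| {
        let fresh = now.saturating_sub(value.wallclock) <= timeout;
        let current = crds.get(label).map(|v| v.hash) == Some(value.hash);
        if fresh && current {
            out.push((*label, value.clone()));
            true // keep: may still be pushed to more peers later
        } else {
            false // invalid/outdated: remove immediately
        }
    });
    out
}

fn main() {
    let mut pending = HashMap::new();
    pending.insert(1, Value { wallclock: 95, hash: 7 }); // fresh and current
    pending.insert(2, Value { wallclock: 10, hash: 9 }); // outdated
    let crds: HashMap<Label, Value> =
        [(1, Value { wallclock: 95, hash: 7 })].into_iter().collect();
    let msgs = new_push_messages(&mut pending, &crds, 100, 30);
    assert_eq!(msgs.len(), 1);
    assert_eq!(pending.len(), 1); // the outdated entry was removed right away
}
```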
7674a5fea8 Bump version to v1.3.19 (#12986) 2020-10-19 19:13:28 +00:00
78be777b65 Revert "CLI: Put deploy ephemeral keypair behind a flag (#12941)" (#12981)
This reverts commit c2806aa2f9.
2020-10-19 17:31:33 +00:00
9b5c10a6aa Mention monitoring and updating for exchanges (#12953) (#12960)
* Mention monitoring and updating for exchanges

* Fix link syntax...

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* More review comments and word-wrapping

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit 87311cce7f)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-17 09:50:51 +00:00
8714c14549 keygen: add more mnemonic language support (#12944) (#12956)
(cherry picked from commit 4451042c76)

Co-authored-by: guanqun <guanqun.lu@gmail.com>
2020-10-17 04:01:22 +00:00
1982a7a8e3 Check payer balance for program account rent as needed (#12952) (#12954)
(cherry picked from commit b6bfed64cb)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-16 19:11:13 +00:00
c2806aa2f9 CLI: Put deploy ephemeral keypair behind a flag (#12941)
(cherry picked from commit 5a5b7f39c1)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-16 06:09:40 +00:00
e6521ef44c Report compute budget usage (#12931) (#12933)
(cherry picked from commit b510474dcb)

Co-authored-by: Jack May <jack@solana.com>
2020-10-16 00:19:18 +00:00
03ab8fac29 Update get-block method in get_confirmed_transaction (#12923) (#12929)
* Update get-block method in get_confirmed_transaction

* Remove superfluous into()

(cherry picked from commit 42943ab86d)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-15 21:33:38 +00:00
644304d171 Support arbitrary toolchains with cargo wrapper script (#12925)
(cherry picked from commit 99aecdaf65)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 20:15:29 +00:00
782ed192b6 program log pubkey as base58 (bp #12901) (#12910)
* program log pubkey as base58 (#12901)

(cherry picked from commit 3f9e6a600b)

# Conflicts:
#	programs/bpf/benches/bpf_loader.rs
#	programs/bpf/c/src/tuner/tuner.c
#	programs/bpf_loader/src/syscalls.rs
#	runtime/src/process_instruction.rs

* fix conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-15 19:21:27 +00:00
0da2f73eb4 Release: Use pinned cargo version to install spl-token-cli (#12915)
(cherry picked from commit bb2f0df9e1)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 17:55:15 +00:00
a3fdfea674 RPC: Add metrics for TX encoding (#12879)
(cherry picked from commit c26512255d)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 15:41:13 +00:00
43121a56eb Surface 'Program account allocation failed' error details (#12902)
(cherry picked from commit eec3d25ab9)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-15 10:25:26 +00:00
2c0b4f3b4f Respect RefCell when calling invoke (#12858) (#12890)
* Respect RefCell when calling invoke

* nudge

(cherry picked from commit 969f7b015b)

Co-authored-by: Jack May <jack@solana.com>
2020-10-15 02:24:51 +00:00
db30316bb3 Release: Include SPL Token in release tarballs (#12888)
(cherry picked from commit f70762913c)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 01:32:29 +00:00
73e4d9d623 Don't report RewardType::Fee when none was awarded (#12876)
(cherry picked from commit 4b04ed86b6)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-14 17:36:17 +00:00
46b864dcb0 Expose program error constants (#12861) (#12870)
(cherry picked from commit d4e953277e)

Co-authored-by: Jack May <jack@solana.com>
2020-10-14 08:48:45 +00:00
4f2e60fea4 Add nop feature set for upcoming ported rent fixes (bp #12841) (#12846)
* Add nop feature set for upcoming ported rent fixes (#12841)

(cherry picked from commit 7de7efe96c)

# Conflicts:
#	runtime/src/feature_set.rs

* Update feature_set.rs

* Add missing comma...

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-14 04:47:07 +00:00
7ce9beacb6 Add log_messages to proto file (#12859) (#12862)
(cherry picked from commit 67ed44c007)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-14 01:37:42 +00:00
9dab79274b terminology update, nonce to bump seed (bp #12840) (#12850)
* terminology update, nonce to bump seed (#12840)

(cherry picked from commit 56211378d3)

# Conflicts:
#	sdk/src/pubkey.rs

* fix conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-13 19:24:30 +00:00
df8b07ddbd solana vote-account/solana stake-account now works with RPC servers without --enable-rpc-transaction-history (bp #12826) (#12848)
* Implementation-defined RPC server errors are now accessible to client/ users

(cherry picked from commit 247228ee61)

* Cleanly handle RPC servers that don't have --enable-rpc-transaction-history enabled

(cherry picked from commit 14d793b22c)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 18:14:45 +00:00
762423c9a9 Add transaction log messages to |solana confirm -v| output (#12835)
(cherry picked from commit e9dbbdeb81)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 07:10:03 +00:00
689aa4d44f Check ELF file for errors before deploy (bp #12741) (#12800)
* Check ELF file for errors before deploy (#12741)

* Check ELF file for errors before deploy

* Update cli/src/cli.rs

Co-authored-by: Michael Vines <mvines@gmail.com>

* Fix formatting

* Bump solana_rbpf

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 6bbf6a79b7)

# Conflicts:
#	Cargo.lock
#	cli/Cargo.toml
#	cli/src/cli.rs

* rebase

Co-authored-by: Alexandre Esteves <2335822+alexfmpe@users.noreply.github.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 05:12:33 +00:00
cff1496e94 Add docs on vote account key rotation (bp #12815) (#12830)
* Add docs on vote account key rotation

(cherry picked from commit 253114ca20)

* Update docs/src/running-validator/vote-accounts.md

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit d83027c0cd)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 04:21:16 +00:00
d1c51950f3 Bump version to v1.3.18 2020-10-13 03:57:13 +00:00
774a12e7b9 CI: Fix crate publication (#12824)
(cherry picked from commit c38021502e)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-12 23:19:31 +00:00
85e8926d81 fix native_loader behavior for invalid accounts (#12814) (#12818)
(cherry picked from commit c24da1ee16)

Co-authored-by: Jack May <jack@solana.com>
2020-10-12 22:09:55 +00:00
a2be9c647f Use latest stable channel release if there's no beta release (#12822)
(cherry picked from commit 65213a1782)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-12 22:03:05 +00:00
744006fe78 RpcClient: Encode TXs as base64 by default (#12816)
(cherry picked from commit efbe37ba20)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-12 21:56:13 +00:00
edf59cccd8 Fix fee mismatch on snapshot deserialize (#12697) (#12753)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit c879e7c1ad)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-12 11:34:13 +00:00
9c72bf871f Move no-0-rent rent dist. behavior under feature (#12804) (#12810)
(cherry picked from commit 2f5bb7e507)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-12 10:42:01 +00:00
033c87e3f1 simulate_transaction_with_config() now passes full config to server (#12802)
(cherry picked from commit b3c2752bb0)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-12 06:13:55 +00:00
d35d853e57 Cargo.lock 2020-10-12 04:21:26 +00:00
03317f6bf7 Switch to tempfile 2020-10-12 04:21:26 +00:00
4f0e928a0a Rework cargo audit ignores 2020-10-12 04:21:26 +00:00
8600d5188d Don't bother paying 0 rent (#12792)
(cherry picked from commit 1fc7c1ecee)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-10 18:12:18 +00:00
8cd7716ee3 Update docs to show support for Nano X (bp #12647) (#12654)
* Update docs to show support for Nano X (#12647)

(cherry picked from commit 0ef3cac1f8)

# Conflicts:
#	docs/src/wallet-guide/ledger-live.md

* Fix merge conflict

Co-authored-by: Dan Albert <dan@solana.com>
Co-authored-by: publish-docs.sh <maintainers@solana.com>
2020-10-10 16:06:41 +00:00
b13385fba6 Update gossip entrypoints 2020-10-10 08:39:54 -07:00
9f82208c29 Store program logs in blockstore / bigtable (TransactionWithStatusMeta) (bp #12678) (#12734)
* Store program logs in blockstore / bigtable (TransactionWithStatusMeta) (#12678)

* introduce store program logs in blockstore / bigtable

* fix test, transaction logs created for successful transactions

* fix test for legacy bincode implementation around log_messages

* only api nodes should record logs

* truncate transaction logs to 100KB

* refactor log truncate for improved coverage

(cherry picked from commit 8f5431551e)

# Conflicts:
#	runtime/src/bank.rs

* Resolve merge conflicts in bank.rs

* rerun cargo fmt

Co-authored-by: Josh <josh.hundley@gmail.com>
2020-10-10 08:55:41 +00:00
596ede864b document program address collisions (bp #12774) (#12781)
* document program address collisions (#12774)

(cherry picked from commit 9ac8db3533)

# Conflicts:
#	sdk/src/pubkey.rs

* Update pubkey.rs

* Update pubkey.rs

Co-authored-by: Jack May <jack@solana.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-10 06:50:58 +00:00
e4bd382472 Expose all rewards (fees, rent, voting and staking) in RPC getConfirmedBlock and the cli (bp #12768) (#12789)
* Expose all rewards (fees, rent, voting and staking) in RPC getConfirmedBlock and the cli

(cherry picked from commit c5c8da1ac0)

# Conflicts:
#	Cargo.lock
#	runtime/src/bank.rs
#	transaction-status/Cargo.toml

* fix: surface full block rewards type

(cherry picked from commit 1b16790325)

* resolve conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-10 06:25:10 +00:00
a70aa28832 Bump version to 1.3.17 2020-10-10 03:22:14 +00:00
9a63cf51b5 Fix typo (#12780) (#12783)
(cherry picked from commit 5800217998)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-10 02:18:18 +00:00
b31ec0579c Local program allocator (#12679) (#12767)
(cherry picked from commit 630eb3b907)

Co-authored-by: Jack May <jack@solana.com>
2020-10-10 01:44:18 +00:00
ad31768dd9 Add adjustable stack size and call depth (bp #12728) (#12769)
* Add adjustable stack size and call depth (#12728)

(cherry picked from commit c3907be623)

# Conflicts:
#	programs/bpf/Cargo.lock
#	programs/bpf/Cargo.toml
#	programs/bpf/build.rs
#	programs/bpf_loader/Cargo.toml
#	programs/bpf_loader/src/lib.rs
#	runtime/src/feature_set.rs
#	runtime/src/process_instruction.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-09 23:27:59 +00:00
079ea91d6f Add convenience script for working in stability branches (#12765) (#12772)
* Add convenience script for working in stability branches

* Update scripts/curgo.sh

Co-authored-by: Michael Vines <mvines@gmail.com>

* re{locate,name} to /cargo

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit ed95071c27)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-09 22:29:47 +00:00
48d08f2010 Bump max invoke depth to 4 (bp #12742) (#12763)
* Bump max invoke depth to 4 (#12742)

(cherry picked from commit 2cd7cd3149)

# Conflicts:
#	programs/bpf/rust/invoked/src/processor.rs
#	runtime/src/feature_set.rs
#	runtime/src/message_processor.rs
#	runtime/src/process_instruction.rs

* fix conflicts

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-09 21:12:35 +00:00
b34ba0fc4e Remove skip-no-mangle entirely (bp #12696) (#12708)
* Remove skip-no-mangle entirely (#12696)

(cherry picked from commit 41ad3dd8f0)

# Conflicts:
#	account-decoder/Cargo.toml
#	core/Cargo.toml
#	transaction-status/Cargo.toml

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-09 20:44:50 +00:00
41b99b96c0 Ryo's compat fixes 1.3 (#12762)
* Fix various ledger-tool error due to no builtins

* Add missing file...

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-09 11:49:15 -06:00
fea6e4d39d Only fetch snapshot if it's newer than local (#12663) (#12751)
* Only fetch snapshot if it's newer than local

* Prefer as_ref over clone

* More nits

* Don't wait forwever for newer snapshot

(cherry picked from commit 81489ccb76)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-09 07:23:06 +00:00
fced68c3f4 Add inflation_kill_switch feature (#12748)
(cherry picked from commit c8807d227a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-09 05:39:17 +00:00
7f006d810c Add new internal accounts (#12740) (#12746)
Co-authored-by: publish-docs.sh <maintainers@solana.com>
(cherry picked from commit 2c5f83c264)

Co-authored-by: Dan Albert <dan@solana.com>
2020-10-09 02:18:27 +00:00
cab1b102e6 Minor variable name cleanup (#12744)
(cherry picked from commit 3a04026599)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-09 02:03:03 +00:00
448887a469 Fixup wallet docs, manual cherrypick for backport (#12738)
Co-authored-by: publish-docs.sh <maintainers@solana.com>
2020-10-08 21:51:24 +00:00
bded162ed8 fix conflicts (#12733)
Co-authored-by: Jack May <jack@solana.com>
2020-10-08 20:40:59 +00:00
765dd1b775 Nit, short name (bp #12195) (#12732)
* Nit, short name (#12195)

(cherry picked from commit daba17a95c)

# Conflicts:
#	runtime/src/bank.rs

* fix conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-08 20:03:15 +00:00
b74fbdf7eb Pipe FeatureSet though InvokeContext (bp #12536) (#12730)
* Pipe FeatureSet though InvokeContext (#12536)

* Pipe FeatureSet though InvokeContext

* gate program size cap

* nit

(cherry picked from commit 74fcb184b2)

# Conflicts:
#	runtime/src/bank.rs
#	runtime/src/feature_set.rs

* fix conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-08 18:18:51 +00:00
fb0f0adb16 Display vote/stake account epoch rewards 2020-10-08 08:34:40 -07:00
2a3c4d87ce Support multiple connected HW wallets configured with the same seed phrase (bp #12716) (#12719)
* remote-wallet: Select hardware wallets based on host device path

(cherry picked from commit 8e3353d9ef)

* remote-wallet: Append wallet "name" to entries in selector UI

(cherry picked from commit f1a2ad1b7d)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-08 05:46:24 +00:00
eaa6e04e83 Revert "Restore --expected-shred-version argument for mainnet-beta" (#12722)
This reverts commit 9410eab2af.

(cherry picked from commit dadc84fa8c)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-08 04:48:40 +00:00
925973ee60 RPC: Transaction deser can be quite slow (bp #12683) (#12702)
* RPC: Check encoded transaction size before decoding

(cherry picked from commit 7f67d36777)

* RPC: Support base64 encoded transactions

Defaults to base58

(cherry picked from commit e35889542b)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-07 05:50:23 +00:00
2c55319e41 Add env variable for rayon thread counts (#12693) (#12698)
(cherry picked from commit 37222683ee)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-07 01:26:18 +00:00
f22a5efde5 Faucet request limiter can overflow (bp #12691) (#12694)
* faucet: Add failing test case

(cherry picked from commit 5ae704d560)

* faucet: Use checked math in request limiter

(cherry picked from commit 87de82ac94)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-06 20:02:14 +00:00
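The faucet fix above replaces plain addition in the request limiter with checked math, so a huge request cannot wrap the counter around and slip past the cap. A minimal, generic illustration of the pattern (not the faucet's actual types):

```rust
/// Per-window request accounting for a faucet-style rate limiter.
struct RequestLimiter {
    used: u64,
    cap: u64,
}

impl RequestLimiter {
    /// Returns true if the request fits under the cap. `checked_add` turns a
    /// would-be overflow into a rejection instead of wrapping to a small number.
    fn check_and_add(&mut self, amount: u64) -> bool {
        match self.used.checked_add(amount) {
            Some(total) if total <= self.cap => {
                self.used = total;
                true
            }
            _ => false,
        }
    }
}

fn main() {
    let mut limiter = RequestLimiter { used: 10, cap: 1_000 };
    assert!(limiter.check_and_add(100));
    // With wrapping arithmetic this huge request could overflow `used` back
    // below the cap; checked math rejects it outright.
    assert!(!limiter.check_and_add(u64::MAX));
    assert!(!limiter.check_and_add(10_000));
}
```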
add5c5b29f Add systemd and log rotation section to validator start docs (#12675) (#12676)
* Add systemd and log rotation section to validator start docs

* Update docs/src/running-validator/validator-start.md

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* Update docs/src/running-validator/validator-start.md

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit fbb5e5c4e6)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-05 19:34:44 +00:00
1b53f8b33d Update channel_restriction.sh 2020-10-04 10:28:12 -06:00
34591d2f2f Show commit in --version and ledger-tool's log (#12636) (#12662)
* Show commit in `--version` and ledger-tool's log

* Another handy hidden env var

* Fix test

* Rename to semver!

* Fix syntax error...

(cherry picked from commit 026e7de819)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-03 16:45:27 +00:00
bc1b95d6d0 Bump version to v1.3.16 2020-10-03 04:24:03 +00:00
90d586a4f8 solana stakes now employs server-side filtering if only one vote account is provided (#12657)
(cherry picked from commit 9abaf6ec1d)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-03 02:00:49 +00:00
efcb5cd9f0 Fix zero-len slice translations (#12642) (#12656)
(cherry picked from commit d0aa8a6446)

Co-authored-by: Jack May <jack@solana.com>
2020-10-03 01:58:27 +00:00
ffa0ee69ca Weight push peers by how long we haven't pushed to them (#12620) (#12651)
(cherry picked from commit 71c469c72b)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-02 22:27:35 +00:00
a983430ddb Check CPI program is executable (#12644) (#12649)
(cherry picked from commit adeb06e550)

Co-authored-by: Jack May <jack@solana.com>
2020-10-02 22:27:29 +00:00
bd94250fca Improve solana deploy (#12621) (#12646)
* Check program account before attempting to create it

* Use last_valid_slot to timeout status checks

* Include transaction history in RpcClient::get_signature_statuses requests

* Improve solana-deploy send-transactions

* Clippy

* Improve mock deploy test

* Review comments

(cherry picked from commit 19f385db76)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-02 20:44:26 +00:00
db5251a524 solana catchup now retries if the initial RPC connection fails (#12645)
(cherry picked from commit 978b26a9c5)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-02 20:42:51 +00:00
efb665071c limits number of threads in core/tests/crds_gossip.rs (#12615) (#12641)
crds_gossip tests start large networks, which with large thread-pools
will exhaust system resources, causing failures in ci tests:
https://buildkite.com/solana-labs/solana/builds/31953

The commit limits the size of the thread-pools in the test.

(cherry picked from commit 2c669f65f1)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-02 20:01:49 +00:00
e69ee1ec64 Add GetConfirmedBlocksWithLimit RPC method
(cherry picked from commit 75b621160e)
2020-10-02 08:21:08 -07:00
307686eeba Add --no-port-check to validator (#12245) (#12638)
(cherry picked from commit aa70dbfc62)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-02 12:46:40 +00:00
de1e2f9c0c Add inflation subcommand (#12632)
(cherry picked from commit 42aeead6b4)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-02 07:14:02 +00:00
01f93003d3 Improve block command output (#12631)
(cherry picked from commit 14036ac580)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-02 07:03:29 +00:00
75219afc91 Document postBalance field (#12628)
(cherry picked from commit e03a64ae1b)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-02 05:38:11 +00:00
71526923a6 Expose validator cli arguments for pubsub buffer tuning (#12622)
(cherry picked from commit f41a73d76a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-02 04:43:44 +00:00
29af9d1a36 Avoid overflow when computing rent distribution (bp #12112) (#12607)
* Avoid overflow when computing rent distribution (#12112)

* Avoid overflow when computing rent distribution

* Use assert_eq!....

* Fix tests

* Add test

* Use FeatureSet

* Add comments

* Address review comments

* Tweak a bit.

* Fix fmt

(cherry picked from commit e3773d919c)

# Conflicts:
#	runtime/src/bank.rs
#	runtime/src/feature_set.rs

* Fix conflict

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-02 01:37:47 +00:00
46311181dc Add nonced-tx check to RpcClient (#12600) (#12604)
(cherry picked from commit 8f10e407ee)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-01 07:51:16 +00:00
17de653ce0 Move nonce utils from runtime to sdk (bp #12577) (#12583)
* runtime: Move prepare_if_nonce_account into accounts

(cherry picked from commit caec631344)

* Move nonced tx helpers to SDK

(cherry picked from commit 65b868f4eb)

* Move remaining nonce utils from runtime to SDK

(cherry picked from commit 3c7b9c2938)

# Conflicts:
#	runtime/src/bank.rs

* Fix conflict

Co-authored-by: Trent Nelson <trent@solana.com>
Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-01 06:25:25 +00:00
387ecdf70e Add ci env to travis config (#12608) (#12610)
Co-authored-by: publish-docs.sh <maintainers@solana.com>
(cherry picked from commit a17907b9a2)

Co-authored-by: Dan Albert <dan@solana.com>
2020-10-01 06:03:57 +00:00
fbe5a89e74 retains hash value of outdated responses received from pull requests (#12513) (#12603)
pull_response_fail_inserts has been increasing:
https://cdn.discordapp.com/attachments/478692221441409024/759096187587657778/pull_response_fail_insert.png
but outdated values which fail to insert:
https://github.com/solana-labs/solana/blob/a5c3fc14b3/core/src/crds_gossip_pull.rs#L332-L344
https://github.com/solana-labs/solana/blob/a5c3fc14b3/core/src/crds.rs#L104-L108
are not recorded anywhere, so the next pull request may obtain the
same redundant payload again, unnecessarily taking bandwidth.

This commit holds on to the hashes of failed-inserts for a while, similar
to purged_values:
https://github.com/solana-labs/solana/blob/a5c3fc14b3/core/src/crds_gossip_pull.rs#L380
and filters them out of the next pull request:
https://github.com/solana-labs/solana/blob/a5c3fc14b3/core/src/crds_gossip_pull.rs#L204

(cherry picked from commit 1866521df6)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-01 01:47:20 +00:00
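The commit above remembers the hashes of pull responses that failed to insert because they were outdated, so the next pull request's filter can exclude them and the same payload is not fetched again. A compact sketch of that bookkeeping; the hash type and TTL handling are placeholders:

```rust
use std::collections::VecDeque;

// Placeholder for a real value hash.
type Hash = u64;

#[derive(Default)]
struct FailedInserts {
    /// (hash, timestamp) pairs for values that recently failed to insert.
    entries: VecDeque<(Hash, u64)>,
}

impl FailedInserts {
    /// Record an outdated value so we stop asking peers for it.
    fn record(&mut self, hash: Hash, now: u64) {
        self.entries.push_back((hash, now));
    }

    /// Drop records older than `ttl`, mirroring how purged_values is handled.
    fn purge(&mut self, now: u64, ttl: u64) {
        while let Some(&(_, timestamp)) = self.entries.front() {
            if now.saturating_sub(timestamp) > ttl {
                self.entries.pop_front();
            } else {
                break;
            }
        }
    }

    /// Hashes to add to the outgoing pull-request filter alongside the
    /// hashes of values we already hold.
    fn filter_hashes(&self) -> impl Iterator<Item = Hash> + '_ {
        self.entries.iter().map(|(hash, _)| *hash)
    }
}

fn main() {
    let mut failed = FailedInserts::default();
    failed.record(0xdead_beef, 100);
    failed.record(0xfeed_face, 160);
    failed.purge(200, 50); // the first record has aged out
    let hashes: Vec<Hash> = failed.filter_hashes().collect();
    assert_eq!(hashes, vec![0xfeed_face]);
}
```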
afbdcf3068 Include post balance information for rewards (#12598) (#12602)
* Include post balance information for rewards

* Add post-balance to stored Reward struct

* Handle extended Reward in bigtable

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit c31a34fbcb)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-01 01:05:37 +00:00
2d1b995006 Use protobufs to store confirmed blocks in BigTable (#12526) (#12597)
* Use protobufs to store confirmed blocks in BigTable

* Cleanup

* Reorganize proto

* Clean up use statements

* Split out function for unit testing

* s/utils/convert

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit ce598c5c98)

Co-authored-by: Justin Starry <justin@solana.com>
2020-09-30 19:37:02 +00:00
d9d3a95a72 Fix TransactionStatusMeta breakage in blockstore (#12587) (#12596)
* Add helper to facilitate deserializing legacy structs

* Use default_on_eof to fix blockstore vis-a-vis TransactionStatusMeta

* Add should-panic test and comments

(cherry picked from commit 865d01c38d)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-30 19:18:43 +00:00
ea990fd259 Update devnet genesis hash 2020-09-30 11:37:51 -07:00
4f30f9c8cf Modernize python scripts (#12595)
(cherry picked from commit fce3c70b72)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-09-30 18:24:49 +00:00
d13694d839 Tighten docs publishing flow (#12572) (#12594)
(cherry picked from commit ede19ef33b)

Co-authored-by: Dan Albert <dan@solana.com>
2020-09-30 18:23:49 +00:00
700c8c1ec1 epoch_rewards datapoint now includes the correct rewards epoch (previous epoch) (#12582)
(cherry picked from commit f57af4fec2)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-30 06:56:05 +00:00
33ace54b0f Fix banks RPC port (#12570) (#12574)
* Fix Banks RPC ports

* Add get_account_with_commitment

(cherry picked from commit d158d45051)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-09-30 01:23:13 +00:00
5d2f450b89 Tune the sys-tuner documentation (#12576)
(cherry picked from commit 6156dc300d)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-30 01:18:24 +00:00
55b0e9e9c7 builds crds filters in parallel (bp #12360) (#12571)
* builds crds filters in parallel (#12360)

Based on run-time profiles, the majority of new_pull_requests' time is
spent building bloom filters, in hashing and bit-vec ops.

This commit builds crds filters in parallel using rayon constructs. The
added benchmark shows ~5x speedup (4-core machine, 8 threads).

(cherry picked from commit 537bbde22e)

# Conflicts:
#	core/Cargo.toml

* resolves mergify merge conflict

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-30 01:08:16 +00:00
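
As a rough illustration of the rayon pattern, here is a self-contained sketch that hashes values into per-thread partial filters and then ORs the partials together with fold/reduce. The `Bloom` type and its single toy hash function are stand-ins invented for the example; the real filters and hashing live in core/src/crds_gossip_pull.rs, and the actual commit may structure the parallelism differently.

```rust
use rayon::prelude::*;

/// Toy bloom filter: a fixed-size bit vector, stand-in for the real type.
#[derive(Clone)]
struct Bloom {
    bits: Vec<u64>,
}

impl Bloom {
    fn new(num_words: usize) -> Self {
        Self { bits: vec![0; num_words] }
    }
    fn add(&mut self, item: u64) {
        // toy single-hash mapping, just enough to make the sketch runnable
        let bit = (item.wrapping_mul(0x9E37_79B9_7F4A_7C15) as usize) % (self.bits.len() * 64);
        self.bits[bit / 64] |= 1u64 << (bit % 64);
    }
    fn merge(mut self, other: &Bloom) -> Bloom {
        for (a, b) in self.bits.iter_mut().zip(&other.bits) {
            *a |= *b;
        }
        self
    }
}

/// One way to parallelize filter construction with rayon: each worker folds its
/// share of the values into a local partial filter, then the partials are merged.
fn build_filter_parallel(values: &[u64], num_words: usize) -> Bloom {
    values
        .par_iter()
        .fold(
            || Bloom::new(num_words),
            |mut bloom, &v| {
                bloom.add(v);
                bloom
            },
        )
        .reduce(|| Bloom::new(num_words), |a, b| a.merge(&b))
}

fn main() {
    let values: Vec<u64> = (0..1_000_000).collect();
    let bloom = build_filter_parallel(&values, 1 << 10);
    println!("non-zero words: {}", bloom.bits.iter().filter(|w| **w != 0).count());
}
```
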
6d1bea7fb4 Include active stake in 'epoch_rewards' datapoint (#12573)
(cherry picked from commit 82848d6c73)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-30 01:08:04 +00:00
d19ed8816e Query BigTable for block time if does not exist in blockstore (#12560) (#12565)
(cherry picked from commit 96a7d4dbd8)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-29 23:26:03 +00:00
af7f48a2fd Track inserted repair shreds (#12455) (#12563)
(cherry picked from commit ce98088457)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-09-29 22:57:58 +00:00
0965389f41 Enable json output from solana feature status (#12554) (#12559) 2020-09-29 21:00:20 +00:00
24c60cf3db Bump version to v1.3.15 2020-09-29 20:57:08 +00:00
c8f4bfca90 Notify but don't abort on unexpected stake account balance 2020-09-29 11:39:11 -07:00
4a6b65ce53 Switch get_program_accounts to use base64 2020-09-29 18:21:19 +00:00
836ed842d6 Increase rpc pubsub max payload to unblock large account notifications (#12548) (#12551)
(cherry picked from commit 36d55c0667)

Co-authored-by: Justin Starry <justin@solana.com>
2020-09-29 17:20:31 +00:00
966d0f72bb Move process_instruction defs to runtime (#12507) (#12549)
(cherry picked from commit 2ff983647f)

Co-authored-by: Jack May <jack@solana.com>
2020-09-29 15:52:38 +00:00
a07e90516b separates out ClusterInfo::{gossip,listen} thread-pools (#12535) (#12547)
https://github.com/solana-labs/solana/pull/12402
moved gossip-work threads:
https://github.com/solana-labs/solana/blob/afd9bfc45/core/src/cluster_info.rs#L2330-L2334
to ClusterInfo::new as a new field in the ClusterInfo struct:
https://github.com/solana-labs/solana/blob/35208c5ee/core/src/cluster_info.rs#L249
so that they can be shared between listen and gossip threads:
https://github.com/solana-labs/solana/blob/afd9bfc45/core/src/gossip_service.rs#L54-L67

However, in testing https://github.com/solana-labs/solana/pull/12360
it turned out this would cause breakage:
https://buildkite.com/solana-labs/solana/builds/31646
https://buildkite.com/solana-labs/solana/builds/31651
https://buildkite.com/solana-labs/solana/builds/31655
With separate thread pools, however, all is good. It might be the case
that one thread is slowing down the other by exhausting the shared thread-pool,
whereas with separate thread-pools we get fair scheduling guarantees
from the OS.

This commit reverts https://github.com/solana-labs/solana/pull/12402
and instead adds separate thread-pools for listen and gossip threads:
https://github.com/solana-labs/solana/blob/afd9bfc45/core/src/gossip_service.rs#L54-L67

(cherry picked from commit 0d5258b6d3)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-29 11:27:47 +00:00
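
A minimal sketch of the separate-pool arrangement, assuming rayon's ThreadPoolBuilder (which the repo already uses); the pool sizes, thread names, and function names below are illustrative and not the values ClusterInfo actually uses.

```rust
use rayon::{ThreadPool, ThreadPoolBuilder};

/// Hypothetical illustration: give the gossip and listen paths their own rayon
/// pools instead of sharing one, so a burst of work on one path cannot exhaust
/// the threads the other path depends on.
fn build_gossip_pools(num_threads: usize) -> (ThreadPool, ThreadPool) {
    let gossip_pool = ThreadPoolBuilder::new()
        .num_threads(num_threads)
        .thread_name(|i| format!("gossip-work-{}", i))
        .build()
        .expect("gossip thread pool");
    let listen_pool = ThreadPoolBuilder::new()
        .num_threads(num_threads)
        .thread_name(|i| format!("listen-work-{}", i))
        .build()
        .expect("listen thread pool");
    (gossip_pool, listen_pool)
}

fn main() {
    let (gossip_pool, listen_pool) = build_gossip_pools(4);
    // Work installed on one pool cannot starve the other pool's workers.
    gossip_pool.install(|| { /* build pull requests, push messages, ... */ });
    listen_pool.install(|| { /* process incoming gossip packets, ... */ });
}
```
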
bd2e09d55a patches bug in Crds::find_old_labels with pubkey specific timeout (#12528) (#12546)
Current code only returns values which are expired based on the default
timeout. Example from the added unit test:
  - value inserted at time 0
  - pubkey specific timeout = 1
  - default timeout = 3
Then at now = 2 the value is expired, but the function fails to return
it because it compares against the default timeout.

(cherry picked from commit 57ed4e4657)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-29 10:13:13 +00:00
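
The fix amounts to looking up the pubkey-specific timeout before falling back to the default. A hypothetical, self-contained version of that check follows; it mirrors the scenario in the commit message but is not the actual Crds::find_old_labels code, and the `Pubkey` alias and function names are invented.

```rust
use std::collections::HashMap;

type Pubkey = [u8; 32]; // stand-in for solana_sdk::pubkey::Pubkey

/// Use the pubkey-specific timeout when one exists, otherwise the default;
/// the buggy code always compared against the default timeout.
fn is_expired(
    now: u64,
    inserted_at: u64,
    pubkey: &Pubkey,
    timeouts: &HashMap<Pubkey, u64>,
    default_timeout: u64,
) -> bool {
    let timeout = timeouts.get(pubkey).copied().unwrap_or(default_timeout);
    now.saturating_sub(inserted_at) > timeout
}

fn main() {
    let pk: Pubkey = [1u8; 32];
    let mut timeouts = HashMap::new();
    timeouts.insert(pk, 1); // pubkey-specific timeout = 1
    // Value inserted at time 0, default timeout = 3, now = 2:
    assert!(is_expired(2, 0, &pk, &timeouts, 3)); // expired under the specific timeout
    assert!(!is_expired(2, 0, &[2u8; 32], &timeouts, 3)); // other keys use the default
}
```
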
655577f9fe feature subcommand: display active stake by feature id when feature activation is not available (#12543)
(cherry picked from commit 322dbd894f)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-29 06:17:31 +00:00
3781ad259b clap-utils: Allow nonce/offline args to be global (bp #12538) 2020-09-29 04:51:33 +00:00
5ad5f8b458 cli-output: Add a path to handling --verbose and --quiet display (bp #12531) 2020-09-29 04:44:59 +00:00
5b322a995f Rpc -> proper optimistic confirmation (#12514) (#12537)
* Add service to track the most recent optimistically confirmed bank

* Plumb service into ClusterInfoVoteListener and ReplayStage

* Clean up test

* Use OptimisticallyConfirmedBank in RPC

* Remove superfluous notifications from RpcSubscriptions

* Use crossbeam to avoid mpsc recv_timeout panic

* Review comments

* Remove superfluous last_checked_slots, but pass in OptimisticallyConfirmedBank for complete correctness

(cherry picked from commit 89621adca7)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-29 03:49:18 +00:00
63d9f32bb4 purges old pending push messages more efficiently (#12522) (#12533)
(cherry picked from commit c94fe9236f)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-29 01:34:58 +00:00
a550d82202 Enable commitment arg on solana deploy (#12532) (#12534)
(cherry picked from commit 35208c5ee7)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-28 23:32:50 +00:00
4cf69365b2 Port BPFLoader2 activation to FeatureSet (bp #12490) (#12530)
* Cargo.lock

(cherry picked from commit 6071d0d206)

# Conflicts:
#	Cargo.lock

* Port BPFLoader2 activation to FeatureSet and rework built-in program activation

(cherry picked from commit 31696a1d72)

# Conflicts:
#	core/Cargo.toml
#	genesis-programs/Cargo.toml
#	genesis/Cargo.toml
#	ledger/Cargo.toml
#	local-cluster/Cargo.toml
#	runtime/src/bank.rs

* Add Builtin AbiExample

(cherry picked from commit 833ad20b01)

* Rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-28 23:12:05 +00:00
873b4ee830 Add a couple feature tests (#12529)
(cherry picked from commit 2956cc5aed)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-28 20:31:09 +00:00
672d9c9f62 Add feature to resolve spl-token v2 multisig bug (#12525)
(cherry picked from commit f9a74b51ef)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-28 18:14:20 +00:00
4bd29c1b32 Add pico-inflation feature
(cherry picked from commit aa5c008fa8)
2020-09-28 09:34:35 -07:00
72c082f55a Add precompile verification to preflight (#12486) (#12516)
(cherry picked from commit 6583c8cffe)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-09-28 06:37:52 +00:00
d712a908c2 Port fix_recent_blockhashes_sysvar_delay to FeatureSet (#12503)
(cherry picked from commit 5d6410c1cb)

# Conflicts:
#	runtime/src/feature_set.rs

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-26 20:26:58 +00:00
e3ca1a81b4 Add copy-on-write executor cache (bp #12502) (#12511)
* Add copy-on-write executor cache (#12502)

* Add copy-on-write executor cache

* Add remove_executor function to the bank

(cherry picked from commit 965f653471)

# Conflicts:
#	runtime/src/bank.rs

* rebase

Co-authored-by: Jack May <jack@solana.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-26 20:26:50 +00:00
16bce553e4 Nit: bpf test cleanup (#12401) (#12508)
(cherry picked from commit 7c4822efb1)

Co-authored-by: Jack May <jack@solana.com>
2020-09-26 17:53:27 +00:00
bc3aa53e02 Runtime feature activation framework (bp #12376) (#12497)
* Runtime feature activation framework

(cherry picked from commit 93259f0bae)

# Conflicts:
#	runtime/src/bank.rs

* Add feature set identifier to gossiped version information

(cherry picked from commit 35f5f9fc7b)

# Conflicts:
#	Cargo.lock
#	version/Cargo.toml

* Port instructions sysvar and secp256k1 program activation to FeatureSet

(cherry picked from commit c10da16d7b)

# Conflicts:
#	runtime/src/bank.rs
#	runtime/src/message_processor.rs

* Add feature management commands

(cherry picked from commit 93ed0ab2bb)

# Conflicts:
#	Cargo.lock
#	cli/Cargo.toml

* Make test_process_rest_api less fragile

(cherry picked from commit 7526bb96f3)

* Remove id field

(cherry picked from commit cc6ba1e131)

* FeatureSet test

(cherry picked from commit 92406cf9a0)

* cargo fmt

(cherry picked from commit 199940d683)

* cli review feedback

(cherry picked from commit 3a2b8c5e5b)

* Rename active() to is_active()

(cherry picked from commit e39fac9f01)

* Resolve merge conflicts

* Remove continues from compute_active_feature_set()

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-26 17:49:53 +00:00
6a698af235 Deerror 2020-09-25 22:19:09 -07:00
7ec38bd71c Improve 'Failed to create snapshot archive' warning message
(cherry picked from commit 5dcf348098)
2020-09-25 21:06:05 -07:00
8e3882287a Add epoch rewards metric datapoint (bp #12505) (#12509)
* Add epoch rewards metric datapoint

(cherry picked from commit e50386f928)

# Conflicts:
#	runtime/src/bank.rs

* Update bank.rs

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-26 04:02:23 +00:00
0c4074049b Cleanup names, fix line dependent test (#12477) (#12482)
(cherry picked from commit b8c4b88188)

Co-authored-by: Jack May <jack@solana.com>
2020-09-26 01:08:55 +00:00
250d2ba74a Pre-construct cpi instruction recorders before message processing (#12467) (#12504)
(cherry picked from commit 1c970bb39f)

Co-authored-by: Justin Starry <justin@solana.com>
2020-09-26 00:40:32 +00:00
b96e0e3d27 Drain the entire compute budget (bp #12478) (#12492)
* Drain the entire compute budget (#12478)


(cherry picked from commit d00453f747)

* fix conflict

Co-authored-by: Jack May <jack@solana.com>
2020-09-25 23:22:19 +00:00
99b513d905 Bump rust-bpf to v0.2.4 (#12361) (#12501)
(cherry picked from commit 65049bd112)

Co-authored-by: Jack May <jack@solana.com>
2020-09-25 22:12:39 +00:00
e85c792f70 Add RPC notify and banking keys debug (bp #12396) (#12452)
* Add RPC notify and banking keys debug (#12396)

(cherry picked from commit 68e5a2ef56)

# Conflicts:
#	core/src/validator.rs

* Rebase

Co-authored-by: sakridge <sakridge@gmail.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-25 21:05:21 +00:00
b65a764593 Bump jsonrpc crates to 15.0.0 (bp #12491) 2020-09-25 19:49:10 +00:00
a514b0e77b Add ComputeBudget tuner (bp #12476) (#12483)
* Add ComputeBudget tuner (#12476)

(cherry picked from commit d326512121)

# Conflicts:
#	programs/bpf/Cargo.toml

* fix conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-09-25 19:13:07 +00:00
179dd6ee59 Ignore cargo audit RUSTSEC-2020-0008 (#12489)
(cherry picked from commit cd5c7f30d5)

Co-authored-by: Jack May <jack@solana.com>
2020-09-25 10:01:23 -07:00
21ba2bad24 Add Signers impl for Vec<Box<dyn Signer>> (#12470)
(cherry picked from commit 07dfa37cce)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-09-25 10:45:29 +00:00
64b6372f9c cli-output: Add CliTokenAccount type (bp #12466) (#12468)
* account-decoder: Add string format helpers to UiTokenAmount

(cherry picked from commit bb144bf758)

* cli-output: Add CliTokenAccount type

(cherry picked from commit d95bce2600)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-09-25 06:05:19 +00:00
495ea7cd2f introduce RpcPerfSample and modify getPerformanceSamples output (#12434) (#12464)
* introduce RpcPerfSample and modify getPerformanceSamples output

* camelCase test results

(cherry picked from commit 1d04c1db94)

Co-authored-by: Josh <josh.hundley@gmail.com>
2020-09-24 22:45:05 +00:00
bb12d65102 Remove legacy inflation activation code (#12460)
(cherry picked from commit c4aee8c0a0)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-09-24 20:38:41 +00:00
72365bb9d2 moves gossip-work thread pool cons to ClusterInfo::new (#12402) (#12458)
(cherry picked from commit 42f1ef8acb)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-24 20:01:03 +00:00
c44f6981b1 adds an atomic variant of the bloom filter (#12422) (#12459)
For crds_gossip_pull, we want to parallelize build_crds_filters, which
requires concurrent writes to bloom filters.

This commit implements a variant of the bloom filter which uses atomics
for its bits vector and so is thread-safe.

(cherry picked from commit bb183938d9)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-24 19:49:10 +00:00
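
A minimal sketch of the core trick, assuming only the standard library: back the bit vector with AtomicU64 words so setting a bit needs only `&self` and can happen concurrently from many rayon workers. This is not the actual atomic bloom filter type, just the thread-safety idea.

```rust
use std::sync::atomic::{AtomicU64, Ordering};

/// Thread-safe bit vector: bits live in AtomicU64 words, so `set` takes `&self`.
struct AtomicBits {
    words: Vec<AtomicU64>,
}

impl AtomicBits {
    fn new(num_bits: usize) -> Self {
        let num_words = (num_bits + 63) / 64;
        Self {
            words: (0..num_words).map(|_| AtomicU64::new(0)).collect(),
        }
    }

    fn set(&self, bit: usize) {
        // fetch_or is atomic, so concurrent writers never lose each other's bits.
        self.words[bit / 64].fetch_or(1u64 << (bit % 64), Ordering::Relaxed);
    }

    fn get(&self, bit: usize) -> bool {
        self.words[bit / 64].load(Ordering::Relaxed) & (1u64 << (bit % 64)) != 0
    }
}

fn main() {
    let bits = AtomicBits::new(1024);
    bits.set(42); // only needs &self, so it can be shared across threads by reference
    assert!(bits.get(42));
    assert!(!bits.get(43));
}
```

With a structure like this, parallel filter builders can share the filter by reference and insert concurrently without a mutex, which is what makes the parallel build_crds_filters change above practical.
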
0213016999 Use publish=false (#12447) (#12453)
(cherry picked from commit a5c3fc14b3)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-09-24 16:58:18 +00:00
4e9e05f311 shards crds values based on their hash prefix (bp #12187) (#12312)
* shards crds values based on their hash prefix (#12187)

filter_crds_values checks every crds filter against every hash value:
https://github.com/solana-labs/solana/blob/ee646aa7/core/src/crds_gossip_pull.rs#L432
which can be inefficient if the filter's bit-mask only matches a small
portion of the entire crds table.

This commit shards crds values into separate tables based on the first
shard_bits bits of their hash prefix. Given a (mask, mask_bits) filter,
filtering crds can be done by inspecting only the relevant shards.

If CrdsFilter.mask_bits <= shard_bits, then precisely the crds values
which match the (mask, mask_bits) bit pattern are traversed.
If CrdsFilter.mask_bits > shard_bits, then approximately only
1/2^shard_bits of the crds values are inspected.

Benchmarking on a GCE cluster of 20 nodes, I see ~10% improvement in
the generate_pull_responses metric; with larger clusters, the crds table and
2^mask_bits are both larger, so the impact should be more significant.

(cherry picked from commit 9b866d79fb)

* bumps indexmap to 1.6.0

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-09-24 16:10:53 +00:00
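
A simplified, self-contained sketch of the sharding arithmetic: the shard index comes from the first shard_bits bits of a value's 64-bit hash prefix, and a (mask, mask_bits) filter with mask_bits <= shard_bits only needs the shards whose prefix agrees with the mask. The helper names and the mask convention below are assumptions for illustration; the actual CrdsShards implementation may differ in detail.

```rust
/// Take the first `shard_bits` bits of a 64-bit hash prefix as the shard index,
/// so values are grouped by hash prefix.
fn shard_index(hash_prefix: u64, shard_bits: u32) -> usize {
    (hash_prefix >> (64 - shard_bits)) as usize
}

/// When mask_bits <= shard_bits, a shard needs scanning only if its index agrees
/// with the mask on the first mask_bits bits.
fn shard_matches_mask(shard: usize, shard_bits: u32, mask: u64, mask_bits: u32) -> bool {
    debug_assert!(mask_bits <= shard_bits && mask_bits > 0);
    let shard_prefix = (shard as u64) << (64 - shard_bits);
    (shard_prefix >> (64 - mask_bits)) == (mask >> (64 - mask_bits))
}

fn main() {
    let shard_bits = 4; // 16 shards
    let hash_prefix = 0xB000_0000_0000_0000u64;
    assert_eq!(shard_index(hash_prefix, shard_bits), 0xB);
    // A filter whose mask starts with bits `10` (mask_bits = 2) matches shards 0x8..=0xB.
    let mask = 0x8000_0000_0000_0000u64;
    assert!(shard_matches_mask(0xB, shard_bits, mask, 2));
    assert!(!shard_matches_mask(0x4, shard_bits, mask, 2));
}
```
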
7212bb12ea Record and store invoked instructions in transaction meta (#12311) (#12449)
* Record invoked instructions and store in transaction meta

* Enable cpi recording if transaction sender is some

* Rename invoked to innerInstructions

(cherry picked from commit 6601ec8f26)

Co-authored-by: Justin Starry <justin@solana.com>
2020-09-24 15:42:34 +00:00
9ff2378948 Remove transaction encoding from storage layer (bp #12404) (#12440)
* Remove transaction encoding from storage layer (#12404)

(cherry picked from commit 731a943239)

* Bump

Co-authored-by: Justin Starry <justin@solana.com>
2020-09-24 10:11:27 +00:00
ec4938a9f3 Bump version to 1.3.14 (#12444) 2020-09-24 07:42:54 +00:00
41b45ca281 Allow publishing of secp256k1 program 2020-09-24 00:05:11 -06:00
396 changed files with 23408 additions and 7358 deletions

View File

@ -31,4 +31,9 @@ export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
mkdir -p "$CARGO_TARGET_CACHE"/target
rsync -a --delete --link-dest="$CARGO_TARGET_CACHE" "$CARGO_TARGET_CACHE"/target .
# Don't reuse BPF target build artifacts due to incremental build issues with
# `std`:
# "found possibly newer version of crate `std` which `xyz` depends on
rm -rf target/bpfel-unknown-unknown
)

View File

@ -124,6 +124,8 @@ jobs:
- ~/.npm
before_install:
- source ci/env.sh
- .travis/channel_restriction.sh edge beta || travis_terminate 0
- .travis/affects.sh docs/ .travis || travis_terminate 0
- cd docs/
- source .travis/before_install.sh

19
.travis/channel_restriction.sh Executable file
View File

@ -0,0 +1,19 @@
#!/usr/bin/env bash
#
# Only proceed if we are on one of the channels passed in, or a tag build
#
set -ex
[[ -n $CI_TAG ]] && exit 0
eval "$(ci/channel-info.sh)"
for acceptable_channel in "$@"; do
if [[ "$CHANNEL" == "$acceptable_channel" ]]; then
exit 0
fi
done
echo "Not running from one of the following channels: $*"
exit 1

864
Cargo.lock generated

File diff suppressed because it is too large.

View File

@ -19,7 +19,6 @@ members = [
"perf",
"validator",
"genesis",
"genesis-programs",
"gossip",
"install",
"keygen",

View File

@ -61,8 +61,9 @@ $ cargo test
### Starting a local testnet
Start your own testnet locally, instructions are in the [online docs](https://docs.solana.com/bench-tps).
### Accessing the remote testnet
* `testnet` - public stable testnet accessible via devnet.solana.com. Runs 24/7
### Accessing the remote development cluster
* `devnet` - stable public cluster for development accessible via
devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solana.com/clusters)
# Benchmarking

View File

@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.3.13"
version = "1.3.23"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -18,11 +18,11 @@ lazy_static = "1.4.0"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-stake-program = { path = "../programs/stake", version = "1.3.13" }
solana-vote-program = { path = "../programs/vote", version = "1.3.13" }
spl-token-v2-0 = { package = "spl-token", version = "2.0.6", features = ["skip-no-mangle"] }
solana-config-program = { path = "../programs/config", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
solana-stake-program = { path = "../programs/stake", version = "1.3.23" }
solana-vote-program = { path = "../programs/vote", version = "1.3.23" }
spl-token-v2-0 = { package = "spl-token", version = "=2.0.6", features = ["skip-no-mangle"] }
thiserror = "1.0"
[package.metadata.docs.rs]

View File

@ -111,8 +111,8 @@ mod test {
#[test]
fn test_parse_account_data() {
let account_pubkey = Pubkey::new_rand();
let other_program = Pubkey::new_rand();
let account_pubkey = solana_sdk::pubkey::new_rand();
let other_program = solana_sdk::pubkey::new_rand();
let data = vec![0; 4];
assert!(parse_account_data(&account_pubkey, &other_program, &data, None).is_err());

View File

@ -117,7 +117,7 @@ mod test {
}))
.unwrap(),
};
let info_pubkey = Pubkey::new_rand();
let info_pubkey = solana_sdk::pubkey::new_rand();
let validator_info_config_account = create_config_account(
vec![(validator_info::id(), false), (info_pubkey, true)],
&validator_info,

View File

@ -134,7 +134,6 @@ impl From<Delegation> for UiDelegation {
mod test {
use super::*;
use bincode::serialize;
use solana_sdk::pubkey::Pubkey;
#[test]
fn test_parse_stake() {
@ -145,8 +144,8 @@ mod test {
StakeAccountType::Uninitialized
);
let pubkey = Pubkey::new_rand();
let custodian = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let custodian = solana_sdk::pubkey::new_rand();
let authorized = Authorized::auto(&pubkey);
let lockup = Lockup {
unix_timestamp: 0,
@ -180,7 +179,7 @@ mod test {
})
);
let voter_pubkey = Pubkey::new_rand();
let voter_pubkey = solana_sdk::pubkey::new_rand();
let stake = Stake {
delegation: Delegation {
voter_pubkey,

View File

@ -319,7 +319,7 @@ mod test {
}]),
);
let bad_pubkey = Pubkey::new_rand();
let bad_pubkey = solana_sdk::pubkey::new_rand();
assert!(parse_sysvar(&stake_history_sysvar.data, &bad_pubkey).is_err());
let bad_data = vec![0; 4];

View File

@ -154,6 +154,31 @@ pub struct UiTokenAmount {
pub amount: StringAmount,
}
impl UiTokenAmount {
pub fn real_number_string(&self) -> String {
let decimals = self.decimals as usize;
if decimals > 0 {
let amount = u64::from_str(&self.amount).unwrap_or(0);
// Left-pad zeros to decimals + 1, so we at least have an integer zero
let mut s = format!("{:01$}", amount, decimals + 1);
// Add the decimal point (Sorry, "," locales!)
s.insert(s.len() - decimals, '.');
s
} else {
self.amount.clone()
}
}
pub fn real_number_string_trimmed(&self) -> String {
let s = self.real_number_string();
let zeros_trimmed = s.trim_end_matches('0');
let decimal_trimmed = zeros_trimmed.trim_end_matches('.');
decimal_trimmed.to_string()
}
}
pub fn token_amount_to_ui_amount(amount: u64, decimals: u8) -> UiTokenAmount {
// Use `amount_to_ui_amount()` once spl_token is bumped to a version that supports it: https://github.com/solana-labs/solana-program-library/pull/211
let amount_decimals = amount as f64 / 10_usize.pow(decimals as u32) as f64;
@ -296,4 +321,20 @@ mod test {
Some(expected_mint_pubkey)
);
}
#[test]
fn test_ui_token_amount_real_string() {
let token_amount = token_amount_to_ui_amount(1, 0);
assert_eq!(&token_amount.real_number_string(), "1");
assert_eq!(&token_amount.real_number_string_trimmed(), "1");
let token_amount = token_amount_to_ui_amount(1, 9);
assert_eq!(&token_amount.real_number_string(), "0.000000001");
assert_eq!(&token_amount.real_number_string_trimmed(), "0.000000001");
let token_amount = token_amount_to_ui_amount(1_000_000_000, 9);
assert_eq!(&token_amount.real_number_string(), "1.000000000");
assert_eq!(&token_amount.real_number_string_trimmed(), "1");
let token_amount = token_amount_to_ui_amount(1_234_567_890, 3);
assert_eq!(&token_amount.real_number_string(), "1234567.890");
assert_eq!(&token_amount.real_number_string_trimmed(), "1234567.89");
}
}

View File

@ -2,19 +2,20 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.3.13"
version = "1.3.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
log = "0.4.6"
rayon = "1.4.0"
solana-logger = { path = "../logger", version = "1.3.13" }
solana-runtime = { path = "../runtime", version = "1.3.13" }
solana-measure = { path = "../measure", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-version = { path = "../version", version = "1.3.13" }
solana-logger = { path = "../logger", version = "1.3.23" }
solana-runtime = { path = "../runtime", version = "1.3.23" }
solana-measure = { path = "../measure", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
solana-version = { path = "../version", version = "1.3.23" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"

View File

@ -2,10 +2,11 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.3.13"
version = "1.3.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
clap = "2.33.1"
@ -13,16 +14,16 @@ crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.4.0"
solana-core = { path = "../core", version = "1.3.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.13" }
solana-streamer = { path = "../streamer", version = "1.3.13" }
solana-perf = { path = "../perf", version = "1.3.13" }
solana-ledger = { path = "../ledger", version = "1.3.13" }
solana-logger = { path = "../logger", version = "1.3.13" }
solana-runtime = { path = "../runtime", version = "1.3.13" }
solana-measure = { path = "../measure", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-version = { path = "../version", version = "1.3.13" }
solana-core = { path = "../core", version = "1.3.23" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.23" }
solana-streamer = { path = "../streamer", version = "1.3.23" }
solana-perf = { path = "../perf", version = "1.3.23" }
solana-ledger = { path = "../ledger", version = "1.3.23" }
solana-logger = { path = "../logger", version = "1.3.23" }
solana-runtime = { path = "../runtime", version = "1.3.23" }
solana-measure = { path = "../measure", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
solana-version = { path = "../version", version = "1.3.23" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -20,7 +20,6 @@ use solana_perf::packet::to_packets_chunked;
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::Keypair,
signature::Signature,
system_transaction,
@ -69,7 +68,7 @@ fn make_accounts_txs(
hash: Hash,
same_payer: bool,
) -> Vec<Transaction> {
let to_pubkey = Pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
let payer_key = Keypair::new();
let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash);
(0..total_num_transactions)
@ -78,9 +77,9 @@ fn make_accounts_txs(
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
if !same_payer {
new.message.account_keys[0] = Pubkey::new_rand();
new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
}
new.message.account_keys[1] = Pubkey::new_rand();
new.message.account_keys[1] = solana_sdk::pubkey::new_rand();
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
@ -241,7 +240,7 @@ fn main() {
let base_tx_count = bank.transaction_count();
let mut txs_processed = 0;
let mut root = 1;
let collector = Pubkey::new_rand();
let collector = solana_sdk::pubkey::new_rand();
let config = Config {
packets_per_batch: packets_per_chunk,
chunk_len,

View File

@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.3.13"
version = "1.3.23"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -12,15 +12,15 @@ edition = "2018"
async-trait = "0.1.36"
bincode = "1.3.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-banks-interface = { path = "../banks-interface", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
tarpc = { version = "0.21.0", features = ["full"] }
tokio = "0.2"
tokio-serde = { version = "0.6", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "1.3.13" }
solana-banks-server = { path = "../banks-server", version = "1.3.13" }
solana-runtime = { path = "../runtime", version = "1.3.23" }
solana-banks-server = { path = "../banks-server", version = "1.3.23" }
[lib]
crate-type = ["lib"]

View File

@ -71,6 +71,14 @@ pub trait BanksClientExt {
/// are said to be finalized. The cluster will not fork to a higher slot height.
async fn get_root_slot(&mut self) -> io::Result<Slot>;
/// Return the account at the given address at the slot corresponding to the given
/// commitment level. If the account is not found, None is returned.
async fn get_account_with_commitment(
&mut self,
address: Pubkey,
commitment: CommitmentLevel,
) -> io::Result<Option<Account>>;
/// Return the account at the given address at the time of the most recent root slot.
/// If the account is not found, None is returned.
async fn get_account(&mut self, address: Pubkey) -> io::Result<Option<Account>>;
@ -130,13 +138,18 @@ impl BanksClientExt for BanksClient {
.await
}
async fn get_account_with_commitment(
&mut self,
address: Pubkey,
commitment: CommitmentLevel,
) -> io::Result<Option<Account>> {
self.get_account_with_commitment_and_context(context::current(), address, commitment)
.await
}
async fn get_account(&mut self, address: Pubkey) -> io::Result<Option<Account>> {
self.get_account_with_commitment_and_context(
context::current(),
address,
CommitmentLevel::default(),
)
.await
self.get_account_with_commitment(address, CommitmentLevel::default())
.await
}
async fn get_balance_with_commitment(
@ -200,7 +213,7 @@ mod tests {
use super::*;
use solana_banks_server::banks_server::start_local_server;
use solana_runtime::{bank::Bank, bank_forks::BankForks, genesis_utils::create_genesis_config};
use solana_sdk::{message::Message, pubkey::Pubkey, signature::Signer, system_instruction};
use solana_sdk::{message::Message, signature::Signer, system_instruction};
use std::sync::{Arc, RwLock};
use tarpc::transport;
use tokio::{runtime::Runtime, time::delay_for};
@ -222,7 +235,7 @@ mod tests {
&genesis.genesis_config,
))));
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let mint_pubkey = genesis.mint_keypair.pubkey();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
@ -252,7 +265,7 @@ mod tests {
))));
let mint_pubkey = &genesis.mint_keypair.pubkey();
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));

View File

@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.3.13"
version = "1.3.23"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -10,7 +10,7 @@ edition = "2018"
[dependencies]
serde = { version = "1.0.112", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
tarpc = { version = "0.21.0", features = ["full"] }
[lib]

View File

@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.3.13"
version = "1.3.23"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ edition = "2018"
bincode = "1.3.1"
futures = "0.3"
log = "0.4.8"
solana-banks-interface = { path = "../banks-interface", version = "1.3.13" }
solana-runtime = { path = "../runtime", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-metrics = { path = "../metrics", version = "1.3.13" }
solana-banks-interface = { path = "../banks-interface", version = "1.3.23" }
solana-runtime = { path = "../runtime", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
solana-metrics = { path = "../metrics", version = "1.3.23" }
tarpc = { version = "0.21.0", features = ["full"] }
tokio = "0.2"
tokio-serde = { version = "0.6", features = ["bincode"] }

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.3.13"
version = "1.3.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -18,21 +18,21 @@ rand = "0.7.0"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.3.13" }
solana-core = { path = "../core", version = "1.3.13" }
solana-genesis = { path = "../genesis", version = "1.3.13" }
solana-client = { path = "../client", version = "1.3.13" }
solana-faucet = { path = "../faucet", version = "1.3.13" }
solana-exchange-program = { path = "../programs/exchange", version = "1.3.13" }
solana-logger = { path = "../logger", version = "1.3.13" }
solana-metrics = { path = "../metrics", version = "1.3.13" }
solana-net-utils = { path = "../net-utils", version = "1.3.13" }
solana-runtime = { path = "../runtime", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-version = { path = "../version", version = "1.3.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.23" }
solana-core = { path = "../core", version = "1.3.23" }
solana-genesis = { path = "../genesis", version = "1.3.23" }
solana-client = { path = "../client", version = "1.3.23" }
solana-faucet = { path = "../faucet", version = "1.3.23" }
solana-exchange-program = { path = "../programs/exchange", version = "1.3.23" }
solana-logger = { path = "../logger", version = "1.3.23" }
solana-metrics = { path = "../metrics", version = "1.3.23" }
solana-net-utils = { path = "../net-utils", version = "1.3.23" }
solana-runtime = { path = "../runtime", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
solana-version = { path = "../version", version = "1.3.23" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.3.13" }
solana-local-cluster = { path = "../local-cluster", version = "1.3.23" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -2,18 +2,19 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.3.13"
version = "1.3.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.3.13" }
solana-streamer = { path = "../streamer", version = "1.3.13" }
solana-logger = { path = "../logger", version = "1.3.13" }
solana-net-utils = { path = "../net-utils", version = "1.3.13" }
solana-version = { path = "../version", version = "1.3.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.23" }
solana-streamer = { path = "../streamer", version = "1.3.23" }
solana-logger = { path = "../logger", version = "1.3.23" }
solana-net-utils = { path = "../net-utils", version = "1.3.23" }
solana-version = { path = "../version", version = "1.3.23" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -2,10 +2,11 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.3.13"
version = "1.3.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false
[dependencies]
bincode = "1.3.1"
@ -14,23 +15,23 @@ log = "0.4.8"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.3.13" }
solana-core = { path = "../core", version = "1.3.13" }
solana-genesis = { path = "../genesis", version = "1.3.13" }
solana-client = { path = "../client", version = "1.3.13" }
solana-faucet = { path = "../faucet", version = "1.3.13" }
solana-logger = { path = "../logger", version = "1.3.13" }
solana-metrics = { path = "../metrics", version = "1.3.13" }
solana-measure = { path = "../measure", version = "1.3.13" }
solana-net-utils = { path = "../net-utils", version = "1.3.13" }
solana-runtime = { path = "../runtime", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-version = { path = "../version", version = "1.3.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.23" }
solana-core = { path = "../core", version = "1.3.23" }
solana-genesis = { path = "../genesis", version = "1.3.23" }
solana-client = { path = "../client", version = "1.3.23" }
solana-faucet = { path = "../faucet", version = "1.3.23" }
solana-logger = { path = "../logger", version = "1.3.23" }
solana-metrics = { path = "../metrics", version = "1.3.23" }
solana-measure = { path = "../measure", version = "1.3.23" }
solana-net-utils = { path = "../net-utils", version = "1.3.23" }
solana-runtime = { path = "../runtime", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
solana-version = { path = "../version", version = "1.3.23" }
[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.3.13" }
solana-local-cluster = { path = "../local-cluster", version = "1.3.23" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

31
cargo Executable file
View File

@ -0,0 +1,31 @@
#!/usr/bin/env bash
# shellcheck source=ci/rust-version.sh
here=$(dirname "$0")
source "${here}"/ci/rust-version.sh all
toolchain=
case "$1" in
stable)
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
toolchain="$rust_stable"
shift
;;
nightly)
# shellcheck disable=SC2054 # rust_nightly is sourced from rust-version.sh
toolchain="$rust_nightly"
shift
;;
+*)
toolchain="${1#+}"
shift
;;
*)
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
toolchain="$rust_stable"
;;
esac
set -x
exec cargo "+${toolchain}" "${@}"

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python2.7
#!/usr/bin/env python3
#
# This script figures the order in which workspace crates must be published to
# crates.io. Along the way it also ensures there are no circular dependencies
@ -45,21 +45,27 @@ def get_packages():
sorted_dependency_graph = []
max_iterations = pow(len(dependency_graph),2)
while dependency_graph:
deleted_packages = []
if max_iterations == 0:
# One day be more helpful and find the actual cycle for the user...
sys.exit('Error: Circular dependency suspected between these packages: \n {}\n'.format('\n '.join(dependency_graph.keys())))
max_iterations -= 1
for package, dependencies in dependency_graph.items():
if package in deleted_packages:
continue
for dependency in dependencies:
if dependency in dependency_graph:
break
else:
del dependency_graph[package]
deleted_packages.append(package)
sorted_dependency_graph.append((package, manifest_path[package]))
dependency_graph = {p: d for p, d in dependency_graph.items() if not p in deleted_packages }
return sorted_dependency_graph
for package, manifest in get_packages():
print os.path.relpath(manifest)
print(os.path.relpath(manifest))

View File

@ -91,17 +91,15 @@ echo --- Creating release tarball
cp "${RELEASE_BASENAME}"/version.yml "${TARBALL_BASENAME}"-$TARGET.yml
)
# Metrics tarball is platform agnostic, only publish it from Linux
# Maybe tarballs are platform agnostic, only publish them from the Linux build
MAYBE_TARBALLS=
if [[ "$CI_OS_NAME" = linux ]]; then
metrics/create-metrics-tarball.sh
(
set -x
sdk/bpf/scripts/package.sh
[[ -f bpf-sdk.tar.bz2 ]]
)
MAYBE_TARBALLS="bpf-sdk.tar.bz2 solana-metrics.tar.bz2"
MAYBE_TARBALLS="bpf-sdk.tar.bz2"
fi
source ci/upload-ci-artifact.sh
@ -126,7 +124,7 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.
/usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
echo Published to:
$DRYRUN ci/format-url.sh http://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
$DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
)
if [[ -n $TAG ]]; then
@ -149,4 +147,30 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.
fi
done
# Create install wrapper for release.solana.com
if [[ -n $BUILDKITE ]]; then
cat > release.solana.com-install <<EOF
SOLANA_RELEASE=$CHANNEL_OR_TAG
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
EOF
cat install/solana-install-init.sh >> release.solana.com-install
echo --- AWS S3 Store: "install"
(
set -x
$DRYRUN docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/release.solana.com-install s3://release.solana.com/"$CHANNEL_OR_TAG"/install
echo Published to:
$DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/install
)
fi
echo --- ok

View File

@ -9,6 +9,8 @@ source ci/rust-version.sh stable
source ci/rust-version.sh nightly
eval "$(ci/channel-info.sh)"
scripts/increment-cargo-version.sh check
echo --- build environment
(
set -x
@ -56,7 +58,29 @@ _ cargo +"$rust_stable" fmt --all -- --check
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
_ cargo +"$rust_nightly" clippy -Zunstable-options --workspace --all-targets -- --deny=warnings
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
cargo_audit_ignores=(
# failure is officially deprecated/unmaintained
#
# Blocked on multiple upstream crates removing their `failure` dependency.
--ignore RUSTSEC-2020-0036
# `net2` crate has been deprecated; use `socket2` instead
#
# Blocked on https://github.com/paritytech/jsonrpc/issues/575
--ignore RUSTSEC-2020-0016
# stdweb is unmaintained
#
# Blocked on multiple upstream crates removing their `stdweb` dependency.
--ignore RUSTSEC-2020-0056
# Potential segfault in the time crate
#
# Blocked on multiple crates updating `time` to >= 0.2.23
--ignore RUSTSEC-2020-0071
)
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignores[@]}"
{
cd programs/bpf

View File

@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.3.13"
version = "1.3.23"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
thiserror = "1.0.20"
tiny-bip39 = "0.7.0"
url = "2.1.0"

View File

@ -15,7 +15,7 @@ pub fn commitment_arg_with_default<'a, 'b>(default_value: &'static str) -> Arg<'
Arg::with_name(COMMITMENT_ARG.name)
.long(COMMITMENT_ARG.long)
.takes_value(true)
.possible_values(&["recent", "single", "root", "max"])
.possible_values(&["recent", "single", "singleGossip", "root", "max"])
.default_value(default_value)
.value_name("COMMITMENT_LEVEL")
.help(COMMITMENT_ARG.help)

View File

@ -228,8 +228,8 @@ mod tests {
assert_eq!(values_of(&matches, "multiple"), Some(vec![50, 39]));
assert_eq!(values_of::<u64>(&matches, "single"), None);
let pubkey0 = Pubkey::new_rand();
let pubkey1 = Pubkey::new_rand();
let pubkey0 = solana_sdk::pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
let matches = app().clone().get_matches_from(vec![
"test",
"--multiple",
@ -251,7 +251,7 @@ mod tests {
assert_eq!(value_of(&matches, "single"), Some(50));
assert_eq!(value_of::<u64>(&matches, "multiple"), None);
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let matches = app()
.clone()
.get_matches_from(vec!["test", "--single", &pubkey.to_string()]);
@ -331,8 +331,8 @@ mod tests {
#[test]
fn test_pubkeys_sigs_of() {
let key1 = Pubkey::new_rand();
let key2 = Pubkey::new_rand();
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]);
let sig2 = Keypair::new().sign_message(&[1u8]);
let signer1 = format!("{}={}", key1, sig1);

View File

@ -298,7 +298,24 @@ pub fn keypair_from_seed_phrase(
keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)?
} else {
let sanitized = sanitize_seed_phrase(seed_phrase);
let mnemonic = Mnemonic::from_phrase(&sanitized, Language::English)?;
let parse_language_fn = || {
for language in &[
Language::English,
Language::ChineseSimplified,
Language::ChineseTraditional,
Language::Japanese,
Language::Spanish,
Language::Korean,
Language::French,
Language::Italian,
] {
if let Ok(mnemonic) = Mnemonic::from_phrase(&sanitized, *language) {
return Ok(mnemonic);
}
}
Err("Can't get mnemonic from seed phrases")
};
let mnemonic = parse_language_fn()?;
let passphrase = prompt_passphrase(&passphrase_prompt)?;
let seed = Seed::new(&mnemonic, &passphrase);
keypair_from_seed(seed.as_bytes())?

View File

@ -36,12 +36,15 @@ pub fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
}
pub trait NonceArgs {
fn nonce_args(self) -> Self;
fn nonce_args(self, global: bool) -> Self;
}
impl NonceArgs for App<'_, '_> {
fn nonce_args(self) -> Self {
self.arg(nonce_arg())
.arg(nonce_authority_arg().requires(NONCE_ARG.name))
fn nonce_args(self, global: bool) -> Self {
self.arg(nonce_arg().global(global)).arg(
nonce_authority_arg()
.requires(NONCE_ARG.name)
.global(global),
)
}
}

View File

@ -48,13 +48,13 @@ fn signer_arg<'a, 'b>() -> Arg<'a, 'b> {
}
pub trait OfflineArgs {
fn offline_args(self) -> Self;
fn offline_args(self, global: bool) -> Self;
}
impl OfflineArgs for App<'_, '_> {
fn offline_args(self) -> Self {
self.arg(blockhash_arg())
.arg(sign_only_arg())
.arg(signer_arg())
fn offline_args(self, global: bool) -> Self {
self.arg(blockhash_arg().global(global))
.arg(sign_only_arg().global(global))
.arg(signer_arg().global(global))
}
}

View File

@ -3,13 +3,13 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.13"
version = "1.3.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
dirs = "2.0.2"
dirs-next = "2.0.0"
lazy_static = "1.4.0"
serde = "1.0.112"
serde_derive = "1.0.103"

View File

@ -5,7 +5,7 @@ use url::Url;
lazy_static! {
pub static ref CONFIG_FILE: Option<String> = {
dirs::home_dir().map(|mut path| {
dirs_next::home_dir().map(|mut path| {
path.extend(&[".config", "solana", "cli", "config.yml"]);
path.to_str().unwrap().to_string()
})
@ -25,7 +25,7 @@ pub struct Config {
impl Default for Config {
fn default() -> Self {
let keypair_path = {
let mut keypair_path = dirs::home_dir().expect("home directory");
let mut keypair_path = dirs_next::home_dir().expect("home directory");
keypair_path.extend(&[".config", "solana", "id.json"]);
keypair_path.to_str().unwrap().to_string()
};
@ -82,7 +82,7 @@ impl Config {
return "".to_string();
}
let mut url = json_rpc_url.unwrap();
let port = url.port_or_known_default().unwrap_or(80);
let port = url.port().unwrap_or(8899);
url.set_port(Some(port + 3)).expect("unable to set port");
url.to_string()
}
@ -138,12 +138,12 @@ mod test {
fn compute_rpc_banks_url() {
assert_eq!(
Config::compute_rpc_banks_url(&"http://devnet.solana.com"),
"http://devnet.solana.com:83/".to_string()
"http://devnet.solana.com:8902/".to_string()
);
assert_eq!(
Config::compute_rpc_banks_url(&"https://devnet.solana.com"),
"https://devnet.solana.com:446/".to_string()
"https://devnet.solana.com:8902/".to_string()
);
assert_eq!(

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.13"
version = "1.3.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -17,12 +17,13 @@ indicatif = "0.15.0"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-clap-utils = { path = "../clap-utils", version = "1.3.13" }
solana-client = { path = "../client", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-stake-program = { path = "../programs/stake", version = "1.3.13" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.13" }
solana-vote-program = { path = "../programs/vote", version = "1.3.13" }
solana-account-decoder = { path = "../account-decoder", version = "1.3.23" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.23" }
solana-client = { path = "../client", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
solana-stake-program = { path = "../programs/stake", version = "1.3.23" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.23" }
solana-vote-program = { path = "../programs/vote", version = "1.3.23" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

View File

@ -1,9 +1,13 @@
use crate::display::{build_balance_message, format_labeled_address, writeln_name_value};
use crate::{
display::{build_balance_message, format_labeled_address, writeln_name_value},
QuietDisplay, VerboseDisplay,
};
use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};
use console::{style, Emoji};
use inflector::cases::titlecase::to_title_case;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use solana_account_decoder::parse_token::UiTokenAccount;
use solana_clap_utils::keypair::SignOnly;
use solana_client::rpc_response::{
RpcAccountBalance, RpcKeyedAccount, RpcSupply, RpcVoteAccountInfo,
@ -37,15 +41,27 @@ pub enum OutputFormat {
Display,
Json,
JsonCompact,
DisplayQuiet,
DisplayVerbose,
}
impl OutputFormat {
pub fn formatted_string<T>(&self, item: &T) -> String
where
T: Serialize + fmt::Display,
T: Serialize + fmt::Display + QuietDisplay + VerboseDisplay,
{
match self {
OutputFormat::Display => format!("{}", item),
OutputFormat::DisplayQuiet => {
let mut s = String::new();
QuietDisplay::write_str(item, &mut s).unwrap();
s
}
OutputFormat::DisplayVerbose => {
let mut s = String::new();
VerboseDisplay::write_str(item, &mut s).unwrap();
s
}
OutputFormat::Json => serde_json::to_string_pretty(item).unwrap(),
OutputFormat::JsonCompact => serde_json::to_value(item).unwrap().to_string(),
}
@ -60,6 +76,9 @@ pub struct CliAccount {
pub use_lamports_unit: bool,
}
impl QuietDisplay for CliAccount {}
impl VerboseDisplay for CliAccount {}
impl fmt::Display for CliAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
@ -102,6 +121,9 @@ pub struct CliBlockProduction {
pub verbose: bool,
}
impl QuietDisplay for CliBlockProduction {}
impl VerboseDisplay for CliBlockProduction {}
impl fmt::Display for CliBlockProduction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
@ -206,6 +228,9 @@ impl From<EpochInfo> for CliEpochInfo {
}
}
impl QuietDisplay for CliEpochInfo {}
impl VerboseDisplay for CliEpochInfo {}
impl fmt::Display for CliEpochInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
@ -276,7 +301,7 @@ pub struct CliValidatorsStakeByVersion {
pub struct CliValidators {
pub total_active_stake: u64,
pub total_current_stake: u64,
pub total_deliquent_stake: u64,
pub total_delinquent_stake: u64,
pub current_validators: Vec<CliValidator>,
pub delinquent_validators: Vec<CliValidator>,
pub stake_by_version: BTreeMap<String, CliValidatorsStakeByVersion>,
@ -284,6 +309,9 @@ pub struct CliValidators {
pub use_lamports_unit: bool,
}
impl QuietDisplay for CliValidators {}
impl VerboseDisplay for CliValidators {}
impl fmt::Display for CliValidators {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn write_vote_account(
@ -332,7 +360,7 @@ impl fmt::Display for CliValidators {
"Active Stake:",
&build_balance_message(self.total_active_stake, self.use_lamports_unit, true),
)?;
if self.total_deliquent_stake > 0 {
if self.total_delinquent_stake > 0 {
writeln_name_value(
f,
"Current Stake:",
@ -348,11 +376,11 @@ impl fmt::Display for CliValidators {
&format!(
"{} ({:0.2}%)",
&build_balance_message(
self.total_deliquent_stake,
self.total_delinquent_stake,
self.use_lamports_unit,
true
),
100. * self.total_deliquent_stake as f64 / self.total_active_stake as f64
100. * self.total_delinquent_stake as f64 / self.total_active_stake as f64
),
)?;
}
@ -472,6 +500,9 @@ pub struct CliNonceAccount {
pub use_lamports_unit: bool,
}
impl QuietDisplay for CliNonceAccount {}
impl VerboseDisplay for CliNonceAccount {}
impl fmt::Display for CliNonceAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
@ -509,6 +540,9 @@ impl CliStakeVec {
}
}
impl QuietDisplay for CliStakeVec {}
impl VerboseDisplay for CliStakeVec {}
impl fmt::Display for CliStakeVec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for state in &self.0 {
@ -527,6 +561,9 @@ pub struct CliKeyedStakeState {
pub stake_state: CliStakeState,
}
impl QuietDisplay for CliKeyedStakeState {}
impl VerboseDisplay for CliKeyedStakeState {}
impl fmt::Display for CliKeyedStakeState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Stake Pubkey: {}", self.stake_pubkey)?;
@ -534,6 +571,48 @@ impl fmt::Display for CliKeyedStakeState {
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliEpochReward {
pub epoch: Epoch,
pub effective_slot: Slot,
pub amount: u64, // lamports
pub post_balance: u64, // lamports
pub percent_change: f64,
pub apr: f64,
}
fn show_epoch_rewards(
f: &mut fmt::Formatter,
epoch_rewards: &Option<Vec<CliEpochReward>>,
) -> fmt::Result {
if let Some(epoch_rewards) = epoch_rewards {
if epoch_rewards.is_empty() {
return Ok(());
}
writeln!(f, "Epoch Rewards:")?;
writeln!(
f,
" {:<8} {:<11} {:<15} {:<15} {:>14} {:>14}",
"Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR"
)?;
for reward in epoch_rewards {
writeln!(
f,
" {:<8} {:<11} ◎{:<14.9} ◎{:<14.9} {:>13.9}% {:>13.9}%",
reward.epoch,
reward.effective_slot,
lamports_to_sol(reward.amount),
lamports_to_sol(reward.post_balance),
reward.percent_change,
reward.apr,
)?;
}
}
Ok(())
}
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliStakeState {
@ -563,8 +642,13 @@ pub struct CliStakeState {
pub activating_stake: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub deactivating_stake: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub epoch_rewards: Option<Vec<CliEpochReward>>,
}
impl QuietDisplay for CliStakeState {}
impl VerboseDisplay for CliStakeState {}
impl fmt::Display for CliStakeState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn show_authorized(f: &mut fmt::Formatter, authorized: &CliAuthorized) -> fmt::Result {
@ -577,13 +661,8 @@ impl fmt::Display for CliStakeState {
if lockup.unix_timestamp != UnixTimestamp::default() {
writeln!(
f,
"Lockup Timestamp: {} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(
NaiveDateTime::from_timestamp(lockup.unix_timestamp, 0),
Utc
)
.to_rfc3339_opts(SecondsFormat::Secs, true),
lockup.unix_timestamp
"Lockup Timestamp: {}",
unix_timestamp_to_string(lockup.unix_timestamp)
)?;
}
if lockup.epoch != Epoch::default() {
@ -713,13 +792,14 @@ impl fmt::Display for CliStakeState {
}
show_authorized(f, self.authorized.as_ref().unwrap())?;
show_lockup(f, self.lockup.as_ref())?;
show_epoch_rewards(f, &self.epoch_rewards)?
}
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, PartialEq)]
pub enum CliStakeType {
Stake,
RewardsPool,
@ -741,6 +821,9 @@ pub struct CliStakeHistory {
pub use_lamports_unit: bool,
}
impl QuietDisplay for CliStakeHistory {}
impl VerboseDisplay for CliStakeHistory {}
impl fmt::Display for CliStakeHistory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
@ -835,6 +918,9 @@ impl CliValidatorInfoVec {
}
}
impl QuietDisplay for CliValidatorInfoVec {}
impl VerboseDisplay for CliValidatorInfoVec {}
impl fmt::Display for CliValidatorInfoVec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.0.is_empty() {
@ -856,6 +942,9 @@ pub struct CliValidatorInfo {
pub info: Map<String, Value>,
}
impl QuietDisplay for CliValidatorInfo {}
impl VerboseDisplay for CliValidatorInfo {}
impl fmt::Display for CliValidatorInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Validator Identity Pubkey:", &self.identity_pubkey)?;
@ -887,8 +976,13 @@ pub struct CliVoteAccount {
pub epoch_voting_history: Vec<CliEpochVotingHistory>,
#[serde(skip_serializing)]
pub use_lamports_unit: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub epoch_rewards: Option<Vec<CliEpochReward>>,
}
impl QuietDisplay for CliVoteAccount {}
impl VerboseDisplay for CliVoteAccount {}
impl fmt::Display for CliVoteAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
@ -909,7 +1003,12 @@ impl fmt::Display for CliVoteAccount {
None => "~".to_string(),
}
)?;
writeln!(f, "Recent Timestamp: {:?}", self.recent_timestamp)?;
writeln!(
f,
"Recent Timestamp: {} from slot {}",
unix_timestamp_to_string(self.recent_timestamp.timestamp),
self.recent_timestamp.slot
)?;
if !self.votes.is_empty() {
writeln!(f, "Recent Votes:")?;
for vote in &self.votes {
@ -928,6 +1027,7 @@ impl fmt::Display for CliVoteAccount {
)?;
}
}
show_epoch_rewards(f, &self.epoch_rewards)?;
Ok(())
}
}
@ -938,6 +1038,9 @@ pub struct CliAuthorizedVoters {
authorized_voters: BTreeMap<Epoch, String>,
}
impl QuietDisplay for CliAuthorizedVoters {}
impl VerboseDisplay for CliAuthorizedVoters {}
impl fmt::Display for CliAuthorizedVoters {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.authorized_voters)
@ -987,19 +1090,25 @@ pub struct CliBlockTime {
pub timestamp: UnixTimestamp,
}
impl QuietDisplay for CliBlockTime {}
impl VerboseDisplay for CliBlockTime {}
fn unix_timestamp_to_string(unix_timestamp: UnixTimestamp) -> String {
format!(
"{} (UnixTimestamp: {})",
match NaiveDateTime::from_timestamp_opt(unix_timestamp, 0) {
Some(ndt) =>
DateTime::<Utc>::from_utc(ndt, Utc).to_rfc3339_opts(SecondsFormat::Secs, true),
None => "unknown".to_string(),
},
unix_timestamp,
)
}
impl fmt::Display for CliBlockTime {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Block:", &self.slot.to_string())?;
writeln_name_value(
f,
"Date:",
&format!(
"{} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(self.timestamp, 0), Utc)
.to_rfc3339_opts(SecondsFormat::Secs, true),
self.timestamp
),
)
writeln_name_value(f, "Date:", &unix_timestamp_to_string(self.timestamp))
}
}
@ -1015,6 +1124,9 @@ pub struct CliSignOnlyData {
pub bad_sig: Vec<String>,
}
impl QuietDisplay for CliSignOnlyData {}
impl VerboseDisplay for CliSignOnlyData {}
impl fmt::Display for CliSignOnlyData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
@ -1047,6 +1159,9 @@ pub struct CliSignature {
pub signature: String,
}
impl QuietDisplay for CliSignature {}
impl VerboseDisplay for CliSignature {}
impl fmt::Display for CliSignature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
@ -1061,6 +1176,9 @@ pub struct CliAccountBalances {
pub accounts: Vec<RpcAccountBalance>,
}
impl QuietDisplay for CliAccountBalances {}
impl VerboseDisplay for CliAccountBalances {}
impl fmt::Display for CliAccountBalances {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
@ -1103,6 +1221,9 @@ impl From<RpcSupply> for CliSupply {
}
}
impl QuietDisplay for CliSupply {}
impl VerboseDisplay for CliSupply {}
impl fmt::Display for CliSupply {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Total:", &format!("{} SOL", lamports_to_sol(self.total)))?;
@ -1136,6 +1257,9 @@ pub struct CliFees {
pub last_valid_slot: Slot,
}
impl QuietDisplay for CliFees {}
impl VerboseDisplay for CliFees {}
impl fmt::Display for CliFees {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Blockhash:", &self.blockhash)?;
@ -1149,6 +1273,50 @@ impl fmt::Display for CliFees {
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliTokenAccount {
pub address: String,
#[serde(flatten)]
pub token_account: UiTokenAccount,
}
impl QuietDisplay for CliTokenAccount {}
impl VerboseDisplay for CliTokenAccount {}
impl fmt::Display for CliTokenAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f)?;
writeln_name_value(f, "Address:", &self.address)?;
let account = &self.token_account;
writeln_name_value(
f,
"Balance:",
&account.token_amount.real_number_string_trimmed(),
)?;
let mint = format!(
"{}{}",
account.mint,
if account.is_native { " (native)" } else { "" }
);
writeln_name_value(f, "Mint:", &mint)?;
writeln_name_value(f, "Owner:", &account.owner)?;
writeln_name_value(f, "State:", &format!("{:?}", account.state))?;
if let Some(delegate) = &account.delegate {
writeln!(f, "Delegation:")?;
writeln_name_value(f, " Delegate:", delegate)?;
let allowance = account.delegated_amount.as_ref().unwrap();
writeln_name_value(f, " Allowance:", &allowance.real_number_string_trimmed())?;
}
writeln_name_value(
f,
"Close authority:",
&account.close_authority.as_ref().unwrap_or(&String::new()),
)?;
Ok(())
}
}
pub fn return_signers(
tx: &Transaction,
output_format: &OutputFormat,
@ -1293,4 +1461,51 @@ mod tests {
assert_eq!(sign_only.absent_signers[0], absent.pubkey());
assert_eq!(sign_only.bad_signers[0], bad.pubkey());
}
#[test]
fn test_verbose_quiet_output_formats() {
#[derive(Deserialize, Serialize)]
struct FallbackToDisplay {}
impl std::fmt::Display for FallbackToDisplay {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "display")
}
}
impl QuietDisplay for FallbackToDisplay {}
impl VerboseDisplay for FallbackToDisplay {}
let f = FallbackToDisplay {};
assert_eq!(&OutputFormat::Display.formatted_string(&f), "display");
assert_eq!(&OutputFormat::DisplayQuiet.formatted_string(&f), "display");
assert_eq!(
&OutputFormat::DisplayVerbose.formatted_string(&f),
"display"
);
#[derive(Deserialize, Serialize)]
struct DiscreteVerbosityDisplay {}
impl std::fmt::Display for DiscreteVerbosityDisplay {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "display")
}
}
impl QuietDisplay for DiscreteVerbosityDisplay {
fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result {
write!(w, "quiet")
}
}
impl VerboseDisplay for DiscreteVerbosityDisplay {
fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result {
write!(w, "verbose")
}
}
let f = DiscreteVerbosityDisplay {};
assert_eq!(&OutputFormat::Display.formatted_string(&f), "display");
assert_eq!(&OutputFormat::DisplayQuiet.formatted_string(&f), "quiet");
assert_eq!(
&OutputFormat::DisplayVerbose.formatted_string(&f),
"verbose"
);
}
}

View File

@ -197,6 +197,15 @@ pub fn write_transaction<W: io::Write>(
)?;
}
}
if let Some(log_messages) = &transaction_status.log_messages {
if !log_messages.is_empty() {
writeln!(w, "{}Log Messages:", prefix,)?;
for log_message in log_messages {
writeln!(w, "{} {}", prefix, log_message,)?;
}
}
}
} else {
writeln!(w, "{}Status: Unavailable", prefix)?;
}

View File

@ -1,3 +1,15 @@
mod cli_output;
pub mod display;
pub use cli_output::*;
pub trait QuietDisplay: std::fmt::Display {
fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result {
write!(w, "{}", self)
}
}
pub trait VerboseDisplay: std::fmt::Display {
fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result {
write!(w, "{}", self)
}
}
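
A short sketch of how a caller (for example, `OutputFormat::formatted_string`, exercised in the tests above) can select among the default, quiet, and verbose renderings through these traits; the `render` helper below is illustrative and not part of this crate, and types that want distinct output override `write_str` as in the `DiscreteVerbosityDisplay` test above.

fn render(item: &(impl QuietDisplay + VerboseDisplay), quiet: bool, verbose: bool) -> String {
    use std::fmt::Write;
    // Writing into a String cannot fail, so unwrap() is safe here.
    let mut s = String::new();
    if quiet {
        QuietDisplay::write_str(item, &mut s).unwrap();
    } else if verbose {
        VerboseDisplay::write_str(item, &mut s).unwrap();
    } else {
        write!(s, "{}", item).unwrap();
    }
    s
}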

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.13"
version = "1.3.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -16,7 +16,7 @@ clap = "2.33.1"
criterion-stats = "0.3.0"
ctrlc = { version = "3.1.5", features = ["termination"] }
console = "0.11.3"
dirs = "2.0.2"
dirs-next = "2.0.0"
log = "0.4.8"
Inflector = "0.11.4"
indicatif = "0.15.0"
@ -27,29 +27,33 @@ reqwest = { version = "0.10.6", default-features = false, features = ["blocking"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.3.13" }
solana-budget-program = { path = "../programs/budget", version = "1.3.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.13" }
solana-cli-config = { path = "../cli-config", version = "1.3.13" }
solana-cli-output = { path = "../cli-output", version = "1.3.13" }
solana-client = { path = "../client", version = "1.3.13" }
solana-config-program = { path = "../programs/config", version = "1.3.13" }
solana-faucet = { path = "../faucet", version = "1.3.13" }
solana-logger = { path = "../logger", version = "1.3.13" }
solana-net-utils = { path = "../net-utils", version = "1.3.13" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-stake-program = { path = "../programs/stake", version = "1.3.13" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.13" }
solana-version = { path = "../version", version = "1.3.13" }
solana-vote-program = { path = "../programs/vote", version = "1.3.13" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.13" }
solana-account-decoder = { path = "../account-decoder", version = "1.3.23" }
solana-budget-program = { path = "../programs/budget", version = "1.3.23" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.3.23" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.23" }
solana-cli-config = { path = "../cli-config", version = "1.3.23" }
solana-cli-output = { path = "../cli-output", version = "1.3.23" }
solana-client = { path = "../client", version = "1.3.23" }
solana-config-program = { path = "../programs/config", version = "1.3.23" }
solana-faucet = { path = "../faucet", version = "1.3.23" }
solana-logger = { path = "../logger", version = "1.3.23" }
solana-net-utils = { path = "../net-utils", version = "1.3.23" }
solana_rbpf = "=0.1.32"
solana-remote-wallet = { path = "../remote-wallet", version = "1.3.23" }
solana-runtime = { path = "../runtime", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
solana-stake-program = { path = "../programs/stake", version = "1.3.23" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.23" }
solana-version = { path = "../version", version = "1.3.23" }
solana-vote-program = { path = "../programs/vote", version = "1.3.23" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.23" }
thiserror = "1.0.20"
tiny-bip39 = "0.7.0"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.3.13" }
solana-budget-program = { path = "../programs/budget", version = "1.3.13" }
solana-core = { path = "../core", version = "1.3.23" }
solana-budget-program = { path = "../programs/budget", version = "1.3.23" }
tempfile = "3.1.0"
[[bin]]

View File

@ -54,12 +54,42 @@ pub fn check_account_for_multiple_fees_with_commitment(
fee_calculator: &FeeCalculator,
messages: &[&Message],
commitment: CommitmentConfig,
) -> Result<(), CliError> {
check_account_for_spend_multiple_fees_with_commitment(
rpc_client,
account_pubkey,
0,
fee_calculator,
messages,
commitment,
)
}
pub fn check_account_for_spend_multiple_fees_with_commitment(
rpc_client: &RpcClient,
account_pubkey: &Pubkey,
balance: u64,
fee_calculator: &FeeCalculator,
messages: &[&Message],
commitment: CommitmentConfig,
) -> Result<(), CliError> {
let fee = calculate_fee(fee_calculator, messages);
if !check_account_for_balance_with_commitment(rpc_client, account_pubkey, fee, commitment)
.map_err(Into::<ClientError>::into)?
if !check_account_for_balance_with_commitment(
rpc_client,
account_pubkey,
balance + fee,
commitment,
)
.map_err(Into::<ClientError>::into)?
{
return Err(CliError::InsufficientFundsForFee(lamports_to_sol(fee)));
if balance > 0 {
return Err(CliError::InsufficientFundsForSpendAndFee(
lamports_to_sol(balance),
lamports_to_sol(fee),
));
} else {
return Err(CliError::InsufficientFundsForFee(lamports_to_sol(fee)));
}
}
Ok(())
}
@ -67,7 +97,7 @@ pub fn check_account_for_multiple_fees_with_commitment(
pub fn calculate_fee(fee_calculator: &FeeCalculator, messages: &[&Message]) -> u64 {
messages
.iter()
.map(|message| fee_calculator.calculate_fee(message, None))
.map(|message| fee_calculator.calculate_fee(message))
.sum()
}
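
A minimal usage sketch of the helpers above (the keys and fee rate are illustrative, and it assumes the one-argument `FeeCalculator::calculate_fee` signature this diff switches to):

use solana_sdk::{fee_calculator::FeeCalculator, message::Message, system_instruction};

#[test]
fn fee_summation_sketch() {
    let payer = solana_sdk::pubkey::new_rand();
    let recipient = solana_sdk::pubkey::new_rand();
    let fee_calculator = FeeCalculator::new(5_000); // 5_000 lamports per signature
    let message = Message::new(
        &[system_instruction::transfer(&payer, &recipient, 1)],
        Some(&payer),
    );
    // Two single-signature messages => 2 * 5_000 = 10_000 lamports of fees, which the
    // spend check above then adds to any requested `balance` before comparing against
    // the account's lamports.
    assert_eq!(calculate_fee(&fee_calculator, &[&message, &message]), 10_000);
}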
@ -131,7 +161,7 @@ mod tests {
context: RpcResponseContext { slot: 1 },
value: json!(account_balance),
});
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let fee_calculator = FeeCalculator::new(1);
let pubkey0 = Pubkey::new(&[0; 32]);
@ -191,7 +221,7 @@ mod tests {
context: RpcResponseContext { slot: 1 },
value: json!(account_balance),
});
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let mut mocks = HashMap::new();
mocks.insert(RpcRequest::GetBalance, account_balance_response);
@ -237,9 +267,9 @@ mod tests {
#[test]
fn test_check_unique_pubkeys() {
let pubkey0 = Pubkey::new_rand();
let pubkey0 = solana_sdk::pubkey::new_rand();
let pubkey_clone = pubkey0;
let pubkey1 = Pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
check_unique_pubkeys((&pubkey0, "foo".to_string()), (&pubkey1, "bar".to_string()))
.expect("unexpected result");

View File

@ -1,12 +1,16 @@
use crate::{
checks::*, cluster_query::*, nonce::*, spend_utils::*, stake::*, validator_info::*, vote::*,
checks::*, cluster_query::*, feature::*, inflation::*, nonce::*, send_tpu::*, spend_utils::*,
stake::*, validator_info::*, vote::*,
};
use bincode::serialize;
use bip39::{Language, Mnemonic, MnemonicType, Seed};
use chrono::prelude::*;
use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use log::*;
use num_traits::FromPrimitive;
use serde_json::{self, json, Value};
use solana_account_decoder::{UiAccount, UiAccountEncoding};
use solana_bpf_loader_program::bpf_verifier;
use solana_budget_program::budget_instruction::{self, BudgetError};
use solana_clap_utils::{
self,
@ -30,24 +34,27 @@ use solana_client::{
nonce_utils,
rpc_client::RpcClient,
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
rpc_response::{Response, RpcKeyedAccount},
rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
rpc_response::{RpcKeyedAccount, RpcLeaderSchedule},
};
#[cfg(not(test))]
use solana_faucet::faucet::request_airdrop_transaction;
#[cfg(test)]
use solana_faucet::faucet_mock::request_airdrop_transaction;
use solana_rbpf::vm::EbpfVm;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
bpf_loader, bpf_loader_deprecated,
clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND},
clock::{Epoch, Slot},
commitment_config::CommitmentConfig,
decode_error::DecodeError,
hash::Hash,
instruction::InstructionError,
instruction::{Instruction, InstructionError},
loader_instruction,
message::Message,
native_token::Sol,
pubkey::{Pubkey, MAX_SEED_LEN},
signature::{Keypair, Signature, Signer, SignerError},
signature::{keypair_from_seed, Keypair, Signature, Signer, SignerError},
signers::Signers,
system_instruction::{self, SystemError},
system_program,
@ -60,12 +67,13 @@ use solana_stake_program::{
use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding};
use solana_vote_program::vote_state::VoteAuthorize;
use std::{
cmp::min,
collections::HashMap,
error,
fmt::Write as FmtWrite,
fs::File,
io::{Read, Write},
net::{IpAddr, SocketAddr},
net::{IpAddr, SocketAddr, UdpSocket},
str::FromStr,
sync::Arc,
thread::sleep,
@ -107,10 +115,12 @@ pub enum CliCommand {
seed: String,
program_id: Pubkey,
},
Feature(FeatureCliCommand),
Inflation(InflationCliCommand),
Fees,
FirstAvailableBlock,
GetBlock {
slot: Slot,
slot: Option<Slot>,
},
GetBlockTime {
slot: Option<Slot>,
@ -187,6 +197,7 @@ pub enum CliCommand {
program_location: String,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
},
// Stake Commands
CreateStakeAccount {
@ -554,6 +565,9 @@ pub fn parse_command(
("create-address-with-seed", Some(matches)) => {
parse_create_address_with_seed(matches, default_signer, wallet_manager)
}
("feature", Some(matches)) => {
parse_feature_subcommand(matches, default_signer, wallet_manager)
}
("fees", Some(_matches)) => Ok(CliCommandInfo {
command: CliCommand::Fees,
signers: vec![],
@ -572,6 +586,9 @@ pub fn parse_command(
("epoch", Some(matches)) => parse_get_epoch(matches),
("slot", Some(matches)) => parse_get_slot(matches),
("block-height", Some(matches)) => parse_get_block_height(matches),
("inflation", Some(matches)) => {
parse_inflation_subcommand(matches, default_signer, wallet_manager)
}
("largest-accounts", Some(matches)) => parse_largest_accounts(matches),
("supply", Some(matches)) => parse_supply(matches),
("total-supply", Some(matches)) => parse_total_supply(matches),
@ -616,13 +633,13 @@ pub fn parse_command(
signers.push(signer);
1
});
let use_deprecated_loader = matches.is_present("use_deprecated_loader");
Ok(CliCommandInfo {
command: CliCommand::Deploy {
program_location: matches.value_of("program_location").unwrap().to_string(),
address,
use_deprecated_loader,
use_deprecated_loader: matches.is_present("use_deprecated_loader"),
allow_excessive_balance: matches.is_present("allow_excessive_balance"),
},
signers,
})
@ -1119,37 +1136,56 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
rpc_client: &RpcClient,
mut transactions: Vec<Transaction>,
signer_keys: &T,
commitment: CommitmentConfig,
mut last_valid_slot: Slot,
) -> Result<(), Box<dyn error::Error>> {
let progress_bar = new_spinner_progress_bar();
let mut send_retries = 5;
let mut leader_schedule: Option<RpcLeaderSchedule> = None;
let mut leader_schedule_epoch = 0;
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let cluster_nodes = rpc_client.get_cluster_nodes().ok();
loop {
let mut status_retries = 15;
progress_bar.set_message("Finding leader node...");
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment)?;
if epoch_info.epoch > leader_schedule_epoch || leader_schedule.is_none() {
leader_schedule = rpc_client
.get_leader_schedule_with_commitment(Some(epoch_info.absolute_slot), commitment)?;
leader_schedule_epoch = epoch_info.epoch;
}
let tpu_address = get_leader_tpu(
min(epoch_info.slot_index + 1, epoch_info.slots_in_epoch),
leader_schedule.as_ref(),
cluster_nodes.as_ref(),
);
// Send all transactions
let mut transactions_signatures = vec![];
let mut pending_transactions = HashMap::new();
let num_transactions = transactions.len();
for transaction in transactions {
if cfg!(not(test)) {
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
// when all the write transactions modify the same program account (eg, deploying a
// new program)
sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
if let Some(tpu_address) = tpu_address {
let wire_transaction =
serialize(&transaction).expect("serialization should succeed");
send_transaction_tpu(&send_socket, &tpu_address, &wire_transaction);
} else {
let _result = rpc_client
.send_transaction_with_config(
&transaction,
RpcSendTransactionConfig {
preflight_commitment: Some(commitment.commitment),
..RpcSendTransactionConfig::default()
},
)
.ok();
}
let signature = rpc_client
.send_transaction_with_config(
&transaction,
RpcSendTransactionConfig {
skip_preflight: true,
..RpcSendTransactionConfig::default()
},
)
.ok();
transactions_signatures.push((transaction, signature));
pending_transactions.insert(transaction.signatures[0], transaction);
progress_bar.set_message(&format!(
"[{}/{}] Transactions sent",
transactions_signatures.len(),
"[{}/{}] Total Transactions sent",
pending_transactions.len(),
num_transactions
));
}
@ -1160,34 +1196,50 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
progress_bar.set_message(&format!(
"[{}/{}] Transactions confirmed",
num_transactions - transactions_signatures.len(),
num_transactions - pending_transactions.len(),
num_transactions
));
let mut statuses = vec![];
let pending_signatures = pending_transactions.keys().cloned().collect::<Vec<_>>();
for pending_signatures_chunk in
pending_signatures.chunks(MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS - 1)
{
statuses.extend(
rpc_client
.get_signature_statuses_with_history(pending_signatures_chunk)?
.value
.into_iter(),
);
}
assert_eq!(statuses.len(), pending_signatures.len());
for (signature, status) in pending_signatures.into_iter().zip(statuses.into_iter()) {
if let Some(status) = status {
if status.confirmations.is_none() || status.confirmations.unwrap() > 1 {
let _ = pending_transactions.remove(&signature);
}
}
progress_bar.set_message(&format!(
"[{}/{}] Transactions confirmed",
num_transactions - pending_transactions.len(),
num_transactions
));
}
if pending_transactions.is_empty() {
return Ok(());
}
let slot = rpc_client.get_slot_with_commitment(commitment)?;
if slot > last_valid_slot {
break;
}
if cfg!(not(test)) {
// Retry twice a second
sleep(Duration::from_millis(500));
}
transactions_signatures = transactions_signatures
.into_iter()
.filter(|(_transaction, signature)| {
signature
.and_then(|signature| rpc_client.get_signature_statuses(&[signature]).ok())
.map(|Response { context: _, value }| match &value[0] {
None => true,
Some(transaction_status) => {
!(transaction_status.confirmations.is_none()
|| transaction_status.confirmations.unwrap() > 1)
}
})
.unwrap_or(true)
})
.collect();
if transactions_signatures.is_empty() {
return Ok(());
}
}
if send_retries == 0 {
@ -1196,10 +1248,12 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
send_retries -= 1;
// Re-sign any failed transactions with a new blockhash and retry
let (blockhash, _fee_calculator) = rpc_client
.get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
let (blockhash, _fee_calculator, new_last_valid_slot) = rpc_client
.get_recent_blockhash_with_commitment(commitment)?
.value;
last_valid_slot = new_last_valid_slot;
transactions = vec![];
for (mut transaction, _) in transactions_signatures.into_iter() {
for (_, mut transaction) in pending_transactions.into_iter() {
transaction.try_sign(signer_keys, blockhash)?;
transactions.push(transaction);
}
@ -1212,8 +1266,52 @@ fn process_deploy(
program_location: &str,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
) -> ProcessResult {
const WORDS: usize = 12;
// Create ephemeral keypair to use for program address, if not provided
let mnemonic = Mnemonic::new(MnemonicType::for_word_count(WORDS)?, Language::English);
let seed = Seed::new(&mnemonic, "");
let new_keypair = keypair_from_seed(seed.as_bytes())?;
let result = do_process_deploy(
rpc_client,
config,
program_location,
address,
use_deprecated_loader,
allow_excessive_balance,
new_keypair,
);
if result.is_err() && address.is_none() {
let phrase: &str = mnemonic.phrase();
let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap();
eprintln!(
"{}\nTo reuse this address, recover the ephemeral keypair file with",
divider
);
eprintln!(
"`solana-keygen recover` and the following {}-word seed phrase,",
WORDS
);
eprintln!(
"then pass it as the [PROGRAM_ADDRESS_SIGNER] argument to `solana deploy ...`\n{}\n{}\n{}",
divider, phrase, divider
);
}
result
}
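
For context, a sketch of turning that printed seed phrase back into the same ephemeral signer, mirroring the tiny-bip39 and `keypair_from_seed` calls above (the function name is illustrative):

fn recover_ephemeral_program_keypair(phrase: &str) -> Keypair {
    // Rebuild the mnemonic from the printed 12-word phrase, derive the seed with an
    // empty passphrase (matching `Seed::new(&mnemonic, "")` above), and reconstruct
    // the keypair that signed for the program address.
    let mnemonic = Mnemonic::from_phrase(phrase, Language::English).expect("valid phrase");
    let seed = Seed::new(&mnemonic, "");
    keypair_from_seed(seed.as_bytes()).expect("keypair from seed")
}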
fn do_process_deploy(
rpc_client: &RpcClient,
config: &CliConfig,
program_location: &str,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
new_keypair: Keypair,
) -> ProcessResult {
let new_keypair = Keypair::new(); // Create ephemeral keypair to use for program address, if not provided
let program_id = if let Some(i) = address {
config.signers[i]
} else {
@ -1227,30 +1325,94 @@ fn process_deploy(
CliError::DynamicProgramError(format!("Unable to read program file: {}", err))
})?;
EbpfVm::create_executable_from_elf(&program_data, Some(|x| bpf_verifier::check(x, true)))
.map_err(|err| CliError::DynamicProgramError(format!("ELF error: {}", err)))?;
let loader_id = if use_deprecated_loader {
bpf_loader_deprecated::id()
} else {
bpf_loader::id()
};
let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(program_data.len())?;
let signers = [config.signers[0], program_id];
// Check program account to see if partial initialization has occurred
let (initial_instructions, balance_needed) = if let Some(account) = rpc_client
.get_account_with_commitment(&program_id.pubkey(), config.commitment)?
.value
{
let mut instructions: Vec<Instruction> = vec![];
let mut balance_needed = 0;
if account.executable {
return Err(CliError::DynamicProgramError(
"Program account is already executable".to_string(),
)
.into());
}
if account.owner != loader_id && !system_program::check_id(&account.owner) {
return Err(CliError::DynamicProgramError(
"Program account is already owned by another account".to_string(),
)
.into());
}
if account.data.is_empty() && system_program::check_id(&account.owner) {
instructions.push(system_instruction::allocate(
&program_id.pubkey(),
program_data.len() as u64,
));
if account.owner != loader_id {
instructions.push(system_instruction::assign(&program_id.pubkey(), &loader_id));
}
}
if account.lamports < minimum_balance {
let balance = minimum_balance - account.lamports;
instructions.push(system_instruction::transfer(
&config.signers[0].pubkey(),
&program_id.pubkey(),
balance,
));
balance_needed = balance;
} else if account.lamports > minimum_balance
&& system_program::check_id(&account.owner)
&& !allow_excessive_balance
{
return Err(CliError::DynamicProgramError(format!(
"Program account has a balance: {:?}; it may already be in use",
Sol(account.lamports)
))
.into());
}
(instructions, balance_needed)
} else {
(
vec![system_instruction::create_account(
&config.signers[0].pubkey(),
&program_id.pubkey(),
minimum_balance,
program_data.len() as u64,
&loader_id,
)],
minimum_balance,
)
};
let initial_message = if !initial_instructions.is_empty() {
Some(Message::new(
&initial_instructions,
Some(&config.signers[0].pubkey()),
))
} else {
None
};
// Build transactions to calculate fees
let mut messages: Vec<&Message> = Vec::new();
let (blockhash, fee_calculator, _) = rpc_client
.get_recent_blockhash_with_commitment(config.commitment)?
.value;
let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(program_data.len())?;
let ix = system_instruction::create_account(
&config.signers[0].pubkey(),
&program_id.pubkey(),
minimum_balance.max(1),
program_data.len() as u64,
&loader_id,
);
let message = Message::new(&[ix], Some(&config.signers[0].pubkey()));
let mut create_account_tx = Transaction::new_unsigned(message);
let signers = [config.signers[0], program_id];
create_account_tx.try_sign(&signers, blockhash)?;
messages.push(&create_account_tx.message);
if let Some(message) = &initial_message {
messages.push(message);
}
let mut write_messages = vec![];
for (chunk, i) in program_data.chunks(DATA_CHUNK_SIZE).zip(0..) {
let instruction = loader_instruction::write(
@ -1272,25 +1434,44 @@ fn process_deploy(
let finalize_message = Message::new(&[instruction], Some(&signers[0].pubkey()));
messages.push(&finalize_message);
check_account_for_multiple_fees_with_commitment(
let (blockhash, fee_calculator, _) = rpc_client
.get_recent_blockhash_with_commitment(config.commitment)?
.value;
check_account_for_spend_multiple_fees_with_commitment(
rpc_client,
&config.signers[0].pubkey(),
balance_needed,
&fee_calculator,
&messages,
config.commitment,
)?;
trace!("Creating program account");
let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config(
&create_account_tx,
config.commitment,
config.send_transaction_config,
);
log_instruction_custom_error::<SystemError>(result, &config).map_err(|_| {
CliError::DynamicProgramError("Program account allocation failed".to_string())
})?;
if let Some(message) = initial_message {
trace!("Creating or modifying program account");
let num_required_signatures = message.header.num_required_signatures;
let (blockhash, _, _) = rpc_client
let mut initial_transaction = Transaction::new_unsigned(message);
// Most of the initial_transaction combinations require both the fee-payer and new program
// account to sign the transaction. One (transfer) only requires the fee-payer signature.
// This check is to ensure signing does not fail on a KeypairPubkeyMismatch error from an
// extraneous signature.
if num_required_signatures == 2 {
initial_transaction.try_sign(&signers, blockhash)?;
} else {
initial_transaction.try_sign(&[signers[0]], blockhash)?;
}
let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config(
&initial_transaction,
config.commitment,
config.send_transaction_config,
);
log_instruction_custom_error::<SystemError>(result, &config).map_err(|err| {
CliError::DynamicProgramError(format!("Program account allocation failed: {}", err))
})?;
}
let (blockhash, _, last_valid_slot) = rpc_client
.get_recent_blockhash_with_commitment(config.commitment)?
.value;
@ -1302,9 +1483,16 @@ fn process_deploy(
}
trace!("Writing program data");
send_and_confirm_transactions_with_spinner(&rpc_client, write_transactions, &signers).map_err(
|_| CliError::DynamicProgramError("Data writes to program account failed".to_string()),
)?;
send_and_confirm_transactions_with_spinner(
&rpc_client,
write_transactions,
&signers,
config.commitment,
last_valid_slot,
)
.map_err(|err| {
CliError::DynamicProgramError(format!("Data writes to program account failed: {}", err))
})?;
let (blockhash, _, _) = rpc_client
.get_recent_blockhash_with_commitment(config.commitment)?
@ -1709,6 +1897,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
program_id,
} => process_create_address_with_seed(config, from_pubkey.as_ref(), &seed, &program_id),
CliCommand::Fees => process_fees(&rpc_client, config),
CliCommand::Feature(feature_subcommand) => {
process_feature_subcommand(&rpc_client, config, feature_subcommand)
}
CliCommand::FirstAvailableBlock => process_first_available_block(&rpc_client),
CliCommand::GetBlock { slot } => process_get_block(&rpc_client, config, *slot),
CliCommand::GetBlockTime { slot } => process_get_block_time(&rpc_client, config, *slot),
@ -1721,6 +1912,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
process_largest_accounts(&rpc_client, config, filter.clone())
}
CliCommand::GetTransactionCount => process_get_transaction_count(&rpc_client, config),
CliCommand::Inflation(inflation_subcommand) => {
process_inflation_subcommand(&rpc_client, config, inflation_subcommand)
}
CliCommand::LeaderSchedule => process_leader_schedule(&rpc_client),
CliCommand::LiveSlots => process_live_slots(&config.websocket_url),
CliCommand::Ping {
@ -1834,12 +2028,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
program_location,
address,
use_deprecated_loader,
allow_excessive_balance,
} => process_deploy(
&rpc_client,
config,
program_location,
*address,
*use_deprecated_loader,
*allow_excessive_balance,
),
// Stake Commands
@ -2357,6 +2553,8 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
),
)
.cluster_query_subcommands()
.feature_subcommands()
.inflation_subcommands()
.nonce_subcommands()
.stake_subcommands()
.subcommand(
@ -2501,7 +2699,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.arg(
Arg::with_name("address_signer")
.index(2)
.value_name("SIGNER_KEYPAIR")
.value_name("PROGRAM_ADDRESS_SIGNER")
.takes_value(true)
.validator(is_valid_signer)
.help("The signer for the desired address of the program [default: new random address]")
@ -2512,7 +2710,14 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.takes_value(false)
.hidden(true) // Don't document this argument to discourage its use
.help("Use the deprecated BPF loader")
),
)
.arg(
Arg::with_name("allow_excessive_balance")
.long("allow-excessive-deploy-account-balance")
.takes_value(false)
.help("Use the designated program id, even if the account already holds a large balance of SOL")
)
.arg(commitment_arg_with_default("max")),
)
.subcommand(
SubCommand::with_name("pay")
@ -2564,8 +2769,8 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.long("cancelable")
.takes_value(false),
)
.offline_args()
.nonce_args()
.offline_args(false)
.nonce_args(false)
)
.subcommand(
SubCommand::with_name("resolve-signer")
@ -2656,8 +2861,8 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.takes_value(false)
.help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"),
)
.offline_args()
.nonce_args()
.offline_args(false)
.nonce_args(false)
.arg(fee_payer_arg()),
)
.subcommand(
@ -2694,7 +2899,12 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
mod tests {
use super::*;
use serde_json::Value;
use solana_client::{blockhash_query, mock_sender::SIGNATURE};
use solana_client::{
blockhash_query,
mock_sender::SIGNATURE,
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext},
};
use solana_sdk::{
pubkey::Pubkey,
signature::{keypair_from_seed, read_keypair_file, write_keypair_file, Presigner},
@ -2739,7 +2949,10 @@ mod tests {
.unwrap();
assert_eq!(signer_info.signers.len(), 1);
assert_eq!(signer_info.index_of(None), Some(0));
assert_eq!(signer_info.index_of(Some(Pubkey::new_rand())), None);
assert_eq!(
signer_info.index_of(Some(solana_sdk::pubkey::new_rand())),
None
);
let keypair0 = keypair_from_seed(&[1u8; 32]).unwrap();
let keypair0_pubkey = keypair0.pubkey();
@ -2795,11 +3008,11 @@ mod tests {
fn test_cli_parse_command() {
let test_commands = app("test", "desc", "version");
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let pubkey_string = format!("{}", pubkey);
let witness0 = Pubkey::new_rand();
let witness0 = solana_sdk::pubkey::new_rand();
let witness0_string = format!("{}", witness0);
let witness1 = Pubkey::new_rand();
let witness1 = solana_sdk::pubkey::new_rand();
let witness1_string = format!("{}", witness1);
let dt = Utc.ymd(2018, 9, 19).and_hms(17, 30, 59);
@ -2909,7 +3122,7 @@ mod tests {
assert!(parse_command(&test_bad_signature, &default_signer, &mut None).is_err());
// Test CreateAddressWithSeed
let from_pubkey = Some(Pubkey::new_rand());
let from_pubkey = Some(solana_sdk::pubkey::new_rand());
let from_str = from_pubkey.unwrap().to_string();
for (name, program_id) in &[
("STAKE", solana_stake_program::id()),
@ -2966,6 +3179,7 @@ mod tests {
program_location: "/Users/test/program.o".to_string(),
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
},
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@ -2987,6 +3201,7 @@ mod tests {
program_location: "/Users/test/program.o".to_string(),
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
},
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@ -3265,7 +3480,7 @@ mod tests {
let authority_pubkey = keypair.pubkey();
let authority_pubkey_string = format!("{}", authority_pubkey);
let sig = keypair.sign_message(&[0u8]);
let signer_arg = format!("{}={}", Pubkey::new_rand(), sig);
let signer_arg = format!("{}={}", solana_sdk::pubkey::new_rand(), sig);
let test_pay = test_commands.clone().get_matches_from(vec![
"test",
"pay",
@ -3378,7 +3593,7 @@ mod tests {
};
assert_eq!(process_command(&config).unwrap(), "0.00000005 SOL");
let process_id = Pubkey::new_rand();
let process_id = solana_sdk::pubkey::new_rand();
config.command = CliCommand::Cancel(process_id);
assert!(process_command(&config).is_ok());
@ -3401,7 +3616,7 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let new_authorized_pubkey = Pubkey::new_rand();
let new_authorized_pubkey = solana_sdk::pubkey::new_rand();
config.signers = vec![&bob_keypair];
config.command = CliCommand::VoteAuthorize {
vote_account_pubkey: bob_pubkey,
@ -3423,7 +3638,7 @@ mod tests {
let bob_keypair = Keypair::new();
let bob_pubkey = bob_keypair.pubkey();
let custodian = Pubkey::new_rand();
let custodian = solana_sdk::pubkey::new_rand();
config.command = CliCommand::CreateStakeAccount {
stake_account: 1,
seed: None,
@ -3446,8 +3661,8 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand();
let to_pubkey = Pubkey::new_rand();
let stake_account_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
config.command = CliCommand::WithdrawStake {
stake_account_pubkey,
destination_account_pubkey: to_pubkey,
@ -3464,7 +3679,7 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand();
let stake_account_pubkey = solana_sdk::pubkey::new_rand();
config.command = CliCommand::DeactivateStake {
stake_account_pubkey,
stake_authority: 0,
@ -3477,7 +3692,7 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand();
let stake_account_pubkey = solana_sdk::pubkey::new_rand();
let split_stake_account = Keypair::new();
config.command = CliCommand::SplitStake {
stake_account_pubkey,
@ -3495,8 +3710,8 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand();
let source_stake_account_pubkey = Pubkey::new_rand();
let stake_account_pubkey = solana_sdk::pubkey::new_rand();
let source_stake_account_pubkey = solana_sdk::pubkey::new_rand();
let merge_stake_account = Keypair::new();
config.command = CliCommand::MergeStake {
stake_account_pubkey,
@ -3539,7 +3754,7 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let witness = Pubkey::new_rand();
let witness = solana_sdk::pubkey::new_rand();
config.command = CliCommand::Pay(PayCommand {
amount: SpendAmount::Some(10),
to: bob_pubkey,
@ -3550,19 +3765,19 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let process_id = Pubkey::new_rand();
let process_id = solana_sdk::pubkey::new_rand();
config.command = CliCommand::TimeElapsed(bob_pubkey, process_id, dt);
config.signers = vec![&keypair];
let result = process_command(&config);
assert!(result.is_ok());
let witness = Pubkey::new_rand();
let witness = solana_sdk::pubkey::new_rand();
config.command = CliCommand::Witness(bob_pubkey, witness);
let result = process_command(&config);
assert!(result.is_ok());
// CreateAddressWithSeed
let from_pubkey = Pubkey::new_rand();
let from_pubkey = solana_sdk::pubkey::new_rand();
config.signers = vec![];
config.command = CliCommand::CreateAddressWithSeed {
from_pubkey: Some(from_pubkey),
@ -3575,7 +3790,7 @@ mod tests {
assert_eq!(address.unwrap(), expected_address.to_string());
// Need airdrop cases
let to = Pubkey::new_rand();
let to = solana_sdk::pubkey::new_rand();
config.signers = vec![&keypair];
config.command = CliCommand::Airdrop {
faucet_host: None,
@ -3589,7 +3804,7 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let witness = Pubkey::new_rand();
let witness = solana_sdk::pubkey::new_rand();
config.command = CliCommand::Witness(bob_pubkey, witness);
let result = process_command(&config);
assert!(result.is_ok());
@ -3699,7 +3914,15 @@ mod tests {
// Success case
let mut config = CliConfig::default();
config.rpc_client = Some(RpcClient::new_mock("deploy_succeeds".to_string()));
let account_info_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: Value::Null,
});
let mut mocks = HashMap::new();
mocks.insert(RpcRequest::GetAccountInfo, account_info_response);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
config.rpc_client = Some(rpc_client);
let default_keypair = Keypair::new();
config.signers = vec![&default_keypair];
@ -3707,6 +3930,7 @@ mod tests {
program_location: pathbuf.to_str().unwrap().to_string(),
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
};
let result = process_command(&config);
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
@ -3725,6 +3949,7 @@ mod tests {
program_location: "bad/file/location.so".to_string(),
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
};
assert!(process_command(&config).is_err());
}

View File

@ -1,7 +1,9 @@
use crate::{
cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
stake::is_stake_program_v2_enabled,
};
use chrono::{Local, TimeZone};
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use console::{style, Emoji};
use solana_clap_utils::{
@ -14,10 +16,15 @@ use solana_cli_output::{
*,
};
use solana_client::{
client_error::ClientErrorKind,
pubsub_client::PubsubClient,
rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient},
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
rpc_response::{RpcVersionInfo, SlotInfo},
rpc_config::{
RpcAccountInfoConfig, RpcLargestAccountsConfig, RpcLargestAccountsFilter,
RpcProgramAccountsConfig,
},
rpc_filter,
rpc_response::SlotInfo,
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
@ -67,8 +74,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.validator(is_slot)
.value_name("SLOT")
.takes_value(true)
.index(1)
.required(true),
.index(1),
),
)
.subcommand(
@ -357,7 +363,7 @@ pub fn parse_cluster_ping(
}
pub fn parse_get_block(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let slot = value_t_or_exit!(matches, "slot", Slot);
let slot = value_of(matches, "slot");
Ok(CliCommandInfo {
command: CliCommand::GetBlock { slot },
signers: vec![],
@ -536,7 +542,20 @@ pub fn process_catchup(
RpcClient::new_socket(rpc_addr)
};
let reported_node_pubkey = node_client.get_identity()?;
let reported_node_pubkey = loop {
match node_client.get_identity() {
Ok(reported_node_pubkey) => break reported_node_pubkey,
Err(err) => {
if let ClientErrorKind::Reqwest(err) = err.kind() {
progress_bar.set_message(&format!("Connection failed: {}", err));
sleep(Duration::from_secs(sleep_interval as u64));
continue;
}
return Err(Box::new(err));
}
}
};
if reported_node_pubkey != *node_pubkey {
return Err(format!(
"The identity reported by node RPC URL does not match. Expected: {:?}. Reported: {:?}",
@ -681,30 +700,71 @@ pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
Ok("".to_string())
}
pub fn process_get_block(rpc_client: &RpcClient, _config: &CliConfig, slot: Slot) -> ProcessResult {
let block =
pub fn process_get_block(
rpc_client: &RpcClient,
_config: &CliConfig,
slot: Option<Slot>,
) -> ProcessResult {
let slot = if let Some(slot) = slot {
slot
} else {
rpc_client.get_slot()?
};
let mut block =
rpc_client.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Base64)?;
println!("Slot: {}", slot);
println!("Parent Slot: {}", block.parent_slot);
println!("Blockhash: {}", block.blockhash);
println!("Previous Blockhash: {}", block.previous_blockhash);
if block.block_time.is_some() {
println!("Block Time: {:?}", block.block_time);
if let Some(block_time) = block.block_time {
println!("Block Time: {:?}", Local.timestamp(block_time, 0));
}
if !block.rewards.is_empty() {
block.rewards.sort_by(|a, b| a.pubkey.cmp(&b.pubkey));
let mut total_rewards = 0;
println!("Rewards:",);
println!(
" {:<44} {:^15} {:<15} {:<20} {:>14}",
"Address", "Type", "Amount", "New Balance", "Percent Change"
);
for reward in block.rewards {
let sign = if reward.lamports < 0 { "-" } else { "" };
total_rewards += reward.lamports;
println!(
" {:<44}: {}",
" {:<44} {:^15} {:>15} {}",
reward.pubkey,
if reward.lamports > 0 {
format!("{}", lamports_to_sol(reward.lamports as u64))
if let Some(reward_type) = reward.reward_type {
format!("{}", reward_type)
} else {
format!("-{}", lamports_to_sol(reward.lamports.abs() as u64))
"-".to_string()
},
format!(
"{}{:<14.9}",
sign,
lamports_to_sol(reward.lamports.abs() as u64)
),
if reward.post_balance == 0 {
" - -".to_string()
} else {
format!(
"{:<19.9} {:>13.9}%",
lamports_to_sol(reward.post_balance),
reward.lamports.abs() as f64
/ (reward.post_balance as f64 - reward.lamports as f64)
)
}
);
}
let sign = if total_rewards < 0 { "-" } else { "" };
println!(
"Total Rewards: {}{:<12.9}",
sign,
lamports_to_sol(total_rewards.abs() as u64)
);
}
for (index, transaction_with_meta) in block.transactions.iter().enumerate() {
println!("Transaction {}:", index);
@ -1208,14 +1268,16 @@ pub fn process_show_gossip(rpc_client: &RpcClient, config: &CliConfig) -> Proces
.into_iter()
.map(|node| {
format!(
"{:15} | {:44} | {:6} | {:5} | {:5} | {}",
"{:15} | {:44} | {:6} | {:5} | {:21} | {}",
node.gossip
.map(|addr| addr.ip().to_string())
.unwrap_or_else(|| "none".to_string()),
format_labeled_address(&node.pubkey, &config.address_labels),
format_port(node.gossip),
format_port(node.tpu),
format_port(node.rpc),
node.rpc
.map(|addr| addr.to_string())
.unwrap_or_else(|| "none".to_string()),
node.version.unwrap_or_else(|| "unknown".to_string()),
)
})
@ -1223,9 +1285,9 @@ pub fn process_show_gossip(rpc_client: &RpcClient, config: &CliConfig) -> Proces
Ok(format!(
"IP Address | Node identifier \
| Gossip | TPU | RPC | Version\n\
| Gossip | TPU | RPC Address | Version\n\
----------------+----------------------------------------------+\
--------+-------+-------+----------------\n\
--------+-------+-----------------------+----------------\n\
{}\n\
Nodes: {}",
s.join("\n"),
@ -1244,17 +1306,52 @@ pub fn process_show_stakes(
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Fetching stake accounts...");
let all_stake_accounts = rpc_client.get_program_accounts(&solana_stake_program::id())?;
let mut program_accounts_config = RpcProgramAccountsConfig {
filters: None,
account_config: RpcAccountInfoConfig {
encoding: Some(solana_account_decoder::UiAccountEncoding::Base64),
..RpcAccountInfoConfig::default()
},
};
if let Some(vote_account_pubkeys) = vote_account_pubkeys {
// Use server-side filtering if only one vote account is provided
if vote_account_pubkeys.len() == 1 {
program_accounts_config.filters = Some(vec![
// Filter by `StakeState::Stake(_, _)`
rpc_filter::RpcFilterType::Memcmp(rpc_filter::Memcmp {
offset: 0,
bytes: rpc_filter::MemcmpEncodedBytes::Binary(
bs58::encode([2, 0, 0, 0]).into_string(),
),
encoding: Some(rpc_filter::MemcmpEncoding::Binary),
}),
// Filter by `Delegation::voter_pubkey`, which begins at byte offset 124
rpc_filter::RpcFilterType::Memcmp(rpc_filter::Memcmp {
offset: 124,
bytes: rpc_filter::MemcmpEncodedBytes::Binary(
vote_account_pubkeys[0].to_string(),
),
encoding: Some(rpc_filter::MemcmpEncoding::Binary),
}),
]);
}
}
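// Note on the two constants above (worth re-checking if StakeState's layout changes):
// bincode encodes the enum tag as a little-endian u32, so `StakeState::Stake(_, _)`,
// variant index 2, begins with [2, 0, 0, 0]; that 4-byte tag plus Meta
// (rent_exempt_reserve u64 = 8, Authorized = 2 * 32, Lockup = 8 + 8 + 32) totals
// 124 bytes, which is where Delegation::voter_pubkey starts.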
let all_stake_accounts = rpc_client
.get_program_accounts_with_config(&solana_stake_program::id(), program_accounts_config)?;
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
progress_bar.finish_and_clear();
let clock_account = rpc_client.get_account(&sysvar::clock::id())?;
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?;
progress_bar.finish_and_clear();
let stake_history = StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
})?;
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?;
// At v1.6, this check can be removed and simply passed as `true`
let stake_program_v2_enabled = is_stake_program_v2_enabled(rpc_client);
let mut stake_accounts: Vec<CliKeyedStakeState> = vec![];
for (stake_pubkey, stake_account) in all_stake_accounts {
@ -1270,6 +1367,7 @@ pub fn process_show_stakes(
use_lamports_unit,
&stake_history,
&clock,
stake_program_v2_enabled,
),
});
}
@ -1288,6 +1386,7 @@ pub fn process_show_stakes(
use_lamports_unit,
&stake_history,
&clock,
stake_program_v2_enabled,
),
});
}
@ -1314,12 +1413,9 @@ pub fn process_show_validators(
for contact_info in rpc_client.get_cluster_nodes()? {
node_version.insert(
contact_info.pubkey,
RpcVersionInfo {
solana_core: contact_info
.version
.unwrap_or_else(|| unknown_version.clone()),
}
.to_string(),
contact_info
.version
.unwrap_or_else(|| unknown_version.clone()),
);
}
@ -1330,12 +1426,12 @@ pub fn process_show_validators(
.map(|vote_account| vote_account.activated_stake)
.sum();
let total_deliquent_stake = vote_accounts
let total_delinquent_stake = vote_accounts
.delinquent
.iter()
.map(|vote_account| vote_account.activated_stake)
.sum();
let total_current_stake = total_active_stake - total_deliquent_stake;
let total_current_stake = total_active_stake - total_delinquent_stake;
let mut current = vote_accounts.current;
current.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
@ -1389,7 +1485,7 @@ pub fn process_show_validators(
let cli_validators = CliValidators {
total_active_stake,
total_current_stake,
total_deliquent_stake,
total_delinquent_stake,
current_validators,
delinquent_validators,
stake_by_version,

cli/src/feature.rs (new file, 367 lines added)
View File

@ -0,0 +1,367 @@
use crate::{
cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
};
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
use console::style;
use serde::{Deserialize, Serialize};
use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::*};
use solana_cli_output::{QuietDisplay, VerboseDisplay};
use solana_client::{client_error::ClientError, rpc_client::RpcClient};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_runtime::{
feature::{self, Feature},
feature_set::FEATURE_NAMES,
};
use solana_sdk::{
clock::Slot, message::Message, pubkey::Pubkey, system_instruction, transaction::Transaction,
};
use std::{collections::HashMap, fmt, sync::Arc};
#[derive(Debug, PartialEq)]
pub enum FeatureCliCommand {
Status { features: Vec<Pubkey> },
Activate { feature: Pubkey },
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase", tag = "status", content = "sinceSlot")]
pub enum CliFeatureStatus {
Inactive,
Pending,
Active(Slot),
}
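// With this adjacently-tagged representation, the JSON output looks like
// {"status":"active","sinceSlot":123} for Active(123) and {"status":"inactive"} or
// {"status":"pending"} for the unit variants (illustrative of serde's behavior; the
// slot value here is made up).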
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeature {
pub id: String,
pub description: String,
#[serde(flatten)]
pub status: CliFeatureStatus,
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CliFeatures {
pub features: Vec<CliFeature>,
pub feature_activation_allowed: bool,
#[serde(skip)]
pub inactive: bool,
}
impl fmt::Display for CliFeatures {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.features.len() > 1 {
writeln!(
f,
"{}",
style(format!(
"{:<44} {:<40} {}",
"Feature", "Description", "Status"
))
.bold()
)?;
}
for feature in &self.features {
writeln!(
f,
"{:<44} {:<40} {}",
feature.id,
feature.description,
match feature.status {
CliFeatureStatus::Inactive => style("inactive".to_string()).red(),
CliFeatureStatus::Pending => style("activation pending".to_string()).yellow(),
CliFeatureStatus::Active(activation_slot) =>
style(format!("active since slot {}", activation_slot)).green(),
}
)?;
}
if self.inactive && !self.feature_activation_allowed {
writeln!(
f,
"{}",
style("\nFeature activation is not allowed at this time")
.bold()
.red()
)?;
}
Ok(())
}
}
impl QuietDisplay for CliFeatures {}
impl VerboseDisplay for CliFeatures {}
pub trait FeatureSubCommands {
fn feature_subcommands(self) -> Self;
}
impl FeatureSubCommands for App<'_, '_> {
fn feature_subcommands(self) -> Self {
self.subcommand(
SubCommand::with_name("feature")
.about("Runtime feature management")
.setting(AppSettings::SubcommandRequiredElseHelp)
.subcommand(
SubCommand::with_name("status")
.about("Query runtime feature status")
.arg(
Arg::with_name("features")
.value_name("ADDRESS")
.validator(is_valid_pubkey)
.index(1)
.multiple(true)
.help("Feature status to query [default: all known features]"),
),
)
.subcommand(
SubCommand::with_name("activate")
.about("Activate a runtime feature")
.arg(
Arg::with_name("feature")
.value_name("FEATURE_KEYPAIR")
.validator(is_valid_signer)
.index(1)
.required(true)
.help("The signer for the feature to activate"),
),
),
)
}
}
fn known_feature(feature: &Pubkey) -> Result<(), CliError> {
if FEATURE_NAMES.contains_key(feature) {
Ok(())
} else {
Err(CliError::BadParameter(format!(
"Unknown feature: {}",
feature
)))
}
}
pub fn parse_feature_subcommand(
matches: &ArgMatches<'_>,
default_signer: &DefaultSigner,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let response = match matches.subcommand() {
("activate", Some(matches)) => {
let (feature_signer, feature) = signer_of(matches, "feature", wallet_manager)?;
let mut signers = vec![default_signer.signer_from_path(matches, wallet_manager)?];
signers.push(feature_signer.unwrap());
let feature = feature.unwrap();
known_feature(&feature)?;
CliCommandInfo {
command: CliCommand::Feature(FeatureCliCommand::Activate { feature }),
signers,
}
}
("status", Some(matches)) => {
let mut features = if let Some(features) = pubkeys_of(matches, "features") {
for feature in &features {
known_feature(feature)?;
}
features
} else {
FEATURE_NAMES.keys().cloned().collect()
};
features.sort();
CliCommandInfo {
command: CliCommand::Feature(FeatureCliCommand::Status { features }),
signers: vec![],
}
}
_ => unreachable!(),
};
Ok(response)
}
pub fn process_feature_subcommand(
rpc_client: &RpcClient,
config: &CliConfig,
feature_subcommand: &FeatureCliCommand,
) -> ProcessResult {
match feature_subcommand {
FeatureCliCommand::Status { features } => process_status(rpc_client, config, features),
FeatureCliCommand::Activate { feature } => process_activate(rpc_client, config, *feature),
}
}
fn active_stake_by_feature_set(rpc_client: &RpcClient) -> Result<HashMap<u32, u64>, ClientError> {
// Validator identity -> feature set
let feature_set_map = rpc_client
.get_cluster_nodes()?
.into_iter()
.map(|contact_info| (contact_info.pubkey, contact_info.feature_set))
.collect::<HashMap<_, _>>();
let vote_accounts = rpc_client.get_vote_accounts()?;
let total_active_stake: u64 = vote_accounts
.current
.iter()
.chain(vote_accounts.delinquent.iter())
.map(|vote_account| vote_account.activated_stake)
.sum();
// Sum all active stake by feature set
let mut active_stake_by_feature_set = HashMap::new();
for vote_account in vote_accounts.current {
if let Some(Some(feature_set)) = feature_set_map.get(&vote_account.node_pubkey) {
*active_stake_by_feature_set.entry(*feature_set).or_default() +=
vote_account.activated_stake;
} else {
*active_stake_by_feature_set
.entry(0 /* "unknown" */)
.or_default() += vote_account.activated_stake;
}
}
// Convert active stake to a percentage so the caller doesn't need `total_active_stake`
for (_, val) in active_stake_by_feature_set.iter_mut() {
*val = *val * 100 / total_active_stake;
}
Ok(active_stake_by_feature_set)
}
// Feature activation is only allowed when 95% of the active stake is on the current feature set
fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<bool, ClientError> {
let my_feature_set = solana_version::Version::default().feature_set;
let active_stake_by_feature_set = active_stake_by_feature_set(rpc_client)?;
let feature_activation_allowed = active_stake_by_feature_set
.get(&my_feature_set)
.map(|percentage| *percentage >= 95)
.unwrap_or(false);
if !feature_activation_allowed && !quiet {
println!("{}", style("Stake By Feature Set:").bold());
for (feature_set, percentage) in active_stake_by_feature_set.iter() {
if *feature_set == 0 {
println!("unknown - {}%", percentage);
} else {
println!(
"{} - {}% {}",
feature_set,
percentage,
if *feature_set == my_feature_set {
" <-- me"
} else {
""
}
);
}
}
println!();
}
Ok(feature_activation_allowed)
}
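// Worked example of the threshold above: if active_stake_by_feature_set maps this
// build's feature set to 96 (percent) and everything else to 4, activation is allowed;
// at 94 / 6 it is not, and the stake-by-feature-set breakdown is printed instead.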
fn process_status(
rpc_client: &RpcClient,
config: &CliConfig,
feature_ids: &[Pubkey],
) -> ProcessResult {
let mut features: Vec<CliFeature> = vec![];
let mut inactive = false;
for (i, account) in rpc_client
.get_multiple_accounts(feature_ids)?
.into_iter()
.enumerate()
{
let feature_id = &feature_ids[i];
let feature_name = FEATURE_NAMES.get(feature_id).unwrap();
if let Some(account) = account {
if let Some(feature) = Feature::from_account(&account) {
let feature_status = match feature.activated_at {
None => CliFeatureStatus::Pending,
Some(activation_slot) => CliFeatureStatus::Active(activation_slot),
};
features.push(CliFeature {
id: feature_id.to_string(),
description: feature_name.to_string(),
status: feature_status,
});
continue;
}
}
inactive = true;
features.push(CliFeature {
id: feature_id.to_string(),
description: feature_name.to_string(),
status: CliFeatureStatus::Inactive,
});
}
let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?;
let feature_set = CliFeatures {
features,
feature_activation_allowed,
inactive,
};
Ok(config.output_format.formatted_string(&feature_set))
}
fn process_activate(
rpc_client: &RpcClient,
config: &CliConfig,
feature_id: Pubkey,
) -> ProcessResult {
let account = rpc_client
.get_multiple_accounts(&[feature_id])?
.into_iter()
.next()
.unwrap();
if let Some(account) = account {
if Feature::from_account(&account).is_some() {
return Err(format!("{} has already been activated", feature_id).into());
}
}
if !feature_activation_allowed(rpc_client, false)? {
return Err("Feature activation is not allowed at this time".into());
}
let rent = rpc_client.get_minimum_balance_for_rent_exemption(Feature::size_of())?;
let (blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let (message, _) = resolve_spend_tx_and_check_account_balance(
rpc_client,
false,
SpendAmount::Some(rent),
&fee_calculator,
&config.signers[0].pubkey(),
|lamports| {
Message::new(
&[
system_instruction::transfer(
&config.signers[0].pubkey(),
&feature_id,
lamports,
),
system_instruction::allocate(&feature_id, Feature::size_of() as u64),
system_instruction::assign(&feature_id, &feature::id()),
],
Some(&config.signers[0].pubkey()),
)
},
config.commitment,
)?;
let mut transaction = Transaction::new_unsigned(message);
transaction.try_sign(&config.signers, blockhash)?;
println!(
"Activating {} ({})",
FEATURE_NAMES.get(&feature_id).unwrap(),
feature_id
);
rpc_client.send_and_confirm_transaction_with_spinner(&transaction)?;
Ok("".to_string())
}

cli/src/inflation.rs (new file, 89 lines added)
View File

@ -0,0 +1,89 @@
use crate::cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult};
use clap::{App, ArgMatches, SubCommand};
use console::style;
use solana_clap_utils::keypair::*;
use solana_client::rpc_client::RpcClient;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use std::sync::Arc;
#[derive(Debug, PartialEq)]
pub enum InflationCliCommand {
Show,
}
pub trait InflationSubCommands {
fn inflation_subcommands(self) -> Self;
}
impl InflationSubCommands for App<'_, '_> {
fn inflation_subcommands(self) -> Self {
self.subcommand(SubCommand::with_name("inflation").about("Show inflation information"))
}
}
pub fn parse_inflation_subcommand(
_matches: &ArgMatches<'_>,
_default_signer: &DefaultSigner,
_wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
Ok(CliCommandInfo {
command: CliCommand::Inflation(InflationCliCommand::Show),
signers: vec![],
})
}
pub fn process_inflation_subcommand(
rpc_client: &RpcClient,
_config: &CliConfig,
inflation_subcommand: &InflationCliCommand,
) -> ProcessResult {
assert_eq!(*inflation_subcommand, InflationCliCommand::Show);
let governor = rpc_client.get_inflation_governor()?;
let current_inflation_rate = rpc_client.get_inflation_rate()?;
println!("{}", style("Inflation Governor:").bold());
if (governor.initial - governor.terminal).abs() < f64::EPSILON {
println!(
"Fixed APR: {:>5.2}%",
governor.terminal * 100.
);
} else {
println!("Initial APR: {:>5.2}%", governor.initial * 100.);
println!(
"Terminal APR: {:>5.2}%",
governor.terminal * 100.
);
println!("Rate reduction per year: {:>5.2}%", governor.taper * 100.);
}
if governor.foundation_term > 0. {
println!("Foundation percentage: {:>5.2}%", governor.foundation);
println!(
"Foundation term: {:.1} years",
governor.foundation_term
);
}
println!(
"\n{}",
style(format!(
"Inflation for Epoch {}:",
current_inflation_rate.epoch
))
.bold()
);
println!(
"Total APR: {:>5.2}%",
current_inflation_rate.total * 100.
);
println!(
"Staking APR: {:>5.2}%",
current_inflation_rate.validator * 100.
);
println!(
"Foundation APR: {:>5.2}%",
current_inflation_rate.foundation * 100.
);
Ok("".to_string())
}
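The taper printed above compounds yearly. A hedged sketch of the schedule it implies, assuming the commonly described geometric model initial * (1 - taper)^year floored at terminal (an illustration, not the runtime's exact computation):

// Illustrative only: projects the APR implied by governor values, assuming a
// geometric yearly taper floored at the terminal rate.
fn projected_apr(initial: f64, terminal: f64, taper: f64, years: i32) -> f64 {
    (initial * (1.0 - taper).powi(years)).max(terminal)
}

fn main() {
    // Hypothetical governor: 8% initial, 1.5% terminal, 15% reduction per year.
    for year in 0..10 {
        println!("year {:>2}: {:>5.2}%", year, projected_apr(0.08, 0.015, 0.15, year) * 100.0);
    }
}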


@ -23,7 +23,10 @@ extern crate serde_derive;
pub mod checks;
pub mod cli;
pub mod cluster_query;
pub mod feature;
pub mod inflation;
pub mod nonce;
pub mod send_tpu;
pub mod spend_utils;
pub mod stake;
pub mod test_utils;


@ -833,7 +833,7 @@ mod tests {
#[test]
fn test_check_nonce_account() {
let blockhash = Hash::default();
let nonce_pubkey = Pubkey::new_rand();
let nonce_pubkey = solana_sdk::pubkey::new_rand();
let data = Versions::new_current(State::Initialized(nonce::state::Data {
authority: nonce_pubkey,
blockhash,
@ -869,7 +869,7 @@ mod tests {
}
let data = Versions::new_current(State::Initialized(nonce::state::Data {
authority: Pubkey::new_rand(),
authority: solana_sdk::pubkey::new_rand(),
blockhash,
fee_calculator: FeeCalculator::default(),
}));

cli/src/send_tpu.rs (new file, 29 lines)

@ -0,0 +1,29 @@
use log::*;
use solana_client::rpc_response::{RpcContactInfo, RpcLeaderSchedule};
use std::net::{SocketAddr, UdpSocket};
pub fn get_leader_tpu(
slot_index: u64,
leader_schedule: Option<&RpcLeaderSchedule>,
cluster_nodes: Option<&Vec<RpcContactInfo>>,
) -> Option<SocketAddr> {
leader_schedule?
.iter()
.find(|(_pubkey, slots)| slots.iter().any(|slot| *slot as u64 == slot_index))
.and_then(|(pubkey, _)| {
cluster_nodes?
.iter()
.find(|contact_info| contact_info.pubkey == *pubkey)
.and_then(|contact_info| contact_info.tpu)
})
}
pub fn send_transaction_tpu(
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
wire_transaction: &[u8],
) {
if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
}
}
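A hedged usage sketch for the two helpers above; the leader schedule, node list, and wire bytes are assumed to have been fetched elsewhere (e.g. via RpcClient):

// Hypothetical caller: only get_leader_tpu and send_transaction_tpu come from
// the file above; everything passed in is assumed to be obtained elsewhere.
fn try_send_to_slot_leader(
    slot_index: u64,
    leader_schedule: Option<&RpcLeaderSchedule>,
    cluster_nodes: Option<&Vec<RpcContactInfo>>,
    wire_transaction: &[u8],
) {
    let send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind UDP socket");
    if let Some(tpu_address) = get_leader_tpu(slot_index, leader_schedule, cluster_nodes) {
        send_transaction_tpu(&send_socket, &tpu_address, wire_transaction);
    }
}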


@ -7,6 +7,7 @@ use crate::{
nonce::check_nonce_account,
spend_utils::{resolve_spend_tx_and_check_account_balances, SpendAmount},
};
use chrono::{Local, TimeZone};
use clap::{App, Arg, ArgGroup, ArgMatches, SubCommand};
use solana_clap_utils::{
fee_payer::{fee_payer_arg, FEE_PAYER_ARG},
@ -18,16 +19,22 @@ use solana_clap_utils::{
ArgConstant,
};
use solana_cli_output::{
return_signers, CliStakeHistory, CliStakeHistoryEntry, CliStakeState, CliStakeType,
return_signers, CliEpochReward, CliStakeHistory, CliStakeHistoryEntry, CliStakeState,
CliStakeType,
};
use solana_client::{
blockhash_query::BlockhashQuery, nonce_utils, rpc_client::RpcClient,
rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE,
blockhash_query::BlockhashQuery,
client_error::{ClientError, ClientErrorKind},
nonce_utils,
rpc_client::RpcClient,
rpc_custom_error,
rpc_request::{self, DELINQUENT_VALIDATOR_SLOT_DISTANCE},
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_runtime::{feature::Feature, feature_set};
use solana_sdk::{
account_utils::StateMut,
clock::Clock,
clock::{Clock, Epoch, Slot, UnixTimestamp, SECONDS_PER_DAY},
message::Message,
pubkey::Pubkey,
system_instruction::SystemError,
@ -43,7 +50,7 @@ use solana_stake_program::{
stake_state::{Authorized, Lockup, Meta, StakeAuthorize, StakeState},
};
use solana_vote_program::vote_state::VoteState;
use std::{ops::Deref, sync::Arc};
use std::{convert::TryInto, ops::Deref, sync::Arc};
pub const STAKE_AUTHORITY_ARG: ArgConstant<'static> = ArgConstant {
name: "stake_authority",
@ -154,8 +161,8 @@ impl StakeSubCommands for App<'_, '_> {
.validator(is_valid_signer)
.help("Source account of funds [default: cli config keypair]"),
)
.offline_args()
.nonce_args()
.offline_args(false)
.nonce_args(false)
.arg(fee_payer_arg())
)
.subcommand(
@ -183,8 +190,8 @@ impl StakeSubCommands for App<'_, '_> {
"The vote account to which the stake will be delegated")
)
.arg(stake_authority_arg())
.offline_args()
.nonce_args()
.offline_args(false)
.nonce_args(false)
.arg(fee_payer_arg())
)
.subcommand(
@ -213,8 +220,8 @@ impl StakeSubCommands for App<'_, '_> {
)
.arg(stake_authority_arg())
.arg(withdraw_authority_arg())
.offline_args()
.nonce_args()
.offline_args(false)
.nonce_args(false)
.arg(fee_payer_arg())
)
.subcommand(
@ -228,8 +235,8 @@ impl StakeSubCommands for App<'_, '_> {
"Stake account to be deactivated. ")
)
.arg(stake_authority_arg())
.offline_args()
.nonce_args()
.offline_args(false)
.nonce_args(false)
.arg(fee_payer_arg())
)
.subcommand(
@ -268,8 +275,8 @@ impl StakeSubCommands for App<'_, '_> {
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the SPLIT STAKE ACCOUNT pubkey")
)
.arg(stake_authority_arg())
.offline_args()
.nonce_args()
.offline_args(false)
.nonce_args(false)
.arg(fee_payer_arg())
)
.subcommand(
@ -290,8 +297,8 @@ impl StakeSubCommands for App<'_, '_> {
"Source stake account for the merge. If successful, this stake account will no longer exist after the merge")
)
.arg(stake_authority_arg())
.offline_args()
.nonce_args()
.offline_args(false)
.nonce_args(false)
.arg(fee_payer_arg())
)
.subcommand(
@ -321,8 +328,8 @@ impl StakeSubCommands for App<'_, '_> {
.help("The amount to withdraw from the stake account, in SOL")
)
.arg(withdraw_authority_arg())
.offline_args()
.nonce_args()
.offline_args(false)
.nonce_args(false)
.arg(fee_payer_arg())
.arg(
Arg::with_name("custodian")
@ -376,8 +383,8 @@ impl StakeSubCommands for App<'_, '_> {
.validator(is_valid_signer)
.help("Keypair of the existing custodian [default: cli config pubkey]")
)
.offline_args()
.nonce_args()
.offline_args(false)
.nonce_args(false)
.arg(fee_payer_arg())
)
.subcommand(
@ -1495,6 +1502,7 @@ pub fn build_stake_state(
use_lamports_unit: bool,
stake_history: &StakeHistory,
clock: &Clock,
stake_program_v2_enabled: bool,
) -> CliStakeState {
match stake_state {
StakeState::Stake(
@ -1506,9 +1514,12 @@ pub fn build_stake_state(
stake,
) => {
let current_epoch = clock.epoch;
let (active_stake, activating_stake, deactivating_stake) = stake
.delegation
.stake_activating_and_deactivating(current_epoch, Some(stake_history));
let (active_stake, activating_stake, deactivating_stake) =
stake.delegation.stake_activating_and_deactivating(
current_epoch,
Some(stake_history),
stake_program_v2_enabled,
);
let lockup = if lockup.is_in_force(clock, None) {
Some(lockup.into())
} else {
@ -1543,6 +1554,7 @@ pub fn build_stake_state(
active_stake: u64_some_if_not_zero(active_stake),
activating_stake: u64_some_if_not_zero(activating_stake),
deactivating_stake: u64_some_if_not_zero(deactivating_stake),
..CliStakeState::default()
}
}
StakeState::RewardsPool => CliStakeState {
@ -1577,17 +1589,112 @@ pub fn build_stake_state(
}
}
pub(crate) fn fetch_epoch_rewards(
rpc_client: &RpcClient,
address: &Pubkey,
lowest_epoch: Epoch,
) -> Result<Vec<CliEpochReward>, Box<dyn std::error::Error>> {
let mut all_epoch_rewards = vec![];
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let slot = rpc_client.get_slot()?;
let first_available_block = rpc_client.get_first_available_block()?;
let mut epoch = epoch_schedule.get_epoch_and_slot_index(slot).0;
let mut epoch_info: Option<(Slot, UnixTimestamp, solana_transaction_status::Rewards)> = None;
while epoch > lowest_epoch {
let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
if first_slot_in_epoch < first_available_block {
// RPC node is out of history data
break;
}
let first_confirmed_block_in_epoch = *rpc_client
.get_confirmed_blocks_with_limit(first_slot_in_epoch, 1)?
.get(0)
.ok_or_else(|| format!("Unable to fetch first confirmed block for epoch {}", epoch))?;
let first_confirmed_block = match rpc_client.get_confirmed_block_with_encoding(
first_confirmed_block_in_epoch,
solana_transaction_status::UiTransactionEncoding::Base64,
) {
Ok(first_confirmed_block) => first_confirmed_block,
Err(ClientError {
kind:
ClientErrorKind::RpcError(rpc_request::RpcError::RpcResponseError {
code: rpc_custom_error::JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE,
message: _,
}),
request: _,
}) => {
// RPC node doesn't have this block
break;
}
Err(err) => {
return Err(err.into());
}
};
let epoch_start_time = if let Some(block_time) = first_confirmed_block.block_time {
block_time
} else {
break;
};
// Rewards for the previous epoch are found in the first confirmed block of the current epoch
let previous_epoch_rewards = first_confirmed_block.rewards;
if let Some((effective_slot, epoch_end_time, epoch_rewards)) = epoch_info {
let wallclock_epoch_duration =
{ Local.timestamp(epoch_end_time, 0) - Local.timestamp(epoch_start_time, 0) }
.to_std()?
.as_secs_f64();
let wallclock_epochs_per_year =
(SECONDS_PER_DAY * 365) as f64 / wallclock_epoch_duration;
if let Some(reward) = epoch_rewards
.into_iter()
.find(|reward| reward.pubkey == address.to_string())
{
if reward.post_balance > reward.lamports.try_into().unwrap_or(0) {
let balance_increase_percent = reward.lamports.abs() as f64
/ (reward.post_balance as f64 - reward.lamports as f64);
all_epoch_rewards.push(CliEpochReward {
epoch,
effective_slot,
amount: reward.lamports.abs() as u64,
post_balance: reward.post_balance,
percent_change: balance_increase_percent,
apr: balance_increase_percent * wallclock_epochs_per_year,
});
}
}
}
epoch -= 1;
epoch_info = Some((
first_confirmed_block_in_epoch,
epoch_start_time,
previous_epoch_rewards,
));
}
Ok(all_epoch_rewards)
}
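The annualization above scales one epoch's balance increase by the number of wallclock epochs that fit in a year. A hedged sketch of that arithmetic with made-up numbers:

// Made-up figures: a 100-lamport reward credited to a stake that ended the
// epoch at 1_000_100 lamports, with epochs lasting roughly 2.5 days.
fn main() {
    let lamports: i64 = 100;
    let post_balance: u64 = 1_000_100;
    let wallclock_epoch_duration = 2.5 * 24.0 * 60.0 * 60.0; // seconds
    let wallclock_epochs_per_year = (365.0 * 24.0 * 60.0 * 60.0) / wallclock_epoch_duration;
    let percent_change = lamports.abs() as f64 / (post_balance as f64 - lamports as f64);
    let apr = percent_change * wallclock_epochs_per_year;
    println!("epoch return {:.4}%, APR {:.2}%", percent_change * 100.0, apr * 100.0);
}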
pub fn process_show_stake_account(
rpc_client: &RpcClient,
config: &CliConfig,
stake_account_pubkey: &Pubkey,
stake_account_address: &Pubkey,
use_lamports_unit: bool,
) -> ProcessResult {
let stake_account = rpc_client.get_account(stake_account_pubkey)?;
let stake_account = rpc_client.get_account(stake_account_address)?;
if stake_account.owner != solana_stake_program::id() {
return Err(CliError::RpcRequestError(format!(
"{:?} is not a stake account",
stake_account_pubkey,
stake_account_address,
))
.into());
}
@ -1603,13 +1710,24 @@ pub fn process_show_stake_account(
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?;
let state = build_stake_state(
let mut state = build_stake_state(
stake_account.lamports,
&stake_state,
use_lamports_unit,
&stake_history,
&clock,
is_stake_program_v2_enabled(rpc_client), // At v1.6, this check can be removed and `true` passed instead
);
if state.stake_type == CliStakeType::Stake {
if let Some(activation_epoch) = state.activation_epoch {
state.epoch_rewards = Some(fetch_epoch_rewards(
rpc_client,
stake_account_address,
activation_epoch,
)?);
}
}
Ok(config.output_format.formatted_string(&state))
}
Err(err) => Err(CliError::RpcRequestError(format!(
@ -1769,6 +1887,15 @@ pub fn process_delegate_stake(
}
}
pub fn is_stake_program_v2_enabled(rpc_client: &RpcClient) -> bool {
rpc_client
.get_account(&feature_set::stake_program_v2::id())
.ok()
.and_then(|account| Feature::from_account(&account))
.and_then(|feature| feature.activated_at)
.is_some()
}
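The helper above is one instance of a general feature-gate probe. A hedged sketch of the same check parameterized over any feature id (the function name is ours, not part of this change):

// Generic variant of the check above; works for any id in
// solana_runtime::feature_set. Name and placement are illustrative only.
fn is_feature_enabled(rpc_client: &RpcClient, feature_gate_id: &Pubkey) -> bool {
    rpc_client
        .get_account(feature_gate_id)
        .ok()
        .and_then(|account| Feature::from_account(&account))
        .and_then(|feature| feature.activated_at)
        .is_some()
}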
#[cfg(test)]
mod tests {
use super::*;
@ -2314,9 +2441,9 @@ mod tests {
);
// Test CreateStakeAccount SubCommand
let custodian = Pubkey::new_rand();
let custodian = solana_sdk::pubkey::new_rand();
let custodian_string = format!("{}", custodian);
let authorized = Pubkey::new_rand();
let authorized = solana_sdk::pubkey::new_rand();
let authorized_string = format!("{}", authorized);
let test_create_stake_account = test_commands.clone().get_matches_from(vec![
"test",
@ -2454,7 +2581,7 @@ mod tests {
);
// Test DelegateStake Subcommand
let vote_account_pubkey = Pubkey::new_rand();
let vote_account_pubkey = solana_sdk::pubkey::new_rand();
let vote_account_string = vote_account_pubkey.to_string();
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test",
@ -2481,7 +2608,7 @@ mod tests {
);
// Test DelegateStake Subcommand w/ authority
let vote_account_pubkey = Pubkey::new_rand();
let vote_account_pubkey = solana_sdk::pubkey::new_rand();
let vote_account_string = vote_account_pubkey.to_string();
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test",
@ -2600,7 +2727,7 @@ mod tests {
);
// Test Delegate Subcommand w/ absent fee payer
let key1 = Pubkey::new_rand();
let key1 = solana_sdk::pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]);
let signer1 = format!("{}={}", key1, sig1);
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
@ -2640,7 +2767,7 @@ mod tests {
);
// Test Delegate Subcommand w/ absent fee payer and absent nonce authority
let key2 = Pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let sig2 = Keypair::new().sign_message(&[0u8]);
let signer2 = format!("{}={}", key2, sig2);
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
@ -2968,7 +3095,7 @@ mod tests {
);
// Test Deactivate Subcommand w/ absent fee payer
let key1 = Pubkey::new_rand();
let key1 = solana_sdk::pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]);
let signer1 = format!("{}={}", key1, sig1);
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
@ -3005,7 +3132,7 @@ mod tests {
);
// Test Deactivate Subcommand w/ absent fee payer and nonce authority
let key2 = Pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let sig2 = Keypair::new().sign_message(&[0u8]);
let signer2 = format!("{}={}", key2, sig2);
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
@ -3184,7 +3311,7 @@ mod tests {
let stake_account_keypair = Keypair::new();
write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap();
let source_stake_account_pubkey = Pubkey::new_rand();
let source_stake_account_pubkey = solana_sdk::pubkey::new_rand();
let test_merge_stake_account = test_commands.clone().get_matches_from(vec![
"test",
"merge-stake",


@ -486,7 +486,7 @@ mod tests {
#[test]
fn test_parse_validator_info() {
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let keys = vec![(validator_info::id(), false), (pubkey, true)];
let config = ConfigKeys { keys };


@ -671,11 +671,11 @@ fn get_vote_account(
pub fn process_show_vote_account(
rpc_client: &RpcClient,
config: &CliConfig,
vote_account_pubkey: &Pubkey,
vote_account_address: &Pubkey,
use_lamports_unit: bool,
) -> ProcessResult {
let (vote_account, vote_state) =
get_vote_account(rpc_client, vote_account_pubkey, config.commitment)?;
get_vote_account(rpc_client, vote_account_address, config.commitment)?;
let epoch_schedule = rpc_client.get_epoch_schedule()?;
@ -696,6 +696,12 @@ pub fn process_show_vote_account(
}
}
let epoch_rewards = Some(crate::stake::fetch_epoch_rewards(
rpc_client,
vote_account_address,
1,
)?);
let vote_account_data = CliVoteAccount {
account_balance: vote_account.lamports,
validator_identity: vote_state.node_pubkey.to_string(),
@ -708,6 +714,7 @@ pub fn process_show_vote_account(
votes,
epoch_voting_history,
use_lamports_unit,
epoch_rewards,
};
Ok(config.output_format.formatted_string(&vote_account_data))
@ -908,7 +915,7 @@ mod tests {
);
// test init with an authed voter
let authed = Pubkey::new_rand();
let authed = solana_sdk::pubkey::new_rand();
let (keypair_file, mut tmp_file) = make_tmp_file();
let keypair = Keypair::new();
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();


@ -55,7 +55,7 @@ fn test_cli_deploy_program() {
faucet_host: None,
faucet_port: faucet_addr.port(),
pubkey: None,
lamports: 3 * minimum_balance_for_rent_exemption, // min balance for rent exemption for two programs + leftover for tx processing
lamports: 4 * minimum_balance_for_rent_exemption, // min balance for rent exemption for three programs + leftover for tx processing
};
config.signers = vec![&keypair];
process_command(&config).unwrap();
@ -64,6 +64,7 @@ fn test_cli_deploy_program() {
program_location: pathbuf.to_str().unwrap().to_string(),
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
};
let response = process_command(&config);
@ -98,6 +99,7 @@ fn test_cli_deploy_program() {
program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
};
process_command(&config).unwrap();
let account1 = rpc_client
@ -113,6 +115,44 @@ fn test_cli_deploy_program() {
// Attempt to redeploy to the same address
process_command(&config).unwrap_err();
// Attempt to deploy to account with excess balance
let custom_address_keypair = Keypair::new();
config.command = CliCommand::Airdrop {
faucet_host: None,
faucet_port: faucet_addr.port(),
pubkey: None,
lamports: 2 * minimum_balance_for_rent_exemption, // Anything over minimum_balance_for_rent_exemption should trigger err
};
config.signers = vec![&custom_address_keypair];
process_command(&config).unwrap();
config.signers = vec![&keypair, &custom_address_keypair];
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
};
process_command(&config).unwrap_err();
// Use forcing parameter to deploy to account with excess balance
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: true,
};
process_command(&config).unwrap();
let account2 = rpc_client
.get_account_with_commitment(&custom_address_keypair.pubkey(), CommitmentConfig::recent())
.unwrap()
.value
.unwrap();
assert_eq!(account2.lamports, 2 * minimum_balance_for_rent_exemption);
assert_eq!(account2.owner, bpf_loader::id());
assert_eq!(account2.executable, true);
assert_eq!(account0.data, account2.data);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}


@ -10,7 +10,7 @@ use solana_client::{
rpc_client::RpcClient,
};
use solana_core::contact_info::ContactInfo;
use solana_core::test_validator::{TestValidator, TestValidatorOptions};
use solana_core::test_validator::TestValidator;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
commitment_config::CommitmentConfig,
@ -172,7 +172,7 @@ fn full_battery_tests(
assert_ne!(first_nonce, third_nonce);
// Withdraw from nonce account
let payee_pubkey = Pubkey::new_rand();
let payee_pubkey = solana_sdk::pubkey::new_rand();
config_payer.signers = authorized_signers;
config_payer.command = CliCommand::WithdrawFromNonceAccount {
nonce_account,
@ -231,17 +231,14 @@ fn full_battery_tests(
#[test]
fn test_create_account_with_seed() {
solana_logger::setup();
let TestValidator {
server,
leader_data,
alice: mint_keypair,
ledger_path,
..
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
} = TestValidator::run_with_fees(1);
let (sender, receiver) = channel();
run_local_faucet(mint_keypair, sender, None);


@ -30,7 +30,7 @@ fn test_cli_timestamp_tx() {
ledger_path,
..
} = TestValidator::run();
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
@ -117,7 +117,7 @@ fn test_cli_witness_tx() {
ledger_path,
..
} = TestValidator::run();
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
@ -199,7 +199,7 @@ fn test_cli_cancel_tx() {
ledger_path,
..
} = TestValidator::run();
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
@ -274,7 +274,7 @@ fn test_offline_pay_tx() {
ledger_path,
..
} = TestValidator::run();
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
@ -419,7 +419,7 @@ fn test_nonced_pay_tx() {
.unwrap()
.blockhash;
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
config.signers = vec![&default_signer];
config.command = CliCommand::Pay(PayCommand {
amount: SpendAmount::Some(10),


@ -9,7 +9,7 @@ use solana_client::{
nonce_utils,
rpc_client::RpcClient,
};
use solana_core::test_validator::{TestValidator, TestValidatorOptions};
use solana_core::test_validator::TestValidator;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
@ -848,11 +848,7 @@ fn test_stake_authorize_with_fee_payer() {
alice,
ledger_path,
..
} = TestValidator::run_with_options(TestValidatorOptions {
fees: SIG_FEE,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
} = TestValidator::run_with_fees(SIG_FEE);
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
@ -985,11 +981,7 @@ fn test_stake_split() {
alice,
ledger_path,
..
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
} = TestValidator::run_with_fees(1);
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();
@ -1140,11 +1132,7 @@ fn test_stake_set_lockup() {
alice,
ledger_path,
..
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
} = TestValidator::run_with_fees(1);
let (sender, receiver) = channel();
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();


@ -9,7 +9,7 @@ use solana_client::{
nonce_utils,
rpc_client::RpcClient,
};
use solana_core::test_validator::{TestValidator, TestValidatorOptions};
use solana_core::test_validator::TestValidator;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
commitment_config::CommitmentConfig,
@ -21,17 +21,14 @@ use std::{fs::remove_dir_all, sync::mpsc::channel};
#[test]
fn test_transfer() {
solana_logger::setup();
let TestValidator {
server,
leader_data,
alice: mint_keypair,
ledger_path,
..
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
} = TestValidator::run_with_fees(1);
let (sender, receiver) = channel();
run_local_faucet(mint_keypair, sender, None);
@ -252,17 +249,14 @@ fn test_transfer() {
#[test]
fn test_transfer_multisession_signing() {
solana_logger::setup();
let TestValidator {
server,
leader_data,
alice: mint_keypair,
ledger_path,
..
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
} = TestValidator::run_with_fees(1);
let (sender, receiver) = channel();
run_local_faucet(mint_keypair, sender, None);
@ -382,17 +376,14 @@ fn test_transfer_multisession_signing() {
#[test]
fn test_transfer_all() {
solana_logger::setup();
let TestValidator {
server,
leader_data,
alice: mint_keypair,
ledger_path,
..
} = TestValidator::run_with_options(TestValidatorOptions {
fees: 1,
bootstrap_validator_lamports: 42_000,
..TestValidatorOptions::default()
});
} = TestValidator::run_with_fees(1);
let (sender, receiver) = channel();
run_local_faucet(mint_keypair, sender, None);


@ -12,7 +12,6 @@ use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
commitment_config::CommitmentConfig,
pubkey::Pubkey,
signature::{Keypair, Signer},
};
use solana_vote_program::vote_state::{VoteAuthorize, VoteState, VoteStateVersions};
@ -110,7 +109,7 @@ fn test_vote_authorize_and_withdraw() {
assert_eq!(authorized_withdrawer, withdraw_authority.pubkey());
// Withdraw from vote account
let destination_account = Pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
config.signers = vec![&default_signer, &withdraw_authority];
config.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,


@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.3.13"
version = "1.3.23"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@ -9,32 +9,35 @@ license = "Apache-2.0"
edition = "2018"
[dependencies]
base64 = "0.13.0"
bincode = "1.3.1"
bs58 = "0.3.1"
clap = "2.33.0"
indicatif = "0.15.0"
jsonrpc-core = "14.2.0"
jsonrpc-core = "15.0.0"
log = "0.4.8"
rayon = "1.4.0"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "0.11.0"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.3.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.13" }
solana-net-utils = { path = "../net-utils", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.13" }
solana-vote-program = { path = "../programs/vote", version = "1.3.13" }
solana-account-decoder = { path = "../account-decoder", version = "1.3.23" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.23" }
solana-net-utils = { path = "../net-utils", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.23" }
solana-version = { path = "../version", version = "1.3.23" }
solana-vote-program = { path = "../programs/vote", version = "1.3.23" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
[dev-dependencies]
assert_matches = "1.3.0"
jsonrpc-core = "14.2.0"
jsonrpc-http-server = "14.2.0"
solana-logger = { path = "../logger", version = "1.3.13" }
jsonrpc-core = "15.0.0"
jsonrpc-http-server = "15.0.0"
solana-logger = { path = "../logger", version = "1.3.23" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@ -177,7 +177,9 @@ mod tests {
#[test]
fn test_blockhash_query_new_from_matches_ok() {
let test_commands = App::new("blockhash_query_test").nonce_args().offline_args();
let test_commands = App::new("blockhash_query_test")
.nonce_args(false)
.offline_args(false);
let blockhash = hash(&[1u8]);
let blockhash_string = blockhash.to_string();


@ -50,10 +50,10 @@ impl Into<TransportError> for ClientErrorKind {
#[derive(Error, Debug)]
#[error("{kind}")]
pub struct ClientError {
request: Option<rpc_request::RpcRequest>,
pub request: Option<rpc_request::RpcRequest>,
#[source]
kind: ClientErrorKind,
pub kind: ClientErrorKind,
}
impl ClientError {


@ -27,6 +27,13 @@ impl HttpSender {
}
}
#[derive(Deserialize, Debug)]
struct RpcErrorObject {
code: i64,
message: String,
/*data field omitted*/
}
impl RpcSender for HttpSender {
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
// Concurrent requests are not supported so reuse the same request id for all requests
@ -63,11 +70,20 @@ impl RpcSender for HttpSender {
let json: serde_json::Value = serde_json::from_str(&response.text()?)?;
if json["error"].is_object() {
return Err(RpcError::RpcRequestError(format!(
"RPC Error response: {}",
serde_json::to_string(&json["error"]).unwrap()
))
.into());
return match serde_json::from_value::<RpcErrorObject>(json["error"].clone())
{
Ok(rpc_error_object) => Err(RpcError::RpcResponseError {
code: rpc_error_object.code,
message: rpc_error_object.message,
}
.into()),
Err(err) => Err(RpcError::RpcRequestError(format!(
"Failed to deserialize RPC error response: {} [{}]",
serde_json::to_string(&json["error"]).unwrap(),
err
))
.into()),
};
}
return Ok(json["result"].clone());
}
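Parsing the error object into a typed code and message is what lets callers match on specific server errors, as the epoch-rewards code earlier in this diff does for block-not-available. A hedged sketch of the payload shape, with an invented body:

// Illustration only: the sample body is made up; -32004 mirrors the
// block-not-available server error referenced elsewhere in this diff.
fn demo_parse_rpc_error() {
    let body = r#"{"code":-32004,"message":"Block not available for slot 12345"}"#;
    let err: RpcErrorObject = serde_json::from_str(body).unwrap();
    assert_eq!(err.code, -32004);
    assert_eq!(err.message, "Block not available for slot 12345");
}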


@ -10,6 +10,7 @@ pub mod perf_utils;
pub mod pubsub_client;
pub mod rpc_client;
pub mod rpc_config;
pub mod rpc_custom_error;
pub mod rpc_filter;
pub mod rpc_request;
pub mod rpc_response;


@ -1,17 +1,19 @@
use crate::{
client_error::Result,
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext},
rpc_response::{Response, RpcResponseContext, RpcVersionInfo},
rpc_sender::RpcSender,
};
use serde_json::{Number, Value};
use serde_json::{json, Number, Value};
use solana_sdk::{
epoch_info::EpochInfo,
fee_calculator::{FeeCalculator, FeeRateGovernor},
instruction::InstructionError,
signature::Signature,
transaction::{self, Transaction, TransactionError},
};
use solana_transaction_status::TransactionStatus;
use solana_version::Version;
use std::{collections::HashMap, sync::RwLock};
pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
@ -57,6 +59,13 @@ impl RpcSender for MockSender {
serde_json::to_value(FeeCalculator::default()).unwrap(),
),
})?,
RpcRequest::GetEpochInfo => serde_json::to_value(EpochInfo {
epoch: 1,
slot_index: 2,
slots_in_epoch: 32,
absolute_slot: 34,
block_height: 34,
})?,
RpcRequest::GetFeeCalculatorForBlockhash => {
let value = if self.url == "blockhash_expired" {
Value::Null
@ -94,9 +103,15 @@ impl RpcSender for MockSender {
err,
})
};
let statuses: Vec<Option<TransactionStatus>> = params.as_array().unwrap()[0]
.as_array()
.unwrap()
.iter()
.map(|_| status.clone())
.collect();
serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: vec![status],
value: statuses,
})?
}
RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)),
@ -106,13 +121,20 @@ impl RpcSender for MockSender {
Signature::new(&[8; 64]).to_string()
} else {
let tx_str = params.as_array().unwrap()[0].as_str().unwrap().to_string();
let data = bs58::decode(tx_str).into_vec().unwrap();
let data = base64::decode(tx_str).unwrap();
let tx: Transaction = bincode::deserialize(&data).unwrap();
tx.signatures[0].to_string()
};
Value::String(signature)
}
RpcRequest::GetMinimumBalanceForRentExemption => Value::Number(Number::from(20)),
RpcRequest::GetVersion => {
let version = Version::default();
json!(RpcVersionInfo {
solana_core: version.to_string(),
feature_set: Some(version.feature_set),
})
}
_ => Value::Null,
};
Ok(val)


@ -5,7 +5,8 @@ use crate::{
rpc_config::RpcAccountInfoConfig,
rpc_config::{
RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig,
RpcSendTransactionConfig, RpcTokenAccountsFilter,
RpcProgramAccountsConfig, RpcSendTransactionConfig, RpcSimulateTransactionConfig,
RpcTokenAccountsFilter,
},
rpc_request::{RpcError, RpcRequest, TokenAccountsFilter},
rpc_response::*,
@ -32,26 +33,49 @@ use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
transaction::{self, uses_durable_nonce, Transaction},
};
use solana_transaction_status::{
ConfirmedBlock, ConfirmedTransaction, TransactionStatus, UiTransactionEncoding,
EncodedConfirmedBlock, EncodedConfirmedTransaction, TransactionStatus, UiTransactionEncoding,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
net::SocketAddr,
sync::RwLock,
thread::sleep,
time::{Duration, Instant},
};
pub struct RpcClient {
sender: Box<dyn RpcSender + Send + Sync + 'static>,
default_cluster_transaction_encoding: RwLock<Option<UiTransactionEncoding>>,
}
fn serialize_encode_transaction(
transaction: &Transaction,
encoding: UiTransactionEncoding,
) -> ClientResult<String> {
let serialized = serialize(transaction)
.map_err(|e| ClientErrorKind::Custom(format!("transaction serialization failed: {}", e)))?;
let encoded = match encoding {
UiTransactionEncoding::Base58 => bs58::encode(serialized).into_string(),
UiTransactionEncoding::Base64 => base64::encode(serialized),
_ => {
return Err(ClientErrorKind::Custom(format!(
"unsupported transaction encoding: {}. Supported encodings: base58, base64",
encoding
))
.into())
}
};
Ok(encoded)
}
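A round-trip sketch for the base64 path, assuming `tx` is any transaction the caller already has; it only confirms the encoding is reversible:

// Sketch: encode a transaction the way the RPC methods below do, then decode
// and check the bytes against the bincode serialization.
fn assert_base64_round_trip(tx: &Transaction) {
    let encoded = serialize_encode_transaction(tx, UiTransactionEncoding::Base64).unwrap();
    let decoded = base64::decode(&encoded).unwrap();
    assert_eq!(decoded, bincode::serialize(tx).unwrap());
}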
impl RpcClient {
pub fn new_sender<T: RpcSender + Send + Sync + 'static>(sender: T) -> Self {
Self {
sender: Box::new(sender),
default_cluster_transaction_encoding: RwLock::new(None),
}
}
@ -107,13 +131,46 @@ impl RpcClient {
self.send_transaction_with_config(transaction, RpcSendTransactionConfig::default())
}
fn default_cluster_transaction_encoding(&self) -> Result<UiTransactionEncoding, RpcError> {
let default_cluster_transaction_encoding =
self.default_cluster_transaction_encoding.read().unwrap();
if let Some(encoding) = *default_cluster_transaction_encoding {
Ok(encoding)
} else {
drop(default_cluster_transaction_encoding);
let cluster_version = self.get_version().map_err(|e| {
RpcError::RpcRequestError(format!("cluster version query failed: {}", e))
})?;
let cluster_version =
semver::Version::parse(&cluster_version.solana_core).map_err(|e| {
RpcError::RpcRequestError(format!("failed to parse cluster version: {}", e))
})?;
// Prefer base64 since 1.3.16
let encoding = if cluster_version < semver::Version::new(1, 3, 16) {
UiTransactionEncoding::Base58
} else {
UiTransactionEncoding::Base64
};
*self.default_cluster_transaction_encoding.write().unwrap() = Some(encoding);
Ok(encoding)
}
}
pub fn send_transaction_with_config(
&self,
transaction: &Transaction,
config: RpcSendTransactionConfig,
) -> ClientResult<Signature> {
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
let encoding = if let Some(encoding) = config.encoding {
encoding
} else {
self.default_cluster_transaction_encoding()?
};
let config = RpcSendTransactionConfig {
encoding: Some(encoding),
..config
};
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
let signature_base58_str: String = self.send(
RpcRequest::SendTransaction,
json!([serialized_encoded, config]),
@ -140,12 +197,28 @@ impl RpcClient {
pub fn simulate_transaction(
&self,
transaction: &Transaction,
sig_verify: bool,
) -> RpcResult<RpcSimulateTransactionResult> {
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
self.simulate_transaction_with_config(transaction, RpcSimulateTransactionConfig::default())
}
pub fn simulate_transaction_with_config(
&self,
transaction: &Transaction,
config: RpcSimulateTransactionConfig,
) -> RpcResult<RpcSimulateTransactionResult> {
let encoding = if let Some(encoding) = config.encoding {
encoding
} else {
self.default_cluster_transaction_encoding()?
};
let config = RpcSimulateTransactionConfig {
encoding: Some(encoding),
..config
};
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
self.send(
RpcRequest::SimulateTransaction,
json!([serialized_encoded, { "sigVerify": sig_verify }]),
json!([serialized_encoded, config]),
)
}
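With sig_verify now carried in the config struct, a preflight call looks roughly like this hedged sketch (field names follow the RpcSimulateTransactionConfig shown later in this diff):

// Sketch: simulate with signature verification on, defaults elsewhere, and
// surface any simulation error as a custom client error.
fn preflight(rpc_client: &RpcClient, tx: &Transaction) -> ClientResult<()> {
    let response = rpc_client.simulate_transaction_with_config(
        tx,
        RpcSimulateTransactionConfig {
            sig_verify: true,
            ..RpcSimulateTransactionConfig::default()
        },
    )?;
    if let Some(err) = response.value.err {
        return Err(ClientErrorKind::Custom(format!("simulation failed: {:?}", err)).into());
    }
    Ok(())
}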
@ -164,6 +237,19 @@ impl RpcClient {
self.send(RpcRequest::GetSignatureStatuses, json!([signatures]))
}
pub fn get_signature_statuses_with_history(
&self,
signatures: &[Signature],
) -> RpcResult<Vec<Option<TransactionStatus>>> {
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
self.send(
RpcRequest::GetSignatureStatuses,
json!([signatures, {
"searchTransactionHistory": true
}]),
)
}
pub fn get_signature_status_with_commitment(
&self,
signature: &Signature,
@ -248,7 +334,7 @@ impl RpcClient {
self.send(RpcRequest::GetClusterNodes, Value::Null)
}
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<ConfirmedBlock> {
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<EncodedConfirmedBlock> {
self.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Json)
}
@ -256,7 +342,7 @@ impl RpcClient {
&self,
slot: Slot,
encoding: UiTransactionEncoding,
) -> ClientResult<ConfirmedBlock> {
) -> ClientResult<EncodedConfirmedBlock> {
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]))
}
@ -271,6 +357,17 @@ impl RpcClient {
)
}
pub fn get_confirmed_blocks_with_limit(
&self,
start_slot: Slot,
limit: usize,
) -> ClientResult<Vec<Slot>> {
self.send(
RpcRequest::GetConfirmedBlocksWithLimit,
json!([start_slot, limit]),
)
}
pub fn get_confirmed_signatures_for_address(
&self,
address: &Pubkey,
@ -326,7 +423,7 @@ impl RpcClient {
&self,
signature: &Signature,
encoding: UiTransactionEncoding,
) -> ClientResult<ConfirmedTransaction> {
) -> ClientResult<EncodedConfirmedTransaction> {
self.send(
RpcRequest::GetConfirmedTransaction,
json!([signature.to_string(), encoding]),
@ -415,7 +512,13 @@ impl RpcClient {
transaction: &Transaction,
) -> ClientResult<Signature> {
let signature = self.send_transaction(transaction)?;
let recent_blockhash = transaction.message.recent_blockhash;
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
self.get_recent_blockhash_with_commitment(CommitmentConfig::recent())?
.value
.0
} else {
transaction.message.recent_blockhash
};
let status = loop {
let status = self.get_signature_status(&signature)?;
if status.is_none() {
@ -572,8 +675,27 @@ impl RpcClient {
}
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> ClientResult<Vec<(Pubkey, Account)>> {
let accounts: Vec<RpcKeyedAccount> =
self.send(RpcRequest::GetProgramAccounts, json!([pubkey.to_string()]))?;
self.get_program_accounts_with_config(
pubkey,
RpcProgramAccountsConfig {
filters: None,
account_config: RpcAccountInfoConfig {
encoding: Some(UiAccountEncoding::Base64),
..RpcAccountInfoConfig::default()
},
},
)
}
pub fn get_program_accounts_with_config(
&self,
pubkey: &Pubkey,
config: RpcProgramAccountsConfig,
) -> ClientResult<Vec<(Pubkey, Account)>> {
let accounts: Vec<RpcKeyedAccount> = self.send(
RpcRequest::GetProgramAccounts,
json!([pubkey.to_string(), config]),
)?;
parse_keyed_accounts(accounts, RpcRequest::GetProgramAccounts)
}
@ -1109,7 +1231,13 @@ impl RpcClient {
"[{}/{}] Finalizing transaction {}",
confirmations, desired_confirmations, transaction.signatures[0],
));
let recent_blockhash = transaction.message.recent_blockhash;
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
self.get_recent_blockhash_with_commitment(CommitmentConfig::recent())?
.value
.0
} else {
transaction.message.recent_blockhash
};
let signature = self.send_transaction_with_config(transaction, config)?;
let (signature, status) = loop {
// Get recent commitment in order to count confirmations for successful transactions
@ -1328,7 +1456,7 @@ mod tests {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = Pubkey::new_rand();
let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
@ -1381,7 +1509,7 @@ mod tests {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = Pubkey::new_rand();
let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let result = rpc_client.send_and_confirm_transaction(&tx);


@ -4,6 +4,7 @@ use solana_sdk::{
clock::Epoch,
commitment_config::{CommitmentConfig, CommitmentLevel},
};
use solana_transaction_status::UiTransactionEncoding;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@ -17,6 +18,7 @@ pub struct RpcSendTransactionConfig {
#[serde(default)]
pub skip_preflight: bool,
pub preflight_commitment: Option<CommitmentLevel>,
pub encoding: Option<UiTransactionEncoding>,
}
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
@ -26,6 +28,7 @@ pub struct RpcSimulateTransactionConfig {
pub sig_verify: bool,
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub encoding: Option<UiTransactionEncoding>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]


@ -1,12 +1,15 @@
//! Implementation-defined RPC server errors
use crate::rpc_response::RpcSimulateTransactionResult;
use jsonrpc_core::{Error, ErrorCode};
use solana_client::rpc_response::RpcSimulateTransactionResult;
use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;
const JSON_RPC_SERVER_ERROR_3: i64 = -32003;
const JSON_RPC_SERVER_ERROR_4: i64 = -32004;
const JSON_RPC_SERVER_ERROR_5: i64 = -32005;
pub const JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP: i64 = -32001;
pub const JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE: i64 = -32002;
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE: i64 = -32003;
pub const JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE: i64 = -32004;
pub const JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY: i64 = -32005;
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_PRECOMPILE_VERIFICATION_FAILURE: i64 = -32006;
pub enum RpcCustomError {
BlockCleanedUp {
@ -22,6 +25,7 @@ pub enum RpcCustomError {
slot: Slot,
},
RpcNodeUnhealthy,
TransactionPrecompileVerificationFailure(solana_sdk::transaction::TransactionError),
}
impl From<RpcCustomError> for Error {
@ -31,7 +35,7 @@ impl From<RpcCustomError> for Error {
slot,
first_available_block,
} => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_1),
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP),
message: format!(
"Block {} cleaned up, does not exist on node. First available block: {}",
slot, first_available_block,
@ -39,25 +43,36 @@ impl From<RpcCustomError> for Error {
data: None,
},
RpcCustomError::SendTransactionPreflightFailure { message, result } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_2),
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE,
),
message,
data: Some(serde_json::json!(result)),
},
RpcCustomError::TransactionSignatureVerificationFailure => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_3),
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE,
),
message: "Transaction signature verification failure".to_string(),
data: None,
},
RpcCustomError::BlockNotAvailable { slot } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_4),
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE),
message: format!("Block not available for slot {}", slot),
data: None,
},
RpcCustomError::RpcNodeUnhealthy => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_5),
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY),
message: "RPC node is unhealthy".to_string(),
data: None,
},
RpcCustomError::TransactionPrecompileVerificationFailure(e) => Self {
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_TRANSACTION_PRECOMPILE_VERIFICATION_FAILURE,
),
message: format!("Transaction precompile verification failure {:?}", e),
data: None,
},
}
}
}


@ -13,6 +13,7 @@ pub enum RpcRequest {
GetClusterNodes,
GetConfirmedBlock,
GetConfirmedBlocks,
GetConfirmedBlocksWithLimit,
GetConfirmedSignaturesForAddress,
GetConfirmedSignaturesForAddress2,
GetConfirmedTransaction,
@ -67,6 +68,7 @@ impl fmt::Display for RpcRequest {
RpcRequest::GetClusterNodes => "getClusterNodes",
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
RpcRequest::GetConfirmedBlocksWithLimit => "getConfirmedBlocksWithLimit",
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
RpcRequest::GetConfirmedSignaturesForAddress2 => "getConfirmedSignaturesForAddress2",
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
@ -138,8 +140,10 @@ impl RpcRequest {
#[derive(Debug, Error)]
pub enum RpcError {
#[error("rpc request error: {0}")]
#[error("RPC request error: {0}")]
RpcRequestError(String),
#[error("RPC response error {code}: {message}")]
RpcResponseError { code: i64, message: String },
#[error("parse error: expected {0}")]
ParseError(String), /* "expected" */
// Anything in a `ForUser` needs to die. The caller should be
@ -224,7 +228,7 @@ mod tests {
// Test request with CommitmentConfig and params
let test_request = RpcRequest::GetTokenAccountsByOwner;
let mint = Pubkey::new_rand();
let mint = solana_sdk::pubkey::new_rand();
let token_account_filter = RpcTokenAccountsFilter::Mint(mint.to_string());
let request = test_request
.build_request_json(1, json!([addr, token_account_filter, commitment_config]));


@ -121,6 +121,7 @@ pub enum ReceivedSignatureResult {
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcContactInfo {
/// Pubkey of the node as a base-58 string
pub pubkey: String,
@ -132,6 +133,8 @@ pub struct RpcContactInfo {
pub rpc: Option<SocketAddr>,
/// Software version
pub version: Option<String>,
/// First 4 bytes of the FeatureSet identifier
pub feature_set: Option<u32>,
}
/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
@ -142,6 +145,8 @@ pub type RpcLeaderSchedule = HashMap<String, Vec<usize>>;
pub struct RpcVersionInfo {
/// The current version of solana-core
pub solana_core: String,
/// first 4 bytes of the FeatureSet identifier
pub feature_set: Option<u32>,
}
impl fmt::Debug for RpcVersionInfo {
@ -275,6 +280,15 @@ pub struct RpcConfirmedTransactionStatusWithSignature {
pub memo: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcPerfSample {
pub slot: Slot,
pub num_transactions: u64,
pub num_slots: u64,
pub sample_period_secs: u16,
}
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
let ConfirmedTransactionStatusWithSignature {


@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.3.13"
version = "1.3.23"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@ -14,6 +14,7 @@ edition = "2018"
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
[dependencies]
base64 = "0.12.3"
bincode = "1.3.1"
bv = { version = "0.11.1", features = ["serde"] }
bs58 = "0.3.1"
@ -24,15 +25,16 @@ crossbeam-channel = "0.4"
ed25519-dalek = "=1.0.0-pre.4"
fs_extra = "1.1.0"
flate2 = "1.0"
indexmap = "1.4"
indexmap = { version = "1.5", features = ["rayon"] }
itertools = "0.9.0"
jsonrpc-core = "14.2.0"
jsonrpc-core-client = { version = "14.2.0", features = ["ws"] }
jsonrpc-derive = "14.2.1"
jsonrpc-http-server = "14.2.0"
jsonrpc-pubsub = "14.2.0"
jsonrpc-ws-server = "14.2.0"
jsonrpc-core = "15.0.0"
jsonrpc-core-client = { version = "15.0.0", features = ["ws"] }
jsonrpc-derive = "15.0.0"
jsonrpc-http-server = "15.0.0"
jsonrpc-pubsub = "15.0.0"
jsonrpc-ws-server = "15.0.0"
log = "0.4.8"
lru = "0.6.0"
num_cpus = "1.13.0"
num-traits = "0.2"
rand = "0.7.0"
@ -43,44 +45,41 @@ regex = "1.3.9"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.3.13" }
solana-banks-server = { path = "../banks-server", version = "1.3.13" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.3.13" }
solana-budget-program = { path = "../programs/budget", version = "1.3.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.13" }
solana-client = { path = "../client", version = "1.3.13" }
solana-faucet = { path = "../faucet", version = "1.3.13" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.3.13" }
solana-ledger = { path = "../ledger", version = "1.3.13" }
solana-logger = { path = "../logger", version = "1.3.13" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.3.13" }
solana-metrics = { path = "../metrics", version = "1.3.13" }
solana-measure = { path = "../measure", version = "1.3.13" }
solana-net-utils = { path = "../net-utils", version = "1.3.13" }
solana-perf = { path = "../perf", version = "1.3.13" }
solana-runtime = { path = "../runtime", version = "1.3.13" }
solana-sdk = { path = "../sdk", version = "1.3.13" }
solana-sdk-macro-frozen-abi = { path = "../sdk/macro-frozen-abi", version = "1.3.13" }
solana-stake-program = { path = "../programs/stake", version = "1.3.13" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.3.13" }
solana-streamer = { path = "../streamer", version = "1.3.13" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.3.13" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.13" }
solana-version = { path = "../version", version = "1.3.13" }
solana-vote-program = { path = "../programs/vote", version = "1.3.13" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.13" }
spl-token-v2-0 = { package = "spl-token", version = "2.0.6", features = ["skip-no-mangle"] }
solana-account-decoder = { path = "../account-decoder", version = "1.3.23" }
solana-banks-server = { path = "../banks-server", version = "1.3.23" }
solana-budget-program = { path = "../programs/budget", version = "1.3.23" }
solana-clap-utils = { path = "../clap-utils", version = "1.3.23" }
solana-client = { path = "../client", version = "1.3.23" }
solana-faucet = { path = "../faucet", version = "1.3.23" }
solana-ledger = { path = "../ledger", version = "1.3.23" }
solana-logger = { path = "../logger", version = "1.3.23" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.3.23" }
solana-metrics = { path = "../metrics", version = "1.3.23" }
solana-measure = { path = "../measure", version = "1.3.23" }
solana-net-utils = { path = "../net-utils", version = "1.3.23" }
solana-perf = { path = "../perf", version = "1.3.23" }
solana-runtime = { path = "../runtime", version = "1.3.23" }
solana-sdk = { path = "../sdk", version = "1.3.23" }
solana-sdk-macro-frozen-abi = { path = "../sdk/macro-frozen-abi", version = "1.3.23" }
solana-stake-program = { path = "../programs/stake", version = "1.3.23" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.3.23" }
solana-streamer = { path = "../streamer", version = "1.3.23" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.3.23" }
solana-transaction-status = { path = "../transaction-status", version = "1.3.23" }
solana-version = { path = "../version", version = "1.3.23" }
solana-vote-program = { path = "../programs/vote", version = "1.3.23" }
solana-vote-signer = { path = "../vote-signer", version = "1.3.23" }
spl-token-v2-0 = { package = "spl-token", version = "=2.0.6", features = ["skip-no-mangle"] }
tempfile = "3.1.0"
thiserror = "1.0"
tokio_01 = { version = "0.1", package = "tokio" }
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
tokio_io_01 = { version = "0.1", package = "tokio-io" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.3.13" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.3.23" }
tokio = { version = "0.2.22", features = ["full"] }
trees = "0.2.1"
[dev-dependencies]
base64 = "0.12.3"
matches = "0.1.6"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serial_test = "0.4.0"
@ -96,9 +95,15 @@ name = "banking_stage"
[[bench]]
name = "blockstore"
[[bench]]
name = "crds"
[[bench]]
name = "crds_gossip_pull"
[[bench]]
name = "crds_shards"
[[bench]]
name = "gen_keys"


@ -20,7 +20,6 @@ use solana_runtime::bank::Bank;
use solana_sdk::genesis_config::GenesisConfig;
use solana_sdk::hash::Hash;
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signature;
use solana_sdk::signature::Signer;
@ -56,7 +55,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
let bank = Arc::new(Bank::new(&genesis_config));
let ledger_path = get_tmp_ledger_path!();
let my_pubkey = Pubkey::new_rand();
let my_pubkey = solana_sdk::pubkey::new_rand();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
@ -94,15 +93,15 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
}
fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
let to_pubkey = Pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
(0..txes)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new_rand();
new.message.account_keys[1] = Pubkey::new_rand();
new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
new.message.account_keys[1] = solana_sdk::pubkey::new_rand();
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
@ -116,7 +115,7 @@ fn make_programs_txs(txes: usize, hash: Hash) -> Vec<Transaction> {
let mut instructions = vec![];
let from_key = Keypair::new();
for _ in 1..progs {
let to_key = Pubkey::new_rand();
let to_key = solana_sdk::pubkey::new_rand();
instructions.push(system_instruction::transfer(&from_key.pubkey(), &to_key, 1));
}
let message = Message::new(&instructions, Some(&from_key.pubkey()));


@ -8,7 +8,6 @@ use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
use solana_ledger::shred::Shred;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::{
collections::HashMap,
@ -20,7 +19,7 @@ use test::Bencher;
#[bench]
fn broadcast_shreds_bench(bencher: &mut Bencher) {
solana_logger::setup();
let leader_pubkey = Pubkey::new_rand();
let leader_pubkey = solana_sdk::pubkey::new_rand();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info);
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
@ -30,7 +29,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
let mut stakes = HashMap::new();
const NUM_PEERS: usize = 200;
for _ in 0..NUM_PEERS {
let id = Pubkey::new_rand();
let id = solana_sdk::pubkey::new_rand();
let contact_info = ContactInfo::new_localhost(&id, timestamp());
cluster_info.insert_info(contact_info);
stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);

core/benches/crds.rs (new file, 31 lines)

@ -0,0 +1,31 @@
#![feature(test)]
extern crate test;
use rand::{thread_rng, Rng};
use rayon::ThreadPoolBuilder;
use solana_core::crds::Crds;
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use solana_core::crds_value::CrdsValue;
use solana_sdk::pubkey::Pubkey;
use std::collections::HashMap;
use test::Bencher;
#[bench]
fn bench_find_old_labels(bencher: &mut Bencher) {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut rng = thread_rng();
let mut crds = Crds::default();
let now = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS + CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 1000;
std::iter::repeat_with(|| (CrdsValue::new_rand(&mut rng), rng.gen_range(0, now)))
.take(50_000)
.for_each(|(v, ts)| assert!(crds.insert(v, ts).is_ok()));
let mut timeouts = HashMap::new();
timeouts.insert(Pubkey::default(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS);
bencher.iter(|| {
let out = crds.find_old_labels(&thread_pool, now, &timeouts);
assert!(out.len() > 10);
assert!(out.len() < 250);
out
});
}


@ -3,19 +3,18 @@
extern crate test;
use rand::{thread_rng, Rng};
use solana_core::crds_gossip_pull::CrdsFilter;
use solana_sdk::hash::{Hash, HASH_BYTES};
use rayon::ThreadPoolBuilder;
use solana_core::cluster_info::MAX_BLOOM_SIZE;
use solana_core::crds::Crds;
use solana_core::crds_gossip_pull::{CrdsFilter, CrdsGossipPull};
use solana_core::crds_value::CrdsValue;
use test::Bencher;
#[bench]
fn bench_hash_as_u64(bencher: &mut Bencher) {
let mut rng = thread_rng();
let hashes: Vec<_> = (0..1000)
.map(|_| {
let mut buf = [0u8; HASH_BYTES];
rng.fill(&mut buf);
Hash::new(&buf)
})
let hashes: Vec<_> = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
.take(1000)
.collect();
bencher.iter(|| {
hashes
@ -24,3 +23,30 @@ fn bench_hash_as_u64(bencher: &mut Bencher) {
.collect::<Vec<_>>()
});
}
#[bench]
fn bench_build_crds_filters(bencher: &mut Bencher) {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut rng = thread_rng();
let mut crds_gossip_pull = CrdsGossipPull::default();
let mut crds = Crds::default();
for _ in 0..50_000 {
crds_gossip_pull
.purged_values
.push_back((solana_sdk::hash::new_rand(&mut rng), rng.gen()));
}
let mut num_inserts = 0;
for _ in 0..90_000 {
if crds
.insert(CrdsValue::new_rand(&mut rng), rng.gen())
.is_ok()
{
num_inserts += 1;
}
}
assert_eq!(num_inserts, 90_000);
bencher.iter(|| {
let filters = crds_gossip_pull.build_crds_filters(&thread_pool, &crds, MAX_BLOOM_SIZE);
assert_eq!(filters.len(), 128);
});
}


@ -0,0 +1,71 @@
#![feature(test)]
extern crate test;
use rand::{thread_rng, Rng};
use solana_core::contact_info::ContactInfo;
use solana_core::crds::VersionedCrdsValue;
use solana_core::crds_shards::CrdsShards;
use solana_core::crds_value::{CrdsData, CrdsValue};
use solana_sdk::timing::timestamp;
use test::Bencher;
const CRDS_SHARDS_BITS: u32 = 8;
fn new_test_crds_value() -> VersionedCrdsValue {
let data = CrdsData::ContactInfo(ContactInfo::new_localhost(
&solana_sdk::pubkey::new_rand(),
timestamp(),
));
VersionedCrdsValue::new(timestamp(), CrdsValue::new_unsigned(data))
}
fn bench_crds_shards_find(bencher: &mut Bencher, num_values: usize, mask_bits: u32) {
let values: Vec<VersionedCrdsValue> = std::iter::repeat_with(new_test_crds_value)
.take(num_values)
.collect();
let mut shards = CrdsShards::new(CRDS_SHARDS_BITS);
for (index, value) in values.iter().enumerate() {
assert!(shards.insert(index, value));
}
let mut rng = thread_rng();
bencher.iter(|| {
let mask = rng.gen();
let _hits = shards.find(mask, mask_bits).count();
});
}
#[bench]
fn bench_crds_shards_find_0(bencher: &mut Bencher) {
bench_crds_shards_find(bencher, 100_000, 0);
}
#[bench]
fn bench_crds_shards_find_1(bencher: &mut Bencher) {
bench_crds_shards_find(bencher, 100_000, 1);
}
#[bench]
fn bench_crds_shards_find_3(bencher: &mut Bencher) {
bench_crds_shards_find(bencher, 100_000, 3);
}
#[bench]
fn bench_crds_shards_find_5(bencher: &mut Bencher) {
bench_crds_shards_find(bencher, 100_000, 5);
}
#[bench]
fn bench_crds_shards_find_7(bencher: &mut Bencher) {
bench_crds_shards_find(bencher, 100_000, 7);
}
#[bench]
fn bench_crds_shards_find_8(bencher: &mut Bencher) {
bench_crds_shards_find(bencher, 100_000, 8);
}
#[bench]
fn bench_crds_shards_find_9(bencher: &mut Bencher) {
bench_crds_shards_find(bencher, 100_000, 9);
}


@ -14,7 +14,6 @@ use solana_perf::packet::to_packets_chunked;
use solana_perf::test_tx::test_tx;
use solana_runtime::bank::Bank;
use solana_runtime::bank_forks::BankForks;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicUsize, Ordering};
@ -33,7 +32,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
const NUM_PEERS: usize = 4;
let mut peer_sockets = Vec::new();
for _ in 0..NUM_PEERS {
let id = Pubkey::new_rand();
let id = solana_sdk::pubkey::new_rand();
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut contact_info = ContactInfo::new_localhost(&id, timestamp());
contact_info.tvu = socket.local_addr().unwrap();


@ -30,10 +30,9 @@ use solana_runtime::{
};
use solana_sdk::{
clock::{
Epoch, Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY,
Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY,
MAX_TRANSACTION_FORWARDING_DELAY_GPU,
},
genesis_config::ClusterType,
poh_config::PohConfig,
pubkey::Pubkey,
timing::{duration_as_ms, timestamp},
@ -531,8 +530,20 @@ impl BankingStage {
} else {
vec![]
};
let (mut loaded_accounts, results, mut retryable_txs, tx_count, signature_count) =
bank.load_and_execute_transactions(batch, MAX_PROCESSING_AGE, None);
let (
mut loaded_accounts,
results,
inner_instructions,
transaction_logs,
mut retryable_txs,
tx_count,
signature_count,
) = bank.load_and_execute_transactions(
batch,
MAX_PROCESSING_AGE,
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
);
load_execute_time.stop();
let freeze_lock = bank.freeze_lock();
@ -569,6 +580,8 @@ impl BankingStage {
batch.iteration_order_vec(),
tx_results.processing_results,
TransactionBalancesSet::new(pre_balances, post_balances),
inner_instructions,
transaction_logs,
sender,
);
}
@ -725,8 +738,7 @@ impl BankingStage {
fn transactions_from_packets(
msgs: &Packets,
transaction_indexes: &[usize],
cluster_type: ClusterType,
epoch: Epoch,
secp256k1_program_enabled: bool,
) -> (Vec<Transaction>, Vec<usize>) {
let packets = Packets::new(
transaction_indexes
@ -736,25 +748,24 @@ impl BankingStage {
);
let transactions = Self::deserialize_transactions(&packets);
let maybe_secp_verified_transactions: Vec<_> =
if solana_sdk::secp256k1::is_enabled(cluster_type, epoch) {
transactions
.into_iter()
.map(|tx| {
if let Some(tx) = tx {
if tx.verify_precompiles().is_ok() {
Some(tx)
} else {
None
}
let maybe_secp_verified_transactions: Vec<_> = if secp256k1_program_enabled {
transactions
.into_iter()
.map(|tx| {
if let Some(tx) = tx {
if tx.verify_precompiles().is_ok() {
Some(tx)
} else {
None
}
})
.collect()
} else {
transactions
};
} else {
None
}
})
.collect()
} else {
transactions
};
Self::filter_transaction_indexes(maybe_secp_verified_transactions, &transaction_indexes)
}
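
The hunk above swaps the cluster_type/epoch pair for a single secp256k1_program_enabled flag but keeps the same Option-based filtering: when the flag is on, any deserialized transaction that fails precompile verification is mapped to None so that filter_transaction_indexes drops it while index alignment with the packets is preserved. A minimal standalone sketch of that pattern follows; Tx and verify_precompiles_ok are hypothetical stand-ins for the real Transaction type and its verify_precompiles check.

// Sketch only: `Tx` and `verify_precompiles_ok` are stand-ins, not the real types.
struct Tx { id: u64 }

fn verify_precompiles_ok(tx: &Tx) -> bool {
    // hypothetical check; the real code calls tx.verify_precompiles().is_ok()
    tx.id % 2 == 0
}

fn filter_txs(txs: Vec<Option<Tx>>, secp256k1_program_enabled: bool) -> Vec<Option<Tx>> {
    if secp256k1_program_enabled {
        // A failing transaction becomes None; positions stay aligned with packets.
        txs.into_iter()
            .map(|tx| tx.filter(verify_precompiles_ok))
            .collect()
    } else {
        txs
    }
}

fn main() {
    let txs = vec![Some(Tx { id: 1 }), Some(Tx { id: 2 }), None];
    let kept = filter_txs(txs, true);
    assert!(kept[0].is_none() && kept[1].is_some() && kept[2].is_none());
}
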
@ -808,8 +819,7 @@ impl BankingStage {
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
msgs,
&packet_indexes,
bank.cluster_type(),
bank.epoch(),
bank.secp256k1_program_enabled(),
);
debug!(
"bank: {} filtered transactions {}",
@ -862,8 +872,7 @@ impl BankingStage {
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
msgs,
&transaction_indexes,
bank.cluster_type(),
bank.epoch(),
bank.secp256k1_program_enabled(),
);
let tx_count = transaction_to_packet_indexes.len();
@ -1091,7 +1100,7 @@ mod tests {
system_transaction,
transaction::TransactionError,
};
use solana_transaction_status::{EncodedTransaction, TransactionWithStatusMeta};
use solana_transaction_status::TransactionWithStatusMeta;
use std::{sync::atomic::Ordering, thread::sleep};
#[test]
@ -1236,16 +1245,16 @@ mod tests {
bank.process_transaction(&fund_tx).unwrap();
// good tx
let to = Pubkey::new_rand();
let to = solana_sdk::pubkey::new_rand();
let tx = system_transaction::transfer(&mint_keypair, &to, 1, start_hash);
// good tx, but no verify
let to2 = Pubkey::new_rand();
let to2 = solana_sdk::pubkey::new_rand();
let tx_no_ver = system_transaction::transfer(&keypair, &to2, 2, start_hash);
// bad tx, AccountNotFound
let keypair = Keypair::new();
let to3 = Pubkey::new_rand();
let to3 = solana_sdk::pubkey::new_rand();
let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash);
// send 'em over
@ -1438,9 +1447,9 @@ mod tests {
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let keypair2 = Keypair::new();
let pubkey2 = Pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
@ -1518,7 +1527,7 @@ mod tests {
mint_keypair,
..
} = create_genesis_config(10_000);
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let transactions = vec![
None,
@ -1599,7 +1608,7 @@ mod tests {
mint_keypair,
..
} = create_genesis_config(10_000);
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
@ -1670,8 +1679,8 @@ mod tests {
#[test]
fn test_should_process_or_forward_packets() {
let my_pubkey = Pubkey::new_rand();
let my_pubkey1 = Pubkey::new_rand();
let my_pubkey = solana_sdk::pubkey::new_rand();
let my_pubkey1 = solana_sdk::pubkey::new_rand();
assert_eq!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, true, false,),
@ -1717,7 +1726,7 @@ mod tests {
..
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let transactions = vec![system_transaction::transfer(
&mint_keypair,
@ -1814,8 +1823,8 @@ mod tests {
..
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand();
let pubkey1 = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
@ -1910,7 +1919,7 @@ mod tests {
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let transactions =
vec![
@ -1928,7 +1937,7 @@ mod tests {
bank.slot(),
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
@ -1968,8 +1977,8 @@ mod tests {
..
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand();
let pubkey1 = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
let keypair1 = Keypair::new();
let success_tx =
@ -2037,36 +2046,25 @@ mod tests {
transaction_status_service.join().unwrap();
let confirmed_block = blockstore.get_confirmed_block(bank.slot(), None).unwrap();
let confirmed_block = blockstore.get_confirmed_block(bank.slot()).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for TransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if let EncodedTransaction::Json(transaction) = transaction {
if transaction.signatures[0] == success_signature.to_string() {
let meta = meta.unwrap();
assert_eq!(meta.err, None);
assert_eq!(meta.status, Ok(()));
} else if transaction.signatures[0] == ix_error_signature.to_string() {
let meta = meta.unwrap();
assert_eq!(
meta.err,
Some(TransactionError::InstructionError(
0,
InstructionError::Custom(1)
))
);
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::Custom(1)
))
);
} else {
assert_eq!(meta, None);
}
if transaction.signatures[0] == success_signature {
assert_eq!(meta.unwrap().status, Ok(()));
} else if transaction.signatures[0] == ix_error_signature {
let meta = meta.unwrap();
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::Custom(1)
))
);
} else {
assert_eq!(meta, None);
}
}
}


@ -140,13 +140,12 @@ impl BroadcastRun for BroadcastFakeShredsRun {
mod tests {
use super::*;
use crate::contact_info::ContactInfo;
use solana_sdk::pubkey::Pubkey;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
#[test]
fn test_tvu_peers_ordering() {
let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(


@ -92,7 +92,7 @@ mod tests {
let bank0 = Arc::new(Bank::new(&genesis_config));
let tx = system_transaction::transfer(
&mint_keypair,
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
1,
genesis_config.hash(),
);

File diff suppressed because it is too large.


@ -2,6 +2,7 @@ use crate::{
cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
crds_value::CrdsValueLabel,
optimistic_confirmation_verifier::OptimisticConfirmationVerifier,
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
poh_recorder::PohRecorder,
pubkey_references::LockedPubkeyReferences,
result::{Error, Result},
@ -26,7 +27,7 @@ use solana_runtime::{
vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
};
use solana_sdk::{
clock::{Epoch, Slot},
clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT},
epoch_schedule::EpochSchedule,
hash::Hash,
pubkey::Pubkey,
@ -97,7 +98,7 @@ impl VoteTracker {
epoch_schedule: *root_bank.epoch_schedule(),
..VoteTracker::default()
};
vote_tracker.process_new_root_bank(&root_bank);
vote_tracker.progress_with_new_root_bank(&root_bank);
assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(),
root_bank.get_leader_schedule_epoch(root_bank.slot())
@ -173,7 +174,7 @@ impl VoteTracker {
self.keys.get_or_insert(&pubkey);
}
fn update_leader_schedule_epoch(&self, root_bank: &Bank) {
fn progress_leader_schedule_epoch(&self, root_bank: &Bank) {
// Update with any newly calculated epoch state about future epochs
let start_leader_schedule_epoch = *self.leader_schedule_epoch.read().unwrap();
let mut greatest_leader_schedule_epoch = start_leader_schedule_epoch;
@ -204,7 +205,7 @@ impl VoteTracker {
}
}
fn update_new_root(&self, root_bank: &Bank) {
fn purge_stale_state(&self, root_bank: &Bank) {
// Purge any outdated slot data
let new_root = root_bank.slot();
let root_epoch = root_bank.epoch();
@ -219,15 +220,15 @@ impl VoteTracker {
self.epoch_authorized_voters
.write()
.unwrap()
.retain(|epoch, _| epoch >= &root_epoch);
.retain(|epoch, _| *epoch >= root_epoch);
self.keys.purge();
*self.current_epoch.write().unwrap() = root_epoch;
}
}
fn process_new_root_bank(&self, root_bank: &Bank) {
self.update_leader_schedule_epoch(root_bank);
self.update_new_root(root_bank);
fn progress_with_new_root_bank(&self, root_bank: &Bank) {
self.progress_leader_schedule_epoch(root_bank);
self.purge_stale_state(root_bank);
}
}
@ -248,6 +249,7 @@ impl ClusterInfoVoteListener {
verified_vote_sender: VerifiedVoteSender,
replay_votes_receiver: ReplayVoteReceiver,
blockstore: Arc<Blockstore>,
bank_notification_sender: Option<BankNotificationSender>,
) -> Self {
let exit_ = exit.clone();
@ -293,6 +295,7 @@ impl ClusterInfoVoteListener {
verified_vote_sender,
replay_votes_receiver,
blockstore,
bank_notification_sender,
);
})
.unwrap();
@ -420,8 +423,9 @@ impl ClusterInfoVoteListener {
verified_vote_sender: VerifiedVoteSender,
replay_votes_receiver: ReplayVoteReceiver,
blockstore: Arc<Blockstore>,
bank_notification_sender: Option<BankNotificationSender>,
) -> Result<()> {
let mut optimistic_confirmation_verifier =
let mut confirmation_verifier =
OptimisticConfirmationVerifier::new(bank_forks.read().unwrap().root());
let mut last_process_root = Instant::now();
loop {
@ -430,41 +434,40 @@ impl ClusterInfoVoteListener {
}
let root_bank = bank_forks.read().unwrap().root_bank().clone();
if last_process_root.elapsed().as_millis() > 400 {
let unrooted_optimistic_slots = optimistic_confirmation_verifier
.get_unrooted_optimistic_slots(&root_bank, &blockstore);
if last_process_root.elapsed().as_millis() > DEFAULT_MS_PER_SLOT as u128 {
let unrooted_optimistic_slots = confirmation_verifier
.verify_for_unrooted_optimistic_slots(&root_bank, &blockstore);
// SlotVoteTrackers for all `slots` in `unrooted_optimistic_slots`
// should still be available because we haven't purged in
// `process_new_root_bank()` yet, which is called below
// `progress_with_new_root_bank()` yet, which is called below
OptimisticConfirmationVerifier::log_unrooted_optimistic_slots(
&root_bank,
&vote_tracker,
&unrooted_optimistic_slots,
);
vote_tracker.process_new_root_bank(&root_bank);
vote_tracker.progress_with_new_root_bank(&root_bank);
last_process_root = Instant::now();
}
let optimistic_confirmed_slots = Self::get_and_process_votes(
let confirmed_slots = Self::listen_and_confirm_votes(
&gossip_vote_txs_receiver,
&vote_tracker,
&root_bank,
&subscriptions,
&verified_vote_sender,
&replay_votes_receiver,
&bank_notification_sender,
);
if let Err(e) = optimistic_confirmed_slots {
match e {
match confirmed_slots {
Ok(confirmed_slots) => {
confirmation_verifier.add_new_optimistic_confirmed_slots(confirmed_slots);
}
Err(e) => match e {
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)
| Error::ReadyTimeoutError => (),
_ => {
error!("thread {:?} error {:?}", thread::current().name(), e);
}
}
} else {
let optimistic_confirmed_slots = optimistic_confirmed_slots.unwrap();
optimistic_confirmation_verifier
.add_new_optimistic_confirmed_slots(optimistic_confirmed_slots);
},
}
}
}
@ -478,23 +481,25 @@ impl ClusterInfoVoteListener {
verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVoteReceiver,
) -> Result<Vec<(Slot, Hash)>> {
Self::get_and_process_votes(
Self::listen_and_confirm_votes(
gossip_vote_txs_receiver,
vote_tracker,
root_bank,
subscriptions,
verified_vote_sender,
replay_votes_receiver,
&None,
)
}
fn get_and_process_votes(
fn listen_and_confirm_votes(
gossip_vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
vote_tracker: &VoteTracker,
root_bank: &Bank,
subscriptions: &RpcSubscriptions,
verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVoteReceiver,
bank_notification_sender: &Option<BankNotificationSender>,
) -> Result<Vec<(Slot, Hash)>> {
let mut sel = Select::new();
sel.recv(gossip_vote_txs_receiver);
@ -516,13 +521,14 @@ impl ClusterInfoVoteListener {
let gossip_vote_txs: Vec<_> = gossip_vote_txs_receiver.try_iter().flatten().collect();
let replay_votes: Vec<_> = replay_votes_receiver.try_iter().collect();
if !gossip_vote_txs.is_empty() || !replay_votes.is_empty() {
return Ok(Self::process_votes(
return Ok(Self::filter_and_confirm_with_new_votes(
vote_tracker,
gossip_vote_txs,
replay_votes,
root_bank,
subscriptions,
verified_vote_sender,
bank_notification_sender,
));
} else {
remaining_wait_time = remaining_wait_time
@ -533,7 +539,7 @@ impl ClusterInfoVoteListener {
}
#[allow(clippy::too_many_arguments)]
fn update_new_votes(
fn track_new_votes_and_notify_confirmations(
vote: Vote,
vote_pubkey: &Pubkey,
vote_tracker: &VoteTracker,
@ -543,66 +549,69 @@ impl ClusterInfoVoteListener {
diff: &mut HashMap<Slot, HashMap<Arc<Pubkey>, bool>>,
new_optimistic_confirmed_slots: &mut Vec<(Slot, Hash)>,
is_gossip_vote: bool,
bank_notification_sender: &Option<BankNotificationSender>,
) {
if vote.slots.is_empty() {
return;
}
let last_vote_slot = vote.slots.last().unwrap();
let last_vote_slot = *vote.slots.last().unwrap();
let last_vote_hash = vote.hash;
let root = root_bank.slot();
let last_vote_hash = vote.hash;
let mut is_new_vote = false;
for slot in vote.slots.iter().rev() {
// If slot is before the root, or so far ahead we don't have
// stake information, then ignore it
let epoch = root_bank.epoch_schedule().get_epoch(*slot);
// If slot is before the root, ignore it
for slot in vote.slots.iter().filter(|slot| **slot > root).rev() {
let slot = *slot;
// if we don't have stake information, ignore it
let epoch = root_bank.epoch_schedule().get_epoch(slot);
let epoch_stakes = root_bank.epoch_stakes(epoch);
if *slot <= root || epoch_stakes.is_none() {
if epoch_stakes.is_none() {
continue;
}
let epoch_stakes = epoch_stakes.unwrap();
let epoch_vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
let total_epoch_stake = epoch_stakes.total_stake();
let unduplicated_pubkey = vote_tracker.keys.get_or_insert(&vote_pubkey);
// The last vote slot, which is the greatest slot in the stack
// of votes in a vote transaction, qualifies for optimistic confirmation.
let update_optimistic_confirmation_info = if slot == last_vote_slot {
let stake = epoch_vote_accounts
if slot == last_vote_slot {
let vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
let stake = vote_accounts
.get(&vote_pubkey)
.map(|(stake, _)| *stake)
.unwrap_or(0);
Some((stake, last_vote_hash))
} else {
None
};
.unwrap_or_default();
let total_stake = epoch_stakes.total_stake();
// If this vote for this slot qualifies for optimistic confirmation
if let Some((stake, hash)) = update_optimistic_confirmation_info {
// Fast track processing of the last slot in a vote transaction
// so that notifications for optimistic confirmation can be sent
// as soon as possible.
let (is_confirmed, is_new) = Self::add_optimistic_confirmation_vote(
let (is_confirmed, is_new) = Self::track_optimistic_confirmation_vote(
vote_tracker,
*slot,
hash,
last_vote_slot,
last_vote_hash,
unduplicated_pubkey.clone(),
stake,
total_epoch_stake,
total_stake,
);
if is_confirmed {
new_optimistic_confirmed_slots.push((*slot, last_vote_hash));
new_optimistic_confirmed_slots.push((last_vote_slot, last_vote_hash));
// Notify subscribers about new optimistic confirmation
subscriptions.notify_gossip_subscribers(*slot);
if let Some(sender) = bank_notification_sender {
sender
.send(BankNotification::OptimisticallyConfirmed(last_vote_slot))
.unwrap_or_else(|err| {
warn!("bank_notification_sender failed: {:?}", err)
});
}
}
if !is_new && !is_gossip_vote {
// By now:
// 1) The vote must have come from ReplayStage,
// 2) We've seen this vote from replay for this hash before
// (`add_optimistic_confirmation_vote()` will not set `is_new == true`
// (`track_optimistic_confirmation_vote()` will not set `is_new == true`
// for same slot different hash), so short circuit because this vote
// has no new information
@ -614,7 +623,7 @@ impl ClusterInfoVoteListener {
is_new_vote = is_new;
}
diff.entry(*slot)
diff.entry(slot)
.or_default()
.entry(unduplicated_pubkey)
.and_modify(|seen_in_gossip_previously| {
@ -629,13 +638,47 @@ impl ClusterInfoVoteListener {
}
}
fn process_votes(
fn filter_gossip_votes(
vote_tracker: &VoteTracker,
vote_pubkey: &Pubkey,
vote: &Vote,
gossip_tx: &Transaction,
) -> bool {
if vote.slots.is_empty() {
return false;
}
let last_vote_slot = vote.slots.last().unwrap();
// Votes from gossip need to be verified as they have not been
// verified by the replay pipeline. Determine the authorized voter
// based on the last vote slot. This will drop votes from authorized
// voters trying to make votes for slots earlier than the epoch for
// which they are authorized
let actual_authorized_voter =
vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);
if actual_authorized_voter.is_none() {
return false;
}
// Voting without the correct authorized pubkey, dump the vote
if !VoteTracker::vote_contains_authorized_voter(
&gossip_tx,
&actual_authorized_voter.unwrap(),
) {
return false;
}
true
}
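
filter_gossip_votes above drops gossip votes that are not signed by the authorized voter for the last slot in the vote. Below is a hedged sketch of the same check against plain std collections; the (pubkey, slot) keyed map is a simplification, since the real VoteTracker::get_authorized_voter resolves the slot's epoch first and vote_contains_authorized_voter walks the transaction's account keys and signatures.

use std::collections::{HashMap, HashSet};

type Pubkey = [u8; 32];
type Slot = u64;

// Sketch: authorized voters keyed by (vote account, slot) for simplicity;
// `tx_signers` stands in for the signed keys of the gossip transaction.
fn keep_gossip_vote(
    vote_slots: &[Slot],
    vote_pubkey: &Pubkey,
    authorized_voters: &HashMap<(Pubkey, Slot), Pubkey>,
    tx_signers: &HashSet<Pubkey>,
) -> bool {
    // Empty votes carry no information.
    let last_vote_slot = match vote_slots.last() {
        Some(slot) => *slot,
        None => return false,
    };
    // Unknown authorized voter for the last vote slot: drop the vote.
    match authorized_voters.get(&(*vote_pubkey, last_vote_slot)) {
        // Keep the vote only if that voter actually signed the transaction.
        Some(voter) => tx_signers.contains(voter),
        None => false,
    }
}

fn main() {
    let vote_pubkey = [1u8; 32];
    let voter = [2u8; 32];
    let mut authorized_voters = HashMap::new();
    authorized_voters.insert((vote_pubkey, 7), voter);
    let mut signers = HashSet::new();
    signers.insert(voter);
    assert!(keep_gossip_vote(&[5, 6, 7], &vote_pubkey, &authorized_voters, &signers));
    assert!(!keep_gossip_vote(&[], &vote_pubkey, &authorized_voters, &signers));
}
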
fn filter_and_confirm_with_new_votes(
vote_tracker: &VoteTracker,
gossip_vote_txs: Vec<Transaction>,
replayed_votes: Vec<ReplayedVote>,
root_bank: &Bank,
subscriptions: &RpcSubscriptions,
verified_vote_sender: &VerifiedVoteSender,
bank_notification_sender: &Option<BankNotificationSender>,
) -> Vec<(Slot, Hash)> {
let mut diff: HashMap<Slot, HashMap<Arc<Pubkey>, bool>> = HashMap::new();
let mut new_optimistic_confirmed_slots = vec![];
@ -646,37 +689,13 @@ impl ClusterInfoVoteListener {
.filter_map(|gossip_tx| {
vote_transaction::parse_vote_transaction(gossip_tx)
.filter(|(vote_pubkey, vote, _)| {
if vote.slots.is_empty() {
return false;
}
let last_vote_slot = vote.slots.last().unwrap();
// Votes from gossip need to be verified as they have not been
// verified by the replay pipeline. Determine the authorized voter
// based on the last vote slot. This will drop votes from authorized
// voters trying to make votes for slots earlier than the epoch for
// which they are authorized
let actual_authorized_voter =
vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);
if actual_authorized_voter.is_none() {
return false;
}
// Voting without the correct authorized pubkey, dump the vote
if !VoteTracker::vote_contains_authorized_voter(
&gossip_tx,
&actual_authorized_voter.unwrap(),
) {
return false;
}
true
Self::filter_gossip_votes(vote_tracker, vote_pubkey, vote, gossip_tx)
})
.map(|v| (true, v))
})
.chain(replayed_votes.into_iter().map(|v| (false, v)))
{
Self::update_new_votes(
Self::track_new_votes_and_notify_confirmations(
vote,
&vote_pubkey,
&vote_tracker,
@ -686,6 +705,7 @@ impl ClusterInfoVoteListener {
&mut diff,
&mut new_optimistic_confirmed_slots,
is_gossip,
bank_notification_sender,
);
}
@ -740,7 +760,7 @@ impl ClusterInfoVoteListener {
// Returns if the slot was optimistically confirmed, and whether
// the slot was new
fn add_optimistic_confirmation_vote(
fn track_optimistic_confirmation_vote(
vote_tracker: &VoteTracker,
slot: Slot,
hash: Hash,
@ -769,6 +789,7 @@ impl ClusterInfoVoteListener {
#[cfg(test)]
mod tests {
use super::*;
use crate::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank;
use solana_perf::packet;
use solana_runtime::{
bank::Bank,
@ -880,7 +901,7 @@ mod tests {
let (vote_tracker, bank, _, _) = setup();
// Check outdated slots are purged with new root
let new_voter = Arc::new(Pubkey::new_rand());
let new_voter = Arc::new(solana_sdk::pubkey::new_rand());
// Make separate copy so the original doesn't count toward
// the ref count, which would prevent cleanup
let new_voter_ = Arc::new(*new_voter);
@ -891,7 +912,7 @@ mod tests {
.unwrap()
.contains_key(&bank.slot()));
let bank1 = Bank::new_from_parent(&bank, &Pubkey::default(), bank.slot() + 1);
vote_tracker.process_new_root_bank(&bank1);
vote_tracker.progress_with_new_root_bank(&bank1);
assert!(!vote_tracker
.slot_vote_trackers
.read()
@ -908,7 +929,7 @@ mod tests {
bank.epoch_schedule()
.get_first_slot_in_epoch(current_epoch + 1),
);
vote_tracker.process_new_root_bank(&new_epoch_bank);
vote_tracker.progress_with_new_root_bank(&new_epoch_bank);
assert!(!vote_tracker.keys.0.read().unwrap().contains(&new_voter));
assert_eq!(
*vote_tracker.current_epoch.read().unwrap(),
@ -938,7 +959,7 @@ mod tests {
);
let next_leader_schedule_bank =
Bank::new_from_parent(&bank, &Pubkey::default(), next_leader_schedule_computed);
vote_tracker.update_leader_schedule_epoch(&next_leader_schedule_bank);
vote_tracker.progress_leader_schedule_epoch(&next_leader_schedule_bank);
assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(),
next_leader_schedule_epoch
@ -989,13 +1010,14 @@ mod tests {
&votes_sender,
&replay_votes_sender,
);
ClusterInfoVoteListener::get_and_process_votes(
ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_receiver,
&vote_tracker,
&bank3,
&subscriptions,
&verified_vote_sender,
&replay_votes_receiver,
&None,
)
.unwrap();
@ -1017,13 +1039,14 @@ mod tests {
&votes_sender,
&replay_votes_sender,
);
ClusterInfoVoteListener::get_and_process_votes(
ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_receiver,
&vote_tracker,
&bank3,
&subscriptions,
&verified_vote_sender,
&replay_votes_receiver,
&None,
)
.unwrap();
@ -1094,13 +1117,14 @@ mod tests {
);
// Check that all the votes were registered for each validator correctly
ClusterInfoVoteListener::get_and_process_votes(
ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_txs_receiver,
&vote_tracker,
&bank0,
&subscriptions,
&verified_vote_sender,
&replay_votes_receiver,
&None,
)
.unwrap();
@ -1212,13 +1236,14 @@ mod tests {
}
// Read and process votes from channel `votes_receiver`
ClusterInfoVoteListener::get_and_process_votes(
ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_txs_receiver,
&vote_tracker,
&bank0,
&subscriptions,
&verified_vote_sender,
&replay_votes_receiver,
&None,
)
.unwrap();
@ -1306,13 +1331,14 @@ mod tests {
))
.unwrap();
}
let _ = ClusterInfoVoteListener::get_and_process_votes(
let _ = ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_receiver,
&vote_tracker,
&bank,
&subscriptions,
&verified_vote_sender,
&replay_votes_receiver,
&None,
);
}
let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap();
@ -1423,13 +1449,16 @@ mod tests {
);
let bank = Bank::new(&genesis_config);
let exit = Arc::new(AtomicBool::new(false));
let bank_forks = BankForks::new(bank);
let bank = bank_forks.get(0).unwrap().clone();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let bank = bank_forks.read().unwrap().get(0).unwrap().clone();
let vote_tracker = VoteTracker::new(&bank);
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(bank_forks)),
bank_forks,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,
));
// Send a vote to process, should add a reference to the pubkey for that voter
@ -1448,7 +1477,7 @@ mod tests {
)];
let (verified_vote_sender, _verified_vote_receiver) = unbounded();
ClusterInfoVoteListener::process_votes(
ClusterInfoVoteListener::filter_and_confirm_with_new_votes(
&vote_tracker,
vote_tx,
// Add gossip vote for same slot, should not affect outcome
@ -1460,6 +1489,7 @@ mod tests {
&bank,
&subscriptions,
&verified_vote_sender,
&None,
);
let ref_count = Arc::strong_count(
&vote_tracker
@ -1518,7 +1548,7 @@ mod tests {
let new_root_bank =
Bank::new_from_parent(&bank, &Pubkey::default(), first_slot_in_new_epoch - 2);
ClusterInfoVoteListener::process_votes(
ClusterInfoVoteListener::filter_and_confirm_with_new_votes(
&vote_tracker,
vote_txs,
vec![(
@ -1529,6 +1559,7 @@ mod tests {
&new_root_bank,
&subscriptions,
&verified_vote_sender,
&None,
);
// Check new replay vote pubkey first
@ -1578,12 +1609,15 @@ mod tests {
let bank = Bank::new(&genesis_config);
let vote_tracker = VoteTracker::new(&bank);
let exit = Arc::new(AtomicBool::new(false));
let bank_forks = BankForks::new(bank);
let bank = bank_forks.get(0).unwrap().clone();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let bank = bank_forks.read().unwrap().get(0).unwrap().clone();
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(bank_forks)),
bank_forks,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,
));
// Integrity Checks
@ -1650,7 +1684,7 @@ mod tests {
fn run_test_verify_votes_1_pass(hash: Option<Hash>) {
let vote_tx = test_vote_tx(hash);
let votes = vec![vote_tx];
let labels = vec![CrdsValueLabel::Vote(0, Pubkey::new_rand())];
let labels = vec![CrdsValueLabel::Vote(0, solana_sdk::pubkey::new_rand())];
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels);
assert_eq!(vote_txs.len(), 1);
verify_packets_len(&packets, 1);
@ -1667,7 +1701,7 @@ mod tests {
let mut bad_vote = vote_tx.clone();
bad_vote.signatures[0] = Signature::default();
let votes = vec![vote_tx.clone(), bad_vote, vote_tx];
let label = CrdsValueLabel::Vote(0, Pubkey::new_rand());
let label = CrdsValueLabel::Vote(0, solana_sdk::pubkey::new_rand());
let labels: Vec<_> = (0..votes.len()).map(|_| label.clone()).collect();
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels);
assert_eq!(vote_txs.len(), 2);


@ -237,8 +237,8 @@ mod tests {
let mut c1 = ContactInfo::default();
let mut c2 = ContactInfo::default();
let mut map = HashMap::new();
let k1 = Pubkey::new_rand();
let k2 = Pubkey::new_rand();
let k1 = solana_sdk::pubkey::new_rand();
let k2 = solana_sdk::pubkey::new_rand();
map.insert(Arc::new(k1), std::u64::MAX / 2);
map.insert(Arc::new(k2), 0);
cs.cluster_slots
@ -259,8 +259,8 @@ mod tests {
let mut c1 = ContactInfo::default();
let mut c2 = ContactInfo::default();
let mut map = HashMap::new();
let k1 = Pubkey::new_rand();
let k2 = Pubkey::new_rand();
let k1 = solana_sdk::pubkey::new_rand();
let k2 = solana_sdk::pubkey::new_rand();
map.insert(Arc::new(k2), 0);
cs.cluster_slots
.write()
@ -290,7 +290,7 @@ mod tests {
let cs = ClusterSlots::default();
let mut contact_infos = vec![ContactInfo::default(); 2];
for ci in contact_infos.iter_mut() {
ci.id = Pubkey::new_rand();
ci.id = solana_sdk::pubkey::new_rand();
}
let slot = 9;
@ -359,7 +359,7 @@ mod tests {
let mut epoch_slot = EpochSlots::default();
epoch_slot.fill(&[1], 0);
cs.update_internal(0, (vec![epoch_slot], None));
let self_id = Pubkey::new_rand();
let self_id = solana_sdk::pubkey::new_rand();
assert_eq!(
cs.generate_repairs_for_missing_slots(&self_id, 0),
vec![RepairType::HighestShred(1, 0)]


@ -375,19 +375,22 @@ mod tests {
let rooted_stake_amount = 40;
let sk1 = Pubkey::new_rand();
let pk1 = Pubkey::new_rand();
let mut vote_account1 = vote_state::create_account(&pk1, &Pubkey::new_rand(), 0, 100);
let sk1 = solana_sdk::pubkey::new_rand();
let pk1 = solana_sdk::pubkey::new_rand();
let mut vote_account1 =
vote_state::create_account(&pk1, &solana_sdk::pubkey::new_rand(), 0, 100);
let stake_account1 =
stake_state::create_account(&sk1, &pk1, &vote_account1, &genesis_config.rent, 100);
let sk2 = Pubkey::new_rand();
let pk2 = Pubkey::new_rand();
let mut vote_account2 = vote_state::create_account(&pk2, &Pubkey::new_rand(), 0, 50);
let sk2 = solana_sdk::pubkey::new_rand();
let pk2 = solana_sdk::pubkey::new_rand();
let mut vote_account2 =
vote_state::create_account(&pk2, &solana_sdk::pubkey::new_rand(), 0, 50);
let stake_account2 =
stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
let sk3 = Pubkey::new_rand();
let pk3 = Pubkey::new_rand();
let mut vote_account3 = vote_state::create_account(&pk3, &Pubkey::new_rand(), 0, 1);
let sk3 = solana_sdk::pubkey::new_rand();
let pk3 = solana_sdk::pubkey::new_rand();
let mut vote_account3 =
vote_state::create_account(&pk3, &solana_sdk::pubkey::new_rand(), 0, 1);
let stake_account3 = stake_state::create_account(
&sk3,
&pk3,
@ -395,9 +398,10 @@ mod tests {
&genesis_config.rent,
rooted_stake_amount,
);
let sk4 = Pubkey::new_rand();
let pk4 = Pubkey::new_rand();
let mut vote_account4 = vote_state::create_account(&pk4, &Pubkey::new_rand(), 0, 1);
let sk4 = solana_sdk::pubkey::new_rand();
let pk4 = solana_sdk::pubkey::new_rand();
let mut vote_account4 =
vote_state::create_account(&pk4, &solana_sdk::pubkey::new_rand(), 0, 1);
let stake_account4 = stake_state::create_account(
&sk4,
&pk4,


@ -1,6 +1,7 @@
use crate::rpc_subscriptions::RpcSubscriptions;
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_ledger::blockstore::{Blockstore, CompletedDataSetInfo};
use solana_ledger::entry::Entry;
use solana_sdk::signature::Signature;
use std::{
sync::{
@ -61,10 +62,7 @@ impl CompletedDataSetsService {
} = completed_set_info;
match blockstore.get_entries_in_data_block(slot, start_index, end_index, None) {
Ok(entries) => {
let transactions = entries
.into_iter()
.flat_map(|e| e.transactions.into_iter().map(|t| t.signatures[0]))
.collect::<Vec<Signature>>();
let transactions = Self::get_transaction_signatures(entries);
if !transactions.is_empty() {
rpc_subscriptions.notify_signatures_received((slot, transactions));
}
@ -76,7 +74,51 @@ impl CompletedDataSetsService {
Ok(())
}
fn get_transaction_signatures(entries: Vec<Entry>) -> Vec<Signature> {
entries
.into_iter()
.flat_map(|e| {
e.transactions
.into_iter()
.filter_map(|mut t| t.signatures.drain(..).next())
})
.collect::<Vec<Signature>>()
}
pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}
#[cfg(test)]
pub mod test {
use super::*;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::transaction::Transaction;
#[test]
fn test_zero_signatures() {
let tx = Transaction::new_with_payer(&[], None);
let entries = vec![Entry::new(&Hash::default(), 1, vec![tx])];
let signatures = CompletedDataSetsService::get_transaction_signatures(entries);
assert!(signatures.is_empty());
}
#[test]
fn test_multi_signatures() {
let kp = Keypair::new();
let tx =
Transaction::new_signed_with_payer(&[], Some(&kp.pubkey()), &[&kp], Hash::default());
let entries = vec![Entry::new(&Hash::default(), 1, vec![tx.clone()])];
let signatures = CompletedDataSetsService::get_transaction_signatures(entries);
assert_eq!(signatures.len(), 1);
let entries = vec![
Entry::new(&Hash::default(), 1, vec![tx.clone(), tx.clone()]),
Entry::new(&Hash::default(), 1, vec![tx]),
];
let signatures = CompletedDataSetsService::get_transaction_signatures(entries);
assert_eq!(signatures.len(), 3);
}
}


@ -1013,7 +1013,7 @@ pub mod test {
&mut account.data,
)
.expect("serialize state");
stakes.push((Pubkey::new_rand(), (*lamports, account)));
stakes.push((solana_sdk::pubkey::new_rand(), (*lamports, account)));
}
stakes
}


@ -130,7 +130,7 @@ impl ContactInfo {
let addr = socketaddr!("224.0.1.255:1000");
assert!(addr.ip().is_multicast());
Self {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
gossip: addr,
tvu: addr,
tvu_forwards: addr,


@ -24,22 +24,25 @@
//! A value is updated to a new version if the labels match, and the value
//! wallclock is later, or the value hash is greater.
use crate::crds_gossip_pull::CrdsFilter;
use crate::crds_shards::CrdsShards;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
use bincode::serialize;
use indexmap::map::IndexMap;
use indexmap::map::{Entry, IndexMap};
use rayon::{prelude::*, ThreadPool};
use solana_sdk::hash::{hash, Hash};
use solana_sdk::pubkey::Pubkey;
use std::cmp;
use std::collections::HashMap;
use std::ops::Index;
const CRDS_SHARDS_BITS: u32 = 8;
#[derive(Clone)]
pub struct Crds {
/// Stores the map of labels and values
pub table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
pub num_inserts: usize,
pub masks: IndexMap<CrdsValueLabel, u64>,
pub shards: CrdsShards,
}
#[derive(PartialEq, Debug)]
@ -89,7 +92,7 @@ impl Default for Crds {
Crds {
table: IndexMap::new(),
num_inserts: 0,
masks: IndexMap::new(),
shards: CrdsShards::new(CRDS_SHARDS_BITS),
}
}
}
@ -103,19 +106,13 @@ impl Crds {
&self,
value: CrdsValue,
local_timestamp: u64,
) -> Option<VersionedCrdsValue> {
) -> (bool, VersionedCrdsValue) {
let new_value = self.new_versioned(local_timestamp, value);
let label = new_value.value.label();
let would_insert = self
.table
.get(&label)
.map(|current| new_value > *current)
.unwrap_or(true);
if would_insert {
Some(new_value)
} else {
None
}
// The new value is outdated and fails to insert if it already exists in
// the table with a more recent wallclock.
let outdated = matches!(self.table.get(&label), Some(current) if new_value <= *current);
(!outdated, new_value)
}
/// insert the new value, returns the old value if insert succeeds
pub fn insert_versioned(
@ -123,23 +120,28 @@ impl Crds {
new_value: VersionedCrdsValue,
) -> Result<Option<VersionedCrdsValue>, CrdsError> {
let label = new_value.value.label();
let wallclock = new_value.value.wallclock();
let do_insert = self
.table
.get(&label)
.map(|current| new_value > *current)
.unwrap_or(true);
if do_insert {
self.masks.insert(
label.clone(),
CrdsFilter::hash_as_u64(&new_value.value_hash),
);
let old = self.table.insert(label, new_value);
self.num_inserts += 1;
Ok(old)
} else {
trace!("INSERT FAILED data: {} new.wallclock: {}", label, wallclock,);
Err(CrdsError::InsertFailed)
match self.table.entry(label) {
Entry::Vacant(entry) => {
assert!(self.shards.insert(entry.index(), &new_value));
entry.insert(new_value);
self.num_inserts += 1;
Ok(None)
}
Entry::Occupied(mut entry) if *entry.get() < new_value => {
let index = entry.index();
assert!(self.shards.remove(index, entry.get()));
assert!(self.shards.insert(index, &new_value));
self.num_inserts += 1;
Ok(Some(entry.insert(new_value)))
}
_ => {
trace!(
"INSERT FAILED data: {} new.wallclock: {}",
new_value.value.label(),
new_value.value.wallclock(),
);
Err(CrdsError::InsertFailed)
}
}
}
pub fn insert(
@ -175,33 +177,40 @@ impl Crds {
/// * timeouts - Pubkey specific timeouts with Pubkey::default() as the default timeout.
pub fn find_old_labels(
&self,
thread_pool: &ThreadPool,
now: u64,
timeouts: &HashMap<Pubkey, u64>,
) -> Vec<CrdsValueLabel> {
let min_ts = *timeouts
let default_timeout = *timeouts
.get(&Pubkey::default())
.expect("must have default timeout");
self.table
.iter()
.filter_map(|(k, v)| {
if now < v.local_timestamp
|| (timeouts.get(&k.pubkey()).is_some()
&& now - v.local_timestamp < timeouts[&k.pubkey()])
{
None
} else if now - v.local_timestamp >= min_ts {
Some(k)
} else {
None
}
})
.cloned()
.collect()
thread_pool.install(|| {
self.table
.par_iter()
.with_min_len(1024)
.filter_map(|(k, v)| {
let timeout = timeouts.get(&k.pubkey()).unwrap_or(&default_timeout);
if v.local_timestamp.saturating_add(*timeout) <= now {
Some(k.clone())
} else {
None
}
})
.collect()
})
}
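
The reworked find_old_labels treats a value as expired once local_timestamp + timeout <= now, using the per-pubkey timeout when present and otherwise the mandatory Pubkey::default() entry, and runs the scan on a rayon thread pool. A small sketch of just the expiry predicate, with a u64 standing in for Pubkey:

use std::collections::HashMap;

type Pubkey = u64; // stand-in for solana_sdk::pubkey::Pubkey

const DEFAULT_KEY: Pubkey = 0;

// Returns true if a value owned by `pubkey`, last updated at
// `local_timestamp`, should be purged at time `now`.
fn is_expired(
    pubkey: Pubkey,
    local_timestamp: u64,
    now: u64,
    timeouts: &HashMap<Pubkey, u64>,
) -> bool {
    let default_timeout = timeouts[&DEFAULT_KEY]; // must have a default timeout
    let timeout = *timeouts.get(&pubkey).unwrap_or(&default_timeout);
    // saturating_add avoids overflow for u64::MAX timeouts (e.g. staked nodes).
    local_timestamp.saturating_add(timeout) <= now
}

fn main() {
    let mut timeouts = HashMap::new();
    timeouts.insert(DEFAULT_KEY, 1);
    timeouts.insert(42, u64::MAX); // staked node: never expires
    assert!(is_expired(7, 0, 2, &timeouts));   // falls back to the default timeout
    assert!(!is_expired(42, 0, 2, &timeouts)); // u64::MAX saturates, never expires
}
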
pub fn remove(&mut self, key: &CrdsValueLabel) {
self.table.swap_remove(key);
self.masks.swap_remove(key);
pub fn remove(&mut self, key: &CrdsValueLabel) -> Option<VersionedCrdsValue> {
let (index, _, value) = self.table.swap_remove_full(key)?;
assert!(self.shards.remove(index, &value));
// The previously last element in the table is now moved to the
// 'index' position. Shards need to be updated accordingly.
if index < self.table.len() {
let value = self.table.index(index);
assert!(self.shards.remove(self.table.len(), value));
assert!(self.shards.insert(index, value));
}
Some(value)
}
}
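
insert_versioned and remove above keep CrdsShards consistent with entry positions in the IndexMap: swap_remove_full moves the previously last entry into the vacated index, so its shard slot has to move as well. A standalone sketch of that index fix-up, assuming the indexmap crate and using a toy position-to-key map in place of CrdsShards:

use indexmap::IndexMap;
use std::collections::HashMap;

// Toy stand-in for CrdsShards: tracks which key lives at which table index.
#[derive(Default)]
struct Shards(HashMap<usize, String>);

impl Shards {
    fn insert(&mut self, index: usize, key: &str) {
        self.0.insert(index, key.to_string());
    }
    fn remove(&mut self, index: usize) {
        self.0.remove(&index);
    }
}

fn remove(table: &mut IndexMap<String, u64>, shards: &mut Shards, key: &str) -> Option<u64> {
    let (index, _key, value) = table.swap_remove_full(key)?;
    shards.remove(index);
    // swap_remove moved the previously last entry into `index`;
    // mirror that move in the shard index.
    if index < table.len() {
        let (moved_key, _) = table.get_index(index).unwrap();
        shards.remove(table.len());
        shards.insert(index, moved_key);
    }
    Some(value)
}

fn main() {
    let mut table = IndexMap::new();
    let mut shards = Shards::default();
    for (i, k) in ["a", "b", "c"].iter().copied().enumerate() {
        table.insert(k.to_string(), i as u64);
        shards.insert(i, k);
    }
    assert_eq!(remove(&mut table, &mut shards, "a"), Some(0));
    // "c" was swapped into index 0; the shard index reflects that move.
    assert_eq!(shards.0.get(&0).map(String::as_str), Some("c"));
}
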
@ -210,6 +219,8 @@ mod test {
use super::*;
use crate::contact_info::ContactInfo;
use crate::crds_value::CrdsData;
use rand::{thread_rng, Rng};
use rayon::ThreadPoolBuilder;
#[test]
fn test_insert() {
@ -282,30 +293,67 @@ mod test {
}
#[test]
fn test_find_old_records_default() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(crds.insert(val.clone(), 1), Ok(None));
let mut set = HashMap::new();
set.insert(Pubkey::default(), 0);
assert!(crds.find_old_labels(0, &set).is_empty());
assert!(crds.find_old_labels(&thread_pool, 0, &set).is_empty());
set.insert(Pubkey::default(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
set.insert(Pubkey::default(), 2);
assert_eq!(crds.find_old_labels(4, &set), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 4, &set),
vec![val.label()]
);
}
#[test]
fn test_find_old_records_with_override() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut rng = thread_rng();
let mut crds = Crds::default();
let mut timeouts = HashMap::new();
let val = CrdsValue::new_rand(&mut rng);
timeouts.insert(Pubkey::default(), 3);
assert_eq!(crds.insert(val.clone(), 0), Ok(None));
assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
timeouts.insert(val.pubkey(), 1);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &timeouts),
vec![val.label()]
);
timeouts.insert(val.pubkey(), u64::MAX);
assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
timeouts.insert(Pubkey::default(), 1);
assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
timeouts.remove(&val.pubkey());
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &timeouts),
vec![val.label()]
);
}
#[test]
fn test_remove_default() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_matches!(crds.insert(val.clone(), 1), Ok(_));
let mut set = HashMap::new();
set.insert(Pubkey::default(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
crds.remove(&val.label());
assert!(crds.find_old_labels(2, &set).is_empty());
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
}
#[test]
fn test_find_old_records_staked() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(crds.insert(val.clone(), 1), Ok(None));
@ -313,24 +361,72 @@ mod test {
//now < timestamp
set.insert(Pubkey::default(), 0);
set.insert(val.pubkey(), 0);
assert!(crds.find_old_labels(0, &set).is_empty());
assert!(crds.find_old_labels(&thread_pool, 0, &set).is_empty());
//pubkey shouldn't expire since its timeout is MAX
set.insert(val.pubkey(), std::u64::MAX);
assert!(crds.find_old_labels(2, &set).is_empty());
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
//default has max timeout, but pubkey should still expire
set.insert(Pubkey::default(), std::u64::MAX);
set.insert(val.pubkey(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
set.insert(val.pubkey(), 2);
assert!(crds.find_old_labels(2, &set).is_empty());
assert_eq!(crds.find_old_labels(3, &set), vec![val.label()]);
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
assert_eq!(
crds.find_old_labels(&thread_pool, 3, &set),
vec![val.label()]
);
}
#[test]
fn test_crds_shards() {
fn check_crds_shards(crds: &Crds) {
crds.shards
.check(&crds.table.values().cloned().collect::<Vec<_>>());
}
let mut crds = Crds::default();
let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
.take(256)
.collect();
let mut rng = thread_rng();
let mut num_inserts = 0;
for _ in 0..4096 {
let pubkey = pubkeys[rng.gen_range(0, pubkeys.len())];
let value = VersionedCrdsValue::new(
rng.gen(), // local_timestamp
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&pubkey,
rng.gen(), // now
))),
);
if crds.insert_versioned(value).is_ok() {
check_crds_shards(&crds);
num_inserts += 1;
}
}
assert_eq!(num_inserts, crds.num_inserts);
assert!(num_inserts > 700);
assert!(crds.table.len() > 200);
assert!(num_inserts > crds.table.len());
check_crds_shards(&crds);
// Remove values one by one and assert that shards stay valid.
while !crds.table.is_empty() {
let index = rng.gen_range(0, crds.table.len());
let key = crds.table.get_index(index).unwrap().0.clone();
crds.remove(&key);
check_crds_shards(&crds);
}
}
#[test]
fn test_remove_staked() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_matches!(crds.insert(val.clone(), 1), Ok(_));
@ -339,9 +435,12 @@ mod test {
//default has max timeout, but pubkey should still expire
set.insert(Pubkey::default(), std::u64::MAX);
set.insert(val.pubkey(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
crds.remove(&val.label());
assert!(crds.find_old_labels(2, &set).is_empty());
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
}
#[test]
@ -421,14 +520,14 @@ mod test {
let v1 = VersionedCrdsValue::new(
1,
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
))),
);
let v2 = VersionedCrdsValue::new(
1,
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
))),
);


@ -10,7 +10,8 @@ use crate::{
crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE},
crds_value::{CrdsValue, CrdsValueLabel},
};
use solana_sdk::pubkey::Pubkey;
use rayon::ThreadPool;
use solana_sdk::{hash::Hash, pubkey::Pubkey};
use std::collections::{HashMap, HashSet};
///The min size for bloom filters
@ -134,12 +135,14 @@ impl CrdsGossip {
/// generate a random request
pub fn new_pull_request(
&self,
thread_pool: &ThreadPool,
now: u64,
gossip_validators: Option<&HashSet<Pubkey>>,
stakes: &HashMap<Pubkey, u64>,
bloom_size: usize,
) -> Result<(Pubkey, Vec<CrdsFilter>, CrdsValue), CrdsGossipError> {
self.pull.new_pull_request(
thread_pool,
&self.crds,
&self.id,
self.shred_version,
@ -158,9 +161,12 @@ impl CrdsGossip {
self.pull.mark_pull_request_creation_time(from, now)
}
/// process a pull request and create a response
pub fn process_pull_requests(&mut self, filters: Vec<(CrdsValue, CrdsFilter)>, now: u64) {
pub fn process_pull_requests<I>(&mut self, callers: I, now: u64)
where
I: IntoIterator<Item = CrdsValue>,
{
self.pull
.process_pull_requests(&mut self.crds, filters, now);
.process_pull_requests(&mut self.crds, callers, now);
}
pub fn generate_pull_responses(
@ -177,7 +183,7 @@ impl CrdsGossip {
response: Vec<CrdsValue>,
now: u64,
process_pull_stats: &mut ProcessPullStats,
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>, Vec<Hash>) {
self.pull
.filter_pull_responses(&self.crds, timeouts, response, now, process_pull_stats)
}
@ -188,6 +194,7 @@ impl CrdsGossip {
from: &Pubkey,
responses: Vec<VersionedCrdsValue>,
responses_expired_timeout: Vec<VersionedCrdsValue>,
failed_inserts: Vec<Hash>,
now: u64,
process_pull_stats: &mut ProcessPullStats,
) {
@ -196,6 +203,7 @@ impl CrdsGossip {
from,
responses,
responses_expired_timeout,
failed_inserts,
now,
process_pull_stats,
);
@ -214,7 +222,12 @@ impl CrdsGossip {
self.pull.make_timeouts(&self.id, stakes, epoch_ms)
}
pub fn purge(&mut self, now: u64, timeouts: &HashMap<Pubkey, u64>) -> usize {
pub fn purge(
&mut self,
thread_pool: &ThreadPool,
now: u64,
timeouts: &HashMap<Pubkey, u64>,
) -> usize {
let mut rv = 0;
if now > self.push.msg_timeout {
let min = now - self.push.msg_timeout;
@ -229,12 +242,15 @@ impl CrdsGossip {
let min = self.pull.crds_timeout;
assert_eq!(timeouts[&self.id], std::u64::MAX);
assert_eq!(timeouts[&Pubkey::default()], min);
rv = self.pull.purge_active(&mut self.crds, now, &timeouts);
rv = self
.pull
.purge_active(thread_pool, &mut self.crds, now, &timeouts);
}
if now > 5 * self.pull.crds_timeout {
let min = now - 5 * self.pull.crds_timeout;
self.pull.purge_purged(min);
}
self.pull.purge_failed_inserts(now);
rv
}
}


@ -16,17 +16,21 @@ use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
use rand::distributions::{Distribution, WeightedIndex};
use rand::Rng;
use solana_runtime::bloom::Bloom;
use rayon::{prelude::*, ThreadPool};
use solana_runtime::bloom::{AtomicBloom, Bloom};
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use std::cmp;
use std::collections::VecDeque;
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
use std::ops::Index;
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
// The maximum age of a value received over pull responses
pub const CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS: u64 = 60000;
// Retention period of hashes of received outdated values.
const FAILED_INSERTS_RETENTION_MS: u64 = 20_000;
pub const FALSE_RATE: f64 = 0.1f64;
pub const KEYS: f64 = 8f64;
@ -74,7 +78,7 @@ impl CrdsFilter {
let seed: u64 = seed.checked_shl(64 - mask_bits).unwrap_or(0x0);
seed | (!0u64).checked_shr(mask_bits).unwrap_or(!0x0) as u64
}
pub fn max_items(max_bits: f64, false_rate: f64, num_keys: f64) -> f64 {
fn max_items(max_bits: f64, false_rate: f64, num_keys: f64) -> f64 {
let m = max_bits;
let p = false_rate;
let k = num_keys;
@ -88,62 +92,69 @@ impl CrdsFilter {
let buf = item.as_ref()[..8].try_into().unwrap();
u64::from_le_bytes(buf)
}
pub fn test_mask_u64(&self, item: u64, ones: u64) -> bool {
let bits = item | ones;
bits == self.mask
}
pub fn test_mask(&self, item: &Hash) -> bool {
fn test_mask(&self, item: &Hash) -> bool {
// only consider the highest mask_bits bits from the hash and set the rest to 1.
let ones = (!0u64).checked_shr(self.mask_bits).unwrap_or(!0u64);
let bits = Self::hash_as_u64(item) | ones;
bits == self.mask
}
pub fn add(&mut self, item: &Hash) {
#[cfg(test)]
fn add(&mut self, item: &Hash) {
if self.test_mask(item) {
self.filter.add(item);
}
}
pub fn contains(&self, item: &Hash) -> bool {
#[cfg(test)]
fn contains(&self, item: &Hash) -> bool {
if !self.test_mask(item) {
return true;
}
self.filter.contains(item)
}
pub fn filter_contains(&self, item: &Hash) -> bool {
fn filter_contains(&self, item: &Hash) -> bool {
self.filter.contains(item)
}
}
/// A vector of crds filters that together hold a complete set of Hashes.
struct CrdsFilterSet(Vec<CrdsFilter>);
struct CrdsFilterSet {
filters: Vec<AtomicBloom<Hash>>,
mask_bits: u32,
}
impl CrdsFilterSet {
fn new(num_items: usize, max_bytes: usize) -> Self {
let max_bits = (max_bytes * 8) as f64;
let max_items = CrdsFilter::max_items(max_bits, FALSE_RATE, KEYS);
let mask_bits = CrdsFilter::mask_bits(num_items as f64, max_items as f64);
// For each possible mask combination, generate a new filter.
let seeds = 0..2u64.pow(mask_bits);
let filters = seeds.map(|seed| {
let filter = Bloom::random(max_items as usize, FALSE_RATE, max_bits as usize);
let mask = CrdsFilter::compute_mask(seed, mask_bits);
CrdsFilter {
filter,
mask,
mask_bits,
}
});
Self(filters.collect())
let filters = std::iter::repeat_with(|| {
Bloom::random(max_items as usize, FALSE_RATE, max_bits as usize).into()
})
.take(1 << mask_bits)
.collect();
Self { filters, mask_bits }
}
// Returns the filter within the vector of crds filters which corresponds
// to the given hash value.
fn get(&mut self, hash_value: &Hash) -> Option<&mut CrdsFilter> {
let shift = 64 - self.0.first()?.mask_bits.min(64);
let index = CrdsFilter::hash_as_u64(hash_value)
.checked_shr(shift)
.unwrap_or(0u64);
self.0.get_mut(index as usize)
fn add(&self, hash_value: Hash) {
let index = CrdsFilter::hash_as_u64(&hash_value)
.checked_shr(64 - self.mask_bits)
.unwrap_or(0);
self.filters[index as usize].add(&hash_value);
}
}
impl Into<Vec<CrdsFilter>> for CrdsFilterSet {
fn into(self) -> Vec<CrdsFilter> {
let mask_bits = self.mask_bits;
self.filters
.into_iter()
.enumerate()
.map(|(seed, filter)| CrdsFilter {
filter: filter.into(),
mask: CrdsFilter::compute_mask(seed as u64, mask_bits),
mask_bits,
})
.collect()
}
}
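
CrdsFilterSet above keeps 2^mask_bits bloom filters and routes each hash to the filter indexed by the top mask_bits bits of the hash's first eight little-endian bytes; the Into conversion then recomputes each filter's mask from its index. A worked sketch of just that index/mask arithmetic, with no bloom filters involved:

use std::convert::TryInto;

// First 8 bytes of the hash, interpreted as a little-endian u64
// (mirrors CrdsFilter::hash_as_u64).
fn hash_as_u64(hash: &[u8; 32]) -> u64 {
    u64::from_le_bytes(hash[..8].try_into().unwrap())
}

// Index of the filter that owns this hash: its top `mask_bits` bits.
fn filter_index(hash: &[u8; 32], mask_bits: u32) -> usize {
    hash_as_u64(hash).checked_shr(64 - mask_bits).unwrap_or(0) as usize
}

// Mask stored on the filter at `seed`: the seed in the top bits, ones below
// (mirrors CrdsFilter::compute_mask).
fn compute_mask(seed: u64, mask_bits: u32) -> u64 {
    let seed = seed.checked_shl(64 - mask_bits).unwrap_or(0);
    seed | (!0u64).checked_shr(mask_bits).unwrap_or(!0)
}

fn main() {
    let mask_bits = 3; // 8 filters
    let mut hash = [0u8; 32];
    hash[7] = 0b1010_0000; // high byte of the little-endian u64
    let index = filter_index(&hash, mask_bits);
    assert_eq!(index, 0b101);
    // The owning filter's mask carries the same top bits as the hash.
    let mask = compute_mask(index as u64, mask_bits);
    assert_eq!(mask >> (64 - mask_bits), 0b101);
}
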
@ -161,6 +172,11 @@ pub struct CrdsGossipPull {
pub pull_request_time: HashMap<Pubkey, u64>,
/// hash and insert time
pub purged_values: VecDeque<(Hash, u64)>,
// Hash value and record time (ms) of the pull responses which failed to be
// inserted in the crds table; preserved to stop the sender from sending back
// the same outdated payload again by adding them to the filter for the next
// pull request.
pub failed_inserts: VecDeque<(Hash, u64)>,
pub crds_timeout: u64,
pub msg_timeout: u64,
pub num_pulls: usize,
@ -171,6 +187,7 @@ impl Default for CrdsGossipPull {
Self {
purged_values: VecDeque::new(),
pull_request_time: HashMap::new(),
failed_inserts: VecDeque::new(),
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
num_pulls: 0,
@ -181,6 +198,7 @@ impl CrdsGossipPull {
/// generate a random request
pub fn new_pull_request(
&self,
thread_pool: &ThreadPool,
crds: &Crds,
self_id: &Pubkey,
self_shred_version: u16,
@ -200,7 +218,7 @@ impl CrdsGossipPull {
if options.is_empty() {
return Err(CrdsGossipError::NoPeers);
}
let filters = self.build_crds_filters(crds, bloom_size);
let filters = self.build_crds_filters(thread_pool, crds, bloom_size);
let index = WeightedIndex::new(options.iter().map(|weighted| weighted.0)).unwrap();
let random = index.sample(&mut rand::thread_rng());
let self_info = crds
@ -253,21 +271,18 @@ impl CrdsGossipPull {
}
/// process a pull request
pub fn process_pull_requests(
&mut self,
crds: &mut Crds,
requests: Vec<(CrdsValue, CrdsFilter)>,
now: u64,
) {
requests.into_iter().for_each(|(caller, _)| {
pub fn process_pull_requests<I>(&mut self, crds: &mut Crds, callers: I, now: u64)
where
I: IntoIterator<Item = CrdsValue>,
{
for caller in callers {
let key = caller.label().pubkey();
let old = crds.insert(caller, now);
if let Some(val) = old.ok().and_then(|opt| opt) {
if let Ok(Some(val)) = crds.insert(caller, now) {
self.purged_values
.push_back((val.value_hash, val.local_timestamp));
}
crds.update_record_timestamp(&key, now);
});
}
}
/// Create gossip responses to pull requests
@ -282,9 +297,10 @@ impl CrdsGossipPull {
// Checks if responses should be inserted and
// returns those responses converted to VersionedCrdsValue
// Separated in two vecs as:
// Separated in three vecs as:
// .0 => responses that update the owner timestamp
// .1 => responses that do not update the owner timestamp
// .2 => hash value of outdated values which will fail to insert.
pub fn filter_pull_responses(
&self,
crds: &Crds,
@ -292,9 +308,18 @@ impl CrdsGossipPull {
responses: Vec<CrdsValue>,
now: u64,
stats: &mut ProcessPullStats,
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>, Vec<Hash>) {
let mut versioned = vec![];
let mut versioned_expired_timestamp = vec![];
let mut failed_inserts = vec![];
let mut maybe_push = |response, values: &mut Vec<VersionedCrdsValue>| {
let (push, value) = crds.would_insert(response, now);
if push {
values.push(value);
} else {
failed_inserts.push(value.value_hash)
}
};
for r in responses {
let owner = r.label().pubkey();
// Check if the crds value is older than the msg_timeout
@ -325,24 +350,17 @@ impl CrdsGossipPull {
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
stats.timeout_count += 1;
stats.failed_timeout += 1;
continue;
} else {
// Silently insert this old value without bumping record timestamps
match crds.would_insert(r, now) {
Some(resp) => versioned_expired_timestamp.push(resp),
None => stats.failed_insert += 1,
}
continue;
maybe_push(r, &mut versioned_expired_timestamp);
}
continue;
}
}
}
match crds.would_insert(r, now) {
Some(resp) => versioned.push(resp),
None => stats.failed_insert += 1,
}
maybe_push(r, &mut versioned);
}
(versioned, versioned_expired_timestamp)
(versioned, versioned_expired_timestamp, failed_inserts)
}
/// process a vec of pull responses
@ -352,63 +370,93 @@ impl CrdsGossipPull {
from: &Pubkey,
responses: Vec<VersionedCrdsValue>,
responses_expired_timeout: Vec<VersionedCrdsValue>,
mut failed_inserts: Vec<Hash>,
now: u64,
stats: &mut ProcessPullStats,
) -> Vec<(CrdsValueLabel, Hash, u64)> {
let mut success = vec![];
let mut owners = HashSet::new();
for r in responses_expired_timeout {
stats.failed_insert += crds.insert_versioned(r).is_err() as usize;
let value_hash = r.value_hash;
if crds.insert_versioned(r).is_err() {
failed_inserts.push(value_hash);
}
}
for r in responses {
let owner = r.value.label().pubkey();
let label = r.value.label();
let wc = r.value.wallclock();
let hash = r.value_hash;
let old = crds.insert_versioned(r);
if old.is_err() {
stats.failed_insert += 1;
} else {
stats.success += 1;
self.num_pulls += 1;
success.push((label, hash, wc));
match crds.insert_versioned(r) {
Err(_) => failed_inserts.push(hash),
Ok(old) => {
stats.success += 1;
self.num_pulls += 1;
owners.insert(label.pubkey());
success.push((label, hash, wc));
if let Some(val) = old {
self.purged_values
.push_back((val.value_hash, val.local_timestamp))
}
}
}
old.ok().map(|opt| {
owners.insert(owner);
opt.map(|val| {
self.purged_values
.push_back((val.value_hash, val.local_timestamp))
})
});
}
owners.insert(*from);
for owner in owners {
crds.update_record_timestamp(&owner, now);
}
stats.failed_insert += failed_inserts.len();
self.purge_failed_inserts(now);
self.failed_inserts
.extend(failed_inserts.into_iter().zip(std::iter::repeat(now)));
success
}
pub fn purge_failed_inserts(&mut self, now: u64) {
if FAILED_INSERTS_RETENTION_MS < now {
let cutoff = now - FAILED_INSERTS_RETENTION_MS;
let outdated = self
.failed_inserts
.iter()
.take_while(|(_, ts)| *ts < cutoff)
.count();
self.failed_inserts.drain(..outdated);
}
}
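A minimal sketch, not part of the diff, of the bookkeeping purge_failed_inserts relies on: because entries are appended in timestamp order, the stale prefix can be counted with take_while and removed with a single drain. The tuple layout and cutoff used here are illustrative.
use std::collections::VecDeque;
fn purge_stale(queue: &mut VecDeque<(u64, u64)>, cutoff: u64) {
    // (value, timestamp) pairs; timestamps are non-decreasing.
    let stale = queue.iter().take_while(|(_, ts)| *ts < cutoff).count();
    queue.drain(..stale);
}
fn main() {
    let mut queue: VecDeque<(u64, u64)> = (0..5).map(|i| (i, i * 10)).collect();
    purge_stale(&mut queue, 25); // drops the entries with timestamps 0, 10, 20
    assert_eq!(queue.len(), 2);
    assert_eq!(queue.front(), Some(&(3, 30)));
}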
// build a set of filters of the current crds table
// num_filters - used to increase the likelihood of a value in crds being added to some filter
pub fn build_crds_filters(&self, crds: &Crds, bloom_size: usize) -> Vec<CrdsFilter> {
pub fn build_crds_filters(
&self,
thread_pool: &ThreadPool,
crds: &Crds,
bloom_size: usize,
) -> Vec<CrdsFilter> {
const PAR_MIN_LENGTH: usize = 512;
let num = cmp::max(
CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS,
crds.table.values().count() + self.purged_values.len(),
crds.table.len() + self.purged_values.len() + self.failed_inserts.len(),
);
let mut filters = CrdsFilterSet::new(num, bloom_size);
let mut add_value_hash = |value_hash| {
if let Some(filter) = filters.get(value_hash) {
debug_assert!(filter.test_mask(value_hash));
filter.filter.add(value_hash);
}
};
for v in crds.table.values() {
add_value_hash(&v.value_hash);
}
for (value_hash, _insert_timestamp) in &self.purged_values {
add_value_hash(&value_hash);
}
filters.0
let filters = CrdsFilterSet::new(num, bloom_size);
thread_pool.install(|| {
crds.table
.par_values()
.with_min_len(PAR_MIN_LENGTH)
.map(|v| v.value_hash)
.chain(
self.purged_values
.par_iter()
.with_min_len(PAR_MIN_LENGTH)
.map(|(v, _)| *v),
)
.chain(
self.failed_inserts
.par_iter()
.with_min_len(PAR_MIN_LENGTH)
.map(|(v, _)| *v),
)
.for_each(|v| filters.add(v));
});
filters.into()
}
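A minimal sketch, not part of the diff above, of the rayon pattern that build_crds_filters now uses: install the work into an explicit ThreadPool and chain parallel iterators with a minimum split length. The pool size, the data, and the 512 threshold here are illustrative.
use rayon::prelude::*;
use rayon::ThreadPoolBuilder;
fn main() {
    // Build an explicit pool instead of using the global one.
    let thread_pool = ThreadPoolBuilder::new().num_threads(4).build().unwrap();
    let table: Vec<u64> = (0..10_000).collect();
    let purged: Vec<u64> = (10_000..12_000).collect();
    let total: u64 = thread_pool.install(|| {
        table
            .par_iter()
            .with_min_len(512) // avoid splitting into tiny work units
            .chain(purged.par_iter().with_min_len(512))
            .copied()
            .sum()
    });
    assert_eq!(total, (0..12_000u64).sum::<u64>());
}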
/// filter values that fail the bloom filter up to max_bytes
@ -418,52 +466,44 @@ impl CrdsGossipPull {
filters: &[(CrdsValue, CrdsFilter)],
now: u64,
) -> Vec<Vec<CrdsValue>> {
let mut ret = vec![vec![]; filters.len()];
let msg_timeout = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
let jitter = rand::thread_rng().gen_range(0, msg_timeout / 4);
let start = filters.len();
//skip filters from callers that are too old
let future = now.saturating_add(msg_timeout);
let past = now.saturating_sub(msg_timeout);
let recent: Vec<_> = filters
let mut dropped_requests = 0;
let mut total_skipped = 0;
let ret = filters
.iter()
.enumerate()
.filter(|(_, (caller, _))| caller.wallclock() < future && caller.wallclock() >= past)
.map(|(caller, filter)| {
let caller_wallclock = caller.wallclock();
if caller_wallclock >= future || caller_wallclock < past {
dropped_requests += 1;
return vec![];
}
let caller_wallclock = caller_wallclock.checked_add(jitter).unwrap_or(0);
crds.shards
.find(filter.mask, filter.mask_bits)
.filter_map(|index| {
let item = crds.table.index(index);
debug_assert!(filter.test_mask(&item.value_hash));
//skip values that are too new
if item.value.wallclock() > caller_wallclock {
total_skipped += 1;
None
} else if filter.filter_contains(&item.value_hash) {
None
} else {
Some(item.value.clone())
}
})
.collect()
})
.collect();
inc_new_counter_info!(
"gossip_filter_crds_values-dropped_requests",
start - recent.len()
dropped_requests
);
if recent.is_empty() {
return ret;
}
let mut total_skipped = 0;
let mask_ones: Vec<_> = recent
.iter()
.map(|(_i, (_caller, filter))| (!0u64).checked_shr(filter.mask_bits).unwrap_or(!0u64))
.collect();
for (label, mask) in crds.masks.iter() {
recent
.iter()
.zip(mask_ones.iter())
.for_each(|((i, (caller, filter)), mask_ones)| {
if filter.test_mask_u64(*mask, *mask_ones) {
let item = crds.table.get(label).unwrap();
//skip values that are too new
if item.value.wallclock()
> caller.wallclock().checked_add(jitter).unwrap_or_else(|| 0)
{
total_skipped += 1;
return;
}
if !filter.filter_contains(&item.value_hash) {
ret[*i].push(item.value.clone());
}
}
});
}
inc_new_counter_info!("gossip_filter_crds_values-dropped_values", total_skipped);
ret
}
@ -493,24 +533,21 @@ impl CrdsGossipPull {
/// The value_hash of an active item is put into self.purged_values queue
pub fn purge_active(
&mut self,
thread_pool: &ThreadPool,
crds: &mut Crds,
now: u64,
timeouts: &HashMap<Pubkey, u64>,
) -> usize {
let old = crds.find_old_labels(now, timeouts);
let mut purged: VecDeque<_> = old
.iter()
.filter_map(|label| {
let rv = crds
.lookup_versioned(label)
.map(|val| (val.value_hash, val.local_timestamp));
crds.remove(label);
rv
})
.collect();
let ret = purged.len();
self.purged_values.append(&mut purged);
ret
let num_purged_values = self.purged_values.len();
self.purged_values.extend(
crds.find_old_labels(thread_pool, now, timeouts)
.into_iter()
.filter_map(|label| {
let val = crds.remove(&label)?;
Some((val.value_hash, val.local_timestamp))
}),
);
self.purged_values.len() - num_purged_values
}
/// Purge values from the `self.purged_values` queue that are older than purge_timeout
pub fn purge_purged(&mut self, min_ts: u64) {
@ -533,13 +570,14 @@ impl CrdsGossipPull {
now: u64,
) -> (usize, usize, usize) {
let mut stats = ProcessPullStats::default();
let (versioned, versioned_expired_timeout) =
let (versioned, versioned_expired_timeout, failed_inserts) =
self.filter_pull_responses(crds, timeouts, response, now, &mut stats);
self.process_pull_responses(
crds,
from,
versioned,
versioned_expired_timeout,
failed_inserts,
now,
&mut stats,
);
@ -553,10 +591,12 @@ impl CrdsGossipPull {
#[cfg(test)]
mod test {
use super::*;
use crate::cluster_info::MAX_BLOOM_SIZE;
use crate::contact_info::ContactInfo;
use crate::crds_value::{CrdsData, Vote};
use itertools::Itertools;
use rand::{thread_rng, RngCore};
use rand::thread_rng;
use rayon::ThreadPoolBuilder;
use solana_perf::test_tx::test_tx;
use solana_sdk::hash::{hash, HASH_BYTES};
use solana_sdk::packet::PACKET_DATA_SIZE;
@ -579,9 +619,7 @@ mod test {
}
let mut rng = thread_rng();
for _ in 0..100 {
let mut buf = [0u8; HASH_BYTES];
rng.fill(&mut buf);
let hash = Hash::new(&buf);
let hash = solana_sdk::hash::new_rand(&mut rng);
assert_eq!(CrdsFilter::hash_as_u64(&hash), hash_as_u64_bitops(&hash));
}
}
@ -593,9 +631,7 @@ mod test {
assert_eq!(filter.mask, mask);
let mut rng = thread_rng();
for _ in 0..10 {
let mut buf = [0u8; HASH_BYTES];
rng.fill(&mut buf);
let hash = Hash::new(&buf);
let hash = solana_sdk::hash::new_rand(&mut rng);
assert!(filter.test_mask(&hash));
}
}
@ -606,13 +642,13 @@ mod test {
let mut stakes = HashMap::new();
let node = CrdsGossipPull::default();
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
crds.insert(me.clone(), 0).unwrap();
for i in 1..=30 {
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let id = entry.label().pubkey();
@ -639,25 +675,25 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 123,
gossip,
..ContactInfo::default()
}));
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 0,
gossip,
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 123,
gossip,
..ContactInfo::default()
}));
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 456,
gossip,
..ContactInfo::default()
@ -698,12 +734,12 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
@ -724,7 +760,7 @@ mod test {
assert!(options.is_empty());
// Unknown pubkey in gossip_validators -- will pull from nobody
gossip_validators.insert(Pubkey::new_rand());
gossip_validators.insert(solana_sdk::pubkey::new_rand());
let options = node.pull_options(
&crds,
&me.label().pubkey(),
@ -750,29 +786,32 @@ mod test {
}
#[test]
fn test_crds_filter_set_get() {
let mut crds_filter_set =
CrdsFilterSet::new(/*num_items=*/ 9672788, /*max_bytes=*/ 8196);
assert_eq!(crds_filter_set.0.len(), 1024);
fn test_crds_filter_set_add() {
let mut rng = thread_rng();
for _ in 0..100 {
let mut bytes = [0u8; HASH_BYTES];
rng.fill_bytes(&mut bytes);
let hash_value = Hash::new(&bytes);
let filter = crds_filter_set.get(&hash_value).unwrap().clone();
assert!(filter.test_mask(&hash_value));
// Validate that the returned filter is the *unique* filter which
// corresponds to the hash value (i.e. test_mask returns true).
let crds_filter_set =
CrdsFilterSet::new(/*num_items=*/ 9672788, /*max_bytes=*/ 8196);
let hash_values: Vec<_> = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
.take(1024)
.collect();
for hash_value in &hash_values {
crds_filter_set.add(*hash_value);
}
let filters: Vec<CrdsFilter> = crds_filter_set.into();
assert_eq!(filters.len(), 1024);
for hash_value in hash_values {
let mut num_hits = 0;
for f in &crds_filter_set.0 {
if *f == filter {
let mut false_positives = 0;
for filter in &filters {
if filter.test_mask(&hash_value) {
num_hits += 1;
assert!(f.test_mask(&hash_value));
} else {
assert!(!f.test_mask(&hash_value));
assert!(filter.contains(&hash_value));
assert!(filter.filter.contains(&hash_value));
} else if filter.filter.contains(&hash_value) {
false_positives += 1;
}
}
assert_eq!(num_hits, 1);
assert!(false_positives < 5);
}
}
@ -780,7 +819,8 @@ mod test {
fn test_crds_filter_set_new() {
// Validates invariants required by CrdsFilterSet::get in the
// vector of filters generated by CrdsFilterSet::new.
let filters = CrdsFilterSet::new(/*num_items=*/ 55345017, /*max_bytes=*/ 4098).0;
let filters: Vec<CrdsFilter> =
CrdsFilterSet::new(/*num_items=*/ 55345017, /*max_bytes=*/ 4098).into();
assert_eq!(filters.len(), 16384);
let mask_bits = filters[0].mask_bits;
let right_shift = 64 - mask_bits;
@ -793,32 +833,113 @@ mod test {
}
}
#[test]
fn test_build_crds_filter() {
let mut rng = thread_rng();
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds_gossip_pull = CrdsGossipPull::default();
let mut crds = Crds::default();
for _ in 0..10_000 {
crds_gossip_pull
.purged_values
.push_back((solana_sdk::hash::new_rand(&mut rng), rng.gen()));
}
let mut num_inserts = 0;
for _ in 0..20_000 {
if crds
.insert(CrdsValue::new_rand(&mut rng), rng.gen())
.is_ok()
{
num_inserts += 1;
}
}
assert_eq!(num_inserts, 20_000);
let filters = crds_gossip_pull.build_crds_filters(&thread_pool, &crds, MAX_BLOOM_SIZE);
assert_eq!(filters.len(), 32);
let hash_values: Vec<_> = crds
.table
.values()
.map(|v| v.value_hash)
.chain(
crds_gossip_pull
.purged_values
.iter()
.map(|(value_hash, _)| value_hash)
.cloned(),
)
.collect();
assert_eq!(hash_values.len(), 10_000 + 20_000);
let mut false_positives = 0;
for hash_value in hash_values {
let mut num_hits = 0;
for filter in &filters {
if filter.test_mask(&hash_value) {
num_hits += 1;
assert!(filter.contains(&hash_value));
assert!(filter.filter.contains(&hash_value));
} else if filter.filter.contains(&hash_value) {
false_positives += 1;
}
}
assert_eq!(num_hits, 1);
}
assert!(false_positives < 50_000, "fp: {}", false_positives);
}
#[test]
fn test_new_pull_request() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let id = entry.label().pubkey();
let node = CrdsGossipPull::default();
assert_eq!(
node.new_pull_request(&crds, &id, 0, 0, None, &HashMap::new(), PACKET_DATA_SIZE),
node.new_pull_request(
&thread_pool,
&crds,
&id,
0,
0,
None,
&HashMap::new(),
PACKET_DATA_SIZE
),
Err(CrdsGossipError::NoPeers)
);
crds.insert(entry.clone(), 0).unwrap();
assert_eq!(
node.new_pull_request(&crds, &id, 0, 0, None, &HashMap::new(), PACKET_DATA_SIZE),
node.new_pull_request(
&thread_pool,
&crds,
&id,
0,
0,
None,
&HashMap::new(),
PACKET_DATA_SIZE
),
Err(CrdsGossipError::NoPeers)
);
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
crds.insert(new.clone(), 0).unwrap();
let req = node.new_pull_request(&crds, &id, 0, 0, None, &HashMap::new(), PACKET_DATA_SIZE);
let req = node.new_pull_request(
&thread_pool,
&crds,
&id,
0,
0,
None,
&HashMap::new(),
PACKET_DATA_SIZE,
);
let (to, _, self_info) = req.unwrap();
assert_eq!(to, new.label().pubkey());
assert_eq!(self_info, entry);
@ -826,21 +947,22 @@ mod test {
#[test]
fn test_new_mark_creation_time() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let node_pubkey = entry.label().pubkey();
let mut node = CrdsGossipPull::default();
crds.insert(entry.clone(), 0).unwrap();
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
crds.insert(old.clone(), 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
crds.insert(new.clone(), 0).unwrap();
@ -851,6 +973,7 @@ mod test {
// odds of getting the other request should be 1 in u64::max_value()
for _ in 0..10 {
let req = node.new_pull_request(
&thread_pool,
&crds,
&node_pubkey,
0,
@ -867,20 +990,22 @@ mod test {
#[test]
fn test_generate_pull_responses() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let node_pubkey = entry.label().pubkey();
let node = CrdsGossipPull::default();
node_crds.insert(entry, 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
node_crds.insert(new, 0).unwrap();
let req = node.new_pull_request(
&thread_pool,
&node_crds,
&node_pubkey,
0,
@ -899,7 +1024,7 @@ mod test {
assert_eq!(rsp[0].len(), 0);
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
)));
dest_crds
@ -915,7 +1040,7 @@ mod test {
filters.push(filters[0].clone());
//should return new value since caller is new
filters[1].0 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1,
)));
@ -928,20 +1053,22 @@ mod test {
#[test]
fn test_process_pull_request() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let node_pubkey = entry.label().pubkey();
let node = CrdsGossipPull::default();
node_crds.insert(entry, 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
node_crds.insert(new, 0).unwrap();
let req = node.new_pull_request(
&thread_pool,
&node_crds,
&node_pubkey,
0,
@ -956,7 +1083,11 @@ mod test {
let (_, filters, caller) = req.unwrap();
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
dest.process_pull_requests(&mut dest_crds, filters, 1);
dest.process_pull_requests(
&mut dest_crds,
filters.into_iter().map(|(caller, _)| caller),
1,
);
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
assert!(dest_crds.lookup(&caller.label()).is_some());
assert_eq!(
@ -976,9 +1107,10 @@ mod test {
}
#[test]
fn test_process_pull_request_response() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
1,
)));
let node_pubkey = entry.label().pubkey();
@ -986,14 +1118,14 @@ mod test {
node_crds.insert(entry, 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
1,
)));
node_crds.insert(new, 0).unwrap();
let mut dest = CrdsGossipPull::default();
let mut dest_crds = Crds::default();
let new_id = Pubkey::new_rand();
let new_id = solana_sdk::pubkey::new_rand();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&new_id, 1,
)));
@ -1017,6 +1149,7 @@ mod test {
for _ in 0..30 {
// there is a chance of a false positive with bloom filters
let req = node.new_pull_request(
&thread_pool,
&node_crds,
&node_pubkey,
0,
@ -1028,7 +1161,11 @@ mod test {
let (_, filters, caller) = req.unwrap();
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
dest.process_pull_requests(&mut dest_crds, filters, 0);
dest.process_pull_requests(
&mut dest_crds,
filters.into_iter().map(|(caller, _)| caller),
0,
);
// if there is a false positive this is empty
// prob should be around 0.1 per iteration
if rsp.is_empty() {
@ -1071,9 +1208,10 @@ mod test {
}
#[test]
fn test_gossip_purge() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let node_label = entry.label();
@ -1081,7 +1219,7 @@ mod test {
let mut node = CrdsGossipPull::default();
node_crds.insert(entry, 0).unwrap();
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
node_crds.insert(old.clone(), 0).unwrap();
@ -1092,7 +1230,7 @@ mod test {
// purge
let timeouts = node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1);
node.purge_active(&mut node_crds, 2, &timeouts);
node.purge_active(&thread_pool, &mut node_crds, 2, &timeouts);
//verify self is still valid after purge
assert_eq!(node_crds.lookup(&node_label).unwrap().label(), node_label);
@ -1103,7 +1241,7 @@ mod test {
// there is a chance of a false positive with bloom filters
// assert that purged value is still in the set
// chance of 30 consecutive false positives is 0.1^30
let filters = node.build_crds_filters(&node_crds, PACKET_DATA_SIZE);
let filters = node.build_crds_filters(&thread_pool, &node_crds, PACKET_DATA_SIZE);
assert!(filters.iter().any(|filter| filter.contains(&value_hash)));
}
@ -1147,7 +1285,7 @@ mod test {
}
#[test]
fn test_crds_filter_complete_set_add_mask() {
let mut filters = CrdsFilterSet::new(1000, 10).0;
let mut filters: Vec<CrdsFilter> = CrdsFilterSet::new(1000, 10).into();
assert!(filters.iter().all(|f| f.mask_bits > 0));
let mut h: Hash = Hash::default();
// rev to make the hash::default() miss on the first few test_masks
@ -1193,7 +1331,7 @@ mod test {
let mut node_crds = Crds::default();
let mut node = CrdsGossipPull::default();
let peer_pubkey = Pubkey::new_rand();
let peer_pubkey = solana_sdk::pubkey::new_rand();
let peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&peer_pubkey, 0),
));


@ -19,7 +19,7 @@ use crate::{
use bincode::serialized_size;
use indexmap::map::IndexMap;
use itertools::Itertools;
use rand::{self, seq::SliceRandom, thread_rng, RngCore};
use rand::{seq::SliceRandom, Rng};
use solana_runtime::bloom::Bloom;
use solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::timestamp};
use std::{
@ -35,7 +35,12 @@ pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 3;
// Do not push to peers which have not been updated for this long.
const PUSH_ACTIVE_TIMEOUT_MS: u64 = 60_000;
// 10 minutes
const MAX_PUSHED_TO_TIMEOUT_MS: u64 = 10 * 60 * 1000;
#[derive(Clone)]
pub struct CrdsGossipPush {
@ -50,6 +55,8 @@ pub struct CrdsGossipPush {
/// This cache represents a lagging view of which validators
/// currently have this node in their `active_set`
received_cache: HashMap<Pubkey, HashMap<Pubkey, (bool, u64)>>,
last_pushed_to: HashMap<Pubkey, u64>,
last_pushed_to_cleanup_ts: u64,
pub num_active: usize,
pub push_fanout: usize,
pub msg_timeout: u64,
@ -67,6 +74,8 @@ impl Default for CrdsGossipPush {
active_set: IndexMap::new(),
push_messages: HashMap::new(),
received_cache: HashMap::new(),
last_pushed_to: HashMap::new(),
last_pushed_to_cleanup_ts: 0,
num_active: CRDS_GOSSIP_NUM_ACTIVE,
push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
msg_timeout: CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
@ -119,7 +128,7 @@ impl CrdsGossipPush {
.collect();
let mut seed = [0; 32];
seed[0..8].copy_from_slice(&thread_rng().next_u64().to_le_bytes());
rand::thread_rng().fill(&mut seed[..]);
let shuffle = weighted_shuffle(
staked_peers.iter().map(|(_, stake)| *stake).collect_vec(),
seed,
@ -127,8 +136,12 @@ impl CrdsGossipPush {
let mut keep = HashSet::new();
let mut peer_stake_sum = 0;
keep.insert(*origin);
for next in shuffle {
let (next_peer, next_stake) = staked_peers[next];
if next_peer == *origin {
continue;
}
keep.insert(next_peer);
peer_stake_sum += next_stake;
if peer_stake_sum >= prune_stake_threshold
@ -209,62 +222,77 @@ impl CrdsGossipPush {
/// The list of push messages is created such that all the randomly selected peers have not
/// pruned the source addresses.
pub fn new_push_messages(&mut self, crds: &Crds, now: u64) -> HashMap<Pubkey, Vec<CrdsValue>> {
let mut total_bytes: usize = 0;
let mut values = vec![];
let mut push_messages: HashMap<Pubkey, Vec<CrdsValue>> = HashMap::new();
trace!("new_push_messages {}", self.push_messages.len());
for (label, hash) in &self.push_messages {
let res = crds.lookup_versioned(label);
if res.is_none() {
continue;
}
let version = res.unwrap();
if version.value_hash != *hash {
continue;
}
let value = &version.value;
if value.wallclock() > now || value.wallclock() + self.msg_timeout < now {
continue;
}
total_bytes += serialized_size(value).unwrap() as usize;
if total_bytes > self.max_bytes {
break;
}
values.push(value.clone());
let push_fanout = self.push_fanout.min(self.active_set.len());
if push_fanout == 0 {
return HashMap::default();
}
trace!(
"new_push_messages {} {}",
values.len(),
self.active_set.len()
);
for v in values {
let mut num_pushes = 0;
let mut num_values = 0;
let mut total_bytes: usize = 0;
let mut labels = vec![];
let mut push_messages: HashMap<Pubkey, Vec<CrdsValue>> = HashMap::new();
let cutoff = now.saturating_sub(self.msg_timeout);
let lookup = |label, &hash| -> Option<&CrdsValue> {
let value = crds.lookup_versioned(label)?;
if value.value_hash != hash || value.value.wallclock() < cutoff {
None
} else {
Some(&value.value)
}
};
let mut push_value = |origin: Pubkey, value: &CrdsValue| {
//use a consistent index for the same origin so
//the active set learns the MST for that origin
let start = v.label().pubkey().as_ref()[0] as usize;
let max = self.push_fanout.min(self.active_set.len());
for i in start..(start + max) {
let ix = i % self.active_set.len();
if let Some((p, filter)) = self.active_set.get_index(ix) {
if !filter.contains(&v.label().pubkey()) {
trace!("new_push_messages insert {} {:?}", *p, v);
push_messages.entry(*p).or_default().push(v.clone());
self.num_pushes += 1;
}
let start = origin.as_ref()[0] as usize;
for i in start..(start + push_fanout) {
let index = i % self.active_set.len();
let (peer, filter) = self.active_set.get_index(index).unwrap();
if !filter.contains(&origin) {
trace!("new_push_messages insert {} {:?}", *peer, value);
push_messages.entry(*peer).or_default().push(value.clone());
num_pushes += 1;
}
self.push_messages.remove(&v.label());
}
};
for (label, hash) in &self.push_messages {
match lookup(label, hash) {
None => labels.push(label.clone()),
Some(value) if value.wallclock() > now => continue,
Some(value) => {
total_bytes += serialized_size(value).unwrap() as usize;
if total_bytes > self.max_bytes {
break;
}
num_values += 1;
labels.push(label.clone());
push_value(label.pubkey(), value);
}
}
}
self.num_pushes += num_pushes;
trace!("new_push_messages {} {}", num_values, self.active_set.len());
for label in labels {
self.push_messages.remove(&label);
}
for target_pubkey in push_messages.keys() {
*self.last_pushed_to.entry(*target_pubkey).or_insert(0) = now;
}
if now - self.last_pushed_to_cleanup_ts > MAX_PUSHED_TO_TIMEOUT_MS {
self.last_pushed_to
.retain(|_id, timestamp| now - *timestamp > MAX_PUSHED_TO_TIMEOUT_MS);
self.last_pushed_to_cleanup_ts = now;
}
push_messages
}
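A minimal sketch, not part of the diff, of the deterministic fanout selection in new_push_messages: the first byte of the origin pubkey picks a start index, and push_fanout consecutive entries of the active set (modulo its length) receive the value, so the same origin keeps mapping to the same peers. The helper name and numbers are illustrative.
fn fanout_targets(origin_first_byte: u8, active_set_len: usize, fanout: usize) -> Vec<usize> {
    if active_set_len == 0 {
        return Vec::new(); // mirrors the early return when the active set is empty
    }
    let fanout = fanout.min(active_set_len);
    let start = origin_first_byte as usize;
    (start..start + fanout).map(|i| i % active_set_len).collect()
}
fn main() {
    // With 6 active peers and fanout 3, origin byte 0xFE wraps around the ring.
    assert_eq!(fanout_targets(0xFE, 6, 3), vec![2, 3, 4]);
}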
/// add the `from` to the peer's filter of nodes
pub fn process_prune_msg(&mut self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
for origin in origins {
if origin == self_pubkey {
continue;
}
if let Some(p) = self.active_set.get_mut(peer) {
p.add(origin)
if let Some(peer) = self.active_set.get_mut(peer) {
for origin in origins {
if origin != self_pubkey {
peer.add(origin);
}
}
}
}
@ -286,6 +314,7 @@ impl CrdsGossipPush {
network_size: usize,
ratio: usize,
) {
let mut rng = rand::thread_rng();
let need = Self::compute_need(self.num_active, self.active_set.len(), ratio);
let mut new_items = HashMap::new();
@ -301,7 +330,7 @@ impl CrdsGossipPush {
}
let mut seed = [0; 32];
seed[0..8].copy_from_slice(&thread_rng().next_u64().to_le_bytes());
rng.fill(&mut seed[..]);
let mut shuffle = weighted_shuffle(
options.iter().map(|weighted| weighted.0).collect_vec(),
seed,
@ -327,7 +356,7 @@ impl CrdsGossipPush {
}
}
let mut keys: Vec<Pubkey> = self.active_set.keys().cloned().collect();
keys.shuffle(&mut rand::thread_rng());
keys.shuffle(&mut rng);
let num = keys.len() / ratio;
for k in &keys[..num] {
self.active_set.swap_remove(k);
@ -345,11 +374,26 @@ impl CrdsGossipPush {
stakes: &HashMap<Pubkey, u64>,
gossip_validators: Option<&HashSet<Pubkey>>,
) -> Vec<(f32, &'a ContactInfo)> {
let now = timestamp();
let mut rng = rand::thread_rng();
let max_weight = u16::MAX as f32 - 1.0;
let active_cutoff = now.saturating_sub(PUSH_ACTIVE_TIMEOUT_MS);
crds.table
.values()
.filter(|v| v.value.contact_info().is_some())
.map(|v| (v.value.contact_info().unwrap(), v))
.filter(|(info, _)| {
.filter_map(|value| {
let info = value.value.contact_info()?;
// Stop pushing to nodes which have not been active recently.
if value.local_timestamp < active_cutoff {
// In order to mitigate eclipse attack, for staked nodes
// continue retrying periodically.
let stake = stakes.get(&info.id).unwrap_or(&0);
if *stake == 0 || rng.gen_ratio(7, 8) {
return None;
}
}
Some(info)
})
.filter(|info| {
info.id != *self_id
&& ContactInfo::is_valid_address(&info.gossip)
&& self_shred_version == info.shred_version
@ -357,10 +401,9 @@ impl CrdsGossipPush {
gossip_validators.contains(&info.id)
})
})
.map(|(info, value)| {
let max_weight = f32::from(u16::max_value()) - 1.0;
let last_updated: u64 = value.local_timestamp;
let since = ((timestamp() - last_updated) / 1024) as u32;
.map(|info| {
let last_pushed_to: u64 = *self.last_pushed_to.get(&info.id).unwrap_or(&0);
let since = (now.saturating_sub(last_pushed_to) / 1024) as u32;
let stake = get_stake(&info.id, stakes);
let weight = get_weight(max_weight, since, stake);
(weight, info)
@ -370,34 +413,19 @@ impl CrdsGossipPush {
/// purge old pending push messages
pub fn purge_old_pending_push_messages(&mut self, crds: &Crds, min_time: u64) {
let old_msgs: Vec<CrdsValueLabel> = self
.push_messages
.iter()
.filter_map(|(k, hash)| {
if let Some(versioned) = crds.lookup_versioned(k) {
if versioned.value.wallclock() < min_time || versioned.value_hash != *hash {
Some(k)
} else {
None
}
} else {
Some(k)
}
})
.cloned()
.collect();
for k in old_msgs {
self.push_messages.remove(&k);
}
self.push_messages.retain(|k, hash| {
matches!(crds.lookup_versioned(k), Some(versioned) if
versioned.value.wallclock() >= min_time
&& versioned.value_hash == *hash)
});
}
/// purge received push message cache
pub fn purge_old_received_cache(&mut self, min_time: u64) {
self.received_cache
.iter_mut()
.for_each(|v| v.1.retain(|_, v| v.1 > min_time));
self.received_cache.retain(|_, v| !v.is_empty());
self.received_cache.retain(|_, v| {
v.retain(|_, (_, t)| *t > min_time);
!v.is_empty()
});
}
}
@ -413,15 +441,15 @@ mod test {
let mut push = CrdsGossipPush::default();
let mut stakes = HashMap::new();
let self_id = Pubkey::new_rand();
let origin = Pubkey::new_rand();
let self_id = solana_sdk::pubkey::new_rand();
let origin = solana_sdk::pubkey::new_rand();
stakes.insert(self_id, 100);
stakes.insert(origin, 100);
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&origin, 0,
)));
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
let low_staked_peers = (0..10).map(|_| solana_sdk::pubkey::new_rand());
let mut low_staked_set = HashSet::new();
low_staked_peers.for_each(|p| {
let _ = push.process_push_message(&mut crds, &p, value.clone(), 0);
@ -435,7 +463,7 @@ mod test {
"should not prune if min threshold has not been reached"
);
let high_staked_peer = Pubkey::new_rand();
let high_staked_peer = solana_sdk::pubkey::new_rand();
let high_stake = CrdsGossipPush::prune_stake_threshold(100, 100) + 10;
stakes.insert(high_staked_peer, high_stake);
let _ = push.process_push_message(&mut crds, &high_staked_peer, value, 0);
@ -458,7 +486,7 @@ mod test {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let label = value.label();
@ -479,7 +507,7 @@ mod test {
fn test_process_push_old_version() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 1;
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
@ -502,7 +530,7 @@ mod test {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let timeout = push.msg_timeout;
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
// push a version too far in the future
ci.wallclock = timeout + 1;
@ -524,7 +552,7 @@ mod test {
fn test_process_push_update() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 0;
let value_old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
@ -555,23 +583,24 @@ mod test {
#[test]
fn test_refresh_active_set() {
solana_logger::setup();
let now = timestamp();
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(value1.clone(), 0), Ok(None));
assert_eq!(crds.insert(value1.clone(), now), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
assert!(push.active_set.get(&value1.label().pubkey()).is_some());
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert!(push.active_set.get(&value2.label().pubkey()).is_none());
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
assert_eq!(crds.insert(value2.clone(), now), Ok(None));
for _ in 0..30 {
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
if push.active_set.get(&value2.label().pubkey()).is_some() {
@ -582,27 +611,29 @@ mod test {
for _ in 0..push.num_active {
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&Pubkey::new_rand(), 0),
ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0),
));
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
assert_eq!(crds.insert(value2.clone(), now), Ok(None));
}
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
assert_eq!(push.active_set.len(), push.num_active);
}
#[test]
fn test_active_set_refresh_with_bank() {
solana_logger::setup();
let time = timestamp() - 1024; //make sure there's at least a 1 second delay
let mut crds = Crds::default();
let push = CrdsGossipPush::default();
let mut push = CrdsGossipPush::default();
let mut stakes = HashMap::new();
for i in 1..=100 {
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
time,
)));
let id = peer.label().pubkey();
crds.insert(peer.clone(), time).unwrap();
stakes.insert(id, i * 100);
push.last_pushed_to.insert(id, time);
}
let mut options = push.push_options(&crds, &Pubkey::default(), 0, &stakes, None);
assert!(!options.is_empty());
@ -616,6 +647,7 @@ mod test {
#[test]
fn test_no_pushes_to_from_different_shred_versions() {
let now = timestamp();
let mut crds = Crds::default();
let stakes = HashMap::new();
let node = CrdsGossipPush::default();
@ -623,34 +655,34 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 123,
gossip,
..ContactInfo::default()
}));
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 0,
gossip,
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 123,
gossip,
..ContactInfo::default()
}));
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 456,
gossip,
..ContactInfo::default()
}));
crds.insert(me.clone(), 0).unwrap();
crds.insert(spy.clone(), 0).unwrap();
crds.insert(node_123.clone(), 0).unwrap();
crds.insert(node_456, 0).unwrap();
crds.insert(me.clone(), now).unwrap();
crds.insert(spy.clone(), now).unwrap();
crds.insert(node_123.clone(), now).unwrap();
crds.insert(node_456, now).unwrap();
// shred version 123 should ignore nodes with versions 0 and 456
let options = node
@ -673,24 +705,25 @@ mod test {
#[test]
fn test_pushes_only_to_allowed() {
let now = timestamp();
let mut crds = Crds::default();
let stakes = HashMap::new();
let node = CrdsGossipPush::default();
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
crds.insert(me.clone(), 0).unwrap();
crds.insert(node_123.clone(), 0).unwrap();
crds.insert(node_123.clone(), now).unwrap();
// Unknown pubkey in gossip_validators -- will push to nobody
let mut gossip_validators = HashSet::new();
@ -705,7 +738,7 @@ mod test {
assert!(options.is_empty());
// Unknown pubkey in gossip_validators -- will push to nobody
gossip_validators.insert(Pubkey::new_rand());
gossip_validators.insert(solana_sdk::pubkey::new_rand());
let options = node.push_options(
&crds,
&me.label().pubkey(),
@ -731,17 +764,18 @@ mod test {
#[test]
fn test_new_push_messages() {
let now = timestamp();
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
assert_eq!(crds.insert(peer.clone(), now), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let mut expected = HashMap::new();
@ -755,24 +789,25 @@ mod test {
}
#[test]
fn test_personalized_push_messages() {
let now = timestamp();
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer_1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer_1.clone(), 0), Ok(None));
assert_eq!(crds.insert(peer_1.clone(), now), Ok(None));
let peer_2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer_2.clone(), 0), Ok(None));
assert_eq!(crds.insert(peer_2.clone(), now), Ok(None));
let peer_3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
&solana_sdk::pubkey::new_rand(),
now,
)));
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), 0),
push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), now),
Ok(None)
);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
@ -786,22 +821,22 @@ mod test {
expected.insert(peer_1.pubkey(), vec![new_msg.clone()]);
expected.insert(peer_2.pubkey(), vec![new_msg]);
assert_eq!(push.active_set.len(), 3);
assert_eq!(push.new_push_messages(&crds, 0), expected);
assert_eq!(push.new_push_messages(&crds, now), expected);
}
#[test]
fn test_process_prune() {
let mut crds = Crds::default();
let self_id = Pubkey::new_rand();
let self_id = solana_sdk::pubkey::new_rand();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let expected = HashMap::new();
@ -821,13 +856,13 @@ mod test {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer, 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 1;
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci));
let expected = HashMap::new();
@ -843,7 +878,7 @@ mod test {
fn test_purge_old_received_cache() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 0;
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci));
let label = value.label();

core/src/crds_shards.rs Normal file

@ -0,0 +1,234 @@
use crate::crds::VersionedCrdsValue;
use crate::crds_gossip_pull::CrdsFilter;
use indexmap::map::IndexMap;
use std::cmp::Ordering;
use std::ops::{Index, IndexMut};
#[derive(Clone)]
pub struct CrdsShards {
// shards[k] includes crds values which the first shard_bits of their hash
// value is equal to k. Each shard is a mapping from crds values indices to
// their hash value.
shards: Vec<IndexMap<usize, u64>>,
shard_bits: u32,
}
impl CrdsShards {
pub fn new(shard_bits: u32) -> Self {
CrdsShards {
shards: vec![IndexMap::new(); 1 << shard_bits],
shard_bits,
}
}
#[must_use]
pub fn insert(&mut self, index: usize, value: &VersionedCrdsValue) -> bool {
let hash = CrdsFilter::hash_as_u64(&value.value_hash);
self.shard_mut(hash).insert(index, hash).is_none()
}
#[must_use]
pub fn remove(&mut self, index: usize, value: &VersionedCrdsValue) -> bool {
let hash = CrdsFilter::hash_as_u64(&value.value_hash);
self.shard_mut(hash).swap_remove(&index).is_some()
}
/// Returns indices of all crds values which the first 'mask_bits' of their
/// hash value is equal to 'mask'.
pub fn find(&self, mask: u64, mask_bits: u32) -> impl Iterator<Item = usize> + '_ {
let ones = (!0u64).checked_shr(mask_bits).unwrap_or(0);
let mask = mask | ones;
match self.shard_bits.cmp(&mask_bits) {
Ordering::Less => {
let pred = move |(&index, hash)| {
if hash | ones == mask {
Some(index)
} else {
None
}
};
Iter::Less(self.shard(mask).iter().filter_map(pred))
}
Ordering::Equal => Iter::Equal(self.shard(mask).keys().cloned()),
Ordering::Greater => {
let count = 1 << (self.shard_bits - mask_bits);
let end = self.shard_index(mask) + 1;
Iter::Greater(
self.shards[end - count..end]
.iter()
.flat_map(IndexMap::keys)
.cloned(),
)
}
}
}
#[inline]
fn shard_index(&self, hash: u64) -> usize {
hash.checked_shr(64 - self.shard_bits).unwrap_or(0) as usize
}
#[inline]
fn shard(&self, hash: u64) -> &IndexMap<usize, u64> {
let shard_index = self.shard_index(hash);
self.shards.index(shard_index)
}
#[inline]
fn shard_mut(&mut self, hash: u64) -> &mut IndexMap<usize, u64> {
let shard_index = self.shard_index(hash);
self.shards.index_mut(shard_index)
}
// Checks invariants in the shards tables against the crds table.
#[cfg(test)]
pub fn check(&self, crds: &[VersionedCrdsValue]) {
let mut indices: Vec<_> = self
.shards
.iter()
.flat_map(IndexMap::keys)
.cloned()
.collect();
indices.sort_unstable();
assert_eq!(indices, (0..crds.len()).collect::<Vec<_>>());
for (shard_index, shard) in self.shards.iter().enumerate() {
for (&index, &hash) in shard {
assert_eq!(hash, CrdsFilter::hash_as_u64(&crds[index].value_hash));
assert_eq!(
shard_index as u64,
hash.checked_shr(64 - self.shard_bits).unwrap_or(0)
);
}
}
}
}
// Wrapper for 3 types of iterators we get when comparing shard_bits and
// mask_bits in find method. This is to avoid Box<dyn Iterator<Item =...>>
// which involves dynamic dispatch and is relatively slow.
enum Iter<R, S, T> {
Less(R),
Equal(S),
Greater(T),
}
impl<R, S, T> Iterator for Iter<R, S, T>
where
R: Iterator<Item = usize>,
S: Iterator<Item = usize>,
T: Iterator<Item = usize>,
{
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
match self {
Self::Greater(iter) => iter.next(),
Self::Less(iter) => iter.next(),
Self::Equal(iter) => iter.next(),
}
}
}
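A minimal sketch, not from the diff, of the same static-dispatch trick the Iter wrapper uses: wrap the alternative concrete iterator types in an enum and forward next, instead of boxing a dyn Iterator. The Either name and example data are illustrative.
enum Either<A, B> {
    Left(A),
    Right(B),
}
impl<A, B, T> Iterator for Either<A, B>
where
    A: Iterator<Item = T>,
    B: Iterator<Item = T>,
{
    type Item = T;
    fn next(&mut self) -> Option<T> {
        match self {
            Either::Left(it) => it.next(),
            Either::Right(it) => it.next(),
        }
    }
}
fn evens_or_all(v: &[u32], only_evens: bool) -> impl Iterator<Item = u32> + '_ {
    if only_evens {
        Either::Left(v.iter().copied().filter(|x| x % 2 == 0))
    } else {
        Either::Right(v.iter().copied())
    }
}
fn main() {
    let v = [1, 2, 3, 4];
    assert_eq!(evens_or_all(&v, true).collect::<Vec<_>>(), vec![2, 4]);
    assert_eq!(evens_or_all(&v, false).count(), 4);
}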
#[cfg(test)]
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use crate::crds_value::{CrdsData, CrdsValue};
use rand::{thread_rng, Rng};
use solana_sdk::timing::timestamp;
use std::collections::HashSet;
use std::ops::Index;
fn new_test_crds_value() -> VersionedCrdsValue {
let data = CrdsData::ContactInfo(ContactInfo::new_localhost(
&solana_sdk::pubkey::new_rand(),
timestamp(),
));
VersionedCrdsValue::new(timestamp(), CrdsValue::new_unsigned(data))
}
// Returns true if the first mask_bits most significant bits of hash is the
// same as the given bit mask.
fn check_mask(value: &VersionedCrdsValue, mask: u64, mask_bits: u32) -> bool {
let hash = CrdsFilter::hash_as_u64(&value.value_hash);
let ones = (!0u64).checked_shr(mask_bits).unwrap_or(0u64);
(hash | ones) == (mask | ones)
}
// Manual filtering by scanning all the values.
fn filter_crds_values(
values: &[VersionedCrdsValue],
mask: u64,
mask_bits: u32,
) -> HashSet<usize> {
values
.iter()
.enumerate()
.filter_map(|(index, value)| {
if check_mask(value, mask, mask_bits) {
Some(index)
} else {
None
}
})
.collect()
}
#[test]
fn test_crds_shards_round_trip() {
let mut rng = thread_rng();
// Generate some random hash and crds value labels.
let mut values: Vec<_> = std::iter::repeat_with(new_test_crds_value)
.take(4096)
.collect();
// Insert everything into the crds shards.
let mut shards = CrdsShards::new(5);
for (index, value) in values.iter().enumerate() {
assert!(shards.insert(index, value));
}
shards.check(&values);
// Remove some of the values.
for _ in 0..512 {
let index = rng.gen_range(0, values.len());
let value = values.swap_remove(index);
assert!(shards.remove(index, &value));
if index < values.len() {
let value = values.index(index);
assert!(shards.remove(values.len(), value));
assert!(shards.insert(index, value));
}
shards.check(&values);
}
// Random masks.
for _ in 0..10 {
let mask = rng.gen();
for mask_bits in 0..12 {
let mut set = filter_crds_values(&values, mask, mask_bits);
for index in shards.find(mask, mask_bits) {
assert!(set.remove(&index));
}
assert!(set.is_empty());
}
}
// Existing hash values.
for (index, value) in values.iter().enumerate() {
let mask = CrdsFilter::hash_as_u64(&value.value_hash);
let hits: Vec<_> = shards.find(mask, 64).collect();
assert_eq!(hits, vec![index]);
}
// Remove everything.
while !values.is_empty() {
let index = rng.gen_range(0, values.len());
let value = values.swap_remove(index);
assert!(shards.remove(index, &value));
if index < values.len() {
let value = values.index(index);
assert!(shards.remove(values.len(), value));
assert!(shards.insert(index, value));
}
if index % 5 == 0 {
shards.check(&values);
}
}
}
}


@ -75,6 +75,7 @@ pub enum CrdsData {
SnapshotHashes(SnapshotHash),
AccountsHashes(SnapshotHash),
EpochSlots(EpochSlotsIndex, EpochSlots),
LegacyVersion(LegacyVersion),
Version(Version),
}
@ -102,6 +103,7 @@ impl Sanitize for CrdsData {
}
val.sanitize()
}
CrdsData::LegacyVersion(version) => version.sanitize(),
CrdsData::Version(version) => version.sanitize(),
}
}
@ -208,6 +210,23 @@ impl Vote {
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, AbiExample)]
pub struct LegacyVersion {
pub from: Pubkey,
pub wallclock: u64,
pub version: solana_version::LegacyVersion,
}
impl Sanitize for LegacyVersion {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
self.from.sanitize()?;
self.version.sanitize()
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, AbiExample)]
pub struct Version {
pub from: Pubkey,
@ -245,6 +264,7 @@ pub enum CrdsValueLabel {
SnapshotHashes(Pubkey),
EpochSlots(EpochSlotsIndex, Pubkey),
AccountsHashes(Pubkey),
LegacyVersion(Pubkey),
Version(Pubkey),
}
@ -257,6 +277,7 @@ impl fmt::Display for CrdsValueLabel {
CrdsValueLabel::SnapshotHashes(_) => write!(f, "SnapshotHash({})", self.pubkey()),
CrdsValueLabel::EpochSlots(ix, _) => write!(f, "EpochSlots({}, {})", ix, self.pubkey()),
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
CrdsValueLabel::LegacyVersion(_) => write!(f, "LegacyVersion({})", self.pubkey()),
CrdsValueLabel::Version(_) => write!(f, "Version({})", self.pubkey()),
}
}
@ -271,6 +292,7 @@ impl CrdsValueLabel {
CrdsValueLabel::SnapshotHashes(p) => *p,
CrdsValueLabel::EpochSlots(_, p) => *p,
CrdsValueLabel::AccountsHashes(p) => *p,
CrdsValueLabel::LegacyVersion(p) => *p,
CrdsValueLabel::Version(p) => *p,
}
}
@ -289,6 +311,17 @@ impl CrdsValue {
value.sign(keypair);
value
}
/// New random crds value for tests and benchmarks.
pub fn new_rand<R: ?Sized>(rng: &mut R) -> CrdsValue
where
R: rand::Rng,
{
let now = rng.gen();
let contact_info = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now);
Self::new_signed(CrdsData::ContactInfo(contact_info), &Keypair::new())
}
/// Totally unsecure unverifiable wallclock of the node that generated this message
/// Latest wallclock is always picked.
/// This is used to time out push messages.
@ -300,6 +333,7 @@ impl CrdsValue {
CrdsData::SnapshotHashes(hash) => hash.wallclock,
CrdsData::AccountsHashes(hash) => hash.wallclock,
CrdsData::EpochSlots(_, p) => p.wallclock,
CrdsData::LegacyVersion(version) => version.wallclock,
CrdsData::Version(version) => version.wallclock,
}
}
@ -311,6 +345,7 @@ impl CrdsValue {
CrdsData::SnapshotHashes(hash) => hash.from,
CrdsData::AccountsHashes(hash) => hash.from,
CrdsData::EpochSlots(_, p) => p.from,
CrdsData::LegacyVersion(version) => version.from,
CrdsData::Version(version) => version.from,
}
}
@ -322,6 +357,7 @@ impl CrdsValue {
CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()),
CrdsData::AccountsHashes(_) => CrdsValueLabel::AccountsHashes(self.pubkey()),
CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
CrdsData::LegacyVersion(_) => CrdsValueLabel::LegacyVersion(self.pubkey()),
CrdsData::Version(_) => CrdsValueLabel::Version(self.pubkey()),
}
}
@ -373,6 +409,13 @@ impl CrdsValue {
}
}
pub fn legacy_version(&self) -> Option<&LegacyVersion> {
match &self.data {
CrdsData::LegacyVersion(legacy_version) => Some(legacy_version),
_ => None,
}
}
pub fn version(&self) -> Option<&Version> {
match &self.data {
CrdsData::Version(version) => Some(version),
@ -387,6 +430,7 @@ impl CrdsValue {
CrdsValueLabel::LowestSlot(*key),
CrdsValueLabel::SnapshotHashes(*key),
CrdsValueLabel::AccountsHashes(*key),
CrdsValueLabel::LegacyVersion(*key),
CrdsValueLabel::Version(*key),
];
labels.extend((0..MAX_VOTES).map(|ix| CrdsValueLabel::Vote(ix, *key)));
@ -438,7 +482,7 @@ mod test {
#[test]
fn test_labels() {
let mut hits = [false; 5 + MAX_VOTES as usize + MAX_EPOCH_SLOTS as usize];
let mut hits = [false; 6 + MAX_VOTES as usize + MAX_EPOCH_SLOTS as usize];
// this method should cover all the possible labels
for v in &CrdsValue::record_labels(&Pubkey::default()) {
match v {
@ -446,10 +490,11 @@ mod test {
CrdsValueLabel::LowestSlot(_) => hits[1] = true,
CrdsValueLabel::SnapshotHashes(_) => hits[2] = true,
CrdsValueLabel::AccountsHashes(_) => hits[3] = true,
CrdsValueLabel::Version(_) => hits[4] = true,
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 5] = true,
CrdsValueLabel::LegacyVersion(_) => hits[4] = true,
CrdsValueLabel::Version(_) => hits[5] = true,
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 6] = true,
CrdsValueLabel::EpochSlots(ix, _) => {
hits[*ix as usize + MAX_VOTES as usize + 5] = true
hits[*ix as usize + MAX_VOTES as usize + 6] = true
}
}
}

core/src/data_budget.rs Normal file

@ -0,0 +1,112 @@
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
#[derive(Default)]
pub struct DataBudget {
// Amount of bytes we have in the budget to send.
bytes: AtomicUsize,
// Last time that we upped the bytes count, used
// to detect when to up the bytes budget again
last_timestamp_ms: AtomicU64,
}
impl DataBudget {
// If there are enough bytes in the budget, consumes from
// the budget and returns true. Otherwise returns false.
#[must_use]
pub fn take(&self, size: usize) -> bool {
let mut budget = self.bytes.load(Ordering::Acquire);
loop {
if budget < size {
return false;
}
match self.bytes.compare_exchange_weak(
budget,
budget - size,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return true,
Err(bytes) => budget = bytes,
}
}
}
// Updates timestamp and returns true, if at least given milliseconds
// has passed since last update. Otherwise returns false.
fn can_update(&self, duration_millis: u64) -> bool {
let now = solana_sdk::timing::timestamp();
let mut last_timestamp = self.last_timestamp_ms.load(Ordering::Acquire);
loop {
if now < last_timestamp + duration_millis {
return false;
}
match self.last_timestamp_ms.compare_exchange_weak(
last_timestamp,
now,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return true,
Err(ts) => last_timestamp = ts,
}
}
}
// Updates the budget if at least given milliseconds has passed since last
// update. Updater function maps current value of bytes to the new one.
pub fn update<F>(&self, duration_millis: u64, updater: F)
where
F: Fn(usize) -> usize,
{
if !self.can_update(duration_millis) {
return;
}
let mut bytes = self.bytes.load(Ordering::Acquire);
loop {
match self.bytes.compare_exchange_weak(
bytes,
updater(bytes),
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => break,
Err(b) => bytes = b,
}
}
}
// Non-atomic clone only for tests and simulations.
pub fn clone_non_atomic(&self) -> Self {
Self {
bytes: AtomicUsize::new(self.bytes.load(Ordering::Acquire)),
last_timestamp_ms: AtomicU64::new(self.last_timestamp_ms.load(Ordering::Acquire)),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::time::Duration;
#[test]
fn test_data_budget() {
let budget = DataBudget::default();
assert!(!budget.take(1)); // budget = 0.
budget.update(1000, |bytes| bytes + 5); // budget updates to 5.
assert!(budget.take(1));
assert!(budget.take(2));
assert!(!budget.take(3)); // budget = 2, out of budget.
budget.update(30, |_| 10); // no update, budget = 2.
assert!(!budget.take(3)); // budget = 2, out of budget.
std::thread::sleep(Duration::from_millis(50));
budget.update(30, |bytes| bytes * 2); // budget updates to 4.
assert!(budget.take(3));
assert!(budget.take(1));
assert!(!budget.take(1)); // budget = 0.
}
}
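A minimal sketch, not part of the diff, of the lock-free retry loop behind DataBudget::take and DataBudget::update: reload the observed value whenever compare_exchange_weak fails and try again. The function and variable names are illustrative.
use std::sync::atomic::{AtomicUsize, Ordering};
fn try_consume(counter: &AtomicUsize, amount: usize) -> bool {
    let mut current = counter.load(Ordering::Acquire);
    loop {
        if current < amount {
            return false; // not enough budget left
        }
        match counter.compare_exchange_weak(
            current,
            current - amount,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            Ok(_) => return true,
            Err(observed) => current = observed, // lost the race; retry with the fresh value
        }
    }
}
fn main() {
    let counter = AtomicUsize::new(10);
    assert!(try_consume(&counter, 4));
    assert!(try_consume(&counter, 6));
    assert!(!try_consume(&counter, 1));
}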


@ -306,8 +306,8 @@ mod tests {
#[test]
fn test_gossip_services_spy() {
let keypair = Keypair::new();
let peer0 = Pubkey::new_rand();
let peer1 = Pubkey::new_rand();
let peer0 = solana_sdk::pubkey::new_rand();
let peer1 = solana_sdk::pubkey::new_rand();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let peer0_info = ContactInfo::new_localhost(&peer0, 0);
let peer1_info = ContactInfo::new_localhost(&peer1, 0);
@ -335,7 +335,7 @@ mod tests {
spy_ref.clone(),
None,
Some(0),
Some(Pubkey::new_rand()),
Some(solana_sdk::pubkey::new_rand()),
None,
);
assert_eq!(met_criteria, false);
@ -349,7 +349,7 @@ mod tests {
spy_ref.clone(),
Some(1),
Some(0),
Some(Pubkey::new_rand()),
Some(solana_sdk::pubkey::new_rand()),
None,
);
assert_eq!(met_criteria, false);


@ -187,6 +187,7 @@ impl HeaviestSubtreeForkChoice {
.expect("new root must exist in fork_infos map")
.parent = None;
self.root = new_root;
self.last_root_time = Instant::now();
}
pub fn add_root_parent(&mut self, root_parent: Slot) {


@ -30,7 +30,9 @@ pub mod crds_gossip;
pub mod crds_gossip_error;
pub mod crds_gossip_pull;
pub mod crds_gossip_push;
pub mod crds_shards;
pub mod crds_value;
pub mod data_budget;
pub mod epoch_slots;
pub mod fetch_stage;
pub mod fork_choice;
@ -41,6 +43,8 @@ pub mod ledger_cleanup_service;
pub mod local_vote_signer_service;
pub mod non_circulating_supply;
pub mod optimistic_confirmation_verifier;
pub mod optimistically_confirmed_bank_tracker;
pub mod ping_pong;
pub mod poh_recorder;
pub mod poh_service;
pub mod progress_map;
@ -54,7 +58,6 @@ mod result;
pub mod retransmit_stage;
pub mod rewards_recorder_service;
pub mod rpc;
pub mod rpc_error;
pub mod rpc_health;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;


@ -79,6 +79,26 @@ solana_sdk::pubkeys!(
"GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
"AzVV9ZZDxTgW4wWfJmsG6ytaHpQGSe1yz76Nyy84VbQF",
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
"CQDYc4ET2mbFhVpgj41gXahL6Exn5ZoPcGAzSHuYxwmE",
"5PLJZLJiRR9vf7d1JCCg7UuWjtyN9nkab9uok6TqSyuP",
"7xJ9CLtEAcEShw9kW2gSoZkRWL566Dg12cvgzANJwbTr",
"BuCEvc9ze8UoAQwwsQLy8d447C8sA4zeVtVpc6m5wQeS",
"8ndGYFjav6NDXvzYcxs449Aub3AxYv4vYpk89zRDwgj7",
"8W58E8JVJjH1jCy5CeHJQgvwFXTyAVyesuXRZGbcSUGG",
"GNiz4Mq886bTNDT3pijGsu2gbw6it7sqrwncro45USeB",
"GhsotwFMH6XUrRLJCxcx62h7748N2Uq8mf87hUGkmPhg",
"Fgyh8EeYGZtbW8sS33YmNQnzx54WXPrJ5KWNPkCfWPot",
"8UVjvYyoqP6sqcctTso3xpCdCfgTMiv3VRh7vraC2eJk",
"BhvLngiqqKeZ8rpxch2uGjeCiC88zzewoWPRuoxpp1aS",
"63DtkW7zuARcd185EmHAkfF44bDcC2SiTSEj2spLP3iA",
"GvpCiTgq9dmEeojCDBivoLoZqc4AkbUDACpqPMwYLWKh",
"7Y8smnoUrYKGGuDq2uaFKVxJYhojgg7DVixHyAtGTYEV",
"DUS1KxwUhUyDKB4A81E8vdnTe3hSahd92Abtn9CXsEcj",
"F9MWFw8cnYVwsRq8Am1PGfFL3cQUZV37mbGoxZftzLjN",
"8vqrX3H2BYLaXVintse3gorPEM4TgTwTFZNN1Fm9TdYs",
"CUageMFi49kzoDqtdU8NvQ4Bq3sbtJygjKDAXJ45nmAi",
"5smrYwb1Hr2T8XMnvsqccTgXxuqQs14iuE8RbHFYf2Cf",
"xQadXQiUTCCFhfHjvQx1hyJK6KVWr1w2fD6DT3cdwj7",
]
);
@ -115,7 +135,7 @@ mod tests {
let num_genesis_accounts = 10;
for _ in 0..num_genesis_accounts {
accounts.insert(
Pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
Account::new(balance, 0, &Pubkey::default()),
);
}
@ -127,7 +147,7 @@ mod tests {
let num_stake_accounts = 3;
for _ in 0..num_stake_accounts {
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let meta = Meta {
authorized: Authorized::auto(&pubkey),
lockup: Lockup {


@ -20,7 +20,7 @@ impl OptimisticConfirmationVerifier {
}
// Returns any optimistic slots that were not rooted
pub fn get_unrooted_optimistic_slots(
pub fn verify_for_unrooted_optimistic_slots(
&mut self,
root_bank: &Bank,
blockstore: &Blockstore,
@ -34,8 +34,8 @@ impl OptimisticConfirmationVerifier {
std::mem::swap(&mut slots_before_root, &mut self.unchecked_slots);
slots_before_root
.into_iter()
.filter(|(optimistic_slot, hash)| {
(*optimistic_slot == root && *hash != root_bank.hash())
.filter(|(optimistic_slot, optimistic_hash)| {
(*optimistic_slot == root && *optimistic_hash != root_bank.hash())
|| (!root_ancestors.contains_key(&optimistic_slot) &&
// In this second part of the `and`, we account for the possibility that
// there was some other root `rootX` set in BankForks where:
@ -76,6 +76,10 @@ impl OptimisticConfirmationVerifier {
self.last_optimistic_slot_ts = Instant::now();
}
pub fn format_optimistic_confirmed_slot_violation_log(slot: Slot) -> String {
format!("Optimistically confirmed slot {} was not rooted", slot)
}
pub fn log_unrooted_optimistic_slots(
root_bank: &Bank,
vote_tracker: &VoteTracker,
@ -96,7 +100,7 @@ impl OptimisticConfirmationVerifier {
.unwrap_or(0);
error!(
"Optimistic slot {} was not rooted,
"{},
hash: {},
epoch: {},
voted keys: {:?},
@ -105,7 +109,7 @@ impl OptimisticConfirmationVerifier {
voted stake: {},
total epoch stake: {},
pct: {}",
optimistic_slot,
Self::format_optimistic_confirmed_slot_violation_log(*optimistic_slot),
hash,
epoch,
r_slot_tracker
@ -181,7 +185,8 @@ mod test {
.cloned()
.unwrap();
assert_eq!(
optimistic_confirmation_verifier.get_unrooted_optimistic_slots(&bank1, &blockstore),
optimistic_confirmation_verifier
.verify_for_unrooted_optimistic_slots(&bank1, &blockstore),
vec![(1, bad_bank_hash)]
);
assert_eq!(optimistic_confirmation_verifier.unchecked_slots.len(), 1);
@ -228,7 +233,7 @@ mod test {
.cloned()
.unwrap();
assert!(optimistic_confirmation_verifier
.get_unrooted_optimistic_slots(&bank5, &blockstore)
.verify_for_unrooted_optimistic_slots(&bank5, &blockstore)
.is_empty());
// 5 is >= than all the unchecked slots, so should clear everything
assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
@ -244,7 +249,7 @@ mod test {
.cloned()
.unwrap();
assert!(optimistic_confirmation_verifier
.get_unrooted_optimistic_slots(&bank3, &blockstore)
.verify_for_unrooted_optimistic_slots(&bank3, &blockstore)
.is_empty());
// 3 is bigger than only slot 1, so slot 5 should be left over
assert_eq!(optimistic_confirmation_verifier.unchecked_slots.len(), 1);
@ -264,7 +269,8 @@ mod test {
.cloned()
.unwrap();
assert_eq!(
optimistic_confirmation_verifier.get_unrooted_optimistic_slots(&bank4, &blockstore),
optimistic_confirmation_verifier
.verify_for_unrooted_optimistic_slots(&bank4, &blockstore),
vec![optimistic_slots[1]]
);
// 4 is bigger than only slots 1 and 3, so slot 5 should be left over
@ -303,7 +309,8 @@ mod test {
optimistic_confirmation_verifier
.add_new_optimistic_confirmed_slots(optimistic_slots.clone());
assert_eq!(
optimistic_confirmation_verifier.get_unrooted_optimistic_slots(&bank7, &blockstore),
optimistic_confirmation_verifier
.verify_for_unrooted_optimistic_slots(&bank7, &blockstore),
optimistic_slots[0..=1].to_vec()
);
assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
@ -312,7 +319,7 @@ mod test {
blockstore.set_roots(&[1, 3]).unwrap();
optimistic_confirmation_verifier.add_new_optimistic_confirmed_slots(optimistic_slots);
assert!(optimistic_confirmation_verifier
.get_unrooted_optimistic_slots(&bank7, &blockstore)
.verify_for_unrooted_optimistic_slots(&bank7, &blockstore)
.is_empty());
assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
}


@ -0,0 +1,294 @@
//! The `optimistically_confirmed_bank_tracker` module implements a threaded service to track the
//! most recent optimistically confirmed bank for use in rpc services, and triggers gossip
//! subscription notifications
use crate::rpc_subscriptions::RpcSubscriptions;
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::clock::Slot;
use std::{
collections::HashSet,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::{self, Builder, JoinHandle},
time::Duration,
};
pub struct OptimisticallyConfirmedBank {
pub bank: Arc<Bank>,
}
impl OptimisticallyConfirmedBank {
pub fn locked_from_bank_forks_root(bank_forks: &Arc<RwLock<BankForks>>) -> Arc<RwLock<Self>> {
Arc::new(RwLock::new(Self {
bank: bank_forks.read().unwrap().root_bank().clone(),
}))
}
}
pub enum BankNotification {
OptimisticallyConfirmed(Slot),
Frozen(Arc<Bank>),
Root(Arc<Bank>),
}
impl std::fmt::Debug for BankNotification {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
BankNotification::OptimisticallyConfirmed(slot) => {
write!(f, "OptimisticallyConfirmed({:?})", slot)
}
BankNotification::Frozen(bank) => write!(f, "Frozen({})", bank.slot()),
BankNotification::Root(bank) => write!(f, "Root({})", bank.slot()),
}
}
}
pub type BankNotificationReceiver = Receiver<BankNotification>;
pub type BankNotificationSender = Sender<BankNotification>;
pub struct OptimisticallyConfirmedBankTracker {
thread_hdl: JoinHandle<()>,
}
impl OptimisticallyConfirmedBankTracker {
pub fn new(
receiver: BankNotificationReceiver,
exit: &Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>,
subscriptions: Arc<RpcSubscriptions>,
) -> Self {
let exit_ = exit.clone();
let mut pending_optimistically_confirmed_banks = HashSet::new();
let thread_hdl = Builder::new()
.name("solana-optimistic-bank-tracker".to_string())
.spawn(move || loop {
if exit_.load(Ordering::Relaxed) {
break;
}
if let Err(RecvTimeoutError::Disconnected) = Self::recv_notification(
&receiver,
&bank_forks,
&optimistically_confirmed_bank,
&subscriptions,
&mut pending_optimistically_confirmed_banks,
) {
break;
}
})
.unwrap();
Self { thread_hdl }
}
fn recv_notification(
receiver: &Receiver<BankNotification>,
bank_forks: &Arc<RwLock<BankForks>>,
optimistically_confirmed_bank: &Arc<RwLock<OptimisticallyConfirmedBank>>,
subscriptions: &Arc<RpcSubscriptions>,
mut pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
) -> Result<(), RecvTimeoutError> {
let notification = receiver.recv_timeout(Duration::from_secs(1))?;
Self::process_notification(
notification,
bank_forks,
optimistically_confirmed_bank,
subscriptions,
&mut pending_optimistically_confirmed_banks,
);
Ok(())
}
pub(crate) fn process_notification(
notification: BankNotification,
bank_forks: &Arc<RwLock<BankForks>>,
optimistically_confirmed_bank: &Arc<RwLock<OptimisticallyConfirmedBank>>,
subscriptions: &Arc<RpcSubscriptions>,
pending_optimistically_confirmed_banks: &mut HashSet<Slot>,
) {
debug!("received bank notification: {:?}", notification);
match notification {
BankNotification::OptimisticallyConfirmed(slot) => {
if let Some(bank) = bank_forks
.read()
.unwrap()
.get(slot)
.filter(|b| b.is_frozen())
{
let mut w_optimistically_confirmed_bank =
optimistically_confirmed_bank.write().unwrap();
if bank.slot() > w_optimistically_confirmed_bank.bank.slot() {
w_optimistically_confirmed_bank.bank = bank.clone();
subscriptions.notify_gossip_subscribers(slot);
}
drop(w_optimistically_confirmed_bank);
} else if slot > bank_forks.read().unwrap().root_bank().slot() {
pending_optimistically_confirmed_banks.insert(slot);
}
}
BankNotification::Frozen(bank) => {
let frozen_slot = bank.slot();
if pending_optimistically_confirmed_banks.remove(&bank.slot()) {
let mut w_optimistically_confirmed_bank =
optimistically_confirmed_bank.write().unwrap();
if frozen_slot > w_optimistically_confirmed_bank.bank.slot() {
w_optimistically_confirmed_bank.bank = bank;
subscriptions.notify_gossip_subscribers(frozen_slot);
}
drop(w_optimistically_confirmed_bank);
}
}
BankNotification::Root(bank) => {
let root_slot = bank.slot();
let mut w_optimistically_confirmed_bank =
optimistically_confirmed_bank.write().unwrap();
if root_slot > w_optimistically_confirmed_bank.bank.slot() {
w_optimistically_confirmed_bank.bank = bank;
}
drop(w_optimistically_confirmed_bank);
pending_optimistically_confirmed_banks.retain(|&s| s > root_slot);
}
}
}
pub fn close(self) -> thread::Result<()> {
self.join()
}
pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join()
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_runtime::commitment::BlockCommitmentCache;
use solana_sdk::pubkey::Pubkey;
#[test]
fn test_process_notification() {
let exit = Arc::new(AtomicBool::new(false));
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100);
let bank = Bank::new(&genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bank1 = bank_forks.read().unwrap().get(1).unwrap().clone();
let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2);
bank_forks.write().unwrap().insert(bank2);
let bank2 = bank_forks.read().unwrap().get(2).unwrap().clone();
let bank3 = Bank::new_from_parent(&bank2, &Pubkey::default(), 3);
bank_forks.write().unwrap().insert(bank3);
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
block_commitment_cache,
optimistically_confirmed_bank.clone(),
));
let mut pending_optimistically_confirmed_banks = HashSet::new();
assert_eq!(optimistically_confirmed_bank.read().unwrap().bank.slot(), 0);
OptimisticallyConfirmedBankTracker::process_notification(
BankNotification::OptimisticallyConfirmed(2),
&bank_forks,
&optimistically_confirmed_bank,
&subscriptions,
&mut pending_optimistically_confirmed_banks,
);
assert_eq!(optimistically_confirmed_bank.read().unwrap().bank.slot(), 2);
// Test max optimistically confirmed bank remains in the cache
OptimisticallyConfirmedBankTracker::process_notification(
BankNotification::OptimisticallyConfirmed(1),
&bank_forks,
&optimistically_confirmed_bank,
&subscriptions,
&mut pending_optimistically_confirmed_banks,
);
assert_eq!(optimistically_confirmed_bank.read().unwrap().bank.slot(), 2);
// Test bank will only be cached when frozen
OptimisticallyConfirmedBankTracker::process_notification(
BankNotification::OptimisticallyConfirmed(3),
&bank_forks,
&optimistically_confirmed_bank,
&subscriptions,
&mut pending_optimistically_confirmed_banks,
);
assert_eq!(optimistically_confirmed_bank.read().unwrap().bank.slot(), 2);
assert_eq!(pending_optimistically_confirmed_banks.len(), 1);
assert_eq!(pending_optimistically_confirmed_banks.contains(&3), true);
// Test bank will only be cached when frozen
let bank3 = bank_forks.read().unwrap().get(3).unwrap().clone();
OptimisticallyConfirmedBankTracker::process_notification(
BankNotification::Frozen(bank3),
&bank_forks,
&optimistically_confirmed_bank,
&subscriptions,
&mut pending_optimistically_confirmed_banks,
);
assert_eq!(optimistically_confirmed_bank.read().unwrap().bank.slot(), 3);
// Test higher root will be cached and clear pending_optimistically_confirmed_banks
let bank3 = bank_forks.read().unwrap().get(3).unwrap().clone();
let bank4 = Bank::new_from_parent(&bank3, &Pubkey::default(), 4);
bank_forks.write().unwrap().insert(bank4);
OptimisticallyConfirmedBankTracker::process_notification(
BankNotification::OptimisticallyConfirmed(4),
&bank_forks,
&optimistically_confirmed_bank,
&subscriptions,
&mut pending_optimistically_confirmed_banks,
);
assert_eq!(optimistically_confirmed_bank.read().unwrap().bank.slot(), 3);
assert_eq!(pending_optimistically_confirmed_banks.len(), 1);
assert_eq!(pending_optimistically_confirmed_banks.contains(&4), true);
let bank4 = bank_forks.read().unwrap().get(4).unwrap().clone();
let bank5 = Bank::new_from_parent(&bank4, &Pubkey::default(), 5);
bank_forks.write().unwrap().insert(bank5);
let bank5 = bank_forks.read().unwrap().get(5).unwrap().clone();
OptimisticallyConfirmedBankTracker::process_notification(
BankNotification::Root(bank5),
&bank_forks,
&optimistically_confirmed_bank,
&subscriptions,
&mut pending_optimistically_confirmed_banks,
);
assert_eq!(optimistically_confirmed_bank.read().unwrap().bank.slot(), 5);
assert_eq!(pending_optimistically_confirmed_banks.len(), 0);
assert_eq!(pending_optimistically_confirmed_banks.contains(&4), false);
// Banks <= root do not get added to pending list, even if not frozen
let bank5 = bank_forks.read().unwrap().get(5).unwrap().clone();
let bank6 = Bank::new_from_parent(&bank5, &Pubkey::default(), 6);
bank_forks.write().unwrap().insert(bank6);
let bank5 = bank_forks.read().unwrap().get(5).unwrap().clone();
let bank7 = Bank::new_from_parent(&bank5, &Pubkey::default(), 7);
bank_forks.write().unwrap().insert(bank7);
bank_forks.write().unwrap().set_root(7, &None, None);
OptimisticallyConfirmedBankTracker::process_notification(
BankNotification::OptimisticallyConfirmed(6),
&bank_forks,
&optimistically_confirmed_bank,
&subscriptions,
&mut pending_optimistically_confirmed_banks,
);
assert_eq!(optimistically_confirmed_bank.read().unwrap().bank.slot(), 5);
assert_eq!(pending_optimistically_confirmed_banks.len(), 0);
assert_eq!(pending_optimistically_confirmed_banks.contains(&6), false);
}
}
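Per the module comment at the top of this new file, the tracker is a thread fed by a channel of BankNotification values; the replay-stage changes later in this compare are what send Frozen and Root into it. Below is a minimal wiring sketch under the same setup as test_process_notification above; the unbounded channel choice and the spawn_tracker helper name are illustrative assumptions, not part of this diff.

// Sketch only: assumes the imports at the top of this file plus crossbeam_channel::unbounded.
fn spawn_tracker(
    exit: &Arc<AtomicBool>,
    bank_forks: Arc<RwLock<BankForks>>,
    optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>,
    subscriptions: Arc<RpcSubscriptions>,
) -> (BankNotificationSender, OptimisticallyConfirmedBankTracker) {
    let (sender, receiver) = crossbeam_channel::unbounded();
    let tracker = OptimisticallyConfirmedBankTracker::new(
        receiver,
        exit,
        bank_forks,
        optimistically_confirmed_bank,
        subscriptions,
    );
    // Producers then report state transitions, e.g.
    //   sender.send(BankNotification::OptimisticallyConfirmed(slot));
    // and the tracker shuts down once `exit` is set and `tracker.join()` is called.
    (sender, tracker)
}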

core/src/ping_pong.rs

@ -0,0 +1,400 @@
use bincode::{serialize, Error};
use lru::LruCache;
use rand::{AsByteSliceMut, CryptoRng, Rng};
use serde::Serialize;
use solana_sdk::hash::{self, Hash};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::sanitize::{Sanitize, SanitizeError};
use solana_sdk::signature::{Keypair, Signable, Signature, Signer};
use std::borrow::Cow;
use std::net::SocketAddr;
use std::time::{Duration, Instant};
#[derive(AbiExample, Debug, Deserialize, Serialize)]
pub struct Ping<T> {
from: Pubkey,
token: T,
signature: Signature,
}
#[derive(AbiExample, Debug, Deserialize, Serialize)]
pub struct Pong {
from: Pubkey,
hash: Hash, // Hash of received ping token.
signature: Signature,
}
/// Maintains records of remote nodes which have returned a valid response to a
/// ping message, and on-the-fly ping messages pending a pong response from the
/// remote node.
pub struct PingCache {
// Time-to-live of received pong messages.
ttl: Duration,
// Timestamp of last ping message sent to a remote node.
// Used to rate limit pings to remote nodes.
pings: LruCache<(Pubkey, SocketAddr), Instant>,
// Verified pong responses from remote nodes.
pongs: LruCache<(Pubkey, SocketAddr), Instant>,
// Hash of ping tokens sent out to remote nodes,
// pending a pong response back.
pending_cache: LruCache<Hash, (Pubkey, SocketAddr)>,
}
impl<T: Serialize> Ping<T> {
pub fn new(token: T, keypair: &Keypair) -> Result<Self, Error> {
let signature = keypair.sign_message(&serialize(&token)?);
let ping = Ping {
from: keypair.pubkey(),
token,
signature,
};
Ok(ping)
}
}
impl<T> Ping<T>
where
T: Serialize + AsByteSliceMut + Default,
{
pub fn new_rand<R>(rng: &mut R, keypair: &Keypair) -> Result<Self, Error>
where
R: Rng + CryptoRng,
{
let mut token = T::default();
rng.fill(&mut token);
Ping::new(token, keypair)
}
}
impl<T> Sanitize for Ping<T> {
fn sanitize(&self) -> Result<(), SanitizeError> {
self.from.sanitize()?;
// TODO Add self.token.sanitize()?; when rust's
// specialization feature becomes stable.
self.signature.sanitize()
}
}
impl<T: Serialize> Signable for Ping<T> {
fn pubkey(&self) -> Pubkey {
self.from
}
fn signable_data(&self) -> Cow<[u8]> {
Cow::Owned(serialize(&self.token).unwrap())
}
fn get_signature(&self) -> Signature {
self.signature
}
fn set_signature(&mut self, signature: Signature) {
self.signature = signature;
}
}
impl Pong {
pub fn new<T: Serialize>(ping: &Ping<T>, keypair: &Keypair) -> Result<Self, Error> {
let hash = hash::hash(&serialize(&ping.token)?);
let pong = Pong {
from: keypair.pubkey(),
hash,
signature: keypair.sign_message(hash.as_ref()),
};
Ok(pong)
}
}
impl Sanitize for Pong {
fn sanitize(&self) -> Result<(), SanitizeError> {
self.from.sanitize()?;
self.hash.sanitize()?;
self.signature.sanitize()
}
}
impl Signable for Pong {
fn pubkey(&self) -> Pubkey {
self.from
}
fn signable_data(&self) -> Cow<[u8]> {
Cow::Owned(self.hash.as_ref().into())
}
fn get_signature(&self) -> Signature {
self.signature
}
fn set_signature(&mut self, signature: Signature) {
self.signature = signature;
}
}
impl PingCache {
pub fn new(ttl: Duration, cap: usize) -> Self {
Self {
ttl,
pings: LruCache::new(cap),
pongs: LruCache::new(cap),
pending_cache: LruCache::new(cap),
}
}
/// Checks if the pong hash, pubkey and socket match a ping message sent
/// out previously. If so, records the current timestamp for the remote node
/// and returns true.
/// Note: Does not verify the signature.
pub fn add(&mut self, pong: &Pong, socket: SocketAddr, now: Instant) -> bool {
let node = (pong.pubkey(), socket);
match self.pending_cache.peek(&pong.hash) {
Some(value) if *value == node => {
self.pings.pop(&node);
self.pongs.put(node, now);
self.pending_cache.pop(&pong.hash);
true
}
_ => false,
}
}
/// Checks if the remote node has been pinged recently. If not, calls the
/// given function to generate a new ping message, records the current
/// timestamp and hash of the ping token, and returns the ping message.
fn maybe_ping<T, F>(
&mut self,
now: Instant,
node: (Pubkey, SocketAddr),
mut pingf: F,
) -> Option<Ping<T>>
where
T: Serialize,
F: FnMut() -> Option<Ping<T>>,
{
// Rate limit consecutive pings sent to a remote node.
let delay = self.ttl / 64;
match self.pings.peek(&node) {
Some(t) if now.saturating_duration_since(*t) < delay => None,
_ => {
let ping = pingf()?;
let hash = hash::hash(&serialize(&ping.token).ok()?);
self.pings.put(node, now);
self.pending_cache.put(hash, node);
Some(ping)
}
}
}
/// Returns true if the remote node has responded to a ping message.
/// Removes expired pong messages. In order to extend verifications before
/// expiration, if the pong message is not too recent and the node has not
/// been pinged recently, calls the given function to generate a new ping
/// message, records the current timestamp and hash of the ping token, and
/// returns the ping message.
/// Caller should verify that the socket address is valid (e.g. by using
/// ContactInfo::is_valid_address).
pub fn check<T, F>(
&mut self,
now: Instant,
node: (Pubkey, SocketAddr),
pingf: F,
) -> (bool, Option<Ping<T>>)
where
T: Serialize,
F: FnMut() -> Option<Ping<T>>,
{
let (check, should_ping) = match self.pongs.get(&node) {
None => (false, true),
Some(t) => {
let age = now.saturating_duration_since(*t);
// Pop if the pong message has expired.
if age > self.ttl {
self.pongs.pop(&node);
}
// If the pong message is not too recent, generate a new ping
// message to extend remote node verification.
(true, age > self.ttl / 8)
}
};
let ping = if should_ping {
self.maybe_ping(now, node, pingf)
} else {
None
};
(check, ping)
}
// Only for tests and simulations.
pub(crate) fn mock_clone(&self) -> Self {
let mut clone = Self {
ttl: self.ttl,
pings: LruCache::new(self.pings.cap()),
pongs: LruCache::new(self.pongs.cap()),
pending_cache: LruCache::new(self.pending_cache.cap()),
};
for (k, v) in self.pings.iter().rev() {
clone.pings.put(*k, *v);
}
for (k, v) in self.pongs.iter().rev() {
clone.pongs.put(*k, *v);
}
for (k, v) in self.pending_cache.iter().rev() {
clone.pending_cache.put(*k, *v);
}
clone
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashSet;
use std::iter::repeat_with;
use std::net::{Ipv4Addr, SocketAddrV4};
type Token = [u8; 32];
#[test]
fn test_ping_pong() {
let mut rng = rand::thread_rng();
let keypair = Keypair::new();
let ping = Ping::<Token>::new_rand(&mut rng, &keypair).unwrap();
assert!(ping.verify());
assert!(ping.sanitize().is_ok());
let pong = Pong::new(&ping, &keypair).unwrap();
assert!(pong.verify());
assert!(pong.sanitize().is_ok());
assert_eq!(hash::hash(&ping.token), pong.hash);
}
#[test]
fn test_ping_cache() {
let now = Instant::now();
let mut rng = rand::thread_rng();
let ttl = Duration::from_millis(256);
let mut cache = PingCache::new(ttl, /*cap=*/ 1000);
let this_node = Keypair::new();
let keypairs: Vec<_> = repeat_with(Keypair::new).take(8).collect();
let sockets: Vec<_> = repeat_with(|| {
SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(rng.gen(), rng.gen(), rng.gen(), rng.gen()),
rng.gen(),
))
})
.take(8)
.collect();
let remote_nodes: Vec<(&Keypair, SocketAddr)> = repeat_with(|| {
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
let socket = sockets[rng.gen_range(0, sockets.len())];
(keypair, socket)
})
.take(128)
.collect();
// Initially all checks should fail. The first observation of each node
// should create a ping packet.
let mut seen_nodes = HashSet::<(Pubkey, SocketAddr)>::new();
let pings: Vec<Option<Ping<Token>>> = remote_nodes
.iter()
.map(|(keypair, socket)| {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
assert!(!check);
assert_eq!(seen_nodes.insert(node), ping.is_some());
ping
})
.collect();
let now = now + Duration::from_millis(1);
let panic_ping = || -> Option<Ping<Token>> { panic!("this should not happen!") };
for ((keypair, socket), ping) in remote_nodes.iter().zip(&pings) {
match ping {
None => {
// Already have a recent ping packet for this node, so no new
// ping packet will be generated.
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(check);
assert!(ping.is_none());
}
Some(ping) => {
let pong = Pong::new(ping, keypair).unwrap();
assert!(cache.add(&pong, *socket, now));
}
}
}
let now = now + Duration::from_millis(1);
// All nodes now have a recent pong packet.
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(check);
assert!(ping.is_none());
}
let now = now + ttl / 8;
// All nodes still have a valid pong packet, but the cache will create
// a new ping packet to extend verification.
seen_nodes.clear();
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
assert!(check);
assert_eq!(seen_nodes.insert(node), ping.is_some());
}
let now = now + Duration::from_millis(1);
// All nodes still have a valid pong packet, and a very recent ping
// packet pending response. So no new ping packet will be created.
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(check);
assert!(ping.is_none());
}
let now = now + ttl;
// Pong packets are still valid but expired. The first observation of
// each node will remove the pong packet from cache and create a new
// ping packet.
seen_nodes.clear();
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
if seen_nodes.insert(node) {
assert!(check);
assert!(ping.is_some());
} else {
assert!(!check);
assert!(ping.is_none());
}
}
let now = now + Duration::from_millis(1);
// No valid pong packet in the cache. A recent ping packet already
// created, so no new one will be created.
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(!check);
assert!(ping.is_none());
}
let now = now + ttl / 64;
// No valid pong packet in the cache. Another ping packet will be
// created for the first observation of each node.
seen_nodes.clear();
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
assert!(!check);
assert_eq!(seen_nodes.insert(node), ping.is_some());
}
}
}
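Taken together, the doc comments above describe the caller-side protocol: check() reports whether a node is currently verified and may hand back a freshly signed ping to send, while add() records the matching pong once it arrives. Below is a minimal sketch of that flow, mirroring the tests above; the verify_or_ping helper and the omitted transport are illustrative assumptions, not part of this diff.

// Sketch only: assumes the imports at the top of this file.
type Token = [u8; 32];

fn verify_or_ping(
    cache: &mut PingCache,
    identity: &Keypair,          // this node's keypair, used to sign outgoing pings
    node: (Pubkey, SocketAddr),  // remote node being checked
) -> bool {
    let mut rng = rand::thread_rng();
    let pingf = || Ping::<Token>::new_rand(&mut rng, identity).ok();
    let (verified, ping) = cache.check(Instant::now(), node, pingf);
    if let Some(ping) = ping {
        // Send `ping` to node.1 over the wire; when the signed pong comes back,
        // record it with cache.add(&pong, node.1, Instant::now()).
        let _ = ping;
    }
    verified
}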


@ -401,7 +401,7 @@ mod test {
fn test_add_vote_pubkey() {
let mut stats = PropagatedStats::default();
let mut all_pubkeys = PubkeyReferences::default();
let mut vote_pubkey = Pubkey::new_rand();
let mut vote_pubkey = solana_sdk::pubkey::new_rand();
all_pubkeys.get_or_insert(&vote_pubkey);
// Add a vote pubkey, the number of references in all_pubkeys
@ -420,7 +420,7 @@ mod test {
assert_eq!(stats.propagated_validators_stake, 1);
// Adding another pubkey should succeed
vote_pubkey = Pubkey::new_rand();
vote_pubkey = solana_sdk::pubkey::new_rand();
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 3);
@ -434,7 +434,7 @@ mod test {
fn test_add_node_pubkey_internal() {
let num_vote_accounts = 10;
let staked_vote_accounts = 5;
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand)
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
.take(num_vote_accounts)
.collect();
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
@ -445,7 +445,7 @@ mod test {
let mut stats = PropagatedStats::default();
let mut all_pubkeys = PubkeyReferences::default();
let mut node_pubkey = Pubkey::new_rand();
let mut node_pubkey = solana_sdk::pubkey::new_rand();
all_pubkeys.get_or_insert(&node_pubkey);
// Add a vote pubkey, the number of references in all_pubkeys
@ -481,7 +481,7 @@ mod test {
// Adding another pubkey with same vote accounts should succeed, but stake
// shouldn't increase
node_pubkey = Pubkey::new_rand();
node_pubkey = solana_sdk::pubkey::new_rand();
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
@ -500,8 +500,8 @@ mod test {
// Adding another pubkey with different vote accounts should succeed
// and increase stake
node_pubkey = Pubkey::new_rand();
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand)
node_pubkey = solana_sdk::pubkey::new_rand();
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
.take(num_vote_accounts)
.collect();
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys


@ -11,6 +11,7 @@ use crate::{
consensus::{ComputedBankState, Stake, SwitchForkDecision, Tower, VotedStakes},
fork_choice::{ForkChoice, SelectVoteAndResetForkResult},
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
progress_map::{ForkProgress, ProgressMap, PropagatedStats},
pubkey_references::PubkeyReferences,
@ -108,6 +109,7 @@ pub struct ReplayStageConfig {
pub transaction_status_sender: Option<TransactionStatusSender>,
pub rewards_recorder_sender: Option<RewardsRecorderSender>,
pub cache_block_time_sender: Option<CacheBlockTimeSender>,
pub bank_notification_sender: Option<BankNotificationSender>,
}
#[derive(Default)]
@ -238,6 +240,7 @@ impl ReplayStage {
transaction_status_sender,
rewards_recorder_sender,
cache_block_time_sender,
bank_notification_sender,
} = config;
trace!("replay stage");
@ -324,7 +327,6 @@ impl ReplayStage {
&bank_forks,
&leader_schedule_cache,
&subscriptions,
rewards_recorder_sender.clone(),
&mut progress,
&mut all_pubkeys,
);
@ -344,8 +346,9 @@ impl ReplayStage {
transaction_status_sender.clone(),
&verify_recyclers,
&mut heaviest_subtree_fork_choice,
&subscriptions,
&replay_vote_sender,
&bank_notification_sender,
&rewards_recorder_sender,
);
replay_active_banks_time.stop();
Self::report_memory(&allocated, "replay_active_banks", start);
@ -498,6 +501,7 @@ impl ReplayStage {
&block_commitment_cache,
&mut heaviest_subtree_fork_choice,
&cache_block_time_sender,
&bank_notification_sender,
)?;
};
voting_time.stop();
@ -586,7 +590,6 @@ impl ReplayStage {
&poh_recorder,
&leader_schedule_cache,
&subscriptions,
rewards_recorder_sender.clone(),
&progress,
&retransmit_slots_sender,
&mut skipped_slots_info,
@ -827,7 +830,6 @@ impl ReplayStage {
poh_recorder: &Arc<Mutex<PohRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
subscriptions: &Arc<RpcSubscriptions>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
progress_map: &ProgressMap,
retransmit_slots_sender: &RetransmitSlotsSender,
skipped_slots_info: &mut SkippedSlotsInfo,
@ -926,7 +928,6 @@ impl ReplayStage {
poh_slot,
root_slot,
my_pubkey,
&rewards_recorder_sender,
subscriptions,
);
@ -1009,6 +1010,7 @@ impl ReplayStage {
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
cache_block_time_sender: &Option<CacheBlockTimeSender>,
bank_notification_sender: &Option<BankNotificationSender>,
) -> Result<()> {
if bank.is_empty() {
inc_new_counter_info!("replay_stage-voted_empty_bank", 1);
@ -1024,7 +1026,7 @@ impl ReplayStage {
.expect("Root bank doesn't exist")
.clone();
let mut rooted_banks = root_bank.parents();
rooted_banks.push(root_bank);
rooted_banks.push(root_bank.clone());
let rooted_slots: Vec<_> = rooted_banks.iter().map(|bank| bank.slot()).collect();
// Call leader schedule_cache.set_root() before blockstore.set_root() because
// bank_forks.root is consumed by repair_service to update gossip, so we don't want to
@ -1056,6 +1058,11 @@ impl ReplayStage {
heaviest_subtree_fork_choice,
);
subscriptions.notify_roots(rooted_slots);
if let Some(sender) = bank_notification_sender {
sender
.send(BankNotification::Root(root_bank))
.unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
}
latest_root_senders.iter().for_each(|s| {
if let Err(e) = s.send(new_root) {
trace!("latest root send failed: {:?}", e);
@ -1222,8 +1229,9 @@ impl ReplayStage {
transaction_status_sender: Option<TransactionStatusSender>,
verify_recyclers: &VerifyRecyclers,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
subscriptions: &Arc<RpcSubscriptions>,
replay_vote_sender: &ReplayVoteSender,
bank_notification_sender: &Option<BankNotificationSender>,
rewards_recorder_sender: &Option<RewardsRecorderSender>,
) -> bool {
let mut did_complete_bank = false;
let mut tx_count = 0;
@ -1294,7 +1302,13 @@ impl ReplayStage {
bank.freeze();
heaviest_subtree_fork_choice
.add_new_leaf_slot(bank.slot(), Some(bank.parent_slot()));
subscriptions.notify_frozen(bank.slot());
if let Some(sender) = bank_notification_sender {
sender
.send(BankNotification::Frozen(bank.clone()))
.unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
}
Self::record_rewards(&bank, &rewards_recorder_sender);
} else {
trace!(
"bank {} not completed tick_height: {}, max_tick_height: {}",
@ -1772,7 +1786,6 @@ impl ReplayStage {
bank_forks: &RwLock<BankForks>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
subscriptions: &Arc<RpcSubscriptions>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
progress: &mut ProgressMap,
all_pubkeys: &mut PubkeyReferences,
) {
@ -1818,7 +1831,6 @@ impl ReplayStage {
child_slot,
forks.root(),
&leader,
&rewards_recorder_sender,
subscriptions,
);
let empty: Vec<&Pubkey> = vec![];
@ -1846,21 +1858,18 @@ impl ReplayStage {
slot: u64,
root_slot: u64,
leader: &Pubkey,
rewards_recorder_sender: &Option<RewardsRecorderSender>,
subscriptions: &Arc<RpcSubscriptions>,
) -> Bank {
subscriptions.notify_slot(slot, parent.slot(), root_slot);
let child_bank = Bank::new_from_parent(parent, leader, slot);
Self::record_rewards(&child_bank, &rewards_recorder_sender);
child_bank
Bank::new_from_parent(parent, leader, slot)
}
fn record_rewards(bank: &Bank, rewards_recorder_sender: &Option<RewardsRecorderSender>) {
if let Some(rewards_recorder_sender) = rewards_recorder_sender {
if let Some(ref rewards) = bank.rewards {
let rewards = bank.rewards.read().unwrap();
if !rewards.is_empty() {
rewards_recorder_sender
.send((bank.slot(), rewards.iter().copied().collect()))
.send((bank.slot(), rewards.clone()))
.unwrap_or_else(|err| warn!("rewards_recorder_sender failed: {:?}", err));
}
}
@ -1930,6 +1939,7 @@ pub(crate) mod tests {
use crate::{
consensus::test::{initialize_state, VoteSimulator},
consensus::Tower,
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
progress_map::ValidatorStakeInfo,
replay_stage::ReplayStage,
transaction_status_service::TransactionStatusService,
@ -1961,7 +1971,7 @@ pub(crate) mod tests {
system_transaction,
transaction::TransactionError,
};
use solana_transaction_status::{EncodedTransaction, TransactionWithStatusMeta};
use solana_transaction_status::TransactionWithStatusMeta;
use solana_vote_program::{
vote_state::{VoteState, VoteStateVersions},
vote_transaction,
@ -2042,11 +2052,14 @@ pub(crate) mod tests {
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
// RpcSubscriptions
let optimistically_confirmed_bank =
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let exit = Arc::new(AtomicBool::new(false));
let rpc_subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
bank_forks.clone(),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
optimistically_confirmed_bank,
));
ReplayBlockstoreComponents {
@ -2106,7 +2119,6 @@ pub(crate) mod tests {
&bank_forks,
&leader_schedule_cache,
&rpc_subscriptions,
None,
&mut progress,
&mut PubkeyReferences::default(),
);
@ -2130,7 +2142,6 @@ pub(crate) mod tests {
&bank_forks,
&leader_schedule_cache,
&rpc_subscriptions,
None,
&mut progress,
&mut PubkeyReferences::default(),
);
@ -2512,7 +2523,7 @@ pub(crate) mod tests {
bank.store_account(&pubkey, &leader_vote_account);
}
let leader_pubkey = Pubkey::new_rand();
let leader_pubkey = solana_sdk::pubkey::new_rand();
let leader_lamports = 3;
let genesis_config_info =
create_genesis_config_with_leader(50, &leader_pubkey, leader_lamports);
@ -2537,6 +2548,7 @@ pub(crate) mod tests {
&exit,
bank_forks.clone(),
block_commitment_cache.clone(),
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
));
let (lockouts_sender, _) =
AggregateCommitmentService::new(&exit, block_commitment_cache.clone(), subscriptions);
@ -2553,7 +2565,11 @@ pub(crate) mod tests {
.is_none());
let bank1 = Bank::new_from_parent(&arc_bank0, &Pubkey::default(), arc_bank0.slot() + 1);
let _res = bank1.transfer(10, &genesis_config_info.mint_keypair, &Pubkey::new_rand());
let _res = bank1.transfer(
10,
&genesis_config_info.mint_keypair,
&solana_sdk::pubkey::new_rand(),
);
for _ in 0..genesis_config.ticks_per_slot {
bank1.register_tick(&Hash::default());
}
@ -2569,7 +2585,11 @@ pub(crate) mod tests {
);
let bank2 = Bank::new_from_parent(&arc_bank1, &Pubkey::default(), arc_bank1.slot() + 1);
let _res = bank2.transfer(10, &genesis_config_info.mint_keypair, &Pubkey::new_rand());
let _res = bank2.transfer(
10,
&genesis_config_info.mint_keypair,
&solana_sdk::pubkey::new_rand(),
);
for _ in 0..genesis_config.ticks_per_slot {
bank2.register_tick(&Hash::default());
}
@ -2701,36 +2721,26 @@ pub(crate) mod tests {
blockstore.clone(),
);
let confirmed_block = blockstore.get_confirmed_block(slot, None).unwrap();
let confirmed_block = blockstore.get_confirmed_block(slot).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for TransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if let EncodedTransaction::Json(transaction) = transaction {
if transaction.signatures[0] == signatures[0].to_string() {
let meta = meta.unwrap();
assert_eq!(meta.err, None);
assert_eq!(meta.status, Ok(()));
} else if transaction.signatures[0] == signatures[1].to_string() {
let meta = meta.unwrap();
assert_eq!(
meta.err,
Some(TransactionError::InstructionError(
0,
InstructionError::Custom(1)
))
);
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::Custom(1)
))
);
} else {
assert_eq!(meta, None);
}
if transaction.signatures[0] == signatures[0] {
let meta = meta.unwrap();
assert_eq!(meta.status, Ok(()));
} else if transaction.signatures[0] == signatures[1] {
let meta = meta.unwrap();
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::Custom(1)
))
);
} else {
assert_eq!(meta, None);
}
}
}

Some files were not shown because too many files have changed in this diff.