Compare commits


163 Commits

Author SHA1 Message Date
mergify[bot]
25141288f4 Fix typos (#13334) (#13335)
(cherry picked from commit af9a3f004e)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-01 07:06:46 +00:00
mergify[bot]
b28d10d46f Add bank timestamp bounding (bp #13120) (#13331)
* Add bounding feature

(cherry picked from commit 96b8aa8bd1)

* Repurpose unused as Clock::epoch_start_timestamp; add gated update

(cherry picked from commit 0049ab69fb)

* Add bounded timestamp-estimation method

(cherry picked from commit 80db6c0980)

* Use bounded timestamp-correction when feature enabled

(cherry picked from commit 90778615f6)

* Prevent block times from ever going backward

(cherry picked from commit eb2560e782)

* Sample votes from ancestors back to root

(cherry picked from commit 4260b3b416)

* Add Clock sysvar details, update struct docs

(cherry picked from commit 3a1e125ce3)

* Add design proposal and update validator-timestamp-oracle

(cherry picked from commit a3912bc084)

* Adapt to feature::create_account

Co-authored-by: Tyera Eulberg <tyera@solana.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-01 06:02:34 +00:00
mergify[bot]
b6dc48da75 Add solana-program-test crate (bp #13324) (#13329)
* MockInvokeContext::get_programs() implementation

(cherry picked from commit 8acc47ee1b)

* start_local_server() now works with Banks > 0

(cherry picked from commit fa4bab4608)

* Add solana-program-test crate

(cherry picked from commit 52a292a75b)

* rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-01 05:43:43 +00:00
mergify[bot]
f2d929c12d Move Feature struct to solana-program (#13321)
(cherry picked from commit 4b65e32f22)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-31 20:33:35 +00:00
Michael Vines
c49b89091a cargo update -p futures-task / cargo update -p futures-util 2020-10-31 18:50:51 +00:00
Michael Vines
23fe3a86d9 Switch to dirs-next 2020-10-31 18:50:51 +00:00
Michael Vines
2f778725d6 Ignore stdweb 2020-10-31 18:50:51 +00:00
mergify[bot]
93a119a51e Print the entry type as well when checking archive (#13312) (#13314)
(cherry picked from commit bc7133d752)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-30 17:48:23 +00:00
Jack May
65a7b536c9 Update AccountInfo comments (#13302)
(cherry picked from commit 72d41e5801)
2020-10-30 08:09:37 -07:00
mergify[bot]
1281483a8c Fix tower/blockstore unsync due to external causes (#12671) (#13310)
* Fix tower/blockstore unsync due to external causes

* Add and clean up long comments

* Clean up test

* Comment about warped_slot_history

* Run test_future_tower with master-only/master-slave

* Update comments about false leader condition

(cherry picked from commit 1df15d85c3)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-30 11:47:22 +00:00
mergify[bot]
4312841433 de-mut some InvokeContext methods (bp #13301) (#13309)
* de-mut some InvokeContext methods

(cherry picked from commit da9548fd12)

* Simplify CPI interface into MessageProcessor

(cherry picked from commit 9263ae1c60)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-30 10:35:59 +00:00
mergify[bot]
b859acbfea Upgrade tarpc and tokio (bp #13293) (#13300)
* Upgrade tarpc and tokio (#13293)

(cherry picked from commit ca00197009)

# Conflicts:
#	banks-client/Cargo.toml
#	banks-interface/Cargo.toml
#	banks-server/Cargo.toml

* rebase

Co-authored-by: Greg Fitzgerald <greg@solana.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-30 08:05:27 +00:00
mergify[bot]
40a3885d3b Native/builtin programs now receive an InvokeContext (bp #13286) (#13298)
* Native/builtin programs now receive an InvokeContext

(cherry picked from commit df8dab9d2b)

* Remove MessageProcessor::loaders

(cherry picked from commit 2664a1f7ef)

* Remove Entrypoint type

(cherry picked from commit 225bed11c7)

* Remove programs clone()

(cherry picked from commit 33884d847a)

* Add sol_log_compute_units syscall

(cherry picked from commit 66e51a7363)

* Add Bank::set_bpf_compute_budget()

(cherry picked from commit 7d686b72a0)

* Rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-30 07:47:17 +00:00
Alexander Meißner
36b7c2ea97 Refactors the common code of test and bench targets into the solana_runtime::bpf_test_utils module. (#13203)
(cherry picked from commit 65ee3a6bdd)
2020-10-29 22:03:09 -07:00
mergify[bot]
24bd4ff6d4 clarify comment (#13289) (#13292)
(cherry picked from commit b5c8b86e7c)

Co-authored-by: Jack May <jack@solana.com>
2020-10-29 22:38:26 +00:00
mergify[bot]
69b3f10207 move Account to solana-sdk (bp #13198) (#13269)
* move Account to solana-sdk (#13198)

(cherry picked from commit c458d4b213)

# Conflicts:
#	programs/bpf/benches/bpf_loader.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-29 19:16:52 +00:00
mergify[bot]
9922f09a1d adds more parallel processing to gossip packets handling (#12988) (#13282)
(cherry picked from commit 3738611f5c)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-29 16:47:37 +00:00
mergify[bot]
38a99c0c25 Disable eager rent collection for less noise (#13275) (#13280)
(cherry picked from commit 363c148dbe)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-29 15:49:02 +00:00
mergify[bot]
7031235714 excludes origin from prune set (#13204) (#13276)
On the receiving end, prune messages are ignored if the origin points to
the node itself:
https://github.com/solana-labs/solana/blob/631f029fe/core/src/crds_gossip_push.rs#L285-L295
So to avoid sending these over the wire, the requester can exclude
origin from the prune set.

(cherry picked from commit be80f6d5c5)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-29 14:19:06 +00:00
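
A minimal sketch of the filtering described above, assuming a stand-in Pubkey type rather than solana_sdk::pubkey::Pubkey (the real change lives in the gossip push code):

```rust
use std::collections::HashSet;

/// Stand-in for solana_sdk::pubkey::Pubkey.
type Pubkey = [u8; 32];

/// Drop `origin` from the prune set before it is sent over the wire, since
/// the receiving node ignores prune entries that point at itself.
fn filter_prune_set(origin: &Pubkey, prunes: HashSet<Pubkey>) -> Vec<Pubkey> {
    prunes.into_iter().filter(|p| p != origin).collect()
}

fn main() {
    let origin = [1u8; 32];
    let mut prunes: HashSet<Pubkey> = HashSet::new();
    prunes.insert(origin);    // would be ignored by the receiver anyway
    prunes.insert([2u8; 32]); // a legitimate prune target
    let wire = filter_prune_set(&origin, prunes);
    assert_eq!(wire.len(), 1); // origin dropped, saving wire bytes
}
```
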
mergify[bot]
dfb2356a9a Update FeatureSet::active to include slot-activated (#13256) (#13263)
* Update FeatureSet::active to include slot-activated

* Clippy suggestion

(cherry picked from commit c2dbf53d76)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-29 03:42:42 +00:00
mergify[bot]
010794806a Finer grained AccountsIndex locking (#12787) (#13240)
Co-authored-by: Carl Lin <carl@solana.com>

Co-authored-by: carllin <wumu727@gmail.com>
Co-authored-by: Carl Lin <carl@solana.com>
2020-10-28 23:46:54 +00:00
mergify[bot]
6f95d5f72a Update links from sdk to program (#13248) (#13249)
(cherry picked from commit db9ddc7e5b)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-28 21:32:12 +00:00
mergify[bot]
2720b939fd Calculate accounts hash async in accounts background service (#12852) (#13244)
(cherry picked from commit 456eae6ccb)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-28 21:20:07 +00:00
mergify[bot]
a25c3fcf7d Add doc page on sysvar accounts (#13237) (#13246)
* Add doc page on sysvar accounts

* Update with suggestions

(cherry picked from commit 664b6125b6)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-28 21:16:10 +00:00
mergify[bot]
7cc4810174 docs: Metrics update (bp #13239) (#13241)
* docs: Remove stale metrics steps

(cherry picked from commit 4dc4fefee2)

* docs: Reference metrics envvars for each cluster

(cherry picked from commit eb597cd60f)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-28 19:46:59 +00:00
mergify[bot]
c1a55bf249 Improve final report of ledger-tool capitalization (#13232) (#13236)
(cherry picked from commit 4698ee5e4a)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-28 18:45:19 +00:00
mergify[bot]
f19778b7d9 implements ping-pong packets between nodes (#12794) (#13234)
https://hackerone.com/reports/991106

> It’s possible to use UDP gossip protocol to amplify DDoS attacks. An attacker
> can spoof IP address in UDP packet when sending PullRequest to the node.
> There's no any validation if provided source IP address is not spoofed and
> the node can send much larger PullResponse to victim's IP. As I checked,
> PullRequest is about 290 bytes, while PullResponse is about 10 kB. It means
> that amplification is about 34x. This way an attacker can easily perform DDoS
> attack both on Solana node and third-party server.
>
> To prevent it, need for example to implement ping-pong mechanism similar as
> in Ethereum: Before accepting requests from remote client needs to validate
> his IP. Local node sends Ping packet to the remote node and it needs to reply
> with Pong packet that contains hash of matching Ping packet. Content of Ping
> packet is unpredictable. If hash from Pong packet matches, local node can
> remember IP where Ping packet was sent as correct and allow further
> communication.
>
> More info:
> https://github.com/ethereum/devp2p/blob/master/discv4.md#endpoint-proof
> https://github.com/ethereum/devp2p/blob/master/discv4.md#wire-protocol

The commit adds a PingCache, which maintains records of remote nodes
which have returned a valid response to a ping message, and on-the-fly
ping messages pending a pong response from the remote node.

When handling pull-requests, those from addresses which have not passed
the ping-pong check are filtered out, and additionally ping packets are
added for addresses which need to be (re)verified.

(cherry picked from commit ae91270961)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-28 18:36:28 +00:00
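
A rough sketch of the gating described above, with simplified types; the actual PingCache in this commit also tracks in-flight pings, hashes the ping token into the pong, and rate-limits how often a peer is re-pinged:

```rust
use std::collections::HashMap;
use std::net::SocketAddr;
use std::time::{Duration, Instant};

/// Remembers which remote addresses have recently proven ownership of their
/// IP by answering a ping with a matching pong.
struct PingCache {
    ttl: Duration,
    verified: HashMap<SocketAddr, Instant>, // addr -> time of last valid pong
}

impl PingCache {
    fn new(ttl: Duration) -> Self {
        Self { ttl, verified: HashMap::new() }
    }

    /// Record a valid pong from `addr`.
    fn record_pong(&mut self, addr: SocketAddr) {
        self.verified.insert(addr, Instant::now());
    }

    /// True if pull requests from `addr` may be served; otherwise the caller
    /// drops the request and (re)sends a ping to `addr`.
    fn check(&self, addr: &SocketAddr, now: Instant) -> bool {
        self.verified
            .get(addr)
            .map(|t| now.duration_since(*t) < self.ttl)
            .unwrap_or(false)
    }
}

fn main() {
    let mut cache = PingCache::new(Duration::from_secs(20 * 60)); // arbitrary TTL for this sketch
    let peer: SocketAddr = "127.0.0.1:8001".parse().unwrap();
    assert!(!cache.check(&peer, Instant::now())); // unverified: drop request, send ping
    cache.record_pong(peer);
    assert!(cache.check(&peer, Instant::now())); // verified: serve the pull request
}
```
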
mergify[bot]
eecdacac42 Don't hold dashmap write lock in store create (#13007) (#13230)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit c8fc0a6ba1)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-28 11:36:28 +00:00
mergify[bot]
429f130532 Switch accounts storage lock to DashMap (#12126) (#13223)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit f8d338c9cb)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-28 08:07:28 +00:00
mergify[bot]
19b9839dfc Use pico inflation for ledger-tool capitalization --enable-inflation (#13215) (#13222)
* Use pico inflation for ledger-tool capitalization --enable-inflation

* rust fmt

(cherry picked from commit 7d2962135d)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-28 07:40:21 +00:00
mergify[bot]
ad2bf3afa6 more portable install.sh (#13114) (#13220)
(cherry picked from commit 4e0d1b1d4a)

Co-authored-by: Jack May <jack@solana.com>
2020-10-28 06:45:32 +00:00
mergify[bot]
5c739ba236 Use zstd for create-snapshot (#13214) (#13218)
(cherry picked from commit 6d4c69b7c3)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-28 06:38:20 +00:00
mergify[bot]
9fac507606 Fix log (#13207) (#13211)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit f96ab5a818)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-28 03:21:01 +00:00
mergify[bot]
d5a37cb06e Parse vote instructions (#13202) (#13209)
(cherry picked from commit c4962af9eb)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-28 01:19:29 +00:00
mergify[bot]
86eb0157c0 Account for forward delay in transaction simulation (#13199) (#13201)
(cherry picked from commit 631f029fe9)

Co-authored-by: Justin Starry <justin@solana.com>
2020-10-27 18:35:43 +00:00
mergify[bot]
072dab0948 Fix pr crossing for sysvar keyed-accounts (#13189) (#13191)
(cherry picked from commit 26eba5ac7d)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 08:33:13 +00:00
mergify[bot]
e20e79f412 ignore .so files (#13188) (#13192)
(cherry picked from commit bb6ab3a62d)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 07:23:50 +00:00
mergify[bot]
f118db81ce check sysvar id for AccountInfo (#13175) (#13185)
(cherry picked from commit 322c667655)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 00:22:27 -07:00
mergify[bot]
4ecb78d303 Move KeyedAccount out of solana-program. Native programs are not supported by solana-program (bp #13159) (#13181)
* Move KeyedAccount out of solana-program.  Native programs are not supported by solana-program

(cherry picked from commit 1b343665a1)

# Conflicts:
#	programs/bpf/benches/bpf_loader.rs

* rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-27 05:43:14 +00:00
mergify[bot]
0a28e40606 fix .gitignore (#13177) (#13190)
(cherry picked from commit e3c0cc980b)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 05:26:59 +00:00
mergify[bot]
4d7a5a9daf macos portable rust-bpf (#13176) (#13187)
(cherry picked from commit fc83a666fc)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 04:51:14 +00:00
mergify[bot]
64cf6b4388 Add SSH key for buildkite-agent on achille (#13183)
(cherry picked from commit ff4b34202c)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-27 03:19:32 +00:00
mergify[bot]
f334c3b895 Add Bank::get_signature_status_with_blockhash() (#13167) (#13178)
Get the signature status in O(1) time, instead of O(n) where
n is the number of blockhashes in the StatusCache.

(cherry picked from commit f58bc8589d)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-10-27 01:29:16 +00:00
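
To illustrate the complexity difference with toy types (this is not the real StatusCache layout): when statuses are bucketed per blockhash, a caller that already knows the transaction's blockhash probes one bucket instead of scanning them all.

```rust
use std::collections::HashMap;

type Hash = u64;      // stand-in for a blockhash
type Signature = u64; // stand-in for a transaction signature
type Status = Result<(), ()>;

/// Toy status cache: one signature map per recent blockhash.
struct StatusCache {
    by_blockhash: HashMap<Hash, HashMap<Signature, Status>>,
}

impl StatusCache {
    /// O(n) in the number of blockhashes: scan every bucket.
    fn get_status_any_blockhash(&self, sig: &Signature) -> Option<Status> {
        self.by_blockhash
            .values()
            .find_map(|sigs| sigs.get(sig).copied())
    }

    /// O(1): probe only the bucket for the blockhash the transaction used.
    fn get_status_with_blockhash(&self, sig: &Signature, blockhash: &Hash) -> Option<Status> {
        self.by_blockhash
            .get(blockhash)
            .and_then(|sigs| sigs.get(sig).copied())
    }
}
```
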
Michael Vines
15a7bcd4fe Delete .lib.rs.swo 2020-10-26 16:01:43 -07:00
mergify[bot]
8d6636d02a CLI: Surface deploy transaction errors (#13170)
(cherry picked from commit a82971879f)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-26 22:43:32 +00:00
mergify[bot]
cf896dbeee Use bank timestamp to populate Blockstore::blocktime_cf when correction active (#13158) (#13160)
(cherry picked from commit 39686ef098)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-26 20:34:15 +00:00
mergify[bot]
e5b60b75f8 Docs: Testnet has a faucet now (#13165)
(cherry picked from commit 8b1638f026)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-26 20:28:59 +00:00
mergify[bot]
0e155fdbd9 update call depth docs (#13155) (#13162)
(cherry picked from commit 35f77ccc73)

Co-authored-by: Jack May <jack@solana.com>
2020-10-26 19:58:55 +00:00
Michael Vines
b79a337ddd Don't reuse BPF target build artifacts
(cherry picked from commit 41a56e14fc)
2020-10-26 12:01:38 -07:00
Michael Vines
c4050f541d Fix reward type encoding
(cherry picked from commit 0a89bb4d3c)
2020-10-26 12:01:38 -07:00
mergify[bot]
f0b74a4ecf marks pull request creation time only once per peer (#13113) (#13156)
mark_pull_request_creation_time requires an exclusive lock on gossip:
https://github.com/solana-labs/solana/blob/16944e218/core/src/cluster_info.rs#L1547-L1548
The current code redundantly marks each peer once for each request.
There are at most only 2 unique peers, whereas there are hundreds of
requests from each, so the lock is acquired hundreds of times longer than
necessary.

(cherry picked from commit 4bfda3e766)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-26 18:27:26 +00:00
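
Schematically, with hypothetical types and field names, the fix amounts to deduplicating peers before taking the exclusive gossip lock:

```rust
use std::collections::{HashMap, HashSet};
use std::sync::RwLock;
use std::time::Instant;

type Pubkey = [u8; 32];     // stand-in for solana_sdk::pubkey::Pubkey
type PullRequest = Vec<u8>; // placeholder payload

struct Gossip {
    // In the real code this state sits behind ClusterInfo's gossip RwLock.
    pull_request_time: HashMap<Pubkey, Instant>,
}

fn mark_peers_once(gossip: &RwLock<Gossip>, requests: &[(Pubkey, PullRequest)], now: Instant) {
    // Hundreds of requests typically map to only a couple of unique peers,
    // so deduplicate first...
    let peers: HashSet<&Pubkey> = requests.iter().map(|(peer, _)| peer).collect();
    // ...and hold the exclusive lock once per unique peer, not once per request.
    let mut gossip = gossip.write().unwrap();
    for peer in peers {
        gossip.pull_request_time.insert(*peer, now);
    }
}
```
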
mergify[bot]
f7979378fd Fix test_optimistic_confirmation_violation_without_tower() (#13043) (#13145)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit dd6cccaf7e)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-26 06:33:20 +00:00
mergify[bot]
d7c5607982 Hide noisy specialization warnings for frozen abi (#13141) (#13144)
(cherry picked from commit 5caf81dbf8)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-26 06:19:58 +00:00
Michael Vines
91ab5ae990 Remove program feature from SPL builds 2020-10-25 21:08:53 -07:00
mergify[bot]
605e767259 Allow existence of vote on root in saved tower (#13135) (#13139)
(cherry picked from commit 66c7a98009)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-26 12:34:58 +09:00
Michael Vines
597618846b Bump version to v1.4.4 2020-10-24 22:10:32 +00:00
Michael Vines
712267bf51 Rename "everything" feature to "full"
(cherry picked from commit 0cc9c94c43)
2020-10-24 13:21:11 -07:00
mergify[bot]
eb9cef0cd4 Separate the "program" feature of solana-sdk into a new crate called solana-program (bp #12989) (#13131)
* Add solana-program-sdk boilerplate

(cherry picked from commit 3718771ffb)

# Conflicts:
#	sdk/Cargo.toml

* Initial population of solana-program-sdk

(cherry picked from commit 63db324204)

# Conflicts:
#	Cargo.lock

* Port programs to solana-program-sdk

(cherry picked from commit fe68f7f786)

# Conflicts:
#	programs/bpf/Cargo.lock
#	programs/bpf/rust/128bit/Cargo.toml
#	programs/bpf/rust/128bit_dep/Cargo.toml
#	programs/bpf/rust/alloc/Cargo.toml
#	programs/bpf/rust/call_depth/Cargo.toml
#	programs/bpf/rust/custom_heap/Cargo.toml
#	programs/bpf/rust/dep_crate/Cargo.toml
#	programs/bpf/rust/deprecated_loader/Cargo.toml
#	programs/bpf/rust/dup_accounts/Cargo.toml
#	programs/bpf/rust/error_handling/Cargo.toml
#	programs/bpf/rust/external_spend/Cargo.toml
#	programs/bpf/rust/instruction_introspection/Cargo.toml
#	programs/bpf/rust/invoke/Cargo.toml
#	programs/bpf/rust/invoked/Cargo.toml
#	programs/bpf/rust/iter/Cargo.toml
#	programs/bpf/rust/many_args/Cargo.toml
#	programs/bpf/rust/many_args_dep/Cargo.toml
#	programs/bpf/rust/noop/Cargo.toml
#	programs/bpf/rust/panic/Cargo.toml
#	programs/bpf/rust/param_passing/Cargo.toml
#	programs/bpf/rust/param_passing_dep/Cargo.toml
#	programs/bpf/rust/rand/Cargo.toml
#	programs/bpf/rust/ristretto/Cargo.toml
#	programs/bpf/rust/sanity/Cargo.toml
#	programs/bpf/rust/sha256/Cargo.toml
#	programs/bpf/rust/sysval/Cargo.toml

* Only activate legacy program feature for the solana-sdk crate

(cherry picked from commit 85c51f5787)

* Run serum-dex unit tests

(cherry picked from commit 92ce381d60)

* Rename solana-program-sdk to solana-program

(cherry picked from commit dd711ab5fb)

# Conflicts:
#	programs/bpf/rust/128bit/Cargo.toml
#	programs/bpf/rust/128bit_dep/Cargo.toml
#	programs/bpf/rust/alloc/Cargo.toml
#	programs/bpf/rust/call_depth/Cargo.toml
#	programs/bpf/rust/custom_heap/Cargo.toml
#	programs/bpf/rust/dep_crate/Cargo.toml
#	programs/bpf/rust/deprecated_loader/Cargo.toml
#	programs/bpf/rust/dup_accounts/Cargo.toml
#	programs/bpf/rust/error_handling/Cargo.toml
#	programs/bpf/rust/external_spend/Cargo.toml
#	programs/bpf/rust/instruction_introspection/Cargo.toml
#	programs/bpf/rust/invoke/Cargo.toml
#	programs/bpf/rust/invoked/Cargo.toml
#	programs/bpf/rust/iter/Cargo.toml
#	programs/bpf/rust/many_args/Cargo.toml
#	programs/bpf/rust/many_args_dep/Cargo.toml
#	programs/bpf/rust/noop/Cargo.toml
#	programs/bpf/rust/panic/Cargo.toml
#	programs/bpf/rust/param_passing/Cargo.toml
#	programs/bpf/rust/param_passing_dep/Cargo.toml
#	programs/bpf/rust/rand/Cargo.toml
#	programs/bpf/rust/ristretto/Cargo.toml
#	programs/bpf/rust/sanity/Cargo.toml
#	programs/bpf/rust/sha256/Cargo.toml
#	programs/bpf/rust/sysval/Cargo.toml

* Update frozen_abi hashes

The movement of files in sdk/ caused ABI hashes to change

(cherry picked from commit a4956844bd)

* Resolve merge conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-24 17:25:22 +00:00
mergify[bot]
62e0e19961 add precompile verification to simulate_transaction (#13080) (#13126)
(cherry picked from commit 766406fd23)

Co-authored-by: Josh <josh.hundley@gmail.com>
2020-10-24 05:02:41 +00:00
mergify[bot]
9aee9cb867 Clean up opt conf verifier and vote state tracker (#13081) (#13124)
* Clean up opt conf verifier and vote state tracker

* Update test to follow new message and some knob

* Rename

(cherry picked from commit 0264147d42)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-24 04:12:02 +00:00
mergify[bot]
2b11558b36 Shorten magic install URL (#13122)
(cherry picked from commit b5170b993e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-23 23:28:19 +00:00
mergify[bot]
18c4e1b023 ci: Add downstream project build testing (bp #13112) (#13119)
* Use local cargo for CI

(cherry picked from commit c7c50bd32c)

* Add downstream project build testing

(cherry picked from commit c7f4f15e60)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-23 22:13:51 +00:00
mergify[bot]
6bac44ed92 Move bpf sdk packaging from publish-tarball to cargo-install-all (#13117)
(cherry picked from commit 965ea97b56)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-23 21:39:41 +00:00
mergify[bot]
8cb622084f Cli: deploy programs via TPU (#13090) (#13111)
* Deploy: send write transactions to leader tpu

* Less apparent stalling during confirmation

* Add EpochInfo mock

* Only get cluster nodes once

* Send deploy writes to next leader

(cherry picked from commit 16944e218f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-23 17:29:41 +00:00
mergify[bot]
38f7e9a979 shrink debug (#13089) (#13109)
(cherry picked from commit 7d2729f6bd)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-23 17:02:00 +00:00
mergify[bot]
a536f779ee scans crds table in parallel for finding old labels (#13073) (#13107)
From runtime profiles, the majority of the time in ClusterInfo::handle_purge
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/cluster_info.rs#L1605-L1626
is spent scanning the crds table for old labels:
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/crds.rs#L175-L197

This can be done in parallel given that the gossip thread-pool:
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/cluster_info.rs#L1637-L1641
is idle when handle_purge is invoked:
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/cluster_info.rs#L1681

(cherry picked from commit 37c8842bcb)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-23 15:36:06 +00:00
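
A sketch of the parallel scan with rayon (which the gossip thread-pool is built on), using a simplified value type in place of the crds table's versioned values:

```rust
use rayon::prelude::*;

type Label = String; // stand-in for the crds label type

struct VersionedValue {
    label: Label,
    local_timestamp: u64, // when this node last updated the value
}

/// Collect labels whose values are older than `now - timeout`, scanning the
/// table in parallel rather than sequentially.
fn find_old_labels(table: &[VersionedValue], now: u64, timeout: u64) -> Vec<Label> {
    let cutoff = now.saturating_sub(timeout);
    table
        .par_iter()
        .filter(|value| value.local_timestamp < cutoff)
        .map(|value| value.label.clone())
        .collect()
}
```

In the real code the scan would run under the gossip thread-pool's install() so it uses the otherwise idle workers rather than the global rayon pool.
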
mergify[bot]
84a5e5ec97 Remove spammy invalid rpc log (#13100) (#13102)
(cherry picked from commit c95f6c4b83)

Co-authored-by: Justin Starry <justin@solana.com>
2020-10-23 08:32:46 +00:00
mergify[bot]
dd33aae3cf Add --bpf-out-dir argument to control where the final build products land (#13099)
(cherry picked from commit b169d9cfbe)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-23 07:56:35 +00:00
mergify[bot]
be2ace47e3 Add deploy err if program-account balance is too high (#13091) (#13098)
* Add deploy err if program-account balance is too high

* Review comments

* Add system-program check

* Rename and unhide flag

(cherry picked from commit 4669fa0f98)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-23 07:02:16 +00:00
Trent Nelson
53b074aa35 Bump version to 1.4.3 2020-10-23 04:20:28 +00:00
mergify[bot]
a4ad2925a2 Allow nodes to advertise a different rpc address over gossip (#13053) (#13078)
* Allow nodes to advertise a different rpc address over gossip

* Feedback

(cherry picked from commit 8b0242a5d8)

Co-authored-by: Justin Starry <justin@solana.com>
2020-10-22 07:06:27 +00:00
mergify[bot]
edfbd8d65a Add replacements for Pubkey::new_rand()/Hash::new_rand() (bp #12987) (#13076)
* Add pubkey_new_rand(), mark Pubkey::new_rand() deprecated

(cherry picked from commit 0e68ed6a8d)

* Add hash_new_rand(), mark Hash::new_rand() as deprecated

(cherry picked from commit 76f11c7dae)

* Run `codemod --extensions rs Pubkey::new_rand solana_sdk::pubkey::new_rand`

(cherry picked from commit 7bc073defe)

# Conflicts:
#	programs/bpf/benches/bpf_loader.rs
#	runtime/benches/accounts.rs
#	runtime/src/accounts.rs

* Run `codemod --extensions rs Hash::new_rand solana_sdk::hash::new_rand`

(cherry picked from commit 17c391121a)

* Remove unused pubkey::Pubkey imports

(cherry picked from commit 959880db60)

# Conflicts:
#	runtime/src/accounts_index.rs

* Resolve conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-22 05:08:01 +00:00
mergify[bot]
e0ae54fd7e Add cargo-build-bpf (bp #13040) (#13075)
* Add cargo-build-bpf

(cherry picked from commit 07a853d6cc)

* Remove do.sh

(cherry picked from commit 61be155413)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-22 03:09:52 +00:00
mergify[bot]
60297951ec CLI: Print address ephemeral keypair seed phrase to stderr on deploy failure (bp #13046) (#13055)
* CLI: Print address ephemeral keypair seed phrase to stderr on deploy failure

(cherry picked from commit 2905ccc7ec)

# Conflicts:
#	cli/Cargo.toml

* Fix conflicts

Co-authored-by: Trent Nelson <trent@solana.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-10-21 15:29:56 -06:00
Tyera Eulberg
e0f9f72a2c RPC: Don't send base64 TXs to old clusters (#13072)
Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-21 20:25:36 +00:00
mergify[bot]
5236acf4b0 Add ledger-tool dead-slots and improve purge a lot (#13065) (#13071)
* Add ledger-tool dead-slots and improve purge a lot

* Reduce batch size...

* Add --dead-slots-only and fixed purge ordering

(cherry picked from commit 0776fa05c7)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-21 18:57:22 +00:00
mergify[bot]
5dd61b5db2 Port various rent fixes to runtime feature (#12842) (#13068)
* Port various rent fixes to runtime feature

* Fix CI

* Use more consistent naming...

(cherry picked from commit 608b81b412)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-21 14:47:48 +00:00
mergify[bot]
8752bf0826 Skip 'Stake by Feature Set' output when showing status of a single feature (#13052)
(cherry picked from commit ad65d4785e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-21 07:14:49 +00:00
mergify[bot]
b1712e80ec Parse stake and system instructions (#13035) (#13045)
* Fix token account check

* Add helper to check num accounts

* Add parse_stake

* Add parse_system

* Fix AuthorizeNonce docs

* Remove jsonParsed unstable markers

* Clippy

(cherry picked from commit 46d0019955)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-21 04:35:05 +00:00
Trent Nelson
2fe1a4677c Ignore more paths in increment-cargo-version.sh
(cherry picked from commit c1c69ecc34)
2020-10-20 20:55:34 -07:00
mergify[bot]
f76c128f4f Various clean-ups before assert adjustment (#13006) (#13041)
* Various clean-ups before assert adjustment

* oops

(cherry picked from commit efdb560e97)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-21 02:41:20 +00:00
mergify[bot]
b143b9c3c2 Remove frozen ABI modules from solana-sdk (bp #13008) (#13036)
* Remove frozen ABI modules from solana-sdk

(cherry picked from commit 6858950f76)

# Conflicts:
#	Cargo.lock
#	core/Cargo.toml
#	frozen-abi/macro/Cargo.toml
#	programs/bpf/Cargo.lock
#	programs/stake/Cargo.toml
#	programs/vote/Cargo.toml
#	runtime/Cargo.toml
#	sdk/Cargo.toml
#	version/Cargo.toml

* rebase

* fix broken ci (#13039)

Co-authored-by: Michael Vines <mvines@gmail.com>
Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-21 02:00:45 +00:00
mergify[bot]
b4178b75e7 Add --eval flag to solana-install info (#13038)
(cherry picked from commit 6f930351d2)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-21 01:12:48 +00:00
Michael Vines
c54b751df7 Include sdk/bpf in the main release tarball
(cherry picked from commit f71677164f)
2020-10-20 16:25:04 -07:00
mergify[bot]
0fde9e893f Force unset CARGO to use correct version of cargo (#13027) (#13034)
(cherry picked from commit 81d0c8ae7f)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2020-10-20 23:06:57 +00:00
mergify[bot]
d24abbdac9 Fix secp256k1 instruction indexing and add tests (#13026) (#13032)
(cherry picked from commit 83c53ae4b5)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-20 22:25:59 +00:00
Michael Vines
3b03985f28 Remove unsupported metrics tarball from release artifacts
(cherry picked from commit 62f20bc170)
2020-10-20 13:16:35 -07:00
mergify[bot]
d05bfa08c7 improves threads' utilization in processing gossip packets (#12962) (#13023)
ClusterInfo::process_packets handles incoming packets in a thread_pool:
https://github.com/solana-labs/solana/blob/87311cce7/core/src/cluster_info.rs#L2118-L2134

However, profiling runtime shows that threads are not well utilized and
a lot of the processing is done sequentially.

This commit redistributes the work so it is done in parallel. Testing on a gce
cluster shows 20%+ improvement in processing gossip packets with much
smaller variations.

(cherry picked from commit 75d62ca095)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-20 19:59:35 +00:00
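
Schematically, with placeholder types and a hypothetical handle_one helper, the change is from a single long-running loop to work spread over the existing gossip thread-pool:

```rust
use rayon::prelude::*;
use rayon::ThreadPool;

struct Packet;   // placeholder for a received gossip packet
struct Response; // placeholder for an outbound reply

fn handle_one(_packet: &Packet) -> Option<Response> {
    Some(Response) // placeholder for verify + protocol dispatch
}

/// Sequential shape: one thread iterates while the pool sits idle.
fn process_sequential(packets: &[Packet]) -> Vec<Response> {
    packets.iter().filter_map(handle_one).collect()
}

/// Parallel shape: the same work redistributed across the thread-pool.
fn process_parallel(thread_pool: &ThreadPool, packets: &[Packet]) -> Vec<Response> {
    thread_pool.install(|| packets.par_iter().filter_map(handle_one).collect())
}
```
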
mergify[bot]
9da2ac7a44 passes through feature-set to gossip requests handling (#12878) (#12991)
* passes through feature-set down to gossip requests handling
* takes the feature-set from root_bank instead of working_bank

(cherry picked from commit 48283161c3)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-20 18:25:44 +00:00
mergify[bot]
9e95d0fb58 Add more info for --limit-ledger-size (#13021)
(cherry picked from commit de04a208c7)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-20 17:50:11 +00:00
mergify[bot]
94cad9873c Support Debug Bank (#13017) (#13019)
(cherry picked from commit c0675968b1)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-20 17:20:40 +00:00
Tyera Eulberg
f33171b32f Remove errant print 2020-10-20 09:02:51 -06:00
mergify[bot]
aa6406f263 implements DataBudget using atomics (#12856) (#12990)
(cherry picked from commit 05cf15a382)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-20 12:51:42 +00:00
mergify[bot]
77864a6bee Parse bpf loader instructions (#12998) (#13005)
* Add parsing for BpfLoader2 instructions

* Skip info if null

* Return account address in info map

(cherry picked from commit 942e4273ba)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-20 06:25:56 +00:00
mergify[bot]
b51715d33c validator: Activate RPC before halting on slot (#13002)
(cherry picked from commit 3b3f7341fa)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-20 03:31:04 +00:00
mergify[bot]
7d395177d4 Add everything feature (#12999)
(cherry picked from commit c5e16383b0)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-20 01:52:21 +00:00
mergify[bot]
77ba6d6784 sdk: Add SyscallStubs to enable syscall interception when building programs for non-BPF (bp #12984) (#12993)
* Add SyscallStubs to enable syscall interception when building programs for non-BPF

(cherry picked from commit 9c53e1dfb2)

* Remove program_stubs!()

(cherry picked from commit 6d5889bdb5)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-19 22:28:44 +00:00
Tyera Eulberg
4bf0a54ed7 Revert "CLI: Put deploy ephemeral keypair behind a flag (#12942)" (#12982)
This reverts commit 8cac6835c0.
2020-10-19 17:41:10 +00:00
mergify[bot]
8a526f2f53 Follow up to persistent tower with tests and API cleaning (#12350) (#12972)
* Follow up to persistent tower

* Ignore for now...

* Hard-code validator identities for easy reasoning

* Add a test for opt. conf violation without tower

* Fix compile with rust < 1.47

* Remove unused method

* More move of assert tweak to the assert PR

* Add comments

* Clean up

* Clean the test addressing various review comments

* Clean up a bit

(cherry picked from commit 54517ea454)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-19 08:47:58 +00:00
mergify[bot]
43f99bdb31 Improve vote-account "Recent Timestamp" output (#12971)
(cherry picked from commit 2cc3d7511a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-19 07:30:29 +00:00
mergify[bot]
0008dc62e4 Fix zero-lamport accounts preventing slot cleanup (#12606) (#12969)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit 16d45b8480)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-19 07:07:08 +00:00
mergify[bot]
7e8174fb79 Minor doc typo (#12966)
(cherry picked from commit 6123d71489)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-19 03:23:24 +00:00
mergify[bot]
4ad2ebcde9 Mention monitoring and updating for exchanges (#12953) (#12959)
* Mention monitoring and updating for exchanges

* Fix link syntax...

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* More review comments and word-wrapping

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit 87311cce7f)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-17 06:49:59 +00:00
mergify[bot]
da183d655a keygen: add more mnemonic language support (#12944) (#12957)
(cherry picked from commit 4451042c76)

Co-authored-by: guanqun <guanqun.lu@gmail.com>
2020-10-17 04:00:29 +00:00
mergify[bot]
2e449276be Check payer balance for program account rent as needed (#12952) (#12955)
(cherry picked from commit b6bfed64cb)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-16 19:14:26 +00:00
mergify[bot]
8cac6835c0 CLI: Put deploy ephemeral keypair behind a flag (#12942)
(cherry picked from commit 5a5b7f39c1)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-16 16:56:50 +00:00
mergify[bot]
677c184e47 Another some tower logging improvements (#12940) (#12943)
(cherry picked from commit fd8ec27fe8)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-16 07:00:27 +00:00
mergify[bot]
f36cfb92f7 Convert Blockstore Rewards cf to protobuf (bp #12860) (#12935)
* Convert Blockstore Rewards cf to protobuf (#12860)

* Add Blockstore protobuf cf type

* Add Rewards message to proto and make generated pub

* Convert Rewards cf to ProtobufColumn

* Add bench

* Adjust tags

* Move solana proto definitions and conversion methods to new crate

(cherry picked from commit 359707c85e)

# Conflicts:
#	Cargo.lock
#	ledger/Cargo.toml
#	storage-bigtable/Cargo.toml

* v1.4-ify

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-10-16 01:27:37 +00:00
mergify[bot]
e7062de05f Report compute budget usage (#12931) (#12934)
(cherry picked from commit b510474dcb)

Co-authored-by: Jack May <jack@solana.com>
2020-10-16 00:05:18 +00:00
mergify[bot]
a443e2e773 Update get-block method in get_confirmed_transaction (#12923) (#12930)
* Update get-block method in get_confirmed_transaction

* Remove superfluous into()

(cherry picked from commit 42943ab86d)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-15 21:38:38 +00:00
mergify[bot]
3a6db787e2 Support arbitrary toolchains with cargo wrapper script (#12926)
(cherry picked from commit 99aecdaf65)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 20:26:35 +00:00
Trent Nelson
f3c986385f Bump version to 1.4.2 2020-10-15 20:26:29 +00:00
mergify[bot]
3df811348f program log pubkey as base58 (bp #12901) (#12911)
* program log pubkey as base58 (#12901)

(cherry picked from commit 3f9e6a600b)

# Conflicts:
#	programs/bpf/benches/bpf_loader.rs
#	programs/bpf/c/src/tuner/tuner.c

* resolve conflicts

* fix bench conflict

Co-authored-by: Jack May <jack@solana.com>
2020-10-15 19:40:20 +00:00
mergify[bot]
e8c86ed3e5 Drop 'Pubkey' in 'solana validators' header (#12919)
(cherry picked from commit 3073dc9801)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-15 19:02:10 +00:00
Tyera Eulberg
489a7bb576 Bump spl-memo and spl-token versions (#12914) 2020-10-15 18:05:41 +00:00
mergify[bot]
688dd85e61 Release: Use pinned cargo version to install spl-token-cli (#12916)
(cherry picked from commit bb2f0df9e1)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 17:59:24 +00:00
mergify[bot]
fe54a30084 Docs: Clarify validator disk requirements (#12921)
(cherry picked from commit cc0781e0ac)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 17:56:46 +00:00
mergify[bot]
80942841a2 Surface 'Program account allocation failed' error details (#12904)
(cherry picked from commit eec3d25ab9)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-15 16:22:12 +00:00
mergify[bot]
d2808a8e29 docs: Rework JSON RPC curl examples to be more readable (bp #12893) (#12899)
* Rework curl examples to be more readable

(cherry picked from commit f0d0bdc572)

# Conflicts:
#	docs/src/apps/jsonrpc-api.md

* rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-15 15:47:49 +00:00
mergify[bot]
f8413a28b5 Better tower logs for SwitchForkDecision and etc (#12875) (#12905)
* Better tower logs for SwitchForkDecision and etc

* nits

* Update comment

(cherry picked from commit a44e4d386f)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-15 10:41:03 +00:00
mergify[bot]
bc96332899 Respect RefCell when calling invoke (#12858) (#12891)
* Respect RefCell when calling invoke

* nudge

(cherry picked from commit 969f7b015b)

Co-authored-by: Jack May <jack@solana.com>
2020-10-15 02:15:36 +00:00
mergify[bot]
ceeeb3c9dd Change developer CTA (#12857) (#12892)
* change `index.js`

(cherry picked from commit 9e7fad1fd2)

Co-authored-by: R. M. Shea <8948187+rmshea@users.noreply.github.com>
2020-10-15 01:45:06 +00:00
mergify[bot]
bd058ec8f1 Release: Include SPL Token in release tarballs (#12889)
(cherry picked from commit f70762913c)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 01:32:34 +00:00
mergify[bot]
4b5ac44fc8 RPC: Add metrics for TX encoding (#12880)
(cherry picked from commit c26512255d)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-14 19:39:08 +00:00
mergify[bot]
fef979f0e5 Don't report RewardType::Fee when none was awarded (#12877)
(cherry picked from commit 4b04ed86b6)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-14 17:15:41 +00:00
mergify[bot]
cca2cdf39b Expose program error constants (#12861) (#12871)
(cherry picked from commit d4e953277e)

Co-authored-by: Jack May <jack@solana.com>
2020-10-14 08:48:50 +00:00
Trent Nelson
6e91996606 Bump version to 1.4.1 2020-10-14 03:05:04 +00:00
mergify[bot]
99be00d61f Add separate push queue to reduce push lock contention (#12713) (#12867)
(cherry picked from commit 1f1eb9f26e)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-14 02:46:13 +00:00
mergify[bot]
68f808026e Add log_messages to proto file (#12859) (#12863)
(cherry picked from commit 67ed44c007)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-14 01:50:55 +00:00
mergify[bot]
0c7ab0a1bb Update programming-faq.md (#12864) (#12865)
Fix typo

(cherry picked from commit b8f03c9b0f)

Co-authored-by: kemargrant <kemargrant@gmail.com>
2020-10-14 01:15:52 +00:00
mergify[bot]
3d8ccbc079 terminology update, nonce to bump seed (#12840) (#12851)
(cherry picked from commit 56211378d3)

Co-authored-by: Jack May <jack@solana.com>
2020-10-13 18:31:48 +00:00
mergify[bot]
275d096a46 solana vote-account/solana stake-account now works with RPC servers without --enable-rpc-transaction-history (bp #12826) (#12849)
* Implementation-defined RPC server errors are now accessible to client/ users

(cherry picked from commit 247228ee61)

* Cleanly handle RPC servers that don't have --enable-rpc-transaction-history enabled

(cherry picked from commit 14d793b22c)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 18:28:15 +00:00
mergify[bot]
6d70a06b23 Add nop feature set for upcoming ported rent fixes (#12841) (#12847)
(cherry picked from commit 7de7efe96c)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-13 17:47:59 +00:00
mergify[bot]
7e68b2e1bd Add transaction log messages to |solana confirm -v| output (#12836)
(cherry picked from commit e9dbbdeb81)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 06:57:21 +00:00
mergify[bot]
f0d761630e get_vote_accounts: access HashMap directly instead of turning it into an iterator (#12829)
(cherry picked from commit 649fe6d3b6)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 05:25:31 +00:00
mergify[bot]
1986927eb6 Check ELF file for errors before deploy (bp #12741) (#12801)
* Check ELF file for errors before deploy (#12741)

* Check ELF file for errors before deploy

* Update cli/src/cli.rs

Co-authored-by: Michael Vines <mvines@gmail.com>

* Fix formatting

* Bump solana_rbpf

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 6bbf6a79b7)

# Conflicts:
#	cli/Cargo.toml

* rebase

Co-authored-by: Alexandre Esteves <2335822+alexfmpe@users.noreply.github.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 04:41:51 +00:00
mergify[bot]
9a0ea61007 Add docs on vote account key rotation (bp #12815) (#12831)
* Add docs on vote account key rotation

(cherry picked from commit 253114ca20)

* Update docs/src/running-validator/vote-accounts.md

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit d83027c0cd)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 04:32:03 +00:00
mergify[bot]
51a70e52f2 CI: Fix crate publication (#12825)
(cherry picked from commit c38021502e)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-12 23:15:46 +00:00
mergify[bot]
9797c93db3 fix native_loader behavior for invalid accounts (#12814) (#12819)
(cherry picked from commit c24da1ee16)

Co-authored-by: Jack May <jack@solana.com>
2020-10-12 22:14:56 +00:00
mergify[bot]
9598114658 Use latest stable channel release if there's no beta release (#12823)
(cherry picked from commit 65213a1782)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-12 22:12:16 +00:00
mergify[bot]
d3ef061044 RpcClient: Encode TXs as base64 by default (#12817)
(cherry picked from commit efbe37ba20)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-12 21:56:18 +00:00
mergify[bot]
1f102d2617 Move no-0-rent rent dist. behavior under feature (#12804) (#12811)
(cherry picked from commit 2f5bb7e507)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-12 09:57:32 +00:00
mergify[bot]
5e97bd3d8a simulate_transaction_with_config() now passes full config to server (#12803)
(cherry picked from commit b3c2752bb0)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-12 05:58:30 +00:00
mergify[bot]
ed06e8b85d Pacify cargo audit (bp #12797) (#12799)
* Bump reqwest/rayon to get past `cargo audit`

(cherry picked from commit 8a119c1483)

# Conflicts:
#	dos/Cargo.toml
#	download-utils/Cargo.toml
#	metrics/Cargo.toml

* Switch to tempfile

(cherry picked from commit d3b0f87a49)

* Rework cargo audit ignores

(cherry picked from commit 2301dcf973)

* Cargo.lock

(cherry picked from commit 859eb606da)

* rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-12 04:29:55 +00:00
mergify[bot]
10b9225edb Don't bother paying 0 rent (#12793)
(cherry picked from commit 1fc7c1ecee)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-10 18:12:14 +00:00
Michael Vines
b1b5ddd2b9 Update gossip entrypoints 2020-10-10 08:39:38 -07:00
mergify[bot]
6b9b107ead Fix various ledger-tool error due to no builtins (bp #12759) (#12766)
* Fix various ledger-tool error due to no builtins (#12759)

* Fix various ledger-tool error due to no builtins

* Add missing file...

(cherry picked from commit 1f4bcf70b0)

# Conflicts:
#	core/Cargo.toml
#	ledger/Cargo.toml

* Rebase

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-10 07:10:38 +00:00
mergify[bot]
3fef98fd1e Expose all rewards (fees, rent, voting and staking) in RPC getConfirmedBlock and the cli (bp #12768) (#12790)
* Expose all rewards (fees, rent, voting and staking) in RPC getConfirmedBlock and the cli

(cherry picked from commit c5c8da1ac0)

# Conflicts:
#	Cargo.lock
#	transaction-status/Cargo.toml

* fix: surface full block rewards type

(cherry picked from commit 1b16790325)

* resolve conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-10 06:11:42 +00:00
Jack May
e999823b4b document program address collisions (#12774)
(cherry picked from commit 9ac8db3533)
2020-10-09 22:35:47 -07:00
mergify[bot]
1e46a5b147 Fix typo (#12780) (#12784)
(cherry picked from commit 5800217998)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-10 02:09:09 +00:00
mergify[bot]
567a1cb944 Correct Bank timestamp drift every slot (#12737) (#12777)
* Move timestamp helper to sdk

* Add Bank method for getting timestamp estimate

* Return sysvar info from Bank::clock

* Add feature-gated timestamp correction

* Rename unix_timestamp method to be more descriptive

* Review comments

* Add timestamp metric

(cherry picked from commit b028c47d2b)

# Conflicts:
#	runtime/src/feature_set.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-10 00:42:44 +00:00
mergify[bot]
2996cebfaa Add convenience script for working in stability branches (#12765) (#12773)
* Add convenience script for working in stability branches

* Update scripts/curgo.sh

Co-authored-by: Michael Vines <mvines@gmail.com>

* re{locate,name} to /cargo

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit ed95071c27)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-09 23:02:21 +00:00
mergify[bot]
7a1889aaf9 Add adjustable stack size and call depth (bp #12728) (#12770)
* Add adjustable stack size and call depth (#12728)

(cherry picked from commit c3907be623)

# Conflicts:
#	programs/bpf/Cargo.toml
#	programs/bpf_loader/Cargo.toml

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-09 22:08:01 +00:00
mergify[bot]
9188153b7d Fix fee mismatch on snapshot deserialize (#12697) (#12754)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit c879e7c1ad)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-09 20:21:50 +00:00
mergify[bot]
4b9f2e987a Bump max invoke depth to 4 (#12742) (#12764)
(cherry picked from commit 2cd7cd3149)

Co-authored-by: Jack May <jack@solana.com>
2020-10-09 18:49:44 +00:00
mergify[bot]
bb5c76483a Advise setting --wal-recovery-mode, and using --private-rpc for mainnet-beta (#12761)
(cherry picked from commit 3fedcdc6bc)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-09 15:56:56 +00:00
mergify[bot]
aafbb251b9 Only fetch snapshot if it's newer than local (#12663) (#12752)
* Only fetch snapshot if it's newer than local

* Prefer as_ref over clone

* More nits

* Don't wait forever for newer snapshot

(cherry picked from commit 81489ccb76)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-09 07:15:34 +00:00
mergify[bot]
dd32540ceb Add inflation_kill_switch feature (#12749)
(cherry picked from commit c8807d227a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-09 05:55:51 +00:00
mergify[bot]
e1a9cbaf3c Add new internal accounts (#12740) (#12747)
Co-authored-by: publish-docs.sh <maintainers@solana.com>
(cherry picked from commit 2c5f83c264)

Co-authored-by: Dan Albert <dan@solana.com>
2020-10-09 02:19:55 +00:00
mergify[bot]
83740246fc Minor variable name cleanup (#12745)
(cherry picked from commit 3a04026599)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-09 02:12:28 +00:00
mergify[bot]
7a53ca18a6 Store program logs in blockstore / bigtable (TransactionWithStatusMeta) (#12678) (#12735)
* introduce store program logs in blockstore / bigtable

* fix test, transaction logs created for successful transactions

* fix test for legacy bincode implementation around log_messages

* only api nodes should record logs

* truncate transaction logs to 100KB

* refactor log truncate for improved coverage

(cherry picked from commit 8f5431551e)

Co-authored-by: Josh <josh.hundley@gmail.com>
2020-10-08 20:19:26 +00:00
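
A guess at the shape of the truncation step, with a hypothetical helper and marker string (the exact limit and wording live in the transaction-status code):

```rust
const MAX_LOG_MESSAGES_BYTES: usize = 100 * 1000; // the 100KB cap mentioned above

/// Drop trailing log lines once the running byte total exceeds the cap,
/// appending a marker so consumers know output was dropped.
fn truncate_log_messages(mut messages: Vec<String>) -> Vec<String> {
    let mut total = 0usize;
    let mut keep = messages.len();
    for (i, message) in messages.iter().enumerate() {
        total += message.len();
        if total > MAX_LOG_MESSAGES_BYTES {
            keep = i;
            break;
        }
    }
    if keep < messages.len() {
        messages.truncate(keep);
        messages.push("Log truncated".to_string());
    }
    messages
}
```
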
mergify[bot]
c1a8637cb5 Support multiple connected HW wallets configured with the same seed phrase (bp #12716) (#12720)
* remote-wallet: Select hardware wallets based on host device path

(cherry picked from commit 8e3353d9ef)

* remote-wallet: Append wallet "name" to entries in selector UI

(cherry picked from commit f1a2ad1b7d)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-08 05:32:48 +00:00
mergify[bot]
d6831309cd Revert "Restore --expected-shred-version argument for mainnet-beta" (#12723)
This reverts commit 9410eab2af.

(cherry picked from commit dadc84fa8c)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-08 05:00:44 +00:00
497 changed files with 19379 additions and 8366 deletions

View File

@@ -31,4 +31,9 @@ export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
 mkdir -p "$CARGO_TARGET_CACHE"/target
 rsync -a --delete --link-dest="$CARGO_TARGET_CACHE" "$CARGO_TARGET_CACHE"/target .
+# Don't reuse BPF target build artifacts due to incremental build issues with
+# `std:
+# "found possibly newer version of crate `std` which `xyz` depends on
+rm -rf target/bpfel-unknown-unknown
 )

Cargo.lock (generated): 1321 lines changed

File diff suppressed because it is too large

View File

@@ -16,6 +16,7 @@ members = [
 "dos",
 "download-utils",
 "faucet",
+"frozen-abi",
 "perf",
 "validator",
 "genesis",
@@ -30,12 +31,14 @@ members = [
 "merkle-tree",
 "stake-o-matic",
 "storage-bigtable",
+"storage-proto",
 "streamer",
 "measure",
 "metrics",
 "net-shaper",
 "notifier",
 "poh-bench",
+"program-test",
 "programs/secp256k1",
 "programs/bpf_loader",
 "programs/budget",
@@ -51,6 +54,7 @@ members = [
 "ramp-tps",
 "runtime",
 "sdk",
+"sdk/cargo-build-bpf",
 "scripts",
 "stake-accounts",
 "stake-monitor",

View File

@@ -1,6 +1,6 @@
 [package]
 name = "solana-account-decoder"
-version = "1.4.0"
+version = "1.4.4"
 description = "Solana account decoder"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -18,11 +18,11 @@ lazy_static = "1.4.0"
 serde = "1.0.112"
 serde_derive = "1.0.103"
 serde_json = "1.0.56"
-solana-config-program = { path = "../programs/config", version = "1.4.0" }
-solana-sdk = { path = "../sdk", version = "1.4.0" }
-solana-stake-program = { path = "../programs/stake", version = "1.4.0" }
-solana-vote-program = { path = "../programs/vote", version = "1.4.0" }
-spl-token-v2-0 = { package = "spl-token", version = "=2.0.6", features = ["skip-no-mangle"] }
+solana-config-program = { path = "../programs/config", version = "1.4.4" }
+solana-sdk = { path = "../sdk", version = "1.4.4" }
+solana-stake-program = { path = "../programs/stake", version = "1.4.4" }
+solana-vote-program = { path = "../programs/vote", version = "1.4.4" }
+spl-token-v2-0 = { package = "spl-token", version = "=2.0.8" }
 thiserror = "1.0"
 [package.metadata.docs.rs]

View File

@@ -111,8 +111,8 @@ mod test {
 #[test]
 fn test_parse_account_data() {
-let account_pubkey = Pubkey::new_rand();
-let other_program = Pubkey::new_rand();
+let account_pubkey = solana_sdk::pubkey::new_rand();
+let other_program = solana_sdk::pubkey::new_rand();
 let data = vec![0; 4];
 assert!(parse_account_data(&account_pubkey, &other_program, &data, None).is_err());

View File

@@ -117,7 +117,7 @@ mod test {
 }))
 .unwrap(),
 };
-let info_pubkey = Pubkey::new_rand();
+let info_pubkey = solana_sdk::pubkey::new_rand();
 let validator_info_config_account = create_config_account(
 vec![(validator_info::id(), false), (info_pubkey, true)],
 &validator_info,

View File

@@ -134,7 +134,6 @@ impl From<Delegation> for UiDelegation {
 mod test {
 use super::*;
 use bincode::serialize;
-use solana_sdk::pubkey::Pubkey;
 #[test]
 fn test_parse_stake() {
@@ -145,8 +144,8 @@ mod test {
 StakeAccountType::Uninitialized
 );
-let pubkey = Pubkey::new_rand();
-let custodian = Pubkey::new_rand();
+let pubkey = solana_sdk::pubkey::new_rand();
+let custodian = solana_sdk::pubkey::new_rand();
 let authorized = Authorized::auto(&pubkey);
 let lockup = Lockup {
 unix_timestamp: 0,
@@ -180,7 +179,7 @@ mod test {
 })
 );
-let voter_pubkey = Pubkey::new_rand();
+let voter_pubkey = solana_sdk::pubkey::new_rand();
 let stake = Stake {
 delegation: Delegation {
 voter_pubkey,

View File

@@ -212,15 +212,14 @@ pub struct UiStakeHistoryEntry {
 mod test {
 use super::*;
 use solana_sdk::{
-fee_calculator::FeeCalculator,
-hash::Hash,
-sysvar::{recent_blockhashes::IterItem, Sysvar},
+account::create_account, fee_calculator::FeeCalculator, hash::Hash,
+sysvar::recent_blockhashes::IterItem,
 };
 use std::iter::FromIterator;
 #[test]
 fn test_parse_sysvars() {
-let clock_sysvar = Clock::default().create_account(1);
+let clock_sysvar = create_account(&Clock::default(), 1);
 assert_eq!(
 parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
 SysvarAccountType::Clock(UiClock::default()),
@@ -233,13 +232,13 @@ mod test {
 first_normal_epoch: 1,
 first_normal_slot: 12,
 };
-let epoch_schedule_sysvar = epoch_schedule.create_account(1);
+let epoch_schedule_sysvar = create_account(&epoch_schedule, 1);
 assert_eq!(
 parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
 SysvarAccountType::EpochSchedule(epoch_schedule),
 );
-let fees_sysvar = Fees::default().create_account(1);
+let fees_sysvar = create_account(&Fees::default(), 1);
 assert_eq!(
 parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
 SysvarAccountType::Fees(UiFees::default()),
@@ -251,7 +250,7 @@ mod test {
 };
 let recent_blockhashes =
 RecentBlockhashes::from_iter(vec![IterItem(0, &hash, &fee_calculator)].into_iter());
-let recent_blockhashes_sysvar = recent_blockhashes.create_account(1);
+let recent_blockhashes_sysvar = create_account(&recent_blockhashes, 1);
 assert_eq!(
 parse_sysvar(
 &recent_blockhashes_sysvar.data,
@@ -269,13 +268,13 @@ mod test {
 exemption_threshold: 2.0,
 burn_percent: 5,
 };
-let rent_sysvar = rent.create_account(1);
+let rent_sysvar = create_account(&rent, 1);
 assert_eq!(
 parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
 SysvarAccountType::Rent(rent.into()),
 );
-let rewards_sysvar = Rewards::default().create_account(1);
+let rewards_sysvar = create_account(&Rewards::default(), 1);
 assert_eq!(
 parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
 SysvarAccountType::Rewards(UiRewards::default()),
@@ -283,7 +282,7 @@ mod test {
 let mut slot_hashes = SlotHashes::default();
 slot_hashes.add(1, hash);
-let slot_hashes_sysvar = slot_hashes.create_account(1);
+let slot_hashes_sysvar = create_account(&slot_hashes, 1);
 assert_eq!(
 parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
 SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
@@ -294,7 +293,7 @@ mod test {
 let mut slot_history = SlotHistory::default();
 slot_history.add(42);
-let slot_history_sysvar = slot_history.create_account(1);
+let slot_history_sysvar = create_account(&slot_history, 1);
 assert_eq!(
 parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
 SysvarAccountType::SlotHistory(UiSlotHistory {
@@ -310,7 +309,7 @@ mod test {
 deactivating: 3,
 };
 stake_history.add(1, stake_history_entry.clone());
-let stake_history_sysvar = stake_history.create_account(1);
+let stake_history_sysvar = create_account(&stake_history, 1);
 assert_eq!(
 parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
 SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {
@@ -319,7 +318,7 @@ mod test {
 }]),
 );
-let bad_pubkey = Pubkey::new_rand();
+let bad_pubkey = solana_sdk::pubkey::new_rand();
 assert!(parse_sysvar(&stake_history_sysvar.data, &bad_pubkey).is_err());
 let bad_data = vec![0; 4];

View File

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-accounts-bench"
-version = "1.4.0"
+version = "1.4.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
 [dependencies]
 log = "0.4.6"
 rayon = "1.4.0"
-solana-logger = { path = "../logger", version = "1.4.0" }
-solana-runtime = { path = "../runtime", version = "1.4.0" }
-solana-measure = { path = "../measure", version = "1.4.0" }
-solana-sdk = { path = "../sdk", version = "1.4.0" }
-solana-version = { path = "../version", version = "1.4.0" }
+solana-logger = { path = "../logger", version = "1.4.4" }
+solana-runtime = { path = "../runtime", version = "1.4.4" }
+solana-measure = { path = "../measure", version = "1.4.4" }
+solana-sdk = { path = "../sdk", version = "1.4.4" }
+solana-version = { path = "../version", version = "1.4.4" }
 rand = "0.7.0"
 clap = "2.33.1"
 crossbeam-channel = "0.4"

View File

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-banking-bench"
-version = "1.4.0"
+version = "1.4.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,16 +14,16 @@ crossbeam-channel = "0.4"
 log = "0.4.6"
 rand = "0.7.0"
 rayon = "1.4.0"
-solana-core = { path = "../core", version = "1.4.0" }
-solana-clap-utils = { path = "../clap-utils", version = "1.4.0" }
-solana-streamer = { path = "../streamer", version = "1.4.0" }
-solana-perf = { path = "../perf", version = "1.4.0" }
-solana-ledger = { path = "../ledger", version = "1.4.0" }
-solana-logger = { path = "../logger", version = "1.4.0" }
-solana-runtime = { path = "../runtime", version = "1.4.0" }
-solana-measure = { path = "../measure", version = "1.4.0" }
-solana-sdk = { path = "../sdk", version = "1.4.0" }
-solana-version = { path = "../version", version = "1.4.0" }
+solana-core = { path = "../core", version = "1.4.4" }
+solana-clap-utils = { path = "../clap-utils", version = "1.4.4" }
+solana-streamer = { path = "../streamer", version = "1.4.4" }
+solana-perf = { path = "../perf", version = "1.4.4" }
+solana-ledger = { path = "../ledger", version = "1.4.4" }
+solana-logger = { path = "../logger", version = "1.4.4" }
+solana-runtime = { path = "../runtime", version = "1.4.4" }
+solana-measure = { path = "../measure", version = "1.4.4" }
+solana-sdk = { path = "../sdk", version = "1.4.4" }
+solana-version = { path = "../version", version = "1.4.4" }
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]

View File

@@ -20,7 +20,6 @@ use solana_perf::packet::to_packets_chunked;
 use solana_runtime::{bank::Bank, bank_forks::BankForks};
 use solana_sdk::{
 hash::Hash,
-pubkey::Pubkey,
 signature::Keypair,
 signature::Signature,
 system_transaction,
@@ -69,7 +68,7 @@ fn make_accounts_txs(
 hash: Hash,
 same_payer: bool,
 ) -> Vec<Transaction> {
-let to_pubkey = Pubkey::new_rand();
+let to_pubkey = solana_sdk::pubkey::new_rand();
 let payer_key = Keypair::new();
 let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash);
 (0..total_num_transactions)
@@ -78,9 +77,9 @@ fn make_accounts_txs(
 let mut new = dummy.clone();
 let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
 if !same_payer {
-new.message.account_keys[0] = Pubkey::new_rand();
+new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
 }
-new.message.account_keys[1] = Pubkey::new_rand();
+new.message.account_keys[1] = solana_sdk::pubkey::new_rand();
 new.signatures = vec![Signature::new(&sig[0..64])];
 new
 })
@@ -241,7 +240,7 @@ fn main() {
 let base_tx_count = bank.transaction_count();
 let mut txs_processed = 0;
 let mut root = 1;
-let collector = Pubkey::new_rand();
+let collector = solana_sdk::pubkey::new_rand();
 let config = Config {
 packets_per_batch: packets_per_chunk,
 chunk_len,


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.4.0"
version = "1.4.4"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,15 +12,15 @@ edition = "2018"
async-trait = "0.1.36"
bincode = "1.3.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "1.4.0" }
solana-sdk = { path = "../sdk", version = "1.4.0" }
tarpc = { version = "0.22.0", features = ["full"] }
tokio = "0.2"
solana-banks-interface = { path = "../banks-interface", version = "1.4.4" }
solana-sdk = { path = "../sdk", version = "1.4.4" }
tarpc = { version = "0.23.0", features = ["full"] }
tokio = { version = "0.3", features = ["full"] }
tokio-serde = { version = "0.6", features = ["bincode"] }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "1.4.0" }
solana-banks-server = { path = "../banks-server", version = "1.4.0" }
solana-runtime = { path = "../runtime", version = "1.4.4" }
solana-banks-server = { path = "../banks-server", version = "1.4.4" }
[lib]
crate-type = ["lib"]


@@ -213,10 +213,10 @@ mod tests {
use super::*;
use solana_banks_server::banks_server::start_local_server;
use solana_runtime::{bank::Bank, bank_forks::BankForks, genesis_utils::create_genesis_config};
use solana_sdk::{message::Message, pubkey::Pubkey, signature::Signer, system_instruction};
use solana_sdk::{message::Message, signature::Signer, system_instruction};
use std::sync::{Arc, RwLock};
use tarpc::transport;
use tokio::{runtime::Runtime, time::delay_for};
use tokio::{runtime::Runtime, time::sleep};
#[test]
fn test_banks_client_new() {
@@ -235,7 +235,7 @@ mod tests {
&genesis.genesis_config,
))));
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let mint_pubkey = genesis.mint_keypair.pubkey();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
@@ -265,7 +265,7 @@ mod tests {
))));
let mint_pubkey = &genesis.mint_keypair.pubkey();
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey));
@@ -285,7 +285,7 @@ mod tests {
if root_slot > last_valid_slot {
break;
}
delay_for(Duration::from_millis(100)).await;
sleep(Duration::from_millis(100)).await;
status = banks_client.get_transaction_status(signature).await?;
}
assert!(status.unwrap().err.is_none());


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.4.0"
version = "1.4.4"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,8 +10,8 @@ edition = "2018"
[dependencies]
serde = { version = "1.0.112", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "1.4.0" }
tarpc = { version = "0.22.0", features = ["full"] }
solana-sdk = { path = "../sdk", version = "1.4.4" }
tarpc = { version = "0.23.0", features = ["full"] }
[lib]
crate-type = ["lib"]


@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.4.0"
version = "1.4.4"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,12 +12,12 @@ edition = "2018"
bincode = "1.3.1"
futures = "0.3"
log = "0.4.8"
solana-banks-interface = { path = "../banks-interface", version = "1.4.0" }
solana-runtime = { path = "../runtime", version = "1.4.0" }
solana-sdk = { path = "../sdk", version = "1.4.0" }
solana-metrics = { path = "../metrics", version = "1.4.0" }
tarpc = { version = "0.22.0", features = ["full"] }
tokio = "0.2"
solana-banks-interface = { path = "../banks-interface", version = "1.4.4" }
solana-runtime = { path = "../runtime", version = "1.4.4" }
solana-sdk = { path = "../sdk", version = "1.4.4" }
solana-metrics = { path = "../metrics", version = "1.4.4" }
tarpc = { version = "0.23.0", features = ["full"] }
tokio = { version = "0.3", features = ["full"] }
tokio-serde = { version = "0.6", features = ["bincode"] }
[lib]


@@ -5,11 +5,7 @@ use futures::{
prelude::stream::{self, StreamExt},
};
use solana_banks_interface::{Banks, BanksRequest, BanksResponse, TransactionStatus};
use solana_runtime::{
bank::Bank,
bank_forks::BankForks,
commitment::{BlockCommitmentCache, CommitmentSlots},
};
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
use solana_sdk::{
account::Account,
clock::Slot,
@@ -21,7 +17,6 @@ use solana_sdk::{
transaction::{self, Transaction},
};
use std::{
collections::HashMap,
io,
net::{Ipv4Addr, SocketAddr},
sync::{
@@ -38,7 +33,7 @@ use tarpc::{
server::{self, Channel, Handler},
transport,
};
use tokio::time::delay_for;
use tokio::time::sleep;
use tokio_serde::formats::Bincode;
#[derive(Clone)]
@@ -84,11 +79,9 @@ impl BanksServer {
let (transaction_sender, transaction_receiver) = channel();
let bank = bank_forks.read().unwrap().working_bank();
let slot = bank.slot();
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(
HashMap::default(),
0,
CommitmentSlots::new_from_slot(slot),
)));
let block_commitment_cache = Arc::new(RwLock::new(
BlockCommitmentCache::new_for_tests_with_slots(slot, slot),
));
Builder::new()
.name("solana-bank-forks-client".to_string())
.spawn(move || Self::run(&bank, transaction_receiver))
@@ -109,18 +102,21 @@ impl BanksServer {
async fn poll_signature_status(
self,
signature: Signature,
signature: &Signature,
blockhash: &Hash,
last_valid_slot: Slot,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
let mut status = self.bank(commitment).get_signature_status(&signature);
let mut status = self
.bank(commitment)
.get_signature_status_with_blockhash(signature, blockhash);
while status.is_none() {
delay_for(Duration::from_millis(200)).await;
sleep(Duration::from_millis(200)).await;
let bank = self.bank(commitment);
if bank.slot() > last_valid_slot {
break;
}
status = bank.get_signature_status(&signature);
status = bank.get_signature_status_with_blockhash(signature, blockhash);
}
status
}
@@ -193,13 +189,13 @@ impl Banks for BanksServer {
.read()
.unwrap()
.root_bank()
.get_blockhash_last_valid_slot(&blockhash)
.get_blockhash_last_valid_slot(blockhash)
.unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
self.transaction_sender.send(info).unwrap();
self.poll_signature_status(signature, last_valid_slot, commitment)
self.poll_signature_status(&signature, blockhash, last_valid_slot, commitment)
.await
}


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.4.0"
version = "1.4.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,21 +18,21 @@ rand = "0.7.0"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" }
solana-core = { path = "../core", version = "1.4.0" }
solana-genesis = { path = "../genesis", version = "1.4.0" }
solana-client = { path = "../client", version = "1.4.0" }
solana-faucet = { path = "../faucet", version = "1.4.0" }
solana-exchange-program = { path = "../programs/exchange", version = "1.4.0" }
solana-logger = { path = "../logger", version = "1.4.0" }
solana-metrics = { path = "../metrics", version = "1.4.0" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" }
solana-runtime = { path = "../runtime", version = "1.4.0" }
solana-sdk = { path = "../sdk", version = "1.4.0" }
solana-version = { path = "../version", version = "1.4.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.4" }
solana-core = { path = "../core", version = "1.4.4" }
solana-genesis = { path = "../genesis", version = "1.4.4" }
solana-client = { path = "../client", version = "1.4.4" }
solana-faucet = { path = "../faucet", version = "1.4.4" }
solana-exchange-program = { path = "../programs/exchange", version = "1.4.4" }
solana-logger = { path = "../logger", version = "1.4.4" }
solana-metrics = { path = "../metrics", version = "1.4.4" }
solana-net-utils = { path = "../net-utils", version = "1.4.4" }
solana-runtime = { path = "../runtime", version = "1.4.4" }
solana-sdk = { path = "../sdk", version = "1.4.4" }
solana-version = { path = "../version", version = "1.4.4" }
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.4.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.4.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -86,7 +86,7 @@ fn test_exchange_bank_client() {
solana_logger::setup();
let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
let mut bank = Bank::new(&genesis_config);
bank.add_builtin_program("exchange_program", id(), process_instruction);
bank.add_builtin("exchange_program", id(), process_instruction);
let clients = vec![BankClient::new(bank)];
let mut config = Config::default();


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.4.0"
version = "1.4.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false
[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" }
solana-streamer = { path = "../streamer", version = "1.4.0" }
solana-logger = { path = "../logger", version = "1.4.0" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" }
solana-version = { path = "../version", version = "1.4.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.4" }
solana-streamer = { path = "../streamer", version = "1.4.4" }
solana-logger = { path = "../logger", version = "1.4.4" }
solana-net-utils = { path = "../net-utils", version = "1.4.4" }
solana-version = { path = "../version", version = "1.4.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.4.0"
version = "1.4.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,23 +15,23 @@ log = "0.4.8"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" }
solana-core = { path = "../core", version = "1.4.0" }
solana-genesis = { path = "../genesis", version = "1.4.0" }
solana-client = { path = "../client", version = "1.4.0" }
solana-faucet = { path = "../faucet", version = "1.4.0" }
solana-logger = { path = "../logger", version = "1.4.0" }
solana-metrics = { path = "../metrics", version = "1.4.0" }
solana-measure = { path = "../measure", version = "1.4.0" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" }
solana-runtime = { path = "../runtime", version = "1.4.0" }
solana-sdk = { path = "../sdk", version = "1.4.0" }
solana-version = { path = "../version", version = "1.4.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.4" }
solana-core = { path = "../core", version = "1.4.4" }
solana-genesis = { path = "../genesis", version = "1.4.4" }
solana-client = { path = "../client", version = "1.4.4" }
solana-faucet = { path = "../faucet", version = "1.4.4" }
solana-logger = { path = "../logger", version = "1.4.4" }
solana-metrics = { path = "../metrics", version = "1.4.4" }
solana-measure = { path = "../measure", version = "1.4.4" }
solana-net-utils = { path = "../net-utils", version = "1.4.4" }
solana-runtime = { path = "../runtime", version = "1.4.4" }
solana-sdk = { path = "../sdk", version = "1.4.4" }
solana-version = { path = "../version", version = "1.4.4" }
[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.4.0" }
solana-local-cluster = { path = "../local-cluster", version = "1.4.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

cargo (new executable file, 31 lines)

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
# shellcheck source=ci/rust-version.sh
here=$(dirname "$0")
source "${here}"/ci/rust-version.sh all
toolchain=
case "$1" in
stable)
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
toolchain="$rust_stable"
shift
;;
nightly)
# shellcheck disable=SC2054 # rust_nightly is sourced from rust-version.sh
toolchain="$rust_nightly"
shift
;;
+*)
toolchain="${1#+}"
shift
;;
*)
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
toolchain="$rust_stable"
;;
esac
set -x
exec cargo "+${toolchain}" "${@}"

cargo-build-bpf (new executable file, 5 lines)

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
here=$(dirname "$0")
set -x
exec $here/cargo run --manifest-path $here/sdk/cargo-build-bpf/Cargo.toml -- --bpf-sdk $here/sdk/bpf "$@"


@@ -175,6 +175,30 @@ EOF
"Stable-perf skipped as no relevant files were modified"
fi
# Downstream backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
EOF
else
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi
# Benches...
if affects \
.rs$ \


@@ -26,8 +26,9 @@ declare print_free_tree=(
':runtime/src/**.rs'
':sdk/bpf/rust/rust-utils/**.rs'
':sdk/**.rs'
':^sdk/src/program_option.rs'
':^sdk/src/program_stubs.rs'
':^sdk/cargo-build-bpf/**.rs'
':^sdk/program/src/program_option.rs'
':^sdk/program/src/program_stubs.rs'
':programs/**.rs'
':^**bin**.rs'
':^**bench**.rs'


@@ -42,10 +42,10 @@ def get_packages():
sys.exit(1)
# Order dependencies
deleted_dependencies = []
sorted_dependency_graph = []
max_iterations = pow(len(dependency_graph),2)
while len(deleted_dependencies) < len(dependency_graph):
while dependency_graph:
deleted_packages = []
if max_iterations == 0:
# One day be more helpful and find the actual cycle for the user...
sys.exit('Error: Circular dependency suspected between these packages: \n {}\n'.format('\n '.join(dependency_graph.keys())))
@@ -53,13 +53,17 @@ def get_packages():
max_iterations -= 1
for package, dependencies in dependency_graph.items():
if package in deleted_packages:
continue
for dependency in dependencies:
if dependency in dependency_graph:
break
else:
deleted_dependencies.append(package)
deleted_packages.append(package)
sorted_dependency_graph.append((package, manifest_path[package]))
dependency_graph = {p: d for p, d in dependency_graph.items() if not p in deleted_packages }
return sorted_dependency_graph


@@ -91,17 +91,15 @@ echo --- Creating release tarball
cp "${RELEASE_BASENAME}"/version.yml "${TARBALL_BASENAME}"-$TARGET.yml
)
# Metrics tarball is platform agnostic, only publish it from Linux
# Maybe tarballs are platform agnostic, only publish them from the Linux build
MAYBE_TARBALLS=
if [[ "$CI_OS_NAME" = linux ]]; then
metrics/create-metrics-tarball.sh
(
set -x
sdk/bpf/scripts/package.sh
[[ -f bpf-sdk.tar.bz2 ]]
)
MAYBE_TARBALLS="bpf-sdk.tar.bz2 solana-metrics.tar.bz2"
MAYBE_TARBALLS="bpf-sdk.tar.bz2"
fi
source ci/upload-ci-artifact.sh
@@ -126,7 +124,7 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.
/usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
echo Published to:
$DRYRUN ci/format-url.sh http://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
$DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
)
if [[ -n $TAG ]]; then
@@ -149,4 +147,30 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.
fi
done
# Create install wrapper for release.solana.com
if [[ -n $BUILDKITE ]]; then
cat > release.solana.com-install <<EOF
SOLANA_RELEASE=$CHANNEL_OR_TAG
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
EOF
cat install/solana-install-init.sh >> release.solana.com-install
echo --- AWS S3 Store: "install"
(
set -x
$DRYRUN docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/release.solana.com-install s3://release.solana.com/"$CHANNEL_OR_TAG"/install
echo Published to:
$DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/install
)
fi
echo --- ok


@@ -41,7 +41,7 @@ if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
echo "$0: [tree (for outdated Cargo.lock sync)|check (for compilation error)|update -p foo --precise x.y.z (for your Cargo.toml update)] ..." >&2
exit "$check_status"
fi
# Ensure nightly and --benches
_ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets
else
@@ -57,7 +57,23 @@ _ cargo +"$rust_nightly" clippy \
-Zunstable-options --workspace --all-targets \
-- --deny=warnings --allow=clippy::stable_sort_primitive
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
cargo_audit_ignores=(
# failure is officially deprecated/unmaintained
#
# Blocked on multiple upstream crates removing their `failure` dependency.
--ignore RUSTSEC-2020-0036
# `net2` crate has been deprecated; use `socket2` instead
#
# Blocked on https://github.com/paritytech/jsonrpc/issues/575
--ignore RUSTSEC-2020-0016
# stdweb is unmaintained
#
# Blocked on multiple upstream crates removing their `stdweb` dependency.
--ignore RUSTSEC-2020-0056
)
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignores[@]}"
{
cd programs/bpf
@@ -68,7 +84,9 @@ _ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit --ignore RUSTSEC-202
cd "$project"
_ cargo +"$rust_stable" fmt -- --check
_ cargo +"$rust_nightly" test
_ cargo +"$rust_nightly" clippy -- --deny=warnings --allow=clippy::missing_safety_doc
_ cargo +"$rust_nightly" clippy -- --deny=warnings \
--allow=clippy::missing_safety_doc \
--allow=clippy::stable_sort_primitive
)
done
}


@@ -40,6 +40,9 @@ test-stable)
_ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
;;
test-stable-perf)
# BPF solana-sdk legacy compile test
./cargo-build-bpf --manifest-path sdk/Cargo.toml
# BPF program tests
_ make -C programs/bpf/c tests
_ cargo +"$rust_stable" test \


@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.4.0"
version = "1.4.4"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.0" }
solana-sdk = { path = "../sdk", version = "1.4.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.4" }
solana-sdk = { path = "../sdk", version = "1.4.4" }
thiserror = "1.0.20"
tiny-bip39 = "0.7.0"
url = "2.1.0"


@@ -228,8 +228,8 @@ mod tests {
assert_eq!(values_of(&matches, "multiple"), Some(vec![50, 39]));
assert_eq!(values_of::<u64>(&matches, "single"), None);
let pubkey0 = Pubkey::new_rand();
let pubkey1 = Pubkey::new_rand();
let pubkey0 = solana_sdk::pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
let matches = app().clone().get_matches_from(vec![
"test",
"--multiple",
@@ -251,7 +251,7 @@ mod tests {
assert_eq!(value_of(&matches, "single"), Some(50));
assert_eq!(value_of::<u64>(&matches, "multiple"), None);
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let matches = app()
.clone()
.get_matches_from(vec!["test", "--single", &pubkey.to_string()]);
@@ -331,8 +331,8 @@ mod tests {
#[test]
fn test_pubkeys_sigs_of() {
let key1 = Pubkey::new_rand();
let key2 = Pubkey::new_rand();
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]);
let sig2 = Keypair::new().sign_message(&[1u8]);
let signer1 = format!("{}={}", key1, sig1);


@@ -298,7 +298,24 @@ pub fn keypair_from_seed_phrase(
keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)?
} else {
let sanitized = sanitize_seed_phrase(seed_phrase);
let mnemonic = Mnemonic::from_phrase(&sanitized, Language::English)?;
let parse_language_fn = || {
for language in &[
Language::English,
Language::ChineseSimplified,
Language::ChineseTraditional,
Language::Japanese,
Language::Spanish,
Language::Korean,
Language::French,
Language::Italian,
] {
if let Ok(mnemonic) = Mnemonic::from_phrase(&sanitized, *language) {
return Ok(mnemonic);
}
}
Err("Can't get mnemonic from seed phrases")
};
let mnemonic = parse_language_fn()?;
let passphrase = prompt_passphrase(&passphrase_prompt)?;
let seed = Seed::new(&mnemonic, &passphrase);
keypair_from_seed(seed.as_bytes())?
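
The hunk above broadens seed-phrase recovery from English-only to every BIP-39 wordlist. For illustration, a minimal self-contained sketch of that fallback loop, assuming the tiny-bip39 crate's `Mnemonic::from_phrase` and `Language` API used elsewhere in this change:

```rust
use bip39::{Language, Mnemonic};

// Try each supported BIP-39 wordlist in turn and return the first mnemonic
// that parses; mirrors the parse_language_fn closure added in the diff above.
fn mnemonic_from_any_language(sanitized_phrase: &str) -> Result<Mnemonic, &'static str> {
    for language in &[
        Language::English,
        Language::ChineseSimplified,
        Language::ChineseTraditional,
        Language::Japanese,
        Language::Spanish,
        Language::Korean,
        Language::French,
        Language::Italian,
    ] {
        if let Ok(mnemonic) = Mnemonic::from_phrase(sanitized_phrase, *language) {
            return Ok(mnemonic);
        }
    }
    Err("Can't get mnemonic from seed phrases")
}
```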


@@ -3,13 +3,13 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.4.0"
version = "1.4.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
dirs = "2.0.2"
dirs-next = "2.0.0"
lazy_static = "1.4.0"
serde = "1.0.112"
serde_derive = "1.0.103"


@@ -5,7 +5,7 @@ use url::Url;
lazy_static! {
pub static ref CONFIG_FILE: Option<String> = {
dirs::home_dir().map(|mut path| {
dirs_next::home_dir().map(|mut path| {
path.extend(&[".config", "solana", "cli", "config.yml"]);
path.to_str().unwrap().to_string()
})
@@ -25,7 +25,7 @@ pub struct Config {
impl Default for Config {
fn default() -> Self {
let keypair_path = {
let mut keypair_path = dirs::home_dir().expect("home directory");
let mut keypair_path = dirs_next::home_dir().expect("home directory");
keypair_path.extend(&[".config", "solana", "id.json"]);
keypair_path.to_str().unwrap().to_string()
};


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale"
version = "1.4.0"
version = "1.4.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,13 +17,13 @@ indicatif = "0.15.0"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.4.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" }
solana-client = { path = "../client", version = "1.4.0" }
solana-sdk = { path = "../sdk", version = "1.4.0" }
solana-stake-program = { path = "../programs/stake", version = "1.4.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.0" }
solana-vote-program = { path = "../programs/vote", version = "1.4.0" }
solana-account-decoder = { path = "../account-decoder", version = "1.4.4" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.4" }
solana-client = { path = "../client", version = "1.4.4" }
solana-sdk = { path = "../sdk", version = "1.4.4" }
solana-stake-program = { path = "../programs/stake", version = "1.4.4" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.4" }
solana-vote-program = { path = "../programs/vote", version = "1.4.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -132,7 +132,7 @@ impl fmt::Display for CliBlockProduction {
"{}",
style(format!(
" {:<44} {:>15} {:>15} {:>15} {:>23}",
"Identity Pubkey",
"Identity",
"Leader Slots",
"Blocks Produced",
"Skipped Slots",
@@ -301,7 +301,7 @@ pub struct CliValidatorsStakeByVersion {
pub struct CliValidators {
pub total_active_stake: u64,
pub total_current_stake: u64,
pub total_deliquent_stake: u64,
pub total_delinquent_stake: u64,
pub current_validators: Vec<CliValidator>,
pub delinquent_validators: Vec<CliValidator>,
pub stake_by_version: BTreeMap<String, CliValidatorsStakeByVersion>,
@@ -360,7 +360,7 @@ impl fmt::Display for CliValidators {
"Active Stake:",
&build_balance_message(self.total_active_stake, self.use_lamports_unit, true),
)?;
if self.total_deliquent_stake > 0 {
if self.total_delinquent_stake > 0 {
writeln_name_value(
f,
"Current Stake:",
@@ -376,11 +376,11 @@ impl fmt::Display for CliValidators {
&format!(
"{} ({:0.2}%)",
&build_balance_message(
self.total_deliquent_stake,
self.total_delinquent_stake,
self.use_lamports_unit,
true
),
100. * self.total_deliquent_stake as f64 / self.total_active_stake as f64
100. * self.total_delinquent_stake as f64 / self.total_active_stake as f64
),
)?;
}
@@ -412,8 +412,8 @@ impl fmt::Display for CliValidators {
"{}",
style(format!(
" {:<44} {:<38} {} {} {} {:>10} {:^8} {}",
"Identity Pubkey",
"Vote Account Pubkey",
"Identity",
"Vote Account",
"Commission",
"Last Vote",
"Root Block",
@@ -661,13 +661,8 @@ impl fmt::Display for CliStakeState {
if lockup.unix_timestamp != UnixTimestamp::default() {
writeln!(
f,
"Lockup Timestamp: {} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(
NaiveDateTime::from_timestamp(lockup.unix_timestamp, 0),
Utc
)
.to_rfc3339_opts(SecondsFormat::Secs, true),
lockup.unix_timestamp
"Lockup Timestamp: {}",
unix_timestamp_to_string(lockup.unix_timestamp)
)?;
}
if lockup.epoch != Epoch::default() {
@@ -952,8 +947,8 @@ impl VerboseDisplay for CliValidatorInfo {}
impl fmt::Display for CliValidatorInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Validator Identity Pubkey:", &self.identity_pubkey)?;
writeln_name_value(f, " Info Pubkey:", &self.info_pubkey)?;
writeln_name_value(f, "Validator Identity:", &self.identity_pubkey)?;
writeln_name_value(f, " Info Address:", &self.info_pubkey)?;
for (key, value) in self.info.iter() {
writeln_name_value(
f,
@@ -1008,7 +1003,12 @@ impl fmt::Display for CliVoteAccount {
None => "~".to_string(),
}
)?;
writeln!(f, "Recent Timestamp: {:?}", self.recent_timestamp)?;
writeln!(
f,
"Recent Timestamp: {} from slot {}",
unix_timestamp_to_string(self.recent_timestamp.timestamp),
self.recent_timestamp.slot
)?;
if !self.votes.is_empty() {
writeln!(f, "Recent Votes:")?;
for vote in &self.votes {
@@ -1093,19 +1093,22 @@ pub struct CliBlockTime {
impl QuietDisplay for CliBlockTime {}
impl VerboseDisplay for CliBlockTime {}
fn unix_timestamp_to_string(unix_timestamp: UnixTimestamp) -> String {
format!(
"{} (UnixTimestamp: {})",
match NaiveDateTime::from_timestamp_opt(unix_timestamp, 0) {
Some(ndt) =>
DateTime::<Utc>::from_utc(ndt, Utc).to_rfc3339_opts(SecondsFormat::Secs, true),
None => "unknown".to_string(),
},
unix_timestamp,
)
}
impl fmt::Display for CliBlockTime {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Block:", &self.slot.to_string())?;
writeln_name_value(
f,
"Date:",
&format!(
"{} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(self.timestamp, 0), Utc)
.to_rfc3339_opts(SecondsFormat::Secs, true),
self.timestamp
),
)
writeln_name_value(f, "Date:", &unix_timestamp_to_string(self.timestamp))
}
}
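
The hunk above replaces ad-hoc `NaiveDateTime::from_timestamp` calls (which panic on out-of-range values) with a shared `unix_timestamp_to_string` helper built on the non-panicking `from_timestamp_opt`. A self-contained usage sketch, assuming chrono 0.4:

```rust
use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};

// Format a Unix timestamp as RFC 3339, falling back to "unknown" for values
// chrono cannot represent instead of panicking.
fn unix_timestamp_to_string(unix_timestamp: i64) -> String {
    format!(
        "{} (UnixTimestamp: {})",
        match NaiveDateTime::from_timestamp_opt(unix_timestamp, 0) {
            Some(ndt) => {
                DateTime::<Utc>::from_utc(ndt, Utc).to_rfc3339_opts(SecondsFormat::Secs, true)
            }
            None => "unknown".to_string(),
        },
        unix_timestamp,
    )
}

fn main() {
    println!("{}", unix_timestamp_to_string(1_600_000_000)); // in range
    println!("{}", unix_timestamp_to_string(i64::MAX)); // out of range -> "unknown"
}
```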


@@ -197,6 +197,15 @@ pub fn write_transaction<W: io::Write>(
)?;
}
}
if let Some(log_messages) = &transaction_status.log_messages {
if !log_messages.is_empty() {
writeln!(w, "{}Log Messages:", prefix,)?;
for log_message in log_messages {
writeln!(w, "{} {}", prefix, log_message,)?;
}
}
}
} else {
writeln!(w, "{}Status: Unavailable", prefix)?;
}


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.4.0"
version = "1.4.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,39 +16,41 @@ clap = "2.33.1"
criterion-stats = "0.3.0"
ctrlc = { version = "3.1.5", features = ["termination"] }
console = "0.11.3"
dirs = "2.0.2"
dirs-next = "2.0.0"
log = "0.4.8"
Inflector = "0.11.4"
indicatif = "0.15.0"
humantime = "2.0.1"
num-traits = "0.2"
pretty-hex = "0.1.1"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.4.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" }
solana-cli-config = { path = "../cli-config", version = "1.4.0" }
solana-cli-output = { path = "../cli-output", version = "1.4.0" }
solana-client = { path = "../client", version = "1.4.0" }
solana-config-program = { path = "../programs/config", version = "1.4.0" }
solana-faucet = { path = "../faucet", version = "1.4.0" }
solana-logger = { path = "../logger", version = "1.4.0" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.0" }
solana-runtime = { path = "../runtime", version = "1.4.0" }
solana-sdk = { path = "../sdk", version = "1.4.0" }
solana-stake-program = { path = "../programs/stake", version = "1.4.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.0" }
solana-version = { path = "../version", version = "1.4.0" }
solana-vote-program = { path = "../programs/vote", version = "1.4.0" }
solana-vote-signer = { path = "../vote-signer", version = "1.4.0" }
solana-account-decoder = { path = "../account-decoder", version = "1.4.4" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.4.4" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.4" }
solana-cli-config = { path = "../cli-config", version = "1.4.4" }
solana-cli-output = { path = "../cli-output", version = "1.4.4" }
solana-client = { path = "../client", version = "1.4.4" }
solana-config-program = { path = "../programs/config", version = "1.4.4" }
solana-faucet = { path = "../faucet", version = "1.4.4" }
solana-logger = { path = "../logger", version = "1.4.4" }
solana-net-utils = { path = "../net-utils", version = "1.4.4" }
solana_rbpf = "=0.1.32"
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.4" }
solana-sdk = { path = "../sdk", version = "1.4.4" }
solana-stake-program = { path = "../programs/stake", version = "1.4.4" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.4" }
solana-version = { path = "../version", version = "1.4.4" }
solana-vote-program = { path = "../programs/vote", version = "1.4.4" }
solana-vote-signer = { path = "../vote-signer", version = "1.4.4" }
thiserror = "1.0.20"
tiny-bip39 = "0.7.0"
url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "1.4.0" }
solana-core = { path = "../core", version = "1.4.4" }
tempfile = "3.1.0"
[[bin]]


@@ -54,12 +54,42 @@ pub fn check_account_for_multiple_fees_with_commitment(
fee_calculator: &FeeCalculator,
messages: &[&Message],
commitment: CommitmentConfig,
) -> Result<(), CliError> {
check_account_for_spend_multiple_fees_with_commitment(
rpc_client,
account_pubkey,
0,
fee_calculator,
messages,
commitment,
)
}
pub fn check_account_for_spend_multiple_fees_with_commitment(
rpc_client: &RpcClient,
account_pubkey: &Pubkey,
balance: u64,
fee_calculator: &FeeCalculator,
messages: &[&Message],
commitment: CommitmentConfig,
) -> Result<(), CliError> {
let fee = calculate_fee(fee_calculator, messages);
if !check_account_for_balance_with_commitment(rpc_client, account_pubkey, fee, commitment)
.map_err(Into::<ClientError>::into)?
if !check_account_for_balance_with_commitment(
rpc_client,
account_pubkey,
balance + fee,
commitment,
)
.map_err(Into::<ClientError>::into)?
{
return Err(CliError::InsufficientFundsForFee(lamports_to_sol(fee)));
if balance > 0 {
return Err(CliError::InsufficientFundsForSpendAndFee(
lamports_to_sol(balance),
lamports_to_sol(fee),
));
} else {
return Err(CliError::InsufficientFundsForFee(lamports_to_sol(fee)));
}
}
Ok(())
}
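
The new `check_account_for_spend_multiple_fees_with_commitment` above extends the fee check to also cover lamports being spent, and distinguishes the two failure modes. A hypothetical pure-logic sketch of that decision (working in lamports rather than SOL, and ignoring the RPC balance lookup):

```rust
#[derive(Debug, PartialEq)]
enum CheckError {
    InsufficientFundsForFee(u64),
    InsufficientFundsForSpendAndFee(u64, u64),
}

// Given the account balance, the lamports about to be spent, and the total fee
// for all messages, report which error (if any) applies.
fn check_spend_and_fee(balance: u64, spend: u64, fee: u64) -> Result<(), CheckError> {
    if balance >= spend.saturating_add(fee) {
        Ok(())
    } else if spend > 0 {
        Err(CheckError::InsufficientFundsForSpendAndFee(spend, fee))
    } else {
        Err(CheckError::InsufficientFundsForFee(fee))
    }
}

fn main() {
    assert_eq!(check_spend_and_fee(10, 5, 3), Ok(()));
    assert_eq!(
        check_spend_and_fee(4, 5, 3),
        Err(CheckError::InsufficientFundsForSpendAndFee(5, 3))
    );
    assert_eq!(
        check_spend_and_fee(2, 0, 3),
        Err(CheckError::InsufficientFundsForFee(3))
    );
}
```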
@@ -131,7 +161,7 @@ mod tests {
context: RpcResponseContext { slot: 1 },
value: json!(account_balance),
});
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let fee_calculator = FeeCalculator::new(1);
let pubkey0 = Pubkey::new(&[0; 32]);
@@ -191,7 +221,7 @@ mod tests {
context: RpcResponseContext { slot: 1 },
value: json!(account_balance),
});
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let mut mocks = HashMap::new();
mocks.insert(RpcRequest::GetBalance, account_balance_response);
@@ -237,9 +267,9 @@ mod tests {
#[test]
fn test_check_unique_pubkeys() {
let pubkey0 = Pubkey::new_rand();
let pubkey0 = solana_sdk::pubkey::new_rand();
let pubkey_clone = pubkey0;
let pubkey1 = Pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
check_unique_pubkeys((&pubkey0, "foo".to_string()), (&pubkey1, "bar".to_string()))
.expect("unexpected result");


@@ -1,12 +1,15 @@
use crate::{
checks::*, cluster_query::*, feature::*, inflation::*, nonce::*, spend_utils::*, stake::*,
validator_info::*, vote::*,
checks::*, cluster_query::*, feature::*, inflation::*, nonce::*, send_tpu::*, spend_utils::*,
stake::*, validator_info::*, vote::*,
};
use bincode::serialize;
use bip39::{Language, Mnemonic, MnemonicType, Seed};
use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use log::*;
use num_traits::FromPrimitive;
use serde_json::{self, json, Value};
use solana_account_decoder::{UiAccount, UiAccountEncoding};
use solana_bpf_loader_program::bpf_verifier;
use solana_clap_utils::{
self,
commitment::commitment_arg_with_default,
@@ -30,24 +33,26 @@ use solana_client::{
rpc_client::RpcClient,
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
rpc_response::RpcKeyedAccount,
rpc_response::{RpcKeyedAccount, RpcLeaderSchedule},
};
#[cfg(not(test))]
use solana_faucet::faucet::request_airdrop_transaction;
#[cfg(test)]
use solana_faucet::faucet_mock::request_airdrop_transaction;
use solana_rbpf::vm::EbpfVm;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
bpf_loader, bpf_loader_deprecated,
clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND},
clock::{Epoch, Slot},
commitment_config::CommitmentConfig,
decode_error::DecodeError,
hash::Hash,
instruction::{Instruction, InstructionError},
loader_instruction,
message::Message,
native_token::Sol,
pubkey::{Pubkey, MAX_SEED_LEN},
signature::{Keypair, Signature, Signer, SignerError},
signature::{keypair_from_seed, Keypair, Signature, Signer, SignerError},
signers::Signers,
system_instruction::{self, SystemError},
system_program,
@@ -60,12 +65,13 @@ use solana_stake_program::{
use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding};
use solana_vote_program::vote_state::VoteAuthorize;
use std::{
cmp::min,
collections::HashMap,
error,
fmt::Write as FmtWrite,
fs::File,
io::{Read, Write},
net::{IpAddr, SocketAddr},
net::{IpAddr, SocketAddr, UdpSocket},
str::FromStr,
sync::Arc,
thread::sleep,
@@ -98,7 +104,7 @@ pub enum CliCommand {
Fees,
FirstAvailableBlock,
GetBlock {
slot: Slot,
slot: Option<Slot>,
},
GetBlockTime {
slot: Option<Slot>,
@@ -175,6 +181,7 @@ pub enum CliCommand {
program_location: String,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
},
// Stake Commands
CreateStakeAccount {
@@ -606,13 +613,13 @@ pub fn parse_command(
signers.push(signer);
1
});
let use_deprecated_loader = matches.is_present("use_deprecated_loader");
Ok(CliCommandInfo {
command: CliCommand::Deploy {
program_location: matches.value_of("program_location").unwrap().to_string(),
address,
use_deprecated_loader,
use_deprecated_loader: matches.is_present("use_deprecated_loader"),
allow_excessive_balance: matches.is_present("allow_excessive_balance"),
},
signers,
})
@@ -1026,33 +1033,50 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
) -> Result<(), Box<dyn error::Error>> {
let progress_bar = new_spinner_progress_bar();
let mut send_retries = 5;
let mut leader_schedule: Option<RpcLeaderSchedule> = None;
let mut leader_schedule_epoch = 0;
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let cluster_nodes = rpc_client.get_cluster_nodes().ok();
loop {
let mut status_retries = 15;
progress_bar.set_message("Finding leader node...");
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment)?;
if epoch_info.epoch > leader_schedule_epoch || leader_schedule.is_none() {
leader_schedule = rpc_client
.get_leader_schedule_with_commitment(Some(epoch_info.absolute_slot), commitment)?;
leader_schedule_epoch = epoch_info.epoch;
}
let tpu_address = get_leader_tpu(
min(epoch_info.slot_index + 1, epoch_info.slots_in_epoch),
leader_schedule.as_ref(),
cluster_nodes.as_ref(),
);
// Send all transactions
let mut pending_transactions = HashMap::new();
let num_transactions = transactions.len();
for transaction in transactions {
if cfg!(not(test)) {
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
// when all the write transactions modify the same program account (eg, deploying a
// new program)
sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
if let Some(tpu_address) = tpu_address {
let wire_transaction =
serialize(&transaction).expect("serialization should succeed");
send_transaction_tpu(&send_socket, &tpu_address, &wire_transaction);
} else {
let _result = rpc_client
.send_transaction_with_config(
&transaction,
RpcSendTransactionConfig {
preflight_commitment: Some(commitment.commitment),
..RpcSendTransactionConfig::default()
},
)
.ok();
}
let _result = rpc_client
.send_transaction_with_config(
&transaction,
RpcSendTransactionConfig {
preflight_commitment: Some(commitment.commitment),
..RpcSendTransactionConfig::default()
},
)
.ok();
pending_transactions.insert(transaction.signatures[0], transaction);
progress_bar.set_message(&format!(
"[{}/{}] Transactions sent",
"[{}/{}] Total Transactions sent",
pending_transactions.len(),
num_transactions
));
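
The hunk above makes `solana deploy` push write transactions straight to the current leader's TPU over UDP when the leader is known, falling back to RPC otherwise. A minimal sketch of that UDP send path, assuming `wire_transaction` holds the bincode-serialized transaction and using a placeholder TPU address (the real address comes from the cluster-nodes and leader-schedule lookup above):

```rust
use std::net::{SocketAddr, UdpSocket};

// Fire-and-forget send of already-serialized transaction bytes to a leader's TPU port.
fn send_transaction_tpu(send_socket: &UdpSocket, tpu_address: &SocketAddr, wire_transaction: &[u8]) {
    if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
        eprintln!("failed to send transaction to {}: {}", tpu_address, err);
    }
}

fn main() -> std::io::Result<()> {
    let send_socket = UdpSocket::bind("0.0.0.0:0")?;
    let tpu_address: SocketAddr = "127.0.0.1:8003".parse().unwrap(); // placeholder, not a real leader
    let wire_transaction = vec![0u8; 64]; // stand-in for a serialized transaction
    send_transaction_tpu(&send_socket, &tpu_address, &wire_transaction);
    Ok(())
}
```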
@@ -1088,6 +1112,11 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
let _ = pending_transactions.remove(&signature);
}
}
progress_bar.set_message(&format!(
"[{}/{}] Transactions confirmed",
num_transactions - pending_transactions.len(),
num_transactions
));
}
if pending_transactions.is_empty() {
@@ -1129,8 +1158,52 @@ fn process_deploy(
program_location: &str,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
) -> ProcessResult {
const WORDS: usize = 12;
// Create ephemeral keypair to use for program address, if not provided
let mnemonic = Mnemonic::new(MnemonicType::for_word_count(WORDS)?, Language::English);
let seed = Seed::new(&mnemonic, "");
let new_keypair = keypair_from_seed(seed.as_bytes())?;
let result = do_process_deploy(
rpc_client,
config,
program_location,
address,
use_deprecated_loader,
allow_excessive_balance,
new_keypair,
);
if result.is_err() && address.is_none() {
let phrase: &str = mnemonic.phrase();
let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap();
eprintln!(
"{}\nTo reuse this address, recover the ephemeral keypair file with",
divider
);
eprintln!(
"`solana-keygen recover` and the following {}-word seed phrase,",
WORDS
);
eprintln!(
"then pass it as the [PROGRAM_ADDRESS_SIGNER] argument to `solana deploy ...`\n{}\n{}\n{}",
divider, phrase, divider
);
}
result
}
fn do_process_deploy(
rpc_client: &RpcClient,
config: &CliConfig,
program_location: &str,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
new_keypair: Keypair,
) -> ProcessResult {
let new_keypair = Keypair::new(); // Create ephemeral keypair to use for program address, if not provided
let program_id = if let Some(i) = address {
config.signers[i]
} else {
@@ -1144,6 +1217,9 @@ fn process_deploy(
CliError::DynamicProgramError(format!("Unable to read program file: {}", err))
})?;
EbpfVm::create_executable_from_elf(&program_data, Some(|x| bpf_verifier::check(x, true)))
.map_err(|err| CliError::DynamicProgramError(format!("ELF error: {}", err)))?;
let loader_id = if use_deprecated_loader {
bpf_loader_deprecated::id()
} else {
@@ -1154,11 +1230,12 @@ fn process_deploy(
let signers = [config.signers[0], program_id];
// Check program account to see if partial initialization has occurred
let initial_instructions = if let Some(account) = rpc_client
let (initial_instructions, balance_needed) = if let Some(account) = rpc_client
.get_account_with_commitment(&program_id.pubkey(), config.commitment)?
.value
{
let mut instructions: Vec<Instruction> = vec![];
let mut balance_needed = 0;
if account.executable {
return Err(CliError::DynamicProgramError(
"Program account is already executable".to_string(),
@@ -1182,21 +1259,35 @@ fn process_deploy(
}
}
if account.lamports < minimum_balance {
let balance = minimum_balance - account.lamports;
instructions.push(system_instruction::transfer(
&config.signers[0].pubkey(),
&program_id.pubkey(),
minimum_balance - account.lamports,
balance,
));
balance_needed = balance;
} else if account.lamports > minimum_balance
&& system_program::check_id(&account.owner)
&& !allow_excessive_balance
{
return Err(CliError::DynamicProgramError(format!(
"Program account has a balance: {:?}; it may already be in use",
Sol(account.lamports)
))
.into());
}
instructions
(instructions, balance_needed)
} else {
vec![system_instruction::create_account(
&config.signers[0].pubkey(),
&program_id.pubkey(),
(
vec![system_instruction::create_account(
&config.signers[0].pubkey(),
&program_id.pubkey(),
minimum_balance,
program_data.len() as u64,
&loader_id,
)],
minimum_balance,
program_data.len() as u64,
&loader_id,
)]
)
};
let initial_message = if !initial_instructions.is_empty() {
Some(Message::new(
@@ -1239,9 +1330,10 @@ fn process_deploy(
.get_recent_blockhash_with_commitment(config.commitment)?
.value;
check_account_for_multiple_fees_with_commitment(
check_account_for_spend_multiple_fees_with_commitment(
rpc_client,
&config.signers[0].pubkey(),
balance_needed,
&fee_calculator,
&messages,
config.commitment,
@@ -1266,8 +1358,8 @@ fn process_deploy(
config.commitment,
config.send_transaction_config,
);
log_instruction_custom_error::<SystemError>(result, &config).map_err(|_| {
CliError::DynamicProgramError("Program account allocation failed".to_string())
log_instruction_custom_error::<SystemError>(result, &config).map_err(|err| {
CliError::DynamicProgramError(format!("Program account allocation failed: {}", err))
})?;
}
@@ -1290,8 +1382,8 @@ fn process_deploy(
config.commitment,
last_valid_slot,
)
.map_err(|_| {
CliError::DynamicProgramError("Data writes to program account failed".to_string())
.map_err(|err| {
CliError::DynamicProgramError(format!("Data writes to program account failed: {}", err))
})?;
let (blockhash, _, _) = rpc_client
@@ -1565,12 +1657,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
program_location,
address,
use_deprecated_loader,
allow_excessive_balance,
} => process_deploy(
&rpc_client,
config,
program_location,
*address,
*use_deprecated_loader,
*allow_excessive_balance,
),
// Stake Commands
@@ -2187,7 +2281,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.arg(
Arg::with_name("address_signer")
.index(2)
.value_name("SIGNER_KEYPAIR")
.value_name("PROGRAM_ADDRESS_SIGNER")
.takes_value(true)
.validator(is_valid_signer)
.help("The signer for the desired address of the program [default: new random address]")
@@ -2199,6 +2293,12 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.hidden(true) // Don't document this argument to discourage its use
.help("Use the deprecated BPF loader")
)
.arg(
Arg::with_name("allow_excessive_balance")
.long("allow-excessive-deploy-account-balance")
.takes_value(false)
.help("Use the designated program id, even if the account already holds a large balance of SOL")
)
.arg(commitment_arg_with_default("max")),
)
.subcommand(
@@ -2355,7 +2455,10 @@ mod tests {
.unwrap();
assert_eq!(signer_info.signers.len(), 1);
assert_eq!(signer_info.index_of(None), Some(0));
assert_eq!(signer_info.index_of(Some(Pubkey::new_rand())), None);
assert_eq!(
signer_info.index_of(Some(solana_sdk::pubkey::new_rand())),
None
);
let keypair0 = keypair_from_seed(&[1u8; 32]).unwrap();
let keypair0_pubkey = keypair0.pubkey();
@@ -2411,7 +2514,7 @@ mod tests {
fn test_cli_parse_command() {
let test_commands = app("test", "desc", "version");
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let pubkey_string = format!("{}", pubkey);
let default_keypair = Keypair::new();
@@ -2507,7 +2610,7 @@ mod tests {
assert!(parse_command(&test_bad_signature, &default_signer, &mut None).is_err());
// Test CreateAddressWithSeed
let from_pubkey = Some(Pubkey::new_rand());
let from_pubkey = Some(solana_sdk::pubkey::new_rand());
let from_str = from_pubkey.unwrap().to_string();
for (name, program_id) in &[
("STAKE", solana_stake_program::id()),
@@ -2564,6 +2667,7 @@ mod tests {
program_location: "/Users/test/program.o".to_string(),
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
},
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
}
@@ -2585,6 +2689,7 @@ mod tests {
program_location: "/Users/test/program.o".to_string(),
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
},
signers: vec![
read_keypair_file(&keypair_file).unwrap().into(),
@@ -2664,7 +2769,7 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let new_authorized_pubkey = Pubkey::new_rand();
let new_authorized_pubkey = solana_sdk::pubkey::new_rand();
config.signers = vec![&bob_keypair];
config.command = CliCommand::VoteAuthorize {
vote_account_pubkey: bob_pubkey,
@@ -2686,7 +2791,7 @@ mod tests {
let bob_keypair = Keypair::new();
let bob_pubkey = bob_keypair.pubkey();
let custodian = Pubkey::new_rand();
let custodian = solana_sdk::pubkey::new_rand();
config.command = CliCommand::CreateStakeAccount {
stake_account: 1,
seed: None,
@@ -2709,8 +2814,8 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand();
let to_pubkey = Pubkey::new_rand();
let stake_account_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
config.command = CliCommand::WithdrawStake {
stake_account_pubkey,
destination_account_pubkey: to_pubkey,
@@ -2727,7 +2832,7 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand();
let stake_account_pubkey = solana_sdk::pubkey::new_rand();
config.command = CliCommand::DeactivateStake {
stake_account_pubkey,
stake_authority: 0,
@@ -2740,7 +2845,7 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand();
let stake_account_pubkey = solana_sdk::pubkey::new_rand();
let split_stake_account = Keypair::new();
config.command = CliCommand::SplitStake {
stake_account_pubkey,
@@ -2758,8 +2863,8 @@ mod tests {
let result = process_command(&config);
assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand();
let source_stake_account_pubkey = Pubkey::new_rand();
let stake_account_pubkey = solana_sdk::pubkey::new_rand();
let source_stake_account_pubkey = solana_sdk::pubkey::new_rand();
let merge_stake_account = Keypair::new();
config.command = CliCommand::MergeStake {
stake_account_pubkey,
@@ -2782,7 +2887,7 @@ mod tests {
assert_eq!(process_command(&config).unwrap(), "1234");
// CreateAddressWithSeed
let from_pubkey = Pubkey::new_rand();
let from_pubkey = solana_sdk::pubkey::new_rand();
config.signers = vec![];
config.command = CliCommand::CreateAddressWithSeed {
from_pubkey: Some(from_pubkey),
@@ -2795,7 +2900,7 @@ mod tests {
assert_eq!(address.unwrap(), expected_address.to_string());
// Need airdrop cases
let to = Pubkey::new_rand();
let to = solana_sdk::pubkey::new_rand();
config.signers = vec![&keypair];
config.command = CliCommand::Airdrop {
faucet_host: None,
@@ -2898,6 +3003,7 @@ mod tests {
program_location: pathbuf.to_str().unwrap().to_string(),
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
};
let result = process_command(&config);
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
@@ -2916,6 +3022,7 @@ mod tests {
program_location: "bad/file/location.so".to_string(),
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
};
assert!(process_command(&config).is_err());
}


@@ -27,6 +27,7 @@ use solana_client::{
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
account::from_account,
account_utils::StateMut,
clock::{self, Clock, Slot},
commitment_config::CommitmentConfig,
@@ -38,8 +39,7 @@ use solana_sdk::{
system_instruction, system_program,
sysvar::{
self,
stake_history::{self, StakeHistory},
Sysvar,
stake_history::{self},
},
transaction::Transaction,
};
@@ -73,8 +73,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.validator(is_slot)
.value_name("SLOT")
.takes_value(true)
.index(1)
.required(true),
.index(1),
),
)
.subcommand(
@@ -363,7 +362,7 @@ pub fn parse_cluster_ping(
}
pub fn parse_get_block(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let slot = value_t_or_exit!(matches, "slot", Slot);
let slot = value_of(matches, "slot");
Ok(CliCommandInfo {
command: CliCommand::GetBlock { slot },
signers: vec![],
@@ -625,7 +624,7 @@ pub fn process_cluster_date(rpc_client: &RpcClient, config: &CliConfig) -> Proce
let result = rpc_client
.get_account_with_commitment(&sysvar::clock::id(), CommitmentConfig::default())?;
if let Some(clock_account) = result.value {
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| {
let clock: Clock = from_account(&clock_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?;
let block_time = CliBlockTime {
@@ -700,7 +699,17 @@ pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
Ok("".to_string())
}
pub fn process_get_block(rpc_client: &RpcClient, _config: &CliConfig, slot: Slot) -> ProcessResult {
pub fn process_get_block(
rpc_client: &RpcClient,
_config: &CliConfig,
slot: Option<Slot>,
) -> ProcessResult {
let slot = if let Some(slot) = slot {
slot
} else {
rpc_client.get_slot()?
};
let mut block =
rpc_client.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Base64)?;
@@ -716,18 +725,23 @@ pub fn process_get_block(rpc_client: &RpcClient, _config: &CliConfig, slot: Slot
let mut total_rewards = 0;
println!("Rewards:",);
println!(
" {:<44} {:<15} {:<13} {:>14}",
"Address", "Amount", "New Balance", "Percent Change"
" {:<44} {:^15} {:<15} {:<20} {:>14}",
"Address", "Type", "Amount", "New Balance", "Percent Change"
);
for reward in block.rewards {
let sign = if reward.lamports < 0 { "-" } else { "" };
total_rewards += reward.lamports;
println!(
" {:<44} {:>15} {}",
" {:<44} {:^15} {:>15} {}",
reward.pubkey,
if let Some(reward_type) = reward.reward_type {
format!("{}", reward_type)
} else {
"-".to_string()
},
format!(
"{}{:<14.4}",
"{}{:<14.9}",
sign,
lamports_to_sol(reward.lamports.abs() as u64)
),
@@ -735,7 +749,7 @@ pub fn process_get_block(rpc_client: &RpcClient, _config: &CliConfig, slot: Slot
" - -".to_string()
} else {
format!(
"{:<12.4} {:>13.4}%",
"{:<19.9} {:>13.9}%",
lamports_to_sol(reward.post_balance),
reward.lamports.abs() as f64
/ (reward.post_balance as f64 - reward.lamports as f64)
@@ -746,7 +760,7 @@ pub fn process_get_block(rpc_client: &RpcClient, _config: &CliConfig, slot: Slot
let sign = if total_rewards < 0 { "-" } else { "" };
println!(
"Total Rewards: {}{:12.9}",
"Total Rewards: {}{:<12.9}",
sign,
lamports_to_sol(total_rewards.abs() as u64)
);
@@ -1253,14 +1267,16 @@ pub fn process_show_gossip(rpc_client: &RpcClient, config: &CliConfig) -> Proces
.into_iter()
.map(|node| {
format!(
"{:15} | {:44} | {:6} | {:5} | {:5} | {}",
"{:15} | {:44} | {:6} | {:5} | {:21} | {}",
node.gossip
.map(|addr| addr.ip().to_string())
.unwrap_or_else(|| "none".to_string()),
format_labeled_address(&node.pubkey, &config.address_labels),
format_port(node.gossip),
format_port(node.tpu),
format_port(node.rpc),
node.rpc
.map(|addr| addr.to_string())
.unwrap_or_else(|| "none".to_string()),
node.version.unwrap_or_else(|| "unknown".to_string()),
)
})
@@ -1268,9 +1284,9 @@ pub fn process_show_gossip(rpc_client: &RpcClient, config: &CliConfig) -> Proces
Ok(format!(
"IP Address | Node identifier \
| Gossip | TPU | RPC | Version\n\
| Gossip | TPU | RPC Address | Version\n\
----------------+----------------------------------------------+\
--------+-------+-------+----------------\n\
--------+-------+-----------------------+----------------\n\
{}\n\
Nodes: {}",
s.join("\n"),
@@ -1325,12 +1341,12 @@ pub fn process_show_stakes(
.get_program_accounts_with_config(&solana_stake_program::id(), program_accounts_config)?;
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
let clock_account = rpc_client.get_account(&sysvar::clock::id())?;
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| {
let clock: Clock = from_account(&clock_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?;
progress_bar.finish_and_clear();
let stake_history = StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
let stake_history = from_account(&stake_history_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
})?;
@@ -1405,12 +1421,12 @@ pub fn process_show_validators(
.map(|vote_account| vote_account.activated_stake)
.sum();
let total_deliquent_stake = vote_accounts
let total_delinquent_stake = vote_accounts
.delinquent
.iter()
.map(|vote_account| vote_account.activated_stake)
.sum();
let total_current_stake = total_active_stake - total_deliquent_stake;
let total_current_stake = total_active_stake - total_delinquent_stake;
let mut current = vote_accounts.current;
current.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
@@ -1464,7 +1480,7 @@ pub fn process_show_validators(
let cli_validators = CliValidators {
total_active_stake,
total_current_stake,
total_deliquent_stake,
total_delinquent_stake,
current_validators,
delinquent_validators,
stake_by_version,


@@ -9,12 +9,13 @@ use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::*};
use solana_cli_output::{QuietDisplay, VerboseDisplay};
use solana_client::{client_error::ClientError, rpc_client::RpcClient};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_runtime::{
use solana_sdk::{
clock::Slot,
feature::{self, Feature},
feature_set::FEATURE_NAMES,
};
use solana_sdk::{
clock::Slot, message::Message, pubkey::Pubkey, system_instruction, transaction::Transaction,
message::Message,
pubkey::Pubkey,
transaction::Transaction,
};
use std::{collections::HashMap, fmt, sync::Arc};
@@ -230,7 +231,7 @@ fn active_stake_by_feature_set(rpc_client: &RpcClient) -> Result<HashMap<u32, u6
}
// Feature activation is only allowed when 95% of the active stake is on the current feature set
fn feature_activation_allowed(rpc_client: &RpcClient) -> Result<bool, ClientError> {
fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<bool, ClientError> {
let my_feature_set = solana_version::Version::default().feature_set;
let active_stake_by_feature_set = active_stake_by_feature_set(rpc_client)?;
@@ -240,8 +241,8 @@ fn feature_activation_allowed(rpc_client: &RpcClient) -> Result<bool, ClientErro
.map(|percentage| *percentage >= 95)
.unwrap_or(false);
if !feature_activation_allowed {
println!("\n{}", style("Stake By Feature Set:").bold());
if !feature_activation_allowed && !quiet {
println!("{}", style("Stake By Feature Set:").bold());
for (feature_set, percentage) in active_stake_by_feature_set.iter() {
if *feature_set == 0 {
println!("unknown - {}%", percentage);
@@ -258,6 +259,7 @@ fn feature_activation_allowed(rpc_client: &RpcClient) -> Result<bool, ClientErro
);
}
}
println!();
}
Ok(feature_activation_allowed)
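The hunk above keeps the 95% stake threshold for feature activation and adds a quiet flag so the stake-by-feature-set table is only printed when it matters. Below is a minimal, self-contained sketch of the threshold check described in the comment; the map of feature-set id to stake percentage is an assumed input, not the CLI's real RPC plumbing.

use std::collections::HashMap;

// Illustrative only: true when the stake share on `my_feature_set` meets the
// 95% activation threshold mentioned in the comment above.
fn activation_allowed(stake_percent_by_feature_set: &HashMap<u32, u64>, my_feature_set: u32) -> bool {
    stake_percent_by_feature_set
        .get(&my_feature_set)
        .map(|percentage| *percentage >= 95)
        .unwrap_or(false)
}

fn main() {
    let mut stake_percent = HashMap::new();
    stake_percent.insert(1, 96); // 96% of active stake on feature set 1
    stake_percent.insert(0, 4);  // 4% on an unknown feature set
    assert!(activation_allowed(&stake_percent, 1));
    assert!(!activation_allowed(&stake_percent, 2));
}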
@@ -278,7 +280,7 @@ fn process_status(
let feature_id = &feature_ids[i];
let feature_name = FEATURE_NAMES.get(feature_id).unwrap();
if let Some(account) = account {
if let Some(feature) = Feature::from_account(&account) {
if let Some(feature) = feature::from_account(&account) {
let feature_status = match feature.activated_at {
None => CliFeatureStatus::Pending,
Some(activation_slot) => CliFeatureStatus::Active(activation_slot),
@@ -299,9 +301,10 @@ fn process_status(
});
}
let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?;
let feature_set = CliFeatures {
features,
feature_activation_allowed: feature_activation_allowed(rpc_client)?,
feature_activation_allowed,
inactive,
};
Ok(config.output_format.formatted_string(&feature_set))
@@ -318,12 +321,12 @@ fn process_activate(
.next()
.unwrap();
if let Some(account) = account {
if Feature::from_account(&account).is_some() {
if feature::from_account(&account).is_some() {
return Err(format!("{} has already been activated", feature_id).into());
}
}
if !feature_activation_allowed(rpc_client)? {
if !feature_activation_allowed(rpc_client, false)? {
return Err("Feature activation is not allowed at this time".into());
}
@@ -338,15 +341,11 @@ fn process_activate(
&config.signers[0].pubkey(),
|lamports| {
Message::new(
&[
system_instruction::transfer(
&config.signers[0].pubkey(),
&feature_id,
lamports,
),
system_instruction::allocate(&feature_id, Feature::size_of() as u64),
system_instruction::assign(&feature_id, &feature::id()),
],
&feature::activate_with_lamports(
&feature_id,
&config.signers[0].pubkey(),
lamports,
),
Some(&config.signers[0].pubkey()),
)
},


@@ -26,6 +26,7 @@ pub mod cluster_query;
pub mod feature;
pub mod inflation;
pub mod nonce;
pub mod send_tpu;
pub mod spend_utils;
pub mod stake;
pub mod test_utils;


@@ -580,6 +580,7 @@ mod tests {
fee_calculator::FeeCalculator,
hash::hash,
nonce::{self, state::Versions, State},
nonce_account,
signature::{read_keypair_file, write_keypair, Keypair, Signer},
system_program,
};
@@ -833,7 +834,7 @@ mod tests {
#[test]
fn test_check_nonce_account() {
let blockhash = Hash::default();
let nonce_pubkey = Pubkey::new_rand();
let nonce_pubkey = solana_sdk::pubkey::new_rand();
let data = Versions::new_current(State::Initialized(nonce::state::Data {
authority: nonce_pubkey,
blockhash,
@@ -869,7 +870,7 @@ mod tests {
}
let data = Versions::new_current(State::Initialized(nonce::state::Data {
authority: Pubkey::new_rand(),
authority: solana_sdk::pubkey::new_rand(),
blockhash,
fee_calculator: FeeCalculator::default(),
}));
@@ -891,7 +892,7 @@ mod tests {
#[test]
fn test_account_identity_ok() {
let nonce_account = nonce::create_account(1).into_inner();
let nonce_account = nonce_account::create_account(1).into_inner();
assert_eq!(account_identity_ok(&nonce_account), Ok(()));
let system_account = Account::new(1, 0, &system_program::id());
@@ -910,7 +911,7 @@ mod tests {
#[test]
fn test_state_from_account() {
let mut nonce_account = nonce::create_account(1).into_inner();
let mut nonce_account = nonce_account::create_account(1).into_inner();
assert_eq!(state_from_account(&nonce_account), Ok(State::Uninitialized));
let data = nonce::state::Data {
@@ -935,7 +936,7 @@ mod tests {
#[test]
fn test_data_from_helpers() {
let mut nonce_account = nonce::create_account(1).into_inner();
let mut nonce_account = nonce_account::create_account(1).into_inner();
let state = state_from_account(&nonce_account).unwrap();
assert_eq!(
data_from_state(&state),

cli/src/send_tpu.rs (new file, 29 lines)

@@ -0,0 +1,29 @@
use log::*;
use solana_client::rpc_response::{RpcContactInfo, RpcLeaderSchedule};
use std::net::{SocketAddr, UdpSocket};
pub fn get_leader_tpu(
slot_index: u64,
leader_schedule: Option<&RpcLeaderSchedule>,
cluster_nodes: Option<&Vec<RpcContactInfo>>,
) -> Option<SocketAddr> {
leader_schedule?
.iter()
.find(|(_pubkey, slots)| slots.iter().any(|slot| *slot as u64 == slot_index))
.and_then(|(pubkey, _)| {
cluster_nodes?
.iter()
.find(|contact_info| contact_info.pubkey == *pubkey)
.and_then(|contact_info| contact_info.tpu)
})
}
pub fn send_transaction_tpu(
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
wire_transaction: &[u8],
) {
if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
}
}
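The new helper above looks up the slot leader's TPU address from the leader schedule and gossip contact info, then sends the serialized transaction over UDP. A hedged usage sketch follows, assuming get_leader_tpu and send_transaction_tpu from this file are in scope and that the schedule, node list, and slot index were fetched via the usual RPC calls (not shown here).

use solana_client::rpc_response::{RpcContactInfo, RpcLeaderSchedule};
use std::net::UdpSocket;

// Hypothetical caller for illustration; the real CLI wires this into its
// send path rather than exposing a helper like this.
fn send_to_slot_leader(
    slot_index: u64,
    leader_schedule: Option<&RpcLeaderSchedule>,
    cluster_nodes: Option<&Vec<RpcContactInfo>>,
    wire_transaction: &[u8],
) {
    let send_socket = UdpSocket::bind("0.0.0.0:0").expect("failed to bind UDP socket");
    if let Some(tpu_address) = get_leader_tpu(slot_index, leader_schedule, cluster_nodes) {
        send_transaction_tpu(&send_socket, &tpu_address, wire_transaction);
    }
}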


@@ -23,20 +23,24 @@ use solana_cli_output::{
CliStakeType,
};
use solana_client::{
blockhash_query::BlockhashQuery, nonce_utils, rpc_client::RpcClient,
rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE,
blockhash_query::BlockhashQuery,
client_error::{ClientError, ClientErrorKind},
nonce_utils,
rpc_client::RpcClient,
rpc_custom_error,
rpc_request::{self, DELINQUENT_VALIDATOR_SLOT_DISTANCE},
};
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
account::from_account,
account_utils::StateMut,
clock::{Clock, Epoch, Slot, UnixTimestamp},
clock::{Clock, Epoch, Slot, UnixTimestamp, SECONDS_PER_DAY},
message::Message,
pubkey::Pubkey,
system_instruction::SystemError,
sysvar::{
clock,
stake_history::{self, StakeHistory},
Sysvar,
},
transaction::Transaction,
};
@@ -1605,10 +1609,26 @@ pub(crate) fn fetch_epoch_rewards(
.get(0)
.ok_or_else(|| format!("Unable to fetch first confirmed block for epoch {}", epoch))?;
let first_confirmed_block = rpc_client.get_confirmed_block_with_encoding(
let first_confirmed_block = match rpc_client.get_confirmed_block_with_encoding(
first_confirmed_block_in_epoch,
solana_transaction_status::UiTransactionEncoding::Base64,
)?;
) {
Ok(first_confirmed_block) => first_confirmed_block,
Err(ClientError {
kind:
ClientErrorKind::RpcError(rpc_request::RpcError::RpcResponseError {
code: rpc_custom_error::JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE,
message: _,
}),
request: _,
}) => {
// RPC node doesn't have this block
break;
}
Err(err) => {
return Err(err.into());
}
};
let epoch_start_time = if let Some(block_time) = first_confirmed_block.block_time {
block_time
@@ -1620,13 +1640,13 @@ pub(crate) fn fetch_epoch_rewards(
let previous_epoch_rewards = first_confirmed_block.rewards;
if let Some((effective_slot, epoch_end_time, epoch_rewards)) = epoch_info {
let wall_clock_epoch_duration =
let wallclock_epoch_duration =
{ Local.timestamp(epoch_end_time, 0) - Local.timestamp(epoch_start_time, 0) }
.to_std()?
.as_secs_f64();
const SECONDS_PER_YEAR: f64 = (24 * 60 * 60 * 356) as f64;
let percent_of_year = SECONDS_PER_YEAR / wall_clock_epoch_duration;
let wallclock_epochs_per_year =
(SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration;
if let Some(reward) = epoch_rewards
.into_iter()
@@ -1642,7 +1662,7 @@ pub(crate) fn fetch_epoch_rewards(
amount: reward.lamports.abs() as u64,
post_balance: reward.post_balance,
percent_change: balance_increase_percent,
apr: balance_increase_percent * percent_of_year,
apr: balance_increase_percent * wallclock_epochs_per_year,
});
}
}
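For context on the APR change above: the per-epoch balance increase is now scaled by wall-clock epochs per year, derived from SECONDS_PER_DAY and the 356-day constant carried over from the old SECONDS_PER_YEAR value. A small worked sketch of that arithmetic, with the epoch duration and reward percentage as assumed inputs:

fn main() {
    // Assumed inputs, for illustration only.
    let seconds_per_day: f64 = (24 * 60 * 60) as f64;
    let wallclock_epoch_duration = 2.0 * seconds_per_day; // this epoch took ~2 days
    let balance_increase_percent = 0.05; // 0.05% earned over the epoch

    // Mirrors the hunk above: (SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration
    let wallclock_epochs_per_year = (seconds_per_day * 356.0) / wallclock_epoch_duration;
    let apr = balance_increase_percent * wallclock_epochs_per_year;
    println!("{:.0} epochs/year -> APR of about {:.2}%", wallclock_epochs_per_year, apr);
}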
@@ -1676,12 +1696,11 @@ pub fn process_show_stake_account(
match stake_account.state() {
Ok(stake_state) => {
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
let stake_history =
StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
})?;
let stake_history = from_account(&stake_history_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
})?;
let clock_account = rpc_client.get_account(&clock::id())?;
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| {
let clock: Clock = from_account(&clock_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?;
@@ -1718,7 +1737,7 @@ pub fn process_show_stake_history(
use_lamports_unit: bool,
) -> ProcessResult {
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
let stake_history = StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
let stake_history = from_account::<StakeHistory>(&stake_history_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
})?;
@@ -2406,9 +2425,9 @@ mod tests {
);
// Test CreateStakeAccount SubCommand
let custodian = Pubkey::new_rand();
let custodian = solana_sdk::pubkey::new_rand();
let custodian_string = format!("{}", custodian);
let authorized = Pubkey::new_rand();
let authorized = solana_sdk::pubkey::new_rand();
let authorized_string = format!("{}", authorized);
let test_create_stake_account = test_commands.clone().get_matches_from(vec![
"test",
@@ -2546,7 +2565,7 @@ mod tests {
);
// Test DelegateStake Subcommand
let vote_account_pubkey = Pubkey::new_rand();
let vote_account_pubkey = solana_sdk::pubkey::new_rand();
let vote_account_string = vote_account_pubkey.to_string();
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test",
@@ -2573,7 +2592,7 @@ mod tests {
);
// Test DelegateStake Subcommand w/ authority
let vote_account_pubkey = Pubkey::new_rand();
let vote_account_pubkey = solana_sdk::pubkey::new_rand();
let vote_account_string = vote_account_pubkey.to_string();
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test",
@@ -2692,7 +2711,7 @@ mod tests {
);
// Test Delegate Subcommand w/ absent fee payer
let key1 = Pubkey::new_rand();
let key1 = solana_sdk::pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]);
let signer1 = format!("{}={}", key1, sig1);
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
@@ -2732,7 +2751,7 @@ mod tests {
);
// Test Delegate Subcommand w/ absent fee payer and absent nonce authority
let key2 = Pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let sig2 = Keypair::new().sign_message(&[0u8]);
let signer2 = format!("{}={}", key2, sig2);
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
@@ -3060,7 +3079,7 @@ mod tests {
);
// Test Deactivate Subcommand w/ absent fee payer
let key1 = Pubkey::new_rand();
let key1 = solana_sdk::pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]);
let signer1 = format!("{}={}", key1, sig1);
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
@@ -3097,7 +3116,7 @@ mod tests {
);
// Test Deactivate Subcommand w/ absent fee payer and nonce authority
let key2 = Pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let sig2 = Keypair::new().sign_message(&[0u8]);
let signer2 = format!("{}={}", key2, sig2);
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
@@ -3276,7 +3295,7 @@ mod tests {
let stake_account_keypair = Keypair::new();
write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap();
let source_stake_account_pubkey = Pubkey::new_rand();
let source_stake_account_pubkey = solana_sdk::pubkey::new_rand();
let test_merge_stake_account = test_commands.clone().get_matches_from(vec![
"test",
"merge-stake",


@@ -486,7 +486,7 @@ mod tests {
#[test]
fn test_parse_validator_info() {
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let keys = vec![(validator_info::id(), false), (pubkey, true)];
let config = ConfigKeys { keys };


@@ -915,7 +915,7 @@ mod tests {
);
// test init with an authed voter
let authed = Pubkey::new_rand();
let authed = solana_sdk::pubkey::new_rand();
let (keypair_file, mut tmp_file) = make_tmp_file();
let keypair = Keypair::new();
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();


@@ -55,7 +55,7 @@ fn test_cli_deploy_program() {
faucet_host: None,
faucet_port: faucet_addr.port(),
pubkey: None,
lamports: 3 * minimum_balance_for_rent_exemption, // min balance for rent exemption for two programs + leftover for tx processing
lamports: 4 * minimum_balance_for_rent_exemption, // min balance for rent exemption for three programs + leftover for tx processing
};
config.signers = vec![&keypair];
process_command(&config).unwrap();
@@ -64,6 +64,7 @@ fn test_cli_deploy_program() {
program_location: pathbuf.to_str().unwrap().to_string(),
address: None,
use_deprecated_loader: false,
allow_excessive_balance: false,
};
let response = process_command(&config);
@@ -98,6 +99,7 @@ fn test_cli_deploy_program() {
program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
};
process_command(&config).unwrap();
let account1 = rpc_client
@@ -113,6 +115,44 @@ fn test_cli_deploy_program() {
// Attempt to redeploy to the same address
process_command(&config).unwrap_err();
// Attempt to deploy to account with excess balance
let custom_address_keypair = Keypair::new();
config.command = CliCommand::Airdrop {
faucet_host: None,
faucet_port: faucet_addr.port(),
pubkey: None,
lamports: 2 * minimum_balance_for_rent_exemption, // Anything over minimum_balance_for_rent_exemption should trigger err
};
config.signers = vec![&custom_address_keypair];
process_command(&config).unwrap();
config.signers = vec![&keypair, &custom_address_keypair];
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
};
process_command(&config).unwrap_err();
// Use forcing parameter to deploy to account with excess balance
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: true,
};
process_command(&config).unwrap();
let account2 = rpc_client
.get_account_with_commitment(&custom_address_keypair.pubkey(), CommitmentConfig::recent())
.unwrap()
.value
.unwrap();
assert_eq!(account2.lamports, 2 * minimum_balance_for_rent_exemption);
assert_eq!(account2.owner, bpf_loader::id());
assert_eq!(account2.executable, true);
assert_eq!(account0.data, account2.data);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}


@@ -172,7 +172,7 @@ fn full_battery_tests(
assert_ne!(first_nonce, third_nonce);
// Withdraw from nonce account
let payee_pubkey = Pubkey::new_rand();
let payee_pubkey = solana_sdk::pubkey::new_rand();
config_payer.signers = authorized_signers;
config_payer.command = CliCommand::WithdrawFromNonceAccount {
nonce_account,


@@ -12,7 +12,6 @@ use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
account_utils::StateMut,
commitment_config::CommitmentConfig,
pubkey::Pubkey,
signature::{Keypair, Signer},
};
use solana_vote_program::vote_state::{VoteAuthorize, VoteState, VoteStateVersions};
@@ -110,7 +109,7 @@ fn test_vote_authorize_and_withdraw() {
assert_eq!(authorized_withdrawer, withdraw_authority.pubkey());
// Withdraw from vote account
let destination_account = Pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
config.signers = vec![&default_signer, &withdraw_authority];
config.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey,


@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.4.0"
version = "1.4.4"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -17,16 +17,18 @@ indicatif = "0.15.0"
jsonrpc-core = "15.0.0"
log = "0.4.8"
rayon = "1.4.0"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "0.11.0"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.4.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" }
solana-sdk = { path = "../sdk", version = "1.4.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.0" }
solana-vote-program = { path = "../programs/vote", version = "1.4.0" }
solana-account-decoder = { path = "../account-decoder", version = "1.4.4" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.4" }
solana-net-utils = { path = "../net-utils", version = "1.4.4" }
solana-sdk = { path = "../sdk", version = "1.4.4" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.4" }
solana-version = { path = "../version", version = "1.4.4" }
solana-vote-program = { path = "../programs/vote", version = "1.4.4" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -35,7 +37,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "15.0.0"
jsonrpc-http-server = "15.0.0"
solana-logger = { path = "../logger", version = "1.4.0" }
solana-logger = { path = "../logger", version = "1.4.4" }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]


@@ -50,10 +50,10 @@ impl Into<TransportError> for ClientErrorKind {
#[derive(Error, Debug)]
#[error("{kind}")]
pub struct ClientError {
request: Option<rpc_request::RpcRequest>,
pub request: Option<rpc_request::RpcRequest>,
#[source]
kind: ClientErrorKind,
pub kind: ClientErrorKind,
}
impl ClientError {


@@ -27,6 +27,13 @@ impl HttpSender {
}
}
#[derive(Deserialize, Debug)]
struct RpcErrorObject {
code: i64,
message: String,
/*data field omitted*/
}
impl RpcSender for HttpSender {
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
// Concurrent requests are not supported so reuse the same request id for all requests
@@ -63,11 +70,20 @@ impl RpcSender for HttpSender {
let json: serde_json::Value = serde_json::from_str(&response.text()?)?;
if json["error"].is_object() {
return Err(RpcError::RpcRequestError(format!(
"RPC Error response: {}",
serde_json::to_string(&json["error"]).unwrap()
))
.into());
return match serde_json::from_value::<RpcErrorObject>(json["error"].clone())
{
Ok(rpc_error_object) => Err(RpcError::RpcResponseError {
code: rpc_error_object.code,
message: rpc_error_object.message,
}
.into()),
Err(err) => Err(RpcError::RpcRequestError(format!(
"Failed to deserialize RPC error response: {} [{}]",
serde_json::to_string(&json["error"]).unwrap(),
err
))
.into()),
};
}
return Ok(json["result"].clone());
}
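The change above turns the raw JSON-RPC "error" object into a typed value, so callers receive RpcError::RpcResponseError { code, message } rather than a pre-formatted string. A minimal sketch of just the deserialization step, assuming serde (with derive) and serde_json as in this crate's dependencies:

use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct RpcErrorObject {
    code: i64,
    message: String,
}

fn main() {
    // Shape of a typical JSON-RPC error object returned by the server.
    let error_json = serde_json::json!({
        "code": -32004,
        "message": "Block not available for slot 5"
    });
    match serde_json::from_value::<RpcErrorObject>(error_json) {
        Ok(err) => println!("structured RPC error {}: {}", err.code, err.message),
        Err(err) => println!("failed to deserialize error object: {}", err),
    }
}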


@@ -10,6 +10,7 @@ pub mod perf_utils;
pub mod pubsub_client;
pub mod rpc_client;
pub mod rpc_config;
pub mod rpc_custom_error;
pub mod rpc_filter;
pub mod rpc_request;
pub mod rpc_response;


@@ -1,17 +1,19 @@
use crate::{
client_error::Result,
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext},
rpc_response::{Response, RpcResponseContext, RpcVersionInfo},
rpc_sender::RpcSender,
};
use serde_json::{Number, Value};
use serde_json::{json, Number, Value};
use solana_sdk::{
epoch_info::EpochInfo,
fee_calculator::{FeeCalculator, FeeRateGovernor},
instruction::InstructionError,
signature::Signature,
transaction::{self, Transaction, TransactionError},
};
use solana_transaction_status::TransactionStatus;
use solana_version::Version;
use std::{collections::HashMap, sync::RwLock};
pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
@@ -57,6 +59,13 @@ impl RpcSender for MockSender {
serde_json::to_value(FeeCalculator::default()).unwrap(),
),
})?,
RpcRequest::GetEpochInfo => serde_json::to_value(EpochInfo {
epoch: 1,
slot_index: 2,
slots_in_epoch: 32,
absolute_slot: 34,
block_height: 34,
})?,
RpcRequest::GetFeeCalculatorForBlockhash => {
let value = if self.url == "blockhash_expired" {
Value::Null
@@ -112,13 +121,20 @@ impl RpcSender for MockSender {
Signature::new(&[8; 64]).to_string()
} else {
let tx_str = params.as_array().unwrap()[0].as_str().unwrap().to_string();
let data = bs58::decode(tx_str).into_vec().unwrap();
let data = base64::decode(tx_str).unwrap();
let tx: Transaction = bincode::deserialize(&data).unwrap();
tx.signatures[0].to_string()
};
Value::String(signature)
}
RpcRequest::GetMinimumBalanceForRentExemption => Value::Number(Number::from(20)),
RpcRequest::GetVersion => {
let version = Version::default();
json!(RpcVersionInfo {
solana_core: version.to_string(),
feature_set: Some(version.feature_set),
})
}
_ => Value::Null,
};
Ok(val)


@@ -41,12 +41,14 @@ use solana_transaction_status::{
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
net::SocketAddr,
sync::RwLock,
thread::sleep,
time::{Duration, Instant},
};
pub struct RpcClient {
sender: Box<dyn RpcSender + Send + Sync + 'static>,
default_cluster_transaction_encoding: RwLock<Option<UiTransactionEncoding>>,
}
fn serialize_encode_transaction(
@@ -73,6 +75,7 @@ impl RpcClient {
pub fn new_sender<T: RpcSender + Send + Sync + 'static>(sender: T) -> Self {
Self {
sender: Box::new(sender),
default_cluster_transaction_encoding: RwLock::new(None),
}
}
@@ -128,12 +131,45 @@ impl RpcClient {
self.send_transaction_with_config(transaction, RpcSendTransactionConfig::default())
}
fn default_cluster_transaction_encoding(&self) -> Result<UiTransactionEncoding, RpcError> {
let default_cluster_transaction_encoding =
self.default_cluster_transaction_encoding.read().unwrap();
if let Some(encoding) = *default_cluster_transaction_encoding {
Ok(encoding)
} else {
drop(default_cluster_transaction_encoding);
let cluster_version = self.get_version().map_err(|e| {
RpcError::RpcRequestError(format!("cluster version query failed: {}", e))
})?;
let cluster_version =
semver::Version::parse(&cluster_version.solana_core).map_err(|e| {
RpcError::RpcRequestError(format!("failed to parse cluster version: {}", e))
})?;
// Prefer base64 since 1.3.16
let encoding = if cluster_version < semver::Version::new(1, 3, 16) {
UiTransactionEncoding::Base58
} else {
UiTransactionEncoding::Base64
};
*self.default_cluster_transaction_encoding.write().unwrap() = Some(encoding);
Ok(encoding)
}
}
pub fn send_transaction_with_config(
&self,
transaction: &Transaction,
config: RpcSendTransactionConfig,
) -> ClientResult<Signature> {
let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58);
let encoding = if let Some(encoding) = config.encoding {
encoding
} else {
self.default_cluster_transaction_encoding()?
};
let config = RpcSendTransactionConfig {
encoding: Some(encoding),
..config
};
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
let signature_base58_str: String = self.send(
RpcRequest::SendTransaction,
@@ -161,26 +197,28 @@ impl RpcClient {
pub fn simulate_transaction(
&self,
transaction: &Transaction,
sig_verify: bool,
) -> RpcResult<RpcSimulateTransactionResult> {
self.simulate_transaction_with_config(
transaction,
sig_verify,
RpcSimulateTransactionConfig::default(),
)
self.simulate_transaction_with_config(transaction, RpcSimulateTransactionConfig::default())
}
pub fn simulate_transaction_with_config(
&self,
transaction: &Transaction,
sig_verify: bool,
config: RpcSimulateTransactionConfig,
) -> RpcResult<RpcSimulateTransactionResult> {
let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58);
let encoding = if let Some(encoding) = config.encoding {
encoding
} else {
self.default_cluster_transaction_encoding()?
};
let config = RpcSimulateTransactionConfig {
encoding: Some(encoding),
..config
};
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
self.send(
RpcRequest::SimulateTransaction,
json!([serialized_encoded, { "sigVerify": sig_verify }]),
json!([serialized_encoded, config]),
)
}
@@ -1418,7 +1456,7 @@ mod tests {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = Pubkey::new_rand();
let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
@@ -1471,7 +1509,7 @@ mod tests {
let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new();
let to = Pubkey::new_rand();
let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let result = rpc_client.send_and_confirm_transaction(&tx);
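To summarize the RpcClient change above: the default transaction encoding is no longer hard-coded to base58; the client asks the cluster for its version once (cached in a RwLock) and prefers base64 from 1.3.16 onward. A standalone sketch of that version decision, assuming only the semver crate; the cache and the get_version RPC call are omitted.

#[derive(Debug, PartialEq)]
enum Encoding {
    Base58,
    Base64,
}

// Illustrative decision function; the real client stores the result in a
// RwLock<Option<UiTransactionEncoding>> and consults it on every send.
fn preferred_encoding(cluster_version: &str) -> Result<Encoding, String> {
    let version = semver::Version::parse(cluster_version).map_err(|err| err.to_string())?;
    // Base64-encoded transactions are accepted by clusters at 1.3.16 and newer.
    Ok(if version < semver::Version::new(1, 3, 16) {
        Encoding::Base58
    } else {
        Encoding::Base64
    })
}

fn main() {
    assert_eq!(preferred_encoding("1.3.15").unwrap(), Encoding::Base58);
    assert_eq!(preferred_encoding("1.4.4").unwrap(), Encoding::Base64);
}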


@@ -1,13 +1,15 @@
//! Implementation defined RPC server errors
use crate::rpc_response::RpcSimulateTransactionResult;
use jsonrpc_core::{Error, ErrorCode};
use solana_client::rpc_response::RpcSimulateTransactionResult;
use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;
const JSON_RPC_SERVER_ERROR_3: i64 = -32003;
const JSON_RPC_SERVER_ERROR_4: i64 = -32004;
const JSON_RPC_SERVER_ERROR_5: i64 = -32005;
const JSON_RPC_SERVER_ERROR_6: i64 = -32006;
pub const JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP: i64 = -32001;
pub const JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE: i64 = -32002;
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE: i64 = -32003;
pub const JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE: i64 = -32004;
pub const JSON_RPC_SERVER_ERROR_NODE_UNHEALTHLY: i64 = -32005;
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_PRECOMPILE_VERIFICATION_FAILURE: i64 = -32006;
pub enum RpcCustomError {
BlockCleanedUp {
@@ -33,7 +35,7 @@ impl From<RpcCustomError> for Error {
slot,
first_available_block,
} => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_1),
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP),
message: format!(
"Block {} cleaned up, does not exist on node. First available block: {}",
slot, first_available_block,
@@ -41,27 +43,33 @@ impl From<RpcCustomError> for Error {
data: None,
},
RpcCustomError::SendTransactionPreflightFailure { message, result } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_2),
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE,
),
message,
data: Some(serde_json::json!(result)),
},
RpcCustomError::TransactionSignatureVerificationFailure => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_3),
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE,
),
message: "Transaction signature verification failure".to_string(),
data: None,
},
RpcCustomError::BlockNotAvailable { slot } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_4),
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE),
message: format!("Block not available for slot {}", slot),
data: None,
},
RpcCustomError::RpcNodeUnhealthy => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_5),
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_NODE_UNHEALTHLY),
message: "RPC node is unhealthy".to_string(),
data: None,
},
RpcCustomError::TransactionPrecompileVerificationFailure(e) => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_6),
code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE,
),
message: format!("Transaction precompile verification failure {:?}", e),
data: None,
},
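With the server error codes now exported under descriptive names, client code can branch on them instead of repeating magic numbers; the cli/src/stake.rs hunk earlier does exactly this for JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE. A small sketch of that pattern follows; the constants are copied locally so the snippet compiles on its own.

// Values mirror solana-client's rpc_custom_error constants shown above.
const JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP: i64 = -32001;
const JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE: i64 = -32004;

// Illustrative: decide whether a block-fetch loop should stop paging when the
// RPC node reports one of these codes.
fn block_is_gone(code: i64) -> bool {
    matches!(
        code,
        JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE | JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP
    )
}

fn main() {
    assert!(block_is_gone(-32004));
    assert!(!block_is_gone(-32002));
}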


@@ -140,8 +140,10 @@ impl RpcRequest {
#[derive(Debug, Error)]
pub enum RpcError {
#[error("rpc request error: {0}")]
#[error("RPC request error: {0}")]
RpcRequestError(String),
#[error("RPC response error {code}: {message}")]
RpcResponseError { code: i64, message: String },
#[error("parse error: expected {0}")]
ParseError(String), /* "expected" */
// Anything in a `ForUser` needs to die. The caller should be
@@ -226,7 +228,7 @@ mod tests {
// Test request with CommitmentConfig and params
let test_request = RpcRequest::GetTokenAccountsByOwner;
let mint = Pubkey::new_rand();
let mint = solana_sdk::pubkey::new_rand();
let token_account_filter = RpcTokenAccountsFilter::Mint(mint.to_string());
let request = test_request
.build_request_json(1, json!([addr, token_account_filter, commitment_config]));


@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.4.0"
version = "1.4.4"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -34,53 +34,54 @@ jsonrpc-http-server = "15.0.0"
jsonrpc-pubsub = "15.0.0"
jsonrpc-ws-server = "15.0.0"
log = "0.4.8"
lru = "0.6.0"
num_cpus = "1.13.0"
num-traits = "0.2"
rand = "0.7.0"
rand_chacha = "0.2.2"
raptorq = "1.4.2"
rayon = "1.4.0"
rayon = "1.4.1"
regex = "1.3.9"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.4.0" }
solana-banks-server = { path = "../banks-server", version = "1.4.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.4.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" }
solana-client = { path = "../client", version = "1.4.0" }
solana-faucet = { path = "../faucet", version = "1.4.0" }
solana-ledger = { path = "../ledger", version = "1.4.0" }
solana-logger = { path = "../logger", version = "1.4.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.4.0" }
solana-metrics = { path = "../metrics", version = "1.4.0" }
solana-measure = { path = "../measure", version = "1.4.0" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" }
solana-perf = { path = "../perf", version = "1.4.0" }
solana-runtime = { path = "../runtime", version = "1.4.0" }
solana-sdk = { path = "../sdk", version = "1.4.0" }
solana-sdk-macro-frozen-abi = { path = "../sdk/macro-frozen-abi", version = "1.4.0" }
solana-stake-program = { path = "../programs/stake", version = "1.4.0" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.4.0" }
solana-streamer = { path = "../streamer", version = "1.4.0" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.4.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.0" }
solana-version = { path = "../version", version = "1.4.0" }
solana-vote-program = { path = "../programs/vote", version = "1.4.0" }
solana-vote-signer = { path = "../vote-signer", version = "1.4.0" }
spl-token-v2-0 = { package = "spl-token", version = "=2.0.6", features = ["skip-no-mangle"] }
solana-account-decoder = { path = "../account-decoder", version = "1.4.4" }
solana-banks-server = { path = "../banks-server", version = "1.4.4" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.4" }
solana-client = { path = "../client", version = "1.4.4" }
solana-faucet = { path = "../faucet", version = "1.4.4" }
solana-frozen-abi = { path = "../frozen-abi", version = "1.4.4" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "1.4.4" }
solana-ledger = { path = "../ledger", version = "1.4.4" }
solana-logger = { path = "../logger", version = "1.4.4" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.4.4" }
solana-metrics = { path = "../metrics", version = "1.4.4" }
solana-measure = { path = "../measure", version = "1.4.4" }
solana-net-utils = { path = "../net-utils", version = "1.4.4" }
solana-perf = { path = "../perf", version = "1.4.4" }
solana-runtime = { path = "../runtime", version = "1.4.4" }
solana-sdk = { path = "../sdk", version = "1.4.4" }
solana-stake-program = { path = "../programs/stake", version = "1.4.4" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.4.4" }
solana-streamer = { path = "../streamer", version = "1.4.4" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.4.4" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.4" }
solana-version = { path = "../version", version = "1.4.4" }
solana-vote-program = { path = "../programs/vote", version = "1.4.4" }
solana-vote-signer = { path = "../vote-signer", version = "1.4.4" }
spl-token-v2-0 = { package = "spl-token", version = "=2.0.8" }
tempfile = "3.1.0"
thiserror = "1.0"
tokio = { version = "0.2.22", features = ["full"] }
tokio = { version = "0.2", features = ["full"] }
tokio_01 = { version = "0.1", package = "tokio" }
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
tokio_io_01 = { version = "0.1", package = "tokio-io" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.4.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.4.4" }
trees = "0.2.1"
[dev-dependencies]
matches = "0.1.6"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
systemstat = "0.1.5"
@@ -94,6 +95,9 @@ name = "banking_stage"
[[bench]]
name = "blockstore"
[[bench]]
name = "crds"
[[bench]]
name = "crds_gossip_pull"


@@ -20,7 +20,7 @@ use solana_runtime::bank::Bank;
use solana_sdk::genesis_config::GenesisConfig;
use solana_sdk::hash::Hash;
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signature;
use solana_sdk::signature::Signer;
@@ -56,7 +56,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
let bank = Arc::new(Bank::new(&genesis_config));
let ledger_path = get_tmp_ledger_path!();
let my_pubkey = Pubkey::new_rand();
let my_pubkey = pubkey::new_rand();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
@@ -94,15 +94,15 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
}
fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
let to_pubkey = Pubkey::new_rand();
let to_pubkey = pubkey::new_rand();
let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
(0..txes)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new_rand();
new.message.account_keys[1] = Pubkey::new_rand();
new.message.account_keys[0] = pubkey::new_rand();
new.message.account_keys[1] = pubkey::new_rand();
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
@@ -117,7 +117,7 @@ fn make_programs_txs(txes: usize, hash: Hash) -> Vec<Transaction> {
let mut instructions = vec![];
let from_key = Keypair::new();
for _ in 1..progs {
let to_key = Pubkey::new_rand();
let to_key = pubkey::new_rand();
instructions.push(system_instruction::transfer(&from_key.pubkey(), &to_key, 1));
}
let message = Message::new(&instructions, Some(&from_key.pubkey()));


@@ -8,7 +8,7 @@ use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
use solana_ledger::shred::Shred;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::pubkey;
use solana_sdk::timing::timestamp;
use std::{
collections::HashMap,
@@ -20,7 +20,7 @@ use test::Bencher;
#[bench]
fn broadcast_shreds_bench(bencher: &mut Bencher) {
solana_logger::setup();
let leader_pubkey = Pubkey::new_rand();
let leader_pubkey = pubkey::new_rand();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info);
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
@@ -30,7 +30,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
let mut stakes = HashMap::new();
const NUM_PEERS: usize = 200;
for _ in 0..NUM_PEERS {
let id = Pubkey::new_rand();
let id = pubkey::new_rand();
let contact_info = ContactInfo::new_localhost(&id, timestamp());
cluster_info.insert_info(contact_info);
stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);

core/benches/crds.rs (new file, 31 lines)

@@ -0,0 +1,31 @@
#![feature(test)]
extern crate test;
use rand::{thread_rng, Rng};
use rayon::ThreadPoolBuilder;
use solana_core::crds::Crds;
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use solana_core::crds_value::CrdsValue;
use solana_sdk::pubkey::Pubkey;
use std::collections::HashMap;
use test::Bencher;
#[bench]
fn bench_find_old_labels(bencher: &mut Bencher) {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut rng = thread_rng();
let mut crds = Crds::default();
let now = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS + CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 1000;
std::iter::repeat_with(|| (CrdsValue::new_rand(&mut rng), rng.gen_range(0, now)))
.take(50_000)
.for_each(|(v, ts)| assert!(crds.insert(v, ts).is_ok()));
let mut timeouts = HashMap::new();
timeouts.insert(Pubkey::default(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS);
bencher.iter(|| {
let out = crds.find_old_labels(&thread_pool, now, &timeouts);
assert!(out.len() > 10);
assert!(out.len() < 250);
out
});
}
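As a usage note, this new bench pairs with the [[bench]] name = "crds" entry added to core/Cargo.toml above; it should be runnable with something like cargo +nightly bench -p solana-core --bench crds, since #![feature(test)] requires a nightly toolchain.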


@@ -8,13 +8,13 @@ use solana_core::cluster_info::MAX_BLOOM_SIZE;
use solana_core::crds::Crds;
use solana_core::crds_gossip_pull::{CrdsFilter, CrdsGossipPull};
use solana_core::crds_value::CrdsValue;
use solana_sdk::hash::Hash;
use solana_sdk::hash;
use test::Bencher;
#[bench]
fn bench_hash_as_u64(bencher: &mut Bencher) {
let mut rng = thread_rng();
let hashes: Vec<_> = std::iter::repeat_with(|| Hash::new_rand(&mut rng))
let hashes: Vec<_> = std::iter::repeat_with(|| hash::new_rand(&mut rng))
.take(1000)
.collect();
bencher.iter(|| {
@@ -34,7 +34,7 @@ fn bench_build_crds_filters(bencher: &mut Bencher) {
for _ in 0..50_000 {
crds_gossip_pull
.purged_values
.push_back((Hash::new_rand(&mut rng), rng.gen()));
.push_back((solana_sdk::hash::new_rand(&mut rng), rng.gen()));
}
let mut num_inserts = 0;
for _ in 0..90_000 {


@@ -7,14 +7,14 @@ use solana_core::contact_info::ContactInfo;
use solana_core::crds::VersionedCrdsValue;
use solana_core::crds_shards::CrdsShards;
use solana_core::crds_value::{CrdsData, CrdsValue};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::pubkey;
use solana_sdk::timing::timestamp;
use test::Bencher;
const CRDS_SHARDS_BITS: u32 = 8;
fn new_test_crds_value() -> VersionedCrdsValue {
let data = CrdsData::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp()));
let data = CrdsData::ContactInfo(ContactInfo::new_localhost(&pubkey::new_rand(), timestamp()));
VersionedCrdsValue::new(timestamp(), CrdsValue::new_unsigned(data))
}


@@ -14,7 +14,7 @@ use solana_perf::packet::to_packets_chunked;
use solana_perf::test_tx::test_tx;
use solana_runtime::bank::Bank;
use solana_runtime::bank_forks::BankForks;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::pubkey;
use solana_sdk::timing::timestamp;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicUsize, Ordering};
@@ -34,7 +34,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
const NUM_PEERS: usize = 4;
let mut peer_sockets = Vec::new();
for _ in 0..NUM_PEERS {
let id = Pubkey::new_rand();
let id = pubkey::new_rand();
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut contact_info = ContactInfo::new_localhost(&id, timestamp());
contact_info.tvu = socket.local_addr().unwrap();


@@ -1 +1 @@
../sdk/build.rs
../frozen-abi/build.rs


@@ -204,6 +204,7 @@ mod tests {
{
let message = make_accounts_hashes_message(&validator1, vec![(0, hash1)]).unwrap();
cluster_info.push_message(message);
cluster_info.flush_push_queue();
}
slot_to_hash.insert(0, hash2);
trusted_validators.insert(validator1.pubkey());
@@ -254,6 +255,7 @@ mod tests {
100,
);
}
cluster_info.flush_push_queue();
let cluster_hashes = cluster_info
.get_accounts_hash_for_node(&keypair.pubkey(), |c| c.clone())
.unwrap();


@@ -60,7 +60,7 @@ impl ForkChoice for BankWeightForkChoice {
trace!("frozen_banks {}", frozen_banks.len());
let num_old_banks = frozen_banks
.iter()
.filter(|b| b.slot() < tower.root().unwrap_or(0))
.filter(|b| b.slot() < tower.root())
.count();
let last_voted_slot = tower.last_voted_slot();


@@ -534,13 +534,14 @@ impl BankingStage {
mut loaded_accounts,
results,
inner_instructions,
transaction_logs,
mut retryable_txs,
tx_count,
signature_count,
) = bank.load_and_execute_transactions(
batch,
MAX_PROCESSING_AGE,
None,
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
);
load_execute_time.stop();
@@ -580,6 +581,7 @@ impl BankingStage {
tx_results.processing_results,
TransactionBalancesSet::new(pre_balances, post_balances),
inner_instructions,
transaction_logs,
sender,
);
}
@@ -1244,16 +1246,16 @@ mod tests {
bank.process_transaction(&fund_tx).unwrap();
// good tx
let to = Pubkey::new_rand();
let to = solana_sdk::pubkey::new_rand();
let tx = system_transaction::transfer(&mint_keypair, &to, 1, start_hash);
// good tx, but no verify
let to2 = Pubkey::new_rand();
let to2 = solana_sdk::pubkey::new_rand();
let tx_no_ver = system_transaction::transfer(&keypair, &to2, 2, start_hash);
// bad tx, AccountNotFound
let keypair = Keypair::new();
let to3 = Pubkey::new_rand();
let to3 = solana_sdk::pubkey::new_rand();
let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash);
// send 'em over
@@ -1446,9 +1448,9 @@ mod tests {
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let keypair2 = Keypair::new();
let pubkey2 = Pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
@@ -1526,7 +1528,7 @@ mod tests {
mint_keypair,
..
} = create_genesis_config(10_000);
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let transactions = vec![
None,
@@ -1607,7 +1609,7 @@ mod tests {
mint_keypair,
..
} = create_genesis_config(10_000);
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
@@ -1678,8 +1680,8 @@ mod tests {
#[test]
fn test_should_process_or_forward_packets() {
let my_pubkey = Pubkey::new_rand();
let my_pubkey1 = Pubkey::new_rand();
let my_pubkey = solana_sdk::pubkey::new_rand();
let my_pubkey1 = solana_sdk::pubkey::new_rand();
assert_eq!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, true, false,),
@@ -1725,7 +1727,7 @@ mod tests {
..
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let transactions = vec![system_transaction::transfer(
&mint_keypair,
@@ -1822,8 +1824,8 @@ mod tests {
..
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand();
let pubkey1 = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
@@ -1918,7 +1920,7 @@ mod tests {
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let transactions =
vec![
@@ -1936,7 +1938,7 @@ mod tests {
bank.slot(),
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
@@ -1976,8 +1978,8 @@ mod tests {
..
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand();
let pubkey1 = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let pubkey1 = solana_sdk::pubkey::new_rand();
let keypair1 = Keypair::new();
let success_tx =


@@ -140,13 +140,12 @@ impl BroadcastRun for BroadcastFakeShredsRun {
mod tests {
use super::*;
use crate::contact_info::ContactInfo;
use solana_sdk::pubkey::Pubkey;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
#[test]
fn test_tvu_peers_ordering() {
let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(


@@ -92,7 +92,7 @@ mod tests {
let bank0 = Arc::new(Bank::new(&genesis_config));
let tx = system_transaction::transfer(
&mint_keypair,
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
1,
genesis_config.hash(),
);


@@ -2,7 +2,7 @@ use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure;
use solana_runtime::bank::Bank;
use solana_sdk::timing::slot_duration_from_slots_per_year;
use solana_sdk::{feature_set, timing::slot_duration_from_slots_per_year};
use std::{
collections::HashMap,
sync::{
@@ -60,13 +60,24 @@ impl CacheBlockTimeService {
}
fn cache_block_time(bank: Arc<Bank>, blockstore: &Arc<Blockstore>) {
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
let epoch = bank.epoch_schedule().get_epoch(bank.slot());
let stakes = HashMap::new();
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
if bank
.feature_set
.is_active(&feature_set::timestamp_correction::id())
{
if let Err(e) = blockstore.cache_block_time(bank.slot(), bank.clock().unix_timestamp) {
error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e);
}
} else {
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
let epoch = bank.epoch_schedule().get_epoch(bank.slot());
let stakes = HashMap::new();
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
if let Err(e) = blockstore.cache_block_time(bank.slot(), slot_duration, stakes) {
error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e);
if let Err(e) =
blockstore.cache_block_time_from_slot_entries(bank.slot(), slot_duration, stakes)
{
error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e);
}
}
}
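The CacheBlockTimeService change above gates on the timestamp_correction feature: with the feature active, the block time comes straight from the bank's Clock sysvar; otherwise it falls back to the old estimate from slot duration and epoch vote-account stakes. A reduced sketch of that branch with the bank and blockstore replaced by stand-ins (none of these types are solana_runtime's):

// Stand-in for the pieces of Bank used by the branch above; illustrative only.
struct BankStub {
    timestamp_correction_active: bool,
    clock_unix_timestamp: i64,
}

fn block_time_to_cache(bank: &BankStub, estimate_from_slot_entries: impl Fn() -> i64) -> i64 {
    if bank.timestamp_correction_active {
        // Feature active: trust the timestamp recorded in the Clock sysvar.
        bank.clock_unix_timestamp
    } else {
        // Feature inactive: keep the old stake-weighted estimate.
        estimate_from_slot_entries()
    }
}

fn main() {
    let bank = BankStub { timestamp_correction_active: true, clock_unix_timestamp: 1_604_000_000 };
    assert_eq!(block_time_to_cache(&bank, || 0), 1_604_000_000);
}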

File diff suppressed because it is too large.


@@ -27,7 +27,7 @@ use solana_runtime::{
vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
};
use solana_sdk::{
clock::{Epoch, Slot},
clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT},
epoch_schedule::EpochSchedule,
hash::Hash,
pubkey::Pubkey,
@@ -98,7 +98,7 @@ impl VoteTracker {
epoch_schedule: *root_bank.epoch_schedule(),
..VoteTracker::default()
};
vote_tracker.process_new_root_bank(&root_bank);
vote_tracker.progress_with_new_root_bank(&root_bank);
assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(),
root_bank.get_leader_schedule_epoch(root_bank.slot())
@@ -174,7 +174,7 @@ impl VoteTracker {
self.keys.get_or_insert(&pubkey);
}
fn update_leader_schedule_epoch(&self, root_bank: &Bank) {
fn progress_leader_schedule_epoch(&self, root_bank: &Bank) {
// Update with any newly calculated epoch state about future epochs
let start_leader_schedule_epoch = *self.leader_schedule_epoch.read().unwrap();
let mut greatest_leader_schedule_epoch = start_leader_schedule_epoch;
@@ -205,7 +205,7 @@ impl VoteTracker {
}
}
fn update_new_root(&self, root_bank: &Bank) {
fn purge_stale_state(&self, root_bank: &Bank) {
// Purge any outdated slot data
let new_root = root_bank.slot();
let root_epoch = root_bank.epoch();
@@ -220,15 +220,15 @@ impl VoteTracker {
self.epoch_authorized_voters
.write()
.unwrap()
.retain(|epoch, _| epoch >= &root_epoch);
.retain(|epoch, _| *epoch >= root_epoch);
self.keys.purge();
*self.current_epoch.write().unwrap() = root_epoch;
}
}
fn process_new_root_bank(&self, root_bank: &Bank) {
self.update_leader_schedule_epoch(root_bank);
self.update_new_root(root_bank);
fn progress_with_new_root_bank(&self, root_bank: &Bank) {
self.progress_leader_schedule_epoch(root_bank);
self.purge_stale_state(root_bank);
}
}
@@ -425,7 +425,7 @@ impl ClusterInfoVoteListener {
blockstore: Arc<Blockstore>,
bank_notification_sender: Option<BankNotificationSender>,
) -> Result<()> {
let mut optimistic_confirmation_verifier =
let mut confirmation_verifier =
OptimisticConfirmationVerifier::new(bank_forks.read().unwrap().root());
let mut last_process_root = Instant::now();
loop {
@@ -434,21 +434,21 @@ impl ClusterInfoVoteListener {
}
let root_bank = bank_forks.read().unwrap().root_bank().clone();
if last_process_root.elapsed().as_millis() > 400 {
let unrooted_optimistic_slots = optimistic_confirmation_verifier
.get_unrooted_optimistic_slots(&root_bank, &blockstore);
if last_process_root.elapsed().as_millis() > DEFAULT_MS_PER_SLOT as u128 {
let unrooted_optimistic_slots = confirmation_verifier
.verify_for_unrooted_optimistic_slots(&root_bank, &blockstore);
// SlotVoteTracker's for all `slots` in `unrooted_optimistic_slots`
// should still be available because we haven't purged in
// `process_new_root_bank()` yet, which is called below
// `progress_with_new_root_bank()` yet, which is called below
OptimisticConfirmationVerifier::log_unrooted_optimistic_slots(
&root_bank,
&vote_tracker,
&unrooted_optimistic_slots,
);
vote_tracker.process_new_root_bank(&root_bank);
vote_tracker.progress_with_new_root_bank(&root_bank);
last_process_root = Instant::now();
}
let optimistic_confirmed_slots = Self::get_and_process_votes(
let confirmed_slots = Self::listen_and_confirm_votes(
&gossip_vote_txs_receiver,
&vote_tracker,
&root_bank,
@@ -457,19 +457,17 @@ impl ClusterInfoVoteListener {
&replay_votes_receiver,
&bank_notification_sender,
);
if let Err(e) = optimistic_confirmed_slots {
match e {
match confirmed_slots {
Ok(confirmed_slots) => {
confirmation_verifier.add_new_optimistic_confirmed_slots(confirmed_slots);
}
Err(e) => match e {
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)
| Error::ReadyTimeoutError => (),
_ => {
error!("thread {:?} error {:?}", thread::current().name(), e);
}
}
} else {
let optimistic_confirmed_slots = optimistic_confirmed_slots.unwrap();
optimistic_confirmation_verifier
.add_new_optimistic_confirmed_slots(optimistic_confirmed_slots);
},
}
}
}
@@ -483,7 +481,7 @@ impl ClusterInfoVoteListener {
verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVoteReceiver,
) -> Result<Vec<(Slot, Hash)>> {
Self::get_and_process_votes(
Self::listen_and_confirm_votes(
gossip_vote_txs_receiver,
vote_tracker,
root_bank,
@@ -494,7 +492,7 @@ impl ClusterInfoVoteListener {
)
}
fn get_and_process_votes(
fn listen_and_confirm_votes(
gossip_vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
vote_tracker: &VoteTracker,
root_bank: &Bank,
@@ -523,7 +521,7 @@ impl ClusterInfoVoteListener {
let gossip_vote_txs: Vec<_> = gossip_vote_txs_receiver.try_iter().flatten().collect();
let replay_votes: Vec<_> = replay_votes_receiver.try_iter().collect();
if !gossip_vote_txs.is_empty() || !replay_votes.is_empty() {
return Ok(Self::process_votes(
return Ok(Self::filter_and_confirm_with_new_votes(
vote_tracker,
gossip_vote_txs,
replay_votes,
@@ -541,7 +539,7 @@ impl ClusterInfoVoteListener {
}
#[allow(clippy::too_many_arguments)]
fn update_new_votes(
fn track_new_votes_and_notify_confirmations(
vote: Vote,
vote_pubkey: &Pubkey,
vote_tracker: &VoteTracker,
@@ -557,56 +555,52 @@ impl ClusterInfoVoteListener {
return;
}
let last_vote_slot = vote.slots.last().unwrap();
let last_vote_slot = *vote.slots.last().unwrap();
let last_vote_hash = vote.hash;
let root = root_bank.slot();
let last_vote_hash = vote.hash;
let mut is_new_vote = false;
for slot in vote.slots.iter().rev() {
// If slot is before the root, or so far ahead we don't have
// stake information, then ignore it
let epoch = root_bank.epoch_schedule().get_epoch(*slot);
// If slot is before the root, ignore it
for slot in vote.slots.iter().filter(|slot| **slot > root).rev() {
let slot = *slot;
// if we don't have stake information, ignore it
let epoch = root_bank.epoch_schedule().get_epoch(slot);
let epoch_stakes = root_bank.epoch_stakes(epoch);
if *slot <= root || epoch_stakes.is_none() {
if epoch_stakes.is_none() {
continue;
}
let epoch_stakes = epoch_stakes.unwrap();
let epoch_vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
let total_epoch_stake = epoch_stakes.total_stake();
let unduplicated_pubkey = vote_tracker.keys.get_or_insert(&vote_pubkey);
// The last vote slot, which is the greatest slot in the stack
// of votes in a vote transaction, qualifies for optimistic confirmation.
let update_optimistic_confirmation_info = if slot == last_vote_slot {
let stake = epoch_vote_accounts
if slot == last_vote_slot {
let vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
let stake = vote_accounts
.get(&vote_pubkey)
.map(|(stake, _)| *stake)
.unwrap_or(0);
Some((stake, last_vote_hash))
} else {
None
};
.unwrap_or_default();
let total_stake = epoch_stakes.total_stake();
// If this vote for this slot qualifies for optimistic confirmation
if let Some((stake, hash)) = update_optimistic_confirmation_info {
// Fast track processing of the last slot in a vote transactions
// so that notifications for optimistic confirmation can be sent
// as soon as possible.
let (is_confirmed, is_new) = Self::add_optimistic_confirmation_vote(
let (is_confirmed, is_new) = Self::track_optimistic_confirmation_vote(
vote_tracker,
*slot,
hash,
last_vote_slot,
last_vote_hash,
unduplicated_pubkey.clone(),
stake,
total_epoch_stake,
total_stake,
);
if is_confirmed {
new_optimistic_confirmed_slots.push((*slot, last_vote_hash));
new_optimistic_confirmed_slots.push((last_vote_slot, last_vote_hash));
// Notify subscribers about new optimistic confirmation
if let Some(sender) = bank_notification_sender {
sender
.send(BankNotification::OptimisticallyConfirmed(*slot))
.send(BankNotification::OptimisticallyConfirmed(last_vote_slot))
.unwrap_or_else(|err| {
warn!("bank_notification_sender failed: {:?}", err)
});
@@ -617,7 +611,7 @@ impl ClusterInfoVoteListener {
// By now:
// 1) The vote must have come from ReplayStage,
// 2) We've seen this vote from replay for this hash before
// (`add_optimistic_confirmation_vote()` will not set `is_new == true`
// (`track_optimistic_confirmation_vote()` will not set `is_new == true`
// for same slot different hash), so short circuit because this vote
// has no new information
@@ -629,7 +623,7 @@ impl ClusterInfoVoteListener {
is_new_vote = is_new;
}
diff.entry(*slot)
diff.entry(slot)
.or_default()
.entry(unduplicated_pubkey)
.and_modify(|seen_in_gossip_previously| {
@@ -644,7 +638,40 @@ impl ClusterInfoVoteListener {
}
}
fn process_votes(
fn filter_gossip_votes(
vote_tracker: &VoteTracker,
vote_pubkey: &Pubkey,
vote: &Vote,
gossip_tx: &Transaction,
) -> bool {
if vote.slots.is_empty() {
return false;
}
let last_vote_slot = vote.slots.last().unwrap();
// Votes from gossip need to be verified as they have not been
// verified by the replay pipeline. Determine the authorized voter
// based on the last vote slot. This will drop votes from authorized
// voters trying to make votes for slots earlier than the epoch for
// which they are authorized
let actual_authorized_voter =
vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);
if actual_authorized_voter.is_none() {
return false;
}
// Voting without the correct authorized pubkey, dump the vote
if !VoteTracker::vote_contains_authorized_voter(
&gossip_tx,
&actual_authorized_voter.unwrap(),
) {
return false;
}
true
}
fn filter_and_confirm_with_new_votes(
vote_tracker: &VoteTracker,
gossip_vote_txs: Vec<Transaction>,
replayed_votes: Vec<ReplayedVote>,
@@ -662,37 +689,13 @@ impl ClusterInfoVoteListener {
.filter_map(|gossip_tx| {
vote_transaction::parse_vote_transaction(gossip_tx)
.filter(|(vote_pubkey, vote, _)| {
if vote.slots.is_empty() {
return false;
}
let last_vote_slot = vote.slots.last().unwrap();
// Votes from gossip need to be verified as they have not been
// verified by the replay pipeline. Determine the authorized voter
// based on the last vote slot. This will drop votes from authorized
// voters trying to make votes for slots earlier than the epoch for
// which they are authorized
let actual_authorized_voter =
vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);
if actual_authorized_voter.is_none() {
return false;
}
// Voting without the correct authorized pubkey, dump the vote
if !VoteTracker::vote_contains_authorized_voter(
&gossip_tx,
&actual_authorized_voter.unwrap(),
) {
return false;
}
true
Self::filter_gossip_votes(vote_tracker, vote_pubkey, vote, gossip_tx)
})
.map(|v| (true, v))
})
.chain(replayed_votes.into_iter().map(|v| (false, v)))
{
Self::update_new_votes(
Self::track_new_votes_and_notify_confirmations(
vote,
&vote_pubkey,
&vote_tracker,
@@ -757,7 +760,7 @@ impl ClusterInfoVoteListener {
// Returns if the slot was optimistically confirmed, and whether
// the slot was new
fn add_optimistic_confirmation_vote(
fn track_optimistic_confirmation_vote(
vote_tracker: &VoteTracker,
slot: Slot,
hash: Hash,
@@ -898,7 +901,7 @@ mod tests {
let (vote_tracker, bank, _, _) = setup();
// Check outdated slots are purged with new root
let new_voter = Arc::new(Pubkey::new_rand());
let new_voter = Arc::new(solana_sdk::pubkey::new_rand());
// Make separate copy so the original doesn't count toward
// the ref count, which would prevent cleanup
let new_voter_ = Arc::new(*new_voter);
@@ -909,7 +912,7 @@ mod tests {
.unwrap()
.contains_key(&bank.slot()));
let bank1 = Bank::new_from_parent(&bank, &Pubkey::default(), bank.slot() + 1);
vote_tracker.process_new_root_bank(&bank1);
vote_tracker.progress_with_new_root_bank(&bank1);
assert!(!vote_tracker
.slot_vote_trackers
.read()
@@ -926,7 +929,7 @@ mod tests {
bank.epoch_schedule()
.get_first_slot_in_epoch(current_epoch + 1),
);
vote_tracker.process_new_root_bank(&new_epoch_bank);
vote_tracker.progress_with_new_root_bank(&new_epoch_bank);
assert!(!vote_tracker.keys.0.read().unwrap().contains(&new_voter));
assert_eq!(
*vote_tracker.current_epoch.read().unwrap(),
@@ -956,7 +959,7 @@ mod tests {
);
let next_leader_schedule_bank =
Bank::new_from_parent(&bank, &Pubkey::default(), next_leader_schedule_computed);
vote_tracker.update_leader_schedule_epoch(&next_leader_schedule_bank);
vote_tracker.progress_leader_schedule_epoch(&next_leader_schedule_bank);
assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(),
next_leader_schedule_epoch
@@ -1007,7 +1010,7 @@ mod tests {
&votes_sender,
&replay_votes_sender,
);
ClusterInfoVoteListener::get_and_process_votes(
ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_receiver,
&vote_tracker,
&bank3,
@@ -1036,7 +1039,7 @@ mod tests {
&votes_sender,
&replay_votes_sender,
);
ClusterInfoVoteListener::get_and_process_votes(
ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_receiver,
&vote_tracker,
&bank3,
@@ -1114,7 +1117,7 @@ mod tests {
);
// Check that all the votes were registered for each validator correctly
ClusterInfoVoteListener::get_and_process_votes(
ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_txs_receiver,
&vote_tracker,
&bank0,
@@ -1233,7 +1236,7 @@ mod tests {
}
// Read and process votes from channel `votes_receiver`
ClusterInfoVoteListener::get_and_process_votes(
ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_txs_receiver,
&vote_tracker,
&bank0,
@@ -1328,7 +1331,7 @@ mod tests {
))
.unwrap();
}
let _ = ClusterInfoVoteListener::get_and_process_votes(
let _ = ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_receiver,
&vote_tracker,
&bank,
@@ -1474,7 +1477,7 @@ mod tests {
)];
let (verified_vote_sender, _verified_vote_receiver) = unbounded();
ClusterInfoVoteListener::process_votes(
ClusterInfoVoteListener::filter_and_confirm_with_new_votes(
&vote_tracker,
vote_tx,
// Add gossip vote for same slot, should not affect outcome
@@ -1545,7 +1548,7 @@ mod tests {
let new_root_bank =
Bank::new_from_parent(&bank, &Pubkey::default(), first_slot_in_new_epoch - 2);
ClusterInfoVoteListener::process_votes(
ClusterInfoVoteListener::filter_and_confirm_with_new_votes(
&vote_tracker,
vote_txs,
vec![(
@@ -1681,7 +1684,7 @@ mod tests {
fn run_test_verify_votes_1_pass(hash: Option<Hash>) {
let vote_tx = test_vote_tx(hash);
let votes = vec![vote_tx];
let labels = vec![CrdsValueLabel::Vote(0, Pubkey::new_rand())];
let labels = vec![CrdsValueLabel::Vote(0, solana_sdk::pubkey::new_rand())];
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels);
assert_eq!(vote_txs.len(), 1);
verify_packets_len(&packets, 1);
@@ -1698,7 +1701,7 @@ mod tests {
let mut bad_vote = vote_tx.clone();
bad_vote.signatures[0] = Signature::default();
let votes = vec![vote_tx.clone(), bad_vote, vote_tx];
let label = CrdsValueLabel::Vote(0, Pubkey::new_rand());
let label = CrdsValueLabel::Vote(0, solana_sdk::pubkey::new_rand());
let labels: Vec<_> = (0..votes.len()).map(|_| label.clone()).collect();
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels);
assert_eq!(vote_txs.len(), 2);

View File

@@ -237,8 +237,8 @@ mod tests {
let mut c1 = ContactInfo::default();
let mut c2 = ContactInfo::default();
let mut map = HashMap::new();
let k1 = Pubkey::new_rand();
let k2 = Pubkey::new_rand();
let k1 = solana_sdk::pubkey::new_rand();
let k2 = solana_sdk::pubkey::new_rand();
map.insert(Arc::new(k1), std::u64::MAX / 2);
map.insert(Arc::new(k2), 0);
cs.cluster_slots
@@ -259,8 +259,8 @@ mod tests {
let mut c1 = ContactInfo::default();
let mut c2 = ContactInfo::default();
let mut map = HashMap::new();
let k1 = Pubkey::new_rand();
let k2 = Pubkey::new_rand();
let k1 = solana_sdk::pubkey::new_rand();
let k2 = solana_sdk::pubkey::new_rand();
map.insert(Arc::new(k2), 0);
cs.cluster_slots
.write()
@@ -290,7 +290,7 @@ mod tests {
let cs = ClusterSlots::default();
let mut contact_infos = vec![ContactInfo::default(); 2];
for ci in contact_infos.iter_mut() {
ci.id = Pubkey::new_rand();
ci.id = solana_sdk::pubkey::new_rand();
}
let slot = 9;
@@ -359,7 +359,7 @@ mod tests {
let mut epoch_slot = EpochSlots::default();
epoch_slot.fill(&[1], 0);
cs.update_internal(0, (vec![epoch_slot], None));
let self_id = Pubkey::new_rand();
let self_id = solana_sdk::pubkey::new_rand();
assert_eq!(
cs.generate_repairs_for_missing_slots(&self_id, 0),
vec![RepairType::HighestShred(1, 0)]

View File

@@ -181,6 +181,7 @@ mod test {
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
ClusterSlotsService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
cluster_info.flush_push_queue();
let lowest = cluster_info
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
lowest_slot.clone()

View File

@@ -375,19 +375,22 @@ mod tests {
let rooted_stake_amount = 40;
let sk1 = Pubkey::new_rand();
let pk1 = Pubkey::new_rand();
let mut vote_account1 = vote_state::create_account(&pk1, &Pubkey::new_rand(), 0, 100);
let sk1 = solana_sdk::pubkey::new_rand();
let pk1 = solana_sdk::pubkey::new_rand();
let mut vote_account1 =
vote_state::create_account(&pk1, &solana_sdk::pubkey::new_rand(), 0, 100);
let stake_account1 =
stake_state::create_account(&sk1, &pk1, &vote_account1, &genesis_config.rent, 100);
let sk2 = Pubkey::new_rand();
let pk2 = Pubkey::new_rand();
let mut vote_account2 = vote_state::create_account(&pk2, &Pubkey::new_rand(), 0, 50);
let sk2 = solana_sdk::pubkey::new_rand();
let pk2 = solana_sdk::pubkey::new_rand();
let mut vote_account2 =
vote_state::create_account(&pk2, &solana_sdk::pubkey::new_rand(), 0, 50);
let stake_account2 =
stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
let sk3 = Pubkey::new_rand();
let pk3 = Pubkey::new_rand();
let mut vote_account3 = vote_state::create_account(&pk3, &Pubkey::new_rand(), 0, 1);
let sk3 = solana_sdk::pubkey::new_rand();
let pk3 = solana_sdk::pubkey::new_rand();
let mut vote_account3 =
vote_state::create_account(&pk3, &solana_sdk::pubkey::new_rand(), 0, 1);
let stake_account3 = stake_state::create_account(
&sk3,
&pk3,
@@ -395,9 +398,10 @@ mod tests {
&genesis_config.rent,
rooted_stake_amount,
);
let sk4 = Pubkey::new_rand();
let pk4 = Pubkey::new_rand();
let mut vote_account4 = vote_state::create_account(&pk4, &Pubkey::new_rand(), 0, 1);
let sk4 = solana_sdk::pubkey::new_rand();
let pk4 = solana_sdk::pubkey::new_rand();
let mut vote_account4 =
vote_state::create_account(&pk4, &solana_sdk::pubkey::new_rand(), 0, 1);
let stake_account4 = stake_state::create_account(
&sk4,
&pk4,

View File

@@ -30,11 +30,11 @@ use std::{
};
use thiserror::Error;
#[derive(PartialEq, Clone, Debug)]
#[derive(PartialEq, Clone, Debug, AbiExample)]
pub enum SwitchForkDecision {
SwitchProof(Hash),
NoSwitch,
FailedSwitchThreshold,
SameFork,
FailedSwitchThreshold(u64, u64),
}
impl SwitchForkDecision {
@@ -45,8 +45,11 @@ impl SwitchForkDecision {
authorized_voter_pubkey: &Pubkey,
) -> Option<Instruction> {
match self {
SwitchForkDecision::FailedSwitchThreshold => None,
SwitchForkDecision::NoSwitch => Some(vote_instruction::vote(
SwitchForkDecision::FailedSwitchThreshold(_, total_stake) => {
assert_ne!(*total_stake, 0);
None
}
SwitchForkDecision::SameFork => Some(vote_instruction::vote(
vote_account_pubkey,
authorized_voter_pubkey,
vote,
@@ -61,6 +64,10 @@ impl SwitchForkDecision {
}
}
}
pub fn can_vote(&self) -> bool {
!matches!(self, SwitchForkDecision::FailedSwitchThreshold(_, _))
}
}
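Carrying `(locked_out_stake, total_stake)` in the failure variant lets callers both gate voting with `can_vote()` and report how much stake fell short. A small sketch of the pattern, with a simplified enum standing in for the real `SwitchForkDecision`:

```rust
// Simplified stand-in for the enum in the diff; only the variant payload and
// the matches!-based helper are illustrated.
#[derive(Debug, Clone, PartialEq)]
enum Decision {
    SwitchProof,
    SameFork,
    FailedSwitchThreshold(u64, u64), // (locked_out_stake, total_stake)
}

impl Decision {
    fn can_vote(&self) -> bool {
        !matches!(self, Decision::FailedSwitchThreshold(_, _))
    }
}

fn main() {
    let d = Decision::FailedSwitchThreshold(3_000, 20_000);
    if let Decision::FailedSwitchThreshold(locked, total) = &d {
        println!("switch failed: {} of {} stake locked out", locked, total);
    }
    assert!(!d.can_vote());
    assert!(Decision::SameFork.can_vote());
}
```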
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
@@ -82,7 +89,7 @@ pub(crate) struct ComputedBankState {
pub pubkey_votes: Arc<PubkeyVotes>,
}
#[frozen_abi(digest = "2ZUeCLMVQxmHYbeqMH7M97ifVSKoVErGvRHzyxcQRjgU")]
#[frozen_abi(digest = "Eay84NBbJqiMBfE7HHH2o6e51wcvoU79g8zCi5sw6uj3")]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)]
pub struct Tower {
node_pubkey: Pubkey,
@@ -100,7 +107,11 @@ pub struct Tower {
// (This is a special field for slashing-free validator restart with edge cases).
// This could be emptied after some time; but left intact indefinitely for easier
// implementation
// Further, a stray slot can be stale or not. `Stale` here means whether the given
// bank_forks (=~ ledger) lacks the slot or not.
stray_restored_slot: Option<Slot>,
#[serde(skip)]
pub last_switch_threshold_check: Option<(Slot, SwitchForkDecision)>,
}
impl Default for Tower {
@@ -115,6 +126,7 @@ impl Default for Tower {
path: PathBuf::default(),
tmp_path: PathBuf::default(),
stray_restored_slot: Option::default(),
last_switch_threshold_check: Option::default(),
};
// VoteState::root_slot is ensured to be Some in Tower
tower.lockouts.root_slot = Some(Slot::default());
@@ -377,17 +389,14 @@ impl Tower {
pub fn record_bank_vote(&mut self, vote: Vote) -> Option<Slot> {
let slot = vote.last_voted_slot().unwrap_or(0);
trace!("{} record_vote for {}", self.node_pubkey, slot);
let root_slot = self.lockouts.root_slot;
let old_root = self.root();
self.lockouts.process_vote_unchecked(&vote);
self.last_vote = vote;
let new_root = self.root();
datapoint_info!(
"tower-vote",
("latest", slot, i64),
("root", self.lockouts.root_slot.unwrap_or(0), i64)
);
if root_slot != self.lockouts.root_slot {
Some(self.lockouts.root_slot.unwrap())
datapoint_info!("tower-vote", ("latest", slot, i64), ("root", new_root, i64));
if old_root != new_root {
Some(new_root)
} else {
None
}
@@ -431,13 +440,13 @@ impl Tower {
// root may be forcibly set by arbitrary replay root slot, for example from a root
// after replaying a snapshot.
// Also, tower.root() couldn't be None; do_initialize_lockouts() ensures that.
// Also, tower.root() couldn't be None; initialize_lockouts() ensures that.
// Conceptually, every tower must have been constructed from a concrete starting point,
// which establishes the origin of trust (i.e. root) whether booting from genesis (slot 0) or
// snapshot (slot N). In other words, there should be no possibility a Tower doesn't have
// root, unlike young vote accounts.
pub fn root(&self) -> Option<Slot> {
self.lockouts.root_slot
pub fn root(&self) -> Slot {
self.lockouts.root_slot.unwrap()
}
// a slot is recent if it's newer than the last vote we have
@@ -493,7 +502,7 @@ impl Tower {
false
}
pub(crate) fn check_switch_threshold(
fn make_check_switch_threshold_decision(
&self,
switch_slot: u64,
ancestors: &HashMap<Slot, HashSet<u64>>,
@@ -504,13 +513,66 @@ impl Tower {
) -> SwitchForkDecision {
self.last_voted_slot()
.map(|last_voted_slot| {
let root = self.lockouts.root_slot.unwrap_or(0);
let root = self.root();
let empty_ancestors = HashSet::default();
let empty_ancestors_due_to_minor_unsynced_ledger = || {
// This condition (stale stray last vote) shouldn't occur under normal validator
// operation, indicating something unusual happened.
// This condition could be introduced by manual ledger mishandling,
// validator SEGV, OS/HW crash, or plain No Free Space FS error.
// However, returning empty ancestors as a fallback here shouldn't result in
// slashing by itself (Note that we couldn't fully preclude any kind of slashing if
// the failure was OS or HW level).
// Firstly, lockout is ensured elsewhere.
// Also, there is no risk of optimistic conf. violation. Although empty ancestors
// could result in incorrect (= more than actual) locked_out_stake and
// false-positive SwitchProof later in this function, there should be no such
// heavier fork candidate, first of all, if the last vote (or any of its
// unavailable ancestors) were already optimistically confirmed.
// The only exception is that other validator is already violating it...
if self.is_first_switch_check() && switch_slot < last_voted_slot {
// `switch < last` is needed not to warn! this message just because of using
// newer snapshots on validator restart
let message = format!(
"bank_forks doesn't have corresponding data for the stray restored \
last vote({}), meaning some inconsistency between saved tower and ledger.",
last_voted_slot
);
warn!("{}", message);
datapoint_warn!("tower_warn", ("warn", message, String));
}
&empty_ancestors
};
let suspended_decision_due_to_major_unsynced_ledger = || {
// This peculiar corner handling is needed mainly for a tower which is newer than
// blockstore. (Yeah, we tolerate it for ease of maintaining validator by operators)
// This condition could be introduced by manual ledger mishandling,
// validator SEGV, OS/HW crash, or plain No Free Space FS error.
// When we're in this clause, it basically means the validator is badly running
// with a future tower while replaying past slots, especially problematic is
// last_voted_slot.
// So, don't re-vote on it by returning pseudo FailedSwitchThreshold, otherwise
// there would be slashing because of double vote on one of last_vote_ancestors.
// (Well, needless to say, re-creating the duplicate block must be handled properly
// at the banking stage: https://github.com/solana-labs/solana/issues/8232)
//
// To be specific, the replay stage is tricked into a false perception where
// last_vote_ancestors is AVAILABLE for descendant-of-`switch_slot`, stale, and
// stray slots (which should always be empty_ancestors).
//
// This is covered by test_future_tower_* in local_cluster
SwitchForkDecision::FailedSwitchThreshold(0, total_stake)
};
let last_vote_ancestors =
ancestors.get(&last_voted_slot).unwrap_or_else(|| {
if !self.is_stray_last_vote() {
// Unless last vote is stray, ancestors.get(last_voted_slot) must
// Unless last vote is stray and stale, ancestors.get(last_voted_slot) must
// return Some(_), justifying to panic! here.
// Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None,
// if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be
@@ -520,10 +582,7 @@ impl Tower {
// all of them.
panic!("no ancestors found with slot: {}", last_voted_slot);
} else {
// bank_forks doesn't have corresponding data for the stray restored last vote,
// meaning some inconsistency between saved tower and ledger.
// (newer snapshot, or only a saved tower is moved over to new setup?)
&empty_ancestors
empty_ancestors_due_to_minor_unsynced_ledger()
}
});
@@ -532,12 +591,21 @@ impl Tower {
if switch_slot == last_voted_slot || switch_slot_ancestors.contains(&last_voted_slot) {
// If the `switch_slot is a descendant of the last vote,
// no switching proof is necessary
return SwitchForkDecision::NoSwitch;
return SwitchForkDecision::SameFork;
}
// Should never consider switching to an ancestor
// of your last vote
assert!(!last_vote_ancestors.contains(&switch_slot));
if last_vote_ancestors.contains(&switch_slot) {
if !self.is_stray_last_vote() {
panic!(
"Should never consider switching to slot ({}), which is ancestors({:?}) of last vote: {}",
switch_slot,
last_vote_ancestors,
last_voted_slot
);
} else {
return suspended_decision_due_to_major_unsynced_ledger();
}
}
// By this point, we know the `switch_slot` is on a different fork
// (is neither an ancestor nor descendant of `last_vote`), so a
@@ -598,7 +666,7 @@ impl Tower {
}
// Only count lockouts on slots that are:
// 1) Not ancestors of `last_vote`
// 1) Not ancestors of `last_vote`, meaning being on different fork
// 2) Not from before the current root as we can't determine if
// anything before the root was an ancestor of `last_vote` or not
if !last_vote_ancestors.contains(lockout_interval_start)
@@ -622,10 +690,43 @@ impl Tower {
if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
SwitchForkDecision::SwitchProof(switch_proof)
} else {
SwitchForkDecision::FailedSwitchThreshold
SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
}
})
.unwrap_or(SwitchForkDecision::NoSwitch)
.unwrap_or(SwitchForkDecision::SameFork)
}
pub(crate) fn check_switch_threshold(
&mut self,
switch_slot: u64,
ancestors: &HashMap<Slot, HashSet<u64>>,
descendants: &HashMap<Slot, HashSet<u64>>,
progress: &ProgressMap,
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
) -> SwitchForkDecision {
let decision = self.make_check_switch_threshold_decision(
switch_slot,
ancestors,
descendants,
progress,
total_stake,
epoch_vote_accounts,
);
let new_check = Some((switch_slot, decision.clone()));
if new_check != self.last_switch_threshold_check {
trace!(
"new switch threshold check: slot {}: {:?}",
switch_slot,
decision,
);
self.last_switch_threshold_check = new_check;
}
decision
}
fn is_first_switch_check(&self) -> bool {
self.last_switch_threshold_check.is_none()
}
pub fn check_vote_stake_threshold(
@@ -760,28 +861,21 @@ impl Tower {
// The tower root can be older/newer if the validator booted from a newer/older snapshot, so
// tower lockouts may need adjustment
pub fn adjust_lockouts_after_replay(
self,
mut self,
replayed_root: Slot,
slot_history: &SlotHistory,
) -> Result<Self> {
info!(
"adjusting lockouts (after replay up to {}): {:?}",
replayed_root,
self.voted_slots()
);
// sanity assertions for roots
assert_eq!(slot_history.check(replayed_root), Check::Found);
assert!(self.root().is_some());
let tower_root = self.root().unwrap();
// reconcile_blockstore_roots_with_tower() should already have aligned these.
assert!(
tower_root <= replayed_root,
format!(
"tower root: {:?} >= replayed root slot: {}",
tower_root, replayed_root
)
let tower_root = self.root();
info!(
"adjusting lockouts (after replay up to {}): {:?} tower root: {} replayed root: {}",
replayed_root,
self.voted_slots(),
tower_root,
replayed_root,
);
assert_eq!(slot_history.check(replayed_root), Check::Found);
assert!(
self.last_vote == Vote::default() && self.lockouts.votes.is_empty()
|| self.last_vote != Vote::default() && !self.lockouts.votes.is_empty(),
@@ -791,30 +885,67 @@ impl Tower {
)
);
// return immediately if votes are empty...
if self.lockouts.votes.is_empty() {
return Ok(self);
if let Some(last_voted_slot) = self.last_voted_slot() {
if tower_root <= replayed_root {
// Normally, we go into this clause, possibly with the help of
// reconcile_blockstore_roots_with_tower()
if slot_history.check(last_voted_slot) == Check::TooOld {
// We could try hard to anchor with other older votes, but opt to simplify the
// following logic
return Err(TowerError::TooOldTower(
last_voted_slot,
slot_history.oldest(),
));
}
self.adjust_lockouts_with_slot_history(slot_history)?;
self.initialize_root(replayed_root);
} else {
// This should never occur under normal operation.
// While this validator's voting is suspended this way,
// suspended_decision_due_to_major_unsynced_ledger() will be also touched.
let message = format!(
"For some reason, we're REPROCESSING slots which has already been \
voted and ROOTED by us; \
VOTING will be SUSPENDED UNTIL {}!",
last_voted_slot,
);
error!("{}", message);
datapoint_error!("tower_error", ("error", message, String));
// Let's pass-through adjust_lockouts_with_slot_history just for sanitization,
// using a synthesized SlotHistory.
let mut warped_slot_history = (*slot_history).clone();
// Blockstore doesn't have the tower_root slot because of
// (replayed_root < tower_root) in this else clause, meaning the tower is from
// the future from the view of blockstore.
// Pretend the blockstore has the future tower_root to anchor exactly with that
// slot by adding tower_root to a slot history. The added slot will be newer
// than all slots in the slot history (remember tower_root > replayed_root),
// satisfying the slot history invariant.
// Thus, the whole process will be safe as well because tower_root exists
// within both tower and slot history, guaranteeing the success of adjustment
// and retaining all of future votes correctly while sanitizing.
warped_slot_history.add(tower_root);
self.adjust_lockouts_with_slot_history(&warped_slot_history)?;
// don't update root; future tower's root should be kept across validator
// restarts to continue to show the scary messages at restarts until the next
// voting.
}
} else {
// This else clause is for a newly created tower.
// initialize_lockouts_from_bank() should ensure the following invariant,
// otherwise we're screwing something up.
assert_eq!(tower_root, replayed_root);
}
let last_voted_slot = self.last_voted_slot().unwrap();
if slot_history.check(last_voted_slot) == Check::TooOld {
// We could try hard to anchor with other older votes, but opt to simplify the
// following logic
return Err(TowerError::TooOldTower(
last_voted_slot,
slot_history.oldest(),
));
}
self.do_adjust_lockouts_after_replay(tower_root, replayed_root, slot_history)
Ok(self)
}
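The `warped_slot_history` branch above anchors a future tower by pretending the replayed history already contains the tower root. A hedged illustration of that idea, with a plain `Vec<u64>` standing in for `SlotHistory`:

```rust
fn main() {
    let replayed_root = 2u64;
    let tower_root = 12u64; // newer than anything replayed so far
    let slot_history: Vec<u64> = vec![0, 1, 2]; // stand-in for SlotHistory

    if tower_root > replayed_root {
        // Clone and "warp" the history: add the future tower root so every
        // slot recorded in the tower can still be anchored while sanitizing.
        let mut warped = slot_history.clone();
        warped.push(tower_root);
        assert!(warped.contains(&tower_root));
        // ... the same lockout adjustment would then run against `warped` ...
    }
}
```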
fn do_adjust_lockouts_after_replay(
mut self,
tower_root: Slot,
replayed_root: Slot,
slot_history: &SlotHistory,
) -> Result<Self> {
fn adjust_lockouts_with_slot_history(&mut self, slot_history: &SlotHistory) -> Result<()> {
let tower_root = self.root();
// retained slots will consist only of divergent slots
let mut retain_flags_for_each_vote_in_reverse: Vec<_> =
Vec::with_capacity(self.lockouts.votes.len());
@@ -855,14 +986,20 @@ impl Tower {
}
if let Some(checked_slot) = checked_slot {
// This is really special, only if tower is initialized (root = slot 0) for genesis and contains
// a vote (= slot 0) for the genesis, the slot 0 can repeat only once
let voting_from_genesis = *slot_in_tower == checked_slot && *slot_in_tower == 0;
// This is really special, only if tower is initialized and contains
// a vote for the root, the root slot can repeat only once
let voting_for_root =
*slot_in_tower == checked_slot && *slot_in_tower == tower_root;
if !voting_from_genesis {
if !voting_for_root {
// Unless we're voting since genesis, slots_in_tower must always be older than last checked_slot
// including all vote slots and the root slot.
assert!(*slot_in_tower < checked_slot)
assert!(
*slot_in_tower < checked_slot,
"slot_in_tower({}) < checked_slot({})",
*slot_in_tower,
checked_slot
);
}
}
@@ -890,15 +1027,10 @@ impl Tower {
retain_flags_for_each_vote_in_reverse.into_iter().rev();
let original_votes_len = self.lockouts.votes.len();
self.do_initialize_lockouts(replayed_root, move |_| {
retain_flags_for_each_vote.next().unwrap()
});
self.initialize_lockouts(move |_| retain_flags_for_each_vote.next().unwrap());
if self.lockouts.votes.is_empty() {
info!(
"All restored votes were behind replayed_root({}); resetting root_slot and last_vote in tower!",
replayed_root
);
info!("All restored votes were behind; resetting root_slot and last_vote in tower!");
// we might not have banks for those votes so just reset.
// That's because the votes may be well past replayed_root
self.last_vote = Vote::default();
@@ -917,7 +1049,7 @@ impl Tower {
self.stray_restored_slot = Some(self.last_vote.last_voted_slot().unwrap());
}
Ok(self)
Ok(())
}
fn initialize_lockouts_from_bank(
@@ -930,18 +1062,19 @@ impl Tower {
let vote_state = VoteState::deserialize(&vote_account.data)
.expect("vote_account isn't a VoteState?");
self.lockouts = vote_state;
self.do_initialize_lockouts(root, |v| v.slot > root);
self.initialize_root(root);
self.initialize_lockouts(|v| v.slot > root);
trace!(
"{} lockouts initialized to {:?}",
"Lockouts in tower for {} is initialized using bank {}",
self.node_pubkey,
self.lockouts
bank.slot(),
);
assert_eq!(
self.lockouts.node_pubkey, self.node_pubkey,
"vote account's node_pubkey doesn't match",
);
} else {
self.do_initialize_lockouts(root, |_| true);
self.initialize_root(root);
info!(
"vote account({}) not found in bank (slot={})",
vote_account_pubkey,
@@ -950,13 +1083,16 @@ impl Tower {
}
}
fn do_initialize_lockouts<F: FnMut(&Lockout) -> bool>(&mut self, root: Slot, should_retain: F) {
// Updating root is needed to correctly restore from newly-saved tower for the next
// boot
self.lockouts.root_slot = Some(root);
fn initialize_lockouts<F: FnMut(&Lockout) -> bool>(&mut self, should_retain: F) {
self.lockouts.votes.retain(should_retain);
}
// Updating root is needed to correctly restore from newly-saved tower for the next
// boot
fn initialize_root(&mut self, root: Slot) {
self.lockouts.root_slot = Some(root);
}
pub fn get_filename(path: &Path, node_pubkey: &Pubkey) -> PathBuf {
path.join(format!("tower-{}", node_pubkey))
.with_extension("bin")
@@ -986,6 +1122,7 @@ impl Tower {
bincode::serialize_into(&mut file, &saved_tower)?;
// file.sync_all() hurts performance; pipeline sync-ing and submitting votes to the cluster!
}
trace!("persisted votes: {:?}", self.voted_slots());
fs::rename(&new_filename, &filename)?;
// self.path.parent().sync_all() hurts performance same as the above sync
@@ -1047,6 +1184,16 @@ pub enum TowerError {
FatallyInconsistent(&'static str),
}
impl TowerError {
pub fn is_file_missing(&self) -> bool {
if let TowerError::IOError(io_err) = &self {
io_err.kind() == std::io::ErrorKind::NotFound
} else {
false
}
}
}
#[frozen_abi(digest = "Gaxfwvx5MArn52mKZQgzHmDCyn5YfCuTHvp5Et3rFfpp")]
#[derive(Default, Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)]
pub struct SavedTower {
@@ -1070,33 +1217,46 @@ impl SavedTower {
}
}
// Given an untimely crash, tower may have roots that are not reflected in blockstore because
// `ReplayState::handle_votable_bank()` saves tower before setting blockstore roots
// Given an untimely crash, tower may have roots that are not reflected in blockstore,
// or the reverse of this.
// That's because we don't impose any ordering guarantee or any kind of write barriers
// between tower (plain old POSIX fs calls) and blockstore (through RocksDB), when
// `ReplayState::handle_votable_bank()` saves tower before setting blockstore roots.
pub fn reconcile_blockstore_roots_with_tower(
tower: &Tower,
blockstore: &Blockstore,
) -> blockstore_db::Result<()> {
if let Some(tower_root) = tower.root() {
let last_blockstore_root = blockstore.last_root();
if last_blockstore_root < tower_root {
// Ensure tower_root itself to exist and be marked as rooted in the blockstore
// in addition to its ancestors.
let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, &blockstore)
.take_while(|current| match current.cmp(&last_blockstore_root) {
Ordering::Greater => true,
Ordering::Equal => false,
Ordering::Less => panic!(
"couldn't find a last_blockstore_root upwards from: {}!?",
tower_root
),
})
.collect();
assert!(
!new_roots.is_empty(),
"at least 1 parent slot must be found"
let tower_root = tower.root();
let last_blockstore_root = blockstore.last_root();
if last_blockstore_root < tower_root {
// Ensure that tower_root itself exists and is marked as rooted in the blockstore
// in addition to its ancestors.
let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, &blockstore)
.take_while(|current| match current.cmp(&last_blockstore_root) {
Ordering::Greater => true,
Ordering::Equal => false,
Ordering::Less => panic!(
"couldn't find a last_blockstore_root upwards from: {}!?",
tower_root
),
})
.collect();
if !new_roots.is_empty() {
info!(
"Reconciling slots as root based on tower root: {:?} ({}..{}) ",
new_roots, tower_root, last_blockstore_root
);
blockstore.set_roots(&new_roots)?;
} else {
// This indicates we're in a bad state; but still don't panic here.
// That's because we might have a chance of recovering properly with a
// newer snapshot.
warn!(
"Couldn't find any ancestor slots from tower root ({}) \
towards blockstore root ({}); blockstore pruned or only \
tower moved into new ledger?",
tower_root, last_blockstore_root,
);
blockstore.set_roots(&new_roots)?
}
}
Ok(())
@@ -1267,7 +1427,7 @@ pub mod test {
&ancestors,
&descendants,
&self.progress,
&tower,
tower,
);
// Make sure this slot isn't locked out or failing threshold
@@ -1456,7 +1616,7 @@ pub mod test {
&mut account.data,
)
.expect("serialize state");
stakes.push((Pubkey::new_rand(), (*lamports, account)));
stakes.push((solana_sdk::pubkey::new_rand(), (*lamports, account)));
}
stakes
}
@@ -1464,11 +1624,11 @@ pub mod test {
#[test]
fn test_to_vote_instruction() {
let vote = Vote::default();
let mut decision = SwitchForkDecision::FailedSwitchThreshold;
let mut decision = SwitchForkDecision::FailedSwitchThreshold(0, 1);
assert!(decision
.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default())
.is_none());
decision = SwitchForkDecision::NoSwitch;
decision = SwitchForkDecision::SameFork;
assert_eq!(
decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
Some(vote_instruction::vote(
@@ -1571,7 +1731,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::NoSwitch
SwitchForkDecision::SameFork
);
// Trying to switch to another fork at 110 should fail
@@ -1584,7 +1744,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::FailedSwitchThreshold
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Adding another validator lockout on a descendant of last vote should
@@ -1599,7 +1759,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::FailedSwitchThreshold
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Adding another validator lockout on an ancestor of last vote should
@@ -1614,7 +1774,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::FailedSwitchThreshold
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Adding another validator lockout on a different fork, but the lockout
@@ -1629,7 +1789,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::FailedSwitchThreshold
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Adding another validator lockout on a different fork, and the lockout
@@ -1646,7 +1806,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::FailedSwitchThreshold
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Adding another validator lockout on a different fork, and the lockout
@@ -1697,7 +1857,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::FailedSwitchThreshold
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
}
@@ -2365,7 +2525,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::NoSwitch
SwitchForkDecision::SameFork
);
// Trying to switch to another fork at 110 should fail
@@ -2378,7 +2538,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::FailedSwitchThreshold
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
vote_simulator.simulate_lockout_interval(111, (10, 49), &other_vote_account);
@@ -2456,7 +2616,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::FailedSwitchThreshold
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Add lockout_interval which should be excluded
@@ -2470,7 +2630,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
),
SwitchForkDecision::FailedSwitchThreshold
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
// Add lockout_interval which should not be excluded
@@ -2619,8 +2779,7 @@ pub mod test {
}
#[test]
#[should_panic(expected = "at least 1 parent slot must be found")]
fn test_reconcile_blockstore_roots_with_tower_panic_no_parent() {
fn test_reconcile_blockstore_roots_with_tower_nop_no_parent() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
{
@@ -2636,7 +2795,9 @@ pub mod test {
let mut tower = Tower::new_with_key(&Pubkey::default());
tower.lockouts.root_slot = Some(4);
assert_eq!(blockstore.last_root(), 0);
reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap();
assert_eq!(blockstore.last_root(), 0);
}
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
@@ -2660,13 +2821,13 @@ pub mod test {
.unwrap();
assert_eq!(tower.voted_slots(), vec![2, 3]);
assert_eq!(tower.root(), Some(replayed_root_slot));
assert_eq!(tower.root(), replayed_root_slot);
tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![2, 3]);
assert_eq!(tower.root(), Some(replayed_root_slot));
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
@@ -2688,7 +2849,7 @@ pub mod test {
.unwrap();
assert_eq!(tower.voted_slots(), vec![2, 3]);
assert_eq!(tower.root(), Some(replayed_root_slot));
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
@@ -2712,12 +2873,12 @@ pub mod test {
.unwrap();
assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
assert_eq!(tower.root(), Some(replayed_root_slot));
assert_eq!(tower.root(), replayed_root_slot);
assert_eq!(tower.stray_restored_slot, None);
}
#[test]
fn test_adjust_lockouts_after_relay_all_rooted_with_too_old() {
fn test_adjust_lockouts_after_replay_all_rooted_with_too_old() {
use solana_sdk::slot_history::MAX_ENTRIES;
let mut tower = Tower::new_for_tests(10, 0.9);
@@ -2735,7 +2896,7 @@ pub mod test {
.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history)
.unwrap();
assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
assert_eq!(tower.root(), Some(MAX_ENTRIES));
assert_eq!(tower.root(), MAX_ENTRIES);
}
#[test]
@@ -2758,7 +2919,7 @@ pub mod test {
.unwrap();
assert_eq!(tower.voted_slots(), vec![3, 4]);
assert_eq!(tower.root(), Some(replayed_root_slot));
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
@@ -2779,7 +2940,7 @@ pub mod test {
.unwrap();
assert_eq!(tower.voted_slots(), vec![5, 6]);
assert_eq!(tower.root(), Some(replayed_root_slot));
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
@@ -2823,7 +2984,7 @@ pub mod test {
.unwrap();
assert_eq!(tower.voted_slots(), vec![3, 4, 5]);
assert_eq!(tower.root(), Some(replayed_root_slot));
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
@@ -2839,7 +3000,7 @@ pub mod test {
.unwrap();
assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
assert_eq!(tower.root(), Some(replayed_root_slot));
assert_eq!(tower.root(), replayed_root_slot);
}
#[test]
@@ -2920,4 +3081,92 @@ pub mod test {
"The tower is fatally inconsistent with blockstore: not too old once after got too old?"
);
}
#[test]
#[should_panic(expected = "slot_in_tower(2) < checked_slot(1)")]
fn test_adjust_lockouts_after_replay_reversed_votes() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.lockouts.votes.push_back(Lockout::new(2));
tower.lockouts.votes.push_back(Lockout::new(1));
let vote = Vote::new(vec![1], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(2);
tower
.adjust_lockouts_after_replay(2, &slot_history)
.unwrap();
}
#[test]
#[should_panic(expected = "slot_in_tower(3) < checked_slot(3)")]
fn test_adjust_lockouts_after_replay_repeated_non_root_votes() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.lockouts.votes.push_back(Lockout::new(2));
tower.lockouts.votes.push_back(Lockout::new(3));
tower.lockouts.votes.push_back(Lockout::new(3));
let vote = Vote::new(vec![3], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(2);
tower
.adjust_lockouts_after_replay(2, &slot_history)
.unwrap();
}
#[test]
fn test_adjust_lockouts_after_replay_vote_on_root() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.lockouts.root_slot = Some(42);
tower.lockouts.votes.push_back(Lockout::new(42));
tower.lockouts.votes.push_back(Lockout::new(43));
tower.lockouts.votes.push_back(Lockout::new(44));
let vote = Vote::new(vec![44], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(42);
let tower = tower.adjust_lockouts_after_replay(42, &slot_history);
assert_eq!(tower.unwrap().voted_slots(), [43, 44]);
}
#[test]
fn test_adjust_lockouts_after_replay_vote_on_genesis() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.lockouts.votes.push_back(Lockout::new(0));
let vote = Vote::new(vec![0], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
assert!(tower.adjust_lockouts_after_replay(0, &slot_history).is_ok());
}
#[test]
fn test_adjust_lockouts_after_replay_future_tower() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.lockouts.votes.push_back(Lockout::new(13));
tower.lockouts.votes.push_back(Lockout::new(14));
let vote = Vote::new(vec![14], Hash::default());
tower.last_vote = vote;
tower.initialize_root(12);
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(2);
let tower = tower
.adjust_lockouts_after_replay(2, &slot_history)
.unwrap();
assert_eq!(tower.root(), 12);
assert_eq!(tower.voted_slots(), vec![13, 14]);
assert_eq!(tower.stray_restored_slot, Some(14));
}
}

View File

@@ -130,7 +130,7 @@ impl ContactInfo {
let addr = socketaddr!("224.0.1.255:1000");
assert!(addr.ip().is_multicast());
Self {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
gossip: addr,
tvu: addr,
tvu_forwards: addr,

View File

@@ -28,6 +28,7 @@ use crate::crds_shards::CrdsShards;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
use bincode::serialize;
use indexmap::map::{Entry, IndexMap};
use rayon::{prelude::*, ThreadPool};
use solana_sdk::hash::{hash, Hash};
use solana_sdk::pubkey::Pubkey;
use std::cmp;
@@ -176,37 +177,40 @@ impl Crds {
/// * timeouts - Pubkey specific timeouts with Pubkey::default() as the default timeout.
pub fn find_old_labels(
&self,
thread_pool: &ThreadPool,
now: u64,
timeouts: &HashMap<Pubkey, u64>,
) -> Vec<CrdsValueLabel> {
let default_timeout = *timeouts
.get(&Pubkey::default())
.expect("must have default timeout");
self.table
.iter()
.filter_map(|(k, v)| {
let timeout = timeouts.get(&k.pubkey()).unwrap_or(&default_timeout);
if v.local_timestamp.saturating_add(*timeout) <= now {
Some(k)
} else {
None
}
})
.cloned()
.collect()
thread_pool.install(|| {
self.table
.par_iter()
.with_min_len(1024)
.filter_map(|(k, v)| {
let timeout = timeouts.get(&k.pubkey()).unwrap_or(&default_timeout);
if v.local_timestamp.saturating_add(*timeout) <= now {
Some(k.clone())
} else {
None
}
})
.collect()
})
}
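The rewrite above moves `find_old_labels` onto a rayon thread pool via `install()` and `par_iter()`. A self-contained sketch of that pattern over a plain `Vec`; the table contents and timeout values are made up for illustration:

```rust
use rayon::prelude::*;
use rayon::ThreadPoolBuilder;

fn main() {
    // Made-up table of (key, last_update_timestamp) pairs.
    let table: Vec<(u64, u64)> = (0..10_000u64).map(|k| (k, k % 100)).collect();
    let (now, timeout) = (60u64, 10u64);

    let pool = ThreadPoolBuilder::new().num_threads(4).build().unwrap();
    // install() runs the closure inside the pool so par_iter() uses its threads;
    // with_min_len() keeps rayon from splitting into uselessly small chunks.
    let expired: Vec<u64> = pool.install(|| {
        table
            .par_iter()
            .with_min_len(1024)
            .filter_map(|(k, ts)| {
                if ts.saturating_add(timeout) <= now {
                    Some(*k)
                } else {
                    None
                }
            })
            .collect()
    });
    println!("{} of {} entries expired", expired.len(), table.len());
}
```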
pub fn remove(&mut self, key: &CrdsValueLabel) {
if let Some((index, _, value)) = self.table.swap_remove_full(key) {
assert!(self.shards.remove(index, &value));
// The previously last element in the table is now moved to the
// 'index' position. Shards need to be updated accordingly.
if index < self.table.len() {
let value = self.table.index(index);
assert!(self.shards.remove(self.table.len(), value));
assert!(self.shards.insert(index, value));
}
pub fn remove(&mut self, key: &CrdsValueLabel) -> Option<VersionedCrdsValue> {
let (index, _, value) = self.table.swap_remove_full(key)?;
assert!(self.shards.remove(index, &value));
// The previously last element in the table is now moved to the
// 'index' position. Shards need to be updated accordingly.
if index < self.table.len() {
let value = self.table.index(index);
assert!(self.shards.remove(self.table.len(), value));
assert!(self.shards.insert(index, value));
}
Some(value)
}
}
@@ -216,6 +220,7 @@ mod test {
use crate::contact_info::ContactInfo;
use crate::crds_value::CrdsData;
use rand::{thread_rng, Rng};
use rayon::ThreadPoolBuilder;
#[test]
fn test_insert() {
@@ -288,48 +293,67 @@ mod test {
}
#[test]
fn test_find_old_records_default() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(crds.insert(val.clone(), 1), Ok(None));
let mut set = HashMap::new();
set.insert(Pubkey::default(), 0);
assert!(crds.find_old_labels(0, &set).is_empty());
assert!(crds.find_old_labels(&thread_pool, 0, &set).is_empty());
set.insert(Pubkey::default(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
set.insert(Pubkey::default(), 2);
assert_eq!(crds.find_old_labels(4, &set), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 4, &set),
vec![val.label()]
);
}
#[test]
fn test_find_old_records_with_override() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut rng = thread_rng();
let mut crds = Crds::default();
let mut timeouts = HashMap::new();
let val = CrdsValue::new_rand(&mut rng);
timeouts.insert(Pubkey::default(), 3);
assert_eq!(crds.insert(val.clone(), 0), Ok(None));
assert!(crds.find_old_labels(2, &timeouts).is_empty());
assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
timeouts.insert(val.pubkey(), 1);
assert_eq!(crds.find_old_labels(2, &timeouts), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &timeouts),
vec![val.label()]
);
timeouts.insert(val.pubkey(), u64::MAX);
assert!(crds.find_old_labels(2, &timeouts).is_empty());
assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
timeouts.insert(Pubkey::default(), 1);
assert!(crds.find_old_labels(2, &timeouts).is_empty());
assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
timeouts.remove(&val.pubkey());
assert_eq!(crds.find_old_labels(2, &timeouts), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &timeouts),
vec![val.label()]
);
}
#[test]
fn test_remove_default() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_matches!(crds.insert(val.clone(), 1), Ok(_));
let mut set = HashMap::new();
set.insert(Pubkey::default(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
crds.remove(&val.label());
assert!(crds.find_old_labels(2, &set).is_empty());
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
}
#[test]
fn test_find_old_records_staked() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(crds.insert(val.clone(), 1), Ok(None));
@@ -337,20 +361,26 @@ mod test {
//now < timestamp
set.insert(Pubkey::default(), 0);
set.insert(val.pubkey(), 0);
assert!(crds.find_old_labels(0, &set).is_empty());
assert!(crds.find_old_labels(&thread_pool, 0, &set).is_empty());
//pubkey shouldn't expire since its timeout is MAX
set.insert(val.pubkey(), std::u64::MAX);
assert!(crds.find_old_labels(2, &set).is_empty());
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
//default has max timeout, but pubkey should still expire
set.insert(Pubkey::default(), std::u64::MAX);
set.insert(val.pubkey(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
set.insert(val.pubkey(), 2);
assert!(crds.find_old_labels(2, &set).is_empty());
assert_eq!(crds.find_old_labels(3, &set), vec![val.label()]);
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
assert_eq!(
crds.find_old_labels(&thread_pool, 3, &set),
vec![val.label()]
);
}
#[test]
@@ -361,7 +391,9 @@ mod test {
}
let mut crds = Crds::default();
let pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand).take(256).collect();
let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
.take(256)
.collect();
let mut rng = thread_rng();
let mut num_inserts = 0;
for _ in 0..4096 {
@@ -394,6 +426,7 @@ mod test {
#[test]
fn test_remove_staked() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_matches!(crds.insert(val.clone(), 1), Ok(_));
@@ -402,9 +435,12 @@ mod test {
//default has max timeout, but pubkey should still expire
set.insert(Pubkey::default(), std::u64::MAX);
set.insert(val.pubkey(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
crds.remove(&val.label());
assert!(crds.find_old_labels(2, &set).is_empty());
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
}
#[test]
@@ -484,14 +520,14 @@ mod test {
let v1 = VersionedCrdsValue::new(
1,
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
))),
);
let v2 = VersionedCrdsValue::new(
1,
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
))),
);

View File

@@ -88,7 +88,20 @@ impl CrdsGossip {
prune_map
}
pub fn new_push_messages(&mut self, now: u64) -> (Pubkey, HashMap<Pubkey, Vec<CrdsValue>>) {
pub fn process_push_messages(&mut self, pending_push_messages: Vec<(CrdsValue, u64)>) {
for (push_message, timestamp) in pending_push_messages {
let _ =
self.push
.process_push_message(&mut self.crds, &self.id, push_message, timestamp);
}
}
pub fn new_push_messages(
&mut self,
pending_push_messages: Vec<(CrdsValue, u64)>,
now: u64,
) -> (Pubkey, HashMap<Pubkey, Vec<CrdsValue>>) {
self.process_push_messages(pending_push_messages);
let push_messages = self.push.new_push_messages(&self.crds, now);
(self.id, push_messages)
}
@@ -161,9 +174,12 @@ impl CrdsGossip {
self.pull.mark_pull_request_creation_time(from, now)
}
/// process a pull request and create a response
pub fn process_pull_requests(&mut self, filters: Vec<(CrdsValue, CrdsFilter)>, now: u64) {
pub fn process_pull_requests<I>(&mut self, callers: I, now: u64)
where
I: IntoIterator<Item = CrdsValue>,
{
self.pull
.process_pull_requests(&mut self.crds, filters, now);
.process_pull_requests(&mut self.crds, callers, now);
}
pub fn generate_pull_responses(
@@ -219,7 +235,12 @@ impl CrdsGossip {
self.pull.make_timeouts(&self.id, stakes, epoch_ms)
}
pub fn purge(&mut self, now: u64, timeouts: &HashMap<Pubkey, u64>) -> usize {
pub fn purge(
&mut self,
thread_pool: &ThreadPool,
now: u64,
timeouts: &HashMap<Pubkey, u64>,
) -> usize {
let mut rv = 0;
if now > self.push.msg_timeout {
let min = now - self.push.msg_timeout;
@@ -234,7 +255,9 @@ impl CrdsGossip {
let min = self.pull.crds_timeout;
assert_eq!(timeouts[&self.id], std::u64::MAX);
assert_eq!(timeouts[&Pubkey::default()], min);
rv = self.pull.purge_active(&mut self.crds, now, &timeouts);
rv = self
.pull
.purge_active(thread_pool, &mut self.crds, now, &timeouts);
}
if now > 5 * self.pull.crds_timeout {
let min = now - 5 * self.pull.crds_timeout;

View File

@@ -273,20 +273,18 @@ impl CrdsGossipPull {
}
/// process a pull request
pub fn process_pull_requests(
&mut self,
crds: &mut Crds,
requests: Vec<(CrdsValue, CrdsFilter)>,
now: u64,
) {
requests.into_iter().for_each(|(caller, _)| {
pub fn process_pull_requests<I>(&mut self, crds: &mut Crds, callers: I, now: u64)
where
I: IntoIterator<Item = CrdsValue>,
{
for caller in callers {
let key = caller.label().pubkey();
if let Ok(Some(val)) = crds.insert(caller, now) {
self.purged_values
.push_back((val.value_hash, val.local_timestamp));
}
crds.update_record_timestamp(&key, now);
});
}
}
/// Create gossip responses to pull requests
@@ -537,24 +535,21 @@ impl CrdsGossipPull {
/// The value_hash of an active item is put into self.purged_values queue
pub fn purge_active(
&mut self,
thread_pool: &ThreadPool,
crds: &mut Crds,
now: u64,
timeouts: &HashMap<Pubkey, u64>,
) -> usize {
let old = crds.find_old_labels(now, timeouts);
let mut purged: VecDeque<_> = old
.iter()
.filter_map(|label| {
let rv = crds
.lookup_versioned(label)
.map(|val| (val.value_hash, val.local_timestamp));
crds.remove(label);
rv
})
.collect();
let ret = purged.len();
self.purged_values.append(&mut purged);
ret
let num_purged_values = self.purged_values.len();
self.purged_values.extend(
crds.find_old_labels(thread_pool, now, timeouts)
.into_iter()
.filter_map(|label| {
let val = crds.remove(&label)?;
Some((val.value_hash, val.local_timestamp))
}),
);
self.purged_values.len() - num_purged_values
}
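The new `purge_active` measures how many values were purged by comparing the queue length before and after `extend`, instead of building an intermediate collection. A tiny sketch of that counting pattern with simplified types:

```rust
use std::collections::VecDeque;

fn main() {
    let mut purged_values: VecDeque<(u64, u64)> = VecDeque::from(vec![(9, 1)]);
    let expired = vec![(1u64, 10u64), (2, 20)];

    // Record the length before extending, then report only the delta,
    // mirroring the return value of the rewritten purge_active().
    let before = purged_values.len();
    purged_values.extend(expired.into_iter());
    let newly_purged = purged_values.len() - before;
    assert_eq!(newly_purged, 2);
}
```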
/// Purge values from the `self.purged_values` queue that are older than purge_timeout
pub fn purge_purged(&mut self, min_ts: u64) {
@@ -626,7 +621,7 @@ mod test {
}
let mut rng = thread_rng();
for _ in 0..100 {
let hash = Hash::new_rand(&mut rng);
let hash = solana_sdk::hash::new_rand(&mut rng);
assert_eq!(CrdsFilter::hash_as_u64(&hash), hash_as_u64_bitops(&hash));
}
}
@@ -638,7 +633,7 @@ mod test {
assert_eq!(filter.mask, mask);
let mut rng = thread_rng();
for _ in 0..10 {
let hash = Hash::new_rand(&mut rng);
let hash = solana_sdk::hash::new_rand(&mut rng);
assert!(filter.test_mask(&hash));
}
}
@@ -649,13 +644,13 @@ mod test {
let mut stakes = HashMap::new();
let node = CrdsGossipPull::default();
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
crds.insert(me.clone(), 0).unwrap();
for i in 1..=30 {
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let id = entry.label().pubkey();
@@ -682,25 +677,25 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 123,
gossip,
..ContactInfo::default()
}));
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 0,
gossip,
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 123,
gossip,
..ContactInfo::default()
}));
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 456,
gossip,
..ContactInfo::default()
@@ -741,12 +736,12 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
@@ -767,7 +762,7 @@ mod test {
assert!(options.is_empty());
// Unknown pubkey in gossip_validators -- will pull from nobody
gossip_validators.insert(Pubkey::new_rand());
gossip_validators.insert(solana_sdk::pubkey::new_rand());
let options = node.pull_options(
&crds,
&me.label().pubkey(),
@@ -797,7 +792,7 @@ mod test {
let mut rng = thread_rng();
let crds_filter_set =
CrdsFilterSet::new(/*num_items=*/ 9672788, /*max_bytes=*/ 8196);
let hash_values: Vec<_> = std::iter::repeat_with(|| Hash::new_rand(&mut rng))
let hash_values: Vec<_> = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
.take(1024)
.collect();
for hash_value in &hash_values {
@@ -849,7 +844,7 @@ mod test {
for _ in 0..10_000 {
crds_gossip_pull
.purged_values
.push_back((Hash::new_rand(&mut rng), rng.gen()));
.push_back((solana_sdk::hash::new_rand(&mut rng), rng.gen()));
}
let mut num_inserts = 0;
for _ in 0..20_000 {
@@ -898,7 +893,7 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let id = entry.label().pubkey();
@@ -933,7 +928,7 @@ mod test {
);
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
crds.insert(new.clone(), 0).unwrap();
@@ -957,19 +952,19 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let node_pubkey = entry.label().pubkey();
let mut node = CrdsGossipPull::default();
crds.insert(entry.clone(), 0).unwrap();
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
crds.insert(old.clone(), 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
crds.insert(new.clone(), 0).unwrap();
@@ -1000,14 +995,14 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let node_pubkey = entry.label().pubkey();
let node = CrdsGossipPull::default();
node_crds.insert(entry, 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
node_crds.insert(new, 0).unwrap();
@@ -1031,7 +1026,7 @@ mod test {
assert_eq!(rsp[0].len(), 0);
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
)));
dest_crds
@@ -1047,7 +1042,7 @@ mod test {
filters.push(filters[0].clone());
//should return new value since caller is new
filters[1].0 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1,
)));
@@ -1063,14 +1058,14 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let node_pubkey = entry.label().pubkey();
let node = CrdsGossipPull::default();
node_crds.insert(entry, 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
node_crds.insert(new, 0).unwrap();
@@ -1090,7 +1085,11 @@ mod test {
let (_, filters, caller) = req.unwrap();
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
dest.process_pull_requests(&mut dest_crds, filters, 1);
dest.process_pull_requests(
&mut dest_crds,
filters.into_iter().map(|(caller, _)| caller),
1,
);
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
assert!(dest_crds.lookup(&caller.label()).is_some());
assert_eq!(
@@ -1113,7 +1112,7 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
1,
)));
let node_pubkey = entry.label().pubkey();
@@ -1121,14 +1120,14 @@ mod test {
node_crds.insert(entry, 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
1,
)));
node_crds.insert(new, 0).unwrap();
let mut dest = CrdsGossipPull::default();
let mut dest_crds = Crds::default();
let new_id = Pubkey::new_rand();
let new_id = solana_sdk::pubkey::new_rand();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&new_id, 1,
)));
@@ -1164,7 +1163,11 @@ mod test {
let (_, filters, caller) = req.unwrap();
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
dest.process_pull_requests(&mut dest_crds, filters, 0);
dest.process_pull_requests(
&mut dest_crds,
filters.into_iter().map(|(caller, _)| caller),
0,
);
// if there is a false positive this is empty
// prob should be around 0.1 per iteration
if rsp.is_empty() {
@@ -1210,7 +1213,7 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let node_label = entry.label();
@@ -1218,7 +1221,7 @@ mod test {
let mut node = CrdsGossipPull::default();
node_crds.insert(entry, 0).unwrap();
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
node_crds.insert(old.clone(), 0).unwrap();
@@ -1229,7 +1232,7 @@ mod test {
// purge
let timeouts = node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1);
node.purge_active(&mut node_crds, 2, &timeouts);
node.purge_active(&thread_pool, &mut node_crds, 2, &timeouts);
//verify self is still valid after purge
assert_eq!(node_crds.lookup(&node_label).unwrap().label(), node_label);
@@ -1330,7 +1333,7 @@ mod test {
let mut node_crds = Crds::default();
let mut node = CrdsGossipPull::default();
let peer_pubkey = Pubkey::new_rand();
let peer_pubkey = solana_sdk::pubkey::new_rand();
let peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&peer_pubkey, 0),
));

View File

@@ -35,7 +35,7 @@ pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 3;
// Do not push to peers which have not been updated for this long.
const PUSH_ACTIVE_TIMEOUT_MS: u64 = 60_000;
@@ -136,8 +136,12 @@ impl CrdsGossipPush {
let mut keep = HashSet::new();
let mut peer_stake_sum = 0;
keep.insert(*origin);
for next in shuffle {
let (next_peer, next_stake) = staked_peers[next];
if next_peer == *origin {
continue;
}
keep.insert(next_peer);
peer_stake_sum += next_stake;
if peer_stake_sum >= prune_stake_threshold
@@ -284,12 +288,11 @@ impl CrdsGossipPush {
/// add the `from` to the peer's filter of nodes
pub fn process_prune_msg(&mut self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
for origin in origins {
if origin == self_pubkey {
continue;
}
if let Some(p) = self.active_set.get_mut(peer) {
p.add(origin)
if let Some(peer) = self.active_set.get_mut(peer) {
for origin in origins {
if origin != self_pubkey {
peer.add(origin);
}
}
}
}
@@ -438,15 +441,15 @@ mod test {
let mut push = CrdsGossipPush::default();
let mut stakes = HashMap::new();
let self_id = Pubkey::new_rand();
let origin = Pubkey::new_rand();
let self_id = solana_sdk::pubkey::new_rand();
let origin = solana_sdk::pubkey::new_rand();
stakes.insert(self_id, 100);
stakes.insert(origin, 100);
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&origin, 0,
)));
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
let low_staked_peers = (0..10).map(|_| solana_sdk::pubkey::new_rand());
let mut low_staked_set = HashSet::new();
low_staked_peers.for_each(|p| {
let _ = push.process_push_message(&mut crds, &p, value.clone(), 0);
@@ -460,7 +463,7 @@ mod test {
"should not prune if min threshold has not been reached"
);
let high_staked_peer = Pubkey::new_rand();
let high_staked_peer = solana_sdk::pubkey::new_rand();
let high_stake = CrdsGossipPush::prune_stake_threshold(100, 100) + 10;
stakes.insert(high_staked_peer, high_stake);
let _ = push.process_push_message(&mut crds, &high_staked_peer, value, 0);
@@ -483,7 +486,7 @@ mod test {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let label = value.label();
@@ -504,7 +507,7 @@ mod test {
fn test_process_push_old_version() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 1;
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
@@ -527,7 +530,7 @@ mod test {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let timeout = push.msg_timeout;
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
// push a version too far in the future
ci.wallclock = timeout + 1;
@@ -549,7 +552,7 @@ mod test {
fn test_process_push_update() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 0;
let value_old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
@@ -584,7 +587,7 @@ mod test {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
@@ -593,7 +596,7 @@ mod test {
assert!(push.active_set.get(&value1.label().pubkey()).is_some());
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert!(push.active_set.get(&value2.label().pubkey()).is_none());
@@ -608,7 +611,7 @@ mod test {
for _ in 0..push.num_active {
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&Pubkey::new_rand(), 0),
ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0),
));
assert_eq!(crds.insert(value2.clone(), now), Ok(None));
}
@@ -624,7 +627,7 @@ mod test {
let mut stakes = HashMap::new();
for i in 1..=100 {
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
time,
)));
let id = peer.label().pubkey();
@@ -652,25 +655,25 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 123,
gossip,
..ContactInfo::default()
}));
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 0,
gossip,
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 123,
gossip,
..ContactInfo::default()
}));
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
shred_version: 456,
gossip,
..ContactInfo::default()
@@ -709,12 +712,12 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
id: solana_sdk::pubkey::new_rand(),
gossip,
..ContactInfo::default()
}));
@@ -735,7 +738,7 @@ mod test {
assert!(options.is_empty());
// Unknown pubkey in gossip_validators -- will push to nobody
gossip_validators.insert(Pubkey::new_rand());
gossip_validators.insert(solana_sdk::pubkey::new_rand());
let options = node.push_options(
&crds,
&me.label().pubkey(),
@@ -765,14 +768,14 @@ mod test {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer.clone(), now), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let mut expected = HashMap::new();
@@ -790,17 +793,17 @@ mod test {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer_1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer_1.clone(), now), Ok(None));
let peer_2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer_2.clone(), now), Ok(None));
let peer_3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
now,
)));
assert_eq!(
@@ -823,17 +826,17 @@ mod test {
#[test]
fn test_process_prune() {
let mut crds = Crds::default();
let self_id = Pubkey::new_rand();
let self_id = solana_sdk::pubkey::new_rand();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
let expected = HashMap::new();
@@ -853,13 +856,13 @@ mod test {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
&solana_sdk::pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer, 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 1;
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci));
let expected = HashMap::new();
@@ -875,7 +878,7 @@ mod test {
fn test_purge_old_received_cache() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 0;
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci));
let label = value.label();

View File

@@ -135,14 +135,15 @@ mod test {
use crate::contact_info::ContactInfo;
use crate::crds_value::{CrdsData, CrdsValue};
use rand::{thread_rng, Rng};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::collections::HashSet;
use std::ops::Index;
fn new_test_crds_value() -> VersionedCrdsValue {
let data =
CrdsData::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp()));
let data = CrdsData::ContactInfo(ContactInfo::new_localhost(
&solana_sdk::pubkey::new_rand(),
timestamp(),
));
VersionedCrdsValue::new(timestamp(), CrdsValue::new_unsigned(data))
}

View File

@@ -318,7 +318,7 @@ impl CrdsValue {
R: rand::Rng,
{
let now = rng.gen();
let contact_info = ContactInfo::new_localhost(&Pubkey::new_rand(), now);
let contact_info = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now);
Self::new_signed(CrdsData::ContactInfo(contact_info), &Keypair::new())
}

112
core/src/data_budget.rs Normal file
View File

@@ -0,0 +1,112 @@
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
#[derive(Default)]
pub struct DataBudget {
// Number of bytes we have in the budget to send.
bytes: AtomicUsize,
// Last time that we upped the bytes count, used
// to detect when to up the bytes budget again
last_timestamp_ms: AtomicU64,
}
impl DataBudget {
// If there are enough bytes in the budget, consumes from
// the budget and returns true. Otherwise returns false.
#[must_use]
pub fn take(&self, size: usize) -> bool {
let mut budget = self.bytes.load(Ordering::Acquire);
loop {
if budget < size {
return false;
}
match self.bytes.compare_exchange_weak(
budget,
budget - size,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return true,
Err(bytes) => budget = bytes,
}
}
}
// Updates the timestamp and returns true if at least the given number of
// milliseconds has passed since the last update. Otherwise returns false.
fn can_update(&self, duration_millis: u64) -> bool {
let now = solana_sdk::timing::timestamp();
let mut last_timestamp = self.last_timestamp_ms.load(Ordering::Acquire);
loop {
if now < last_timestamp + duration_millis {
return false;
}
match self.last_timestamp_ms.compare_exchange_weak(
last_timestamp,
now,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return true,
Err(ts) => last_timestamp = ts,
}
}
}
// Updates the budget if at least the given number of milliseconds has passed
// since the last update. The updater function maps the current byte count to the new one.
pub fn update<F>(&self, duration_millis: u64, updater: F)
where
F: Fn(usize) -> usize,
{
if !self.can_update(duration_millis) {
return;
}
let mut bytes = self.bytes.load(Ordering::Acquire);
loop {
match self.bytes.compare_exchange_weak(
bytes,
updater(bytes),
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => break,
Err(b) => bytes = b,
}
}
}
// Non-atomic clone only for tests and simulations.
pub fn clone_non_atomic(&self) -> Self {
Self {
bytes: AtomicUsize::new(self.bytes.load(Ordering::Acquire)),
last_timestamp_ms: AtomicU64::new(self.last_timestamp_ms.load(Ordering::Acquire)),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::time::Duration;
#[test]
fn test_data_budget() {
let budget = DataBudget::default();
assert!(!budget.take(1)); // budget = 0.
budget.update(1000, |bytes| bytes + 5); // budget updates to 5.
assert!(budget.take(1));
assert!(budget.take(2));
assert!(!budget.take(3)); // budget = 2, out of budget.
budget.update(30, |_| 10); // no update, budget = 2.
assert!(!budget.take(3)); // budget = 2, out of budget.
std::thread::sleep(Duration::from_millis(50));
budget.update(30, |bytes| bytes * 2); // budget updates to 4.
assert!(budget.take(3));
assert!(budget.take(1));
assert!(!budget.take(1)); // budget = 0.
}
}
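For orientation, here is a minimal caller-side sketch (not part of the diff) of the DataBudget API added above; the refill interval, byte cap, and the solana_core::data_budget import path are illustrative assumptions, not code from the commits.

use solana_core::data_budget::DataBudget; // assumed re-export path

// Refill parameters invented for illustration.
const REFILL_INTERVAL_MS: u64 = 1_000;
const MAX_BUDGET_BYTES: usize = 12_000_000;

fn try_send(budget: &DataBudget, packet: &[u8]) -> bool {
    // Top up the budget at most once per interval, capping the total,
    // then consume bytes only if the whole packet fits in what remains.
    budget.update(REFILL_INTERVAL_MS, |bytes| {
        MAX_BUDGET_BYTES.min(bytes + MAX_BUDGET_BYTES / 10)
    });
    budget.take(packet.len())
}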

View File

@@ -306,8 +306,8 @@ mod tests {
#[test]
fn test_gossip_services_spy() {
let keypair = Keypair::new();
let peer0 = Pubkey::new_rand();
let peer1 = Pubkey::new_rand();
let peer0 = solana_sdk::pubkey::new_rand();
let peer1 = solana_sdk::pubkey::new_rand();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let peer0_info = ContactInfo::new_localhost(&peer0, 0);
let peer1_info = ContactInfo::new_localhost(&peer1, 0);
@@ -335,7 +335,7 @@ mod tests {
spy_ref.clone(),
None,
Some(0),
Some(Pubkey::new_rand()),
Some(solana_sdk::pubkey::new_rand()),
None,
);
assert_eq!(met_criteria, false);
@@ -349,7 +349,7 @@ mod tests {
spy_ref.clone(),
Some(1),
Some(0),
Some(Pubkey::new_rand()),
Some(solana_sdk::pubkey::new_rand()),
None,
);
assert_eq!(met_criteria, false);

View File

@@ -187,6 +187,7 @@ impl HeaviestSubtreeForkChoice {
.expect("new root must exist in fork_infos map")
.parent = None;
self.root = new_root;
self.last_root_time = Instant::now();
}
pub fn add_root_parent(&mut self, root_parent: Slot) {
@@ -498,7 +499,7 @@ impl HeaviestSubtreeForkChoice {
let heaviest_slot_on_same_voted_fork = self.best_slot(last_voted_slot);
if heaviest_slot_on_same_voted_fork.is_none() {
if !tower.is_stray_last_vote() {
// Unless last vote is stray, self.bast_slot(last_voted_slot) must return
// Unless last vote is stray and stale, self.bast_slot(last_voted_slot) must return
// Some(_), justifying to panic! here.
// Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None,
// if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be
@@ -507,12 +508,12 @@ impl HeaviestSubtreeForkChoice {
// validator has been running, so we must be able to fetch best_slots for all of
// them.
panic!(
"a bank at last_voted_slot({}) is a frozen bank so must have been\
"a bank at last_voted_slot({}) is a frozen bank so must have been \
added to heaviest_subtree_fork_choice at time of freezing",
last_voted_slot,
)
} else {
// fork_infos doesn't have corresponding data for the stray restored last vote,
// fork_infos doesn't have corresponding data for the stale stray last vote,
// meaning some inconsistency between saved tower and ledger.
// (newer snapshot, or only a saved tower is moved over to new setup?)
return None;

View File

@@ -6,14 +6,10 @@
//! command-line tools to spin up validators and a Rust library
//!
#[macro_use]
extern crate solana_bpf_loader_program;
pub mod accounts_hash_verifier;
pub mod banking_stage;
pub mod bigtable_upload_service;
pub mod broadcast_stage;
mod builtins;
pub mod cache_block_time_service;
pub mod cluster_info_vote_listener;
pub mod commitment_service;
@@ -35,6 +31,7 @@ pub mod crds_gossip_pull;
pub mod crds_gossip_push;
pub mod crds_shards;
pub mod crds_value;
pub mod data_budget;
pub mod epoch_slots;
pub mod fetch_stage;
pub mod fork_choice;
@@ -46,6 +43,7 @@ pub mod local_vote_signer_service;
pub mod non_circulating_supply;
pub mod optimistic_confirmation_verifier;
pub mod optimistically_confirmed_bank_tracker;
pub mod ping_pong;
pub mod poh_recorder;
pub mod poh_service;
pub mod progress_map;
@@ -59,7 +57,6 @@ mod result;
pub mod retransmit_stage;
pub mod rewards_recorder_service;
pub mod rpc;
pub mod rpc_error;
pub mod rpc_health;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
@@ -97,7 +94,7 @@ extern crate serde_json;
extern crate solana_metrics;
#[macro_use]
extern crate solana_sdk_macro_frozen_abi;
extern crate solana_frozen_abi_macro;
#[cfg(test)]
#[macro_use]

View File

@@ -79,6 +79,26 @@ solana_sdk::pubkeys!(
"GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
"AzVV9ZZDxTgW4wWfJmsG6ytaHpQGSe1yz76Nyy84VbQF",
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
"CQDYc4ET2mbFhVpgj41gXahL6Exn5ZoPcGAzSHuYxwmE",
"5PLJZLJiRR9vf7d1JCCg7UuWjtyN9nkab9uok6TqSyuP",
"7xJ9CLtEAcEShw9kW2gSoZkRWL566Dg12cvgzANJwbTr",
"BuCEvc9ze8UoAQwwsQLy8d447C8sA4zeVtVpc6m5wQeS",
"8ndGYFjav6NDXvzYcxs449Aub3AxYv4vYpk89zRDwgj7",
"8W58E8JVJjH1jCy5CeHJQgvwFXTyAVyesuXRZGbcSUGG",
"GNiz4Mq886bTNDT3pijGsu2gbw6it7sqrwncro45USeB",
"GhsotwFMH6XUrRLJCxcx62h7748N2Uq8mf87hUGkmPhg",
"Fgyh8EeYGZtbW8sS33YmNQnzx54WXPrJ5KWNPkCfWPot",
"8UVjvYyoqP6sqcctTso3xpCdCfgTMiv3VRh7vraC2eJk",
"BhvLngiqqKeZ8rpxch2uGjeCiC88zzewoWPRuoxpp1aS",
"63DtkW7zuARcd185EmHAkfF44bDcC2SiTSEj2spLP3iA",
"GvpCiTgq9dmEeojCDBivoLoZqc4AkbUDACpqPMwYLWKh",
"7Y8smnoUrYKGGuDq2uaFKVxJYhojgg7DVixHyAtGTYEV",
"DUS1KxwUhUyDKB4A81E8vdnTe3hSahd92Abtn9CXsEcj",
"F9MWFw8cnYVwsRq8Am1PGfFL3cQUZV37mbGoxZftzLjN",
"8vqrX3H2BYLaXVintse3gorPEM4TgTwTFZNN1Fm9TdYs",
"CUageMFi49kzoDqtdU8NvQ4Bq3sbtJygjKDAXJ45nmAi",
"5smrYwb1Hr2T8XMnvsqccTgXxuqQs14iuE8RbHFYf2Cf",
"xQadXQiUTCCFhfHjvQx1hyJK6KVWr1w2fD6DT3cdwj7",
]
);
@@ -115,7 +135,7 @@ mod tests {
let num_genesis_accounts = 10;
for _ in 0..num_genesis_accounts {
accounts.insert(
Pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
Account::new(balance, 0, &Pubkey::default()),
);
}
@@ -127,7 +147,7 @@ mod tests {
let num_stake_accounts = 3;
for _ in 0..num_stake_accounts {
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
let meta = Meta {
authorized: Authorized::auto(&pubkey),
lockup: Lockup {

View File

@@ -20,7 +20,7 @@ impl OptimisticConfirmationVerifier {
}
// Returns any optimistic slots that were not rooted
pub fn get_unrooted_optimistic_slots(
pub fn verify_for_unrooted_optimistic_slots(
&mut self,
root_bank: &Bank,
blockstore: &Blockstore,
@@ -34,8 +34,8 @@ impl OptimisticConfirmationVerifier {
std::mem::swap(&mut slots_before_root, &mut self.unchecked_slots);
slots_before_root
.into_iter()
.filter(|(optimistic_slot, hash)| {
(*optimistic_slot == root && *hash != root_bank.hash())
.filter(|(optimistic_slot, optimistic_hash)| {
(*optimistic_slot == root && *optimistic_hash != root_bank.hash())
|| (!root_ancestors.contains_key(&optimistic_slot) &&
// In this second part of the `and`, we account for the possibility that
// there was some other root `rootX` set in BankForks where:
@@ -76,6 +76,10 @@ impl OptimisticConfirmationVerifier {
self.last_optimistic_slot_ts = Instant::now();
}
pub fn format_optimistic_confirmd_slot_violation_log(slot: Slot) -> String {
format!("Optimistically confirmed slot {} was not rooted", slot)
}
pub fn log_unrooted_optimistic_slots(
root_bank: &Bank,
vote_tracker: &VoteTracker,
@@ -96,7 +100,7 @@ impl OptimisticConfirmationVerifier {
.unwrap_or(0);
error!(
"Optimistic slot {} was not rooted,
"{},
hash: {},
epoch: {},
voted keys: {:?},
@@ -105,7 +109,7 @@ impl OptimisticConfirmationVerifier {
voted stake: {},
total epoch stake: {},
pct: {}",
optimistic_slot,
Self::format_optimistic_confirmd_slot_violation_log(*optimistic_slot),
hash,
epoch,
r_slot_tracker
@@ -181,7 +185,8 @@ mod test {
.cloned()
.unwrap();
assert_eq!(
optimistic_confirmation_verifier.get_unrooted_optimistic_slots(&bank1, &blockstore),
optimistic_confirmation_verifier
.verify_for_unrooted_optimistic_slots(&bank1, &blockstore),
vec![(1, bad_bank_hash)]
);
assert_eq!(optimistic_confirmation_verifier.unchecked_slots.len(), 1);
@@ -228,7 +233,7 @@ mod test {
.cloned()
.unwrap();
assert!(optimistic_confirmation_verifier
.get_unrooted_optimistic_slots(&bank5, &blockstore)
.verify_for_unrooted_optimistic_slots(&bank5, &blockstore)
.is_empty());
// 5 is >= than all the unchecked slots, so should clear everything
assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
@@ -244,7 +249,7 @@ mod test {
.cloned()
.unwrap();
assert!(optimistic_confirmation_verifier
.get_unrooted_optimistic_slots(&bank3, &blockstore)
.verify_for_unrooted_optimistic_slots(&bank3, &blockstore)
.is_empty());
// 3 is bigger than only slot 1, so slot 5 should be left over
assert_eq!(optimistic_confirmation_verifier.unchecked_slots.len(), 1);
@@ -264,7 +269,8 @@ mod test {
.cloned()
.unwrap();
assert_eq!(
optimistic_confirmation_verifier.get_unrooted_optimistic_slots(&bank4, &blockstore),
optimistic_confirmation_verifier
.verify_for_unrooted_optimistic_slots(&bank4, &blockstore),
vec![optimistic_slots[1]]
);
// 4 is bigger than only slots 1 and 3, so slot 5 should be left over
@@ -303,7 +309,8 @@ mod test {
optimistic_confirmation_verifier
.add_new_optimistic_confirmed_slots(optimistic_slots.clone());
assert_eq!(
optimistic_confirmation_verifier.get_unrooted_optimistic_slots(&bank7, &blockstore),
optimistic_confirmation_verifier
.verify_for_unrooted_optimistic_slots(&bank7, &blockstore),
optimistic_slots[0..=1].to_vec()
);
assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
@@ -312,7 +319,7 @@ mod test {
blockstore.set_roots(&[1, 3]).unwrap();
optimistic_confirmation_verifier.add_new_optimistic_confirmed_slots(optimistic_slots);
assert!(optimistic_confirmation_verifier
.get_unrooted_optimistic_slots(&bank7, &blockstore)
.verify_for_unrooted_optimistic_slots(&bank7, &blockstore)
.is_empty());
assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
}

400
core/src/ping_pong.rs Normal file
View File

@@ -0,0 +1,400 @@
use bincode::{serialize, Error};
use lru::LruCache;
use rand::{AsByteSliceMut, CryptoRng, Rng};
use serde::Serialize;
use solana_sdk::hash::{self, Hash};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::sanitize::{Sanitize, SanitizeError};
use solana_sdk::signature::{Keypair, Signable, Signature, Signer};
use std::borrow::Cow;
use std::net::SocketAddr;
use std::time::{Duration, Instant};
#[derive(AbiExample, Debug, Deserialize, Serialize)]
pub struct Ping<T> {
from: Pubkey,
token: T,
signature: Signature,
}
#[derive(AbiExample, Debug, Deserialize, Serialize)]
pub struct Pong {
from: Pubkey,
hash: Hash, // Hash of received ping token.
signature: Signature,
}
/// Maintains records of remote nodes which have returned a valid response to a
/// ping message, and of in-flight ping messages still pending a pong response
/// from the remote node.
pub struct PingCache {
// Time-to-live of received pong messages.
ttl: Duration,
// Timestamp of last ping message sent to a remote node.
// Used to rate limit pings to remote nodes.
pings: LruCache<(Pubkey, SocketAddr), Instant>,
// Verified pong responses from remote nodes.
pongs: LruCache<(Pubkey, SocketAddr), Instant>,
// Hash of ping tokens sent out to remote nodes,
// pending a pong response back.
pending_cache: LruCache<Hash, (Pubkey, SocketAddr)>,
}
impl<T: Serialize> Ping<T> {
pub fn new(token: T, keypair: &Keypair) -> Result<Self, Error> {
let signature = keypair.sign_message(&serialize(&token)?);
let ping = Ping {
from: keypair.pubkey(),
token,
signature,
};
Ok(ping)
}
}
impl<T> Ping<T>
where
T: Serialize + AsByteSliceMut + Default,
{
pub fn new_rand<R>(rng: &mut R, keypair: &Keypair) -> Result<Self, Error>
where
R: Rng + CryptoRng,
{
let mut token = T::default();
rng.fill(&mut token);
Ping::new(token, keypair)
}
}
impl<T> Sanitize for Ping<T> {
fn sanitize(&self) -> Result<(), SanitizeError> {
self.from.sanitize()?;
// TODO Add self.token.sanitize()?; when rust's
// specialization feature becomes stable.
self.signature.sanitize()
}
}
impl<T: Serialize> Signable for Ping<T> {
fn pubkey(&self) -> Pubkey {
self.from
}
fn signable_data(&self) -> Cow<[u8]> {
Cow::Owned(serialize(&self.token).unwrap())
}
fn get_signature(&self) -> Signature {
self.signature
}
fn set_signature(&mut self, signature: Signature) {
self.signature = signature;
}
}
impl Pong {
pub fn new<T: Serialize>(ping: &Ping<T>, keypair: &Keypair) -> Result<Self, Error> {
let hash = hash::hash(&serialize(&ping.token)?);
let pong = Pong {
from: keypair.pubkey(),
hash,
signature: keypair.sign_message(hash.as_ref()),
};
Ok(pong)
}
}
impl Sanitize for Pong {
fn sanitize(&self) -> Result<(), SanitizeError> {
self.from.sanitize()?;
self.hash.sanitize()?;
self.signature.sanitize()
}
}
impl Signable for Pong {
fn pubkey(&self) -> Pubkey {
self.from
}
fn signable_data(&self) -> Cow<[u8]> {
Cow::Owned(self.hash.as_ref().into())
}
fn get_signature(&self) -> Signature {
self.signature
}
fn set_signature(&mut self, signature: Signature) {
self.signature = signature;
}
}
impl PingCache {
pub fn new(ttl: Duration, cap: usize) -> Self {
Self {
ttl,
pings: LruCache::new(cap),
pongs: LruCache::new(cap),
pending_cache: LruCache::new(cap),
}
}
/// Checks if the pong hash, pubkey and socket match a ping message sent
/// out previously. If so records current timestamp for the remote node and
/// returns true.
/// Note: Does not verify the signature.
pub fn add(&mut self, pong: &Pong, socket: SocketAddr, now: Instant) -> bool {
let node = (pong.pubkey(), socket);
match self.pending_cache.peek(&pong.hash) {
Some(value) if *value == node => {
self.pings.pop(&node);
self.pongs.put(node, now);
self.pending_cache.pop(&pong.hash);
true
}
_ => false,
}
}
/// Checks if the remote node has been pinged recently. If not, calls the
/// given function to generate a new ping message, records the current
/// timestamp and hash of ping token, and returns the ping message.
fn maybe_ping<T, F>(
&mut self,
now: Instant,
node: (Pubkey, SocketAddr),
mut pingf: F,
) -> Option<Ping<T>>
where
T: Serialize,
F: FnMut() -> Option<Ping<T>>,
{
// Rate limit consecutive pings sent to a remote node.
let delay = self.ttl / 64;
match self.pings.peek(&node) {
Some(t) if now.saturating_duration_since(*t) < delay => None,
_ => {
let ping = pingf()?;
let hash = hash::hash(&serialize(&ping.token).ok()?);
self.pings.put(node, now);
self.pending_cache.put(hash, node);
Some(ping)
}
}
}
/// Returns true if the remote node has responded to a ping message.
/// Removes expired pong messages. In order to extend verifications before
/// expiration, if the pong message is not too recent, and the node has not
/// been pinged recently, calls the given function to generate a new ping
/// message, records current timestamp and hash of ping token, and returns
/// the ping message.
/// The caller should verify that the socket address is valid (e.g. by using
/// ContactInfo::is_valid_address).
pub fn check<T, F>(
&mut self,
now: Instant,
node: (Pubkey, SocketAddr),
pingf: F,
) -> (bool, Option<Ping<T>>)
where
T: Serialize,
F: FnMut() -> Option<Ping<T>>,
{
let (check, should_ping) = match self.pongs.get(&node) {
None => (false, true),
Some(t) => {
let age = now.saturating_duration_since(*t);
// Pop if the pong message has expired.
if age > self.ttl {
self.pongs.pop(&node);
}
// If the pong message is not too recent, generate a new ping
// message to extend remote node verification.
(true, age > self.ttl / 8)
}
};
let ping = if should_ping {
self.maybe_ping(now, node, pingf)
} else {
None
};
(check, ping)
}
// Only for tests and simulations.
pub(crate) fn mock_clone(&self) -> Self {
let mut clone = Self {
ttl: self.ttl,
pings: LruCache::new(self.pings.cap()),
pongs: LruCache::new(self.pongs.cap()),
pending_cache: LruCache::new(self.pending_cache.cap()),
};
for (k, v) in self.pings.iter().rev() {
clone.pings.put(*k, *v);
}
for (k, v) in self.pongs.iter().rev() {
clone.pongs.put(*k, *v);
}
for (k, v) in self.pending_cache.iter().rev() {
clone.pending_cache.put(*k, *v);
}
clone
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashSet;
use std::iter::repeat_with;
use std::net::{Ipv4Addr, SocketAddrV4};
type Token = [u8; 32];
#[test]
fn test_ping_pong() {
let mut rng = rand::thread_rng();
let keypair = Keypair::new();
let ping = Ping::<Token>::new_rand(&mut rng, &keypair).unwrap();
assert!(ping.verify());
assert!(ping.sanitize().is_ok());
let pong = Pong::new(&ping, &keypair).unwrap();
assert!(pong.verify());
assert!(pong.sanitize().is_ok());
assert_eq!(hash::hash(&ping.token), pong.hash);
}
#[test]
fn test_ping_cache() {
let now = Instant::now();
let mut rng = rand::thread_rng();
let ttl = Duration::from_millis(256);
let mut cache = PingCache::new(ttl, /*cap=*/ 1000);
let this_node = Keypair::new();
let keypairs: Vec<_> = repeat_with(Keypair::new).take(8).collect();
let sockets: Vec<_> = repeat_with(|| {
SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(rng.gen(), rng.gen(), rng.gen(), rng.gen()),
rng.gen(),
))
})
.take(8)
.collect();
let remote_nodes: Vec<(&Keypair, SocketAddr)> = repeat_with(|| {
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
let socket = sockets[rng.gen_range(0, sockets.len())];
(keypair, socket)
})
.take(128)
.collect();
// Initially all checks should fail. The first observation of each node
// should create a ping packet.
let mut seen_nodes = HashSet::<(Pubkey, SocketAddr)>::new();
let pings: Vec<Option<Ping<Token>>> = remote_nodes
.iter()
.map(|(keypair, socket)| {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
assert!(!check);
assert_eq!(seen_nodes.insert(node), ping.is_some());
ping
})
.collect();
let now = now + Duration::from_millis(1);
let panic_ping = || -> Option<Ping<Token>> { panic!("this should not happen!") };
for ((keypair, socket), ping) in remote_nodes.iter().zip(&pings) {
match ping {
None => {
// Already have a recent ping packet for these nodes, so no new
// ping packet will be generated.
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(check);
assert!(ping.is_none());
}
Some(ping) => {
let pong = Pong::new(ping, keypair).unwrap();
assert!(cache.add(&pong, *socket, now));
}
}
}
let now = now + Duration::from_millis(1);
// All nodes now have a recent pong packet.
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(check);
assert!(ping.is_none());
}
let now = now + ttl / 8;
// All nodes still have a valid pong packet, but the cache will create
// a new ping packet to extend verification.
seen_nodes.clear();
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
assert!(check);
assert_eq!(seen_nodes.insert(node), ping.is_some());
}
let now = now + Duration::from_millis(1);
// All nodes still have a valid pong packet, and a very recent ping
// packet pending response. So no new ping packet will be created.
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(check);
assert!(ping.is_none());
}
let now = now + ttl;
// Pong packets are still valid but expired. The first observation of
// each node will remove the pong packet from cache and create a new
// ping packet.
seen_nodes.clear();
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
if seen_nodes.insert(node) {
assert!(check);
assert!(ping.is_some());
} else {
assert!(!check);
assert!(ping.is_none());
}
}
let now = now + Duration::from_millis(1);
// No valid pong packet in the cache. A recent ping packet already
// created, so no new one will be created.
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(!check);
assert!(ping.is_none());
}
let now = now + ttl / 64;
// No valid pong packet in the cache. Another ping packet will be
// created for the first observation of each node.
seen_nodes.clear();
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
assert!(!check);
assert_eq!(seen_nodes.insert(node), ping.is_some());
}
}
}
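Similarly, a minimal sketch (not part of the diff) of how a caller might gate gossip traffic on PingCache::check; the Token alias mirrors the tests above, while check_or_ping, its import paths, and the cache parameters in the trailing comment are hypothetical assumptions.

use solana_core::ping_pong::{Ping, PingCache}; // assumed re-export path
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use std::net::SocketAddr;
use std::time::Instant;

type Token = [u8; 32];

// Returns whether `node` already has a fresh pong on record and, when the
// cache decides it is time, a new signed ping to transmit to that node.
fn check_or_ping(
    cache: &mut PingCache,
    identity: &Keypair,
    node: (Pubkey, SocketAddr),
) -> (bool, Option<Ping<Token>>) {
    let mut rng = rand::thread_rng();
    cache.check(Instant::now(), node, || {
        Ping::<Token>::new_rand(&mut rng, identity).ok()
    })
}

// e.g. let mut cache = PingCache::new(std::time::Duration::from_secs(1280), 4096);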

View File

@@ -401,7 +401,7 @@ mod test {
fn test_add_vote_pubkey() {
let mut stats = PropagatedStats::default();
let mut all_pubkeys = PubkeyReferences::default();
let mut vote_pubkey = Pubkey::new_rand();
let mut vote_pubkey = solana_sdk::pubkey::new_rand();
all_pubkeys.get_or_insert(&vote_pubkey);
// Add a vote pubkey, the number of references in all_pubkeys
@@ -420,7 +420,7 @@ mod test {
assert_eq!(stats.propagated_validators_stake, 1);
// Adding another pubkey should succeed
vote_pubkey = Pubkey::new_rand();
vote_pubkey = solana_sdk::pubkey::new_rand();
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 3);
@@ -434,7 +434,7 @@ mod test {
fn test_add_node_pubkey_internal() {
let num_vote_accounts = 10;
let staked_vote_accounts = 5;
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand)
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
.take(num_vote_accounts)
.collect();
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
@@ -445,7 +445,7 @@ mod test {
let mut stats = PropagatedStats::default();
let mut all_pubkeys = PubkeyReferences::default();
let mut node_pubkey = Pubkey::new_rand();
let mut node_pubkey = solana_sdk::pubkey::new_rand();
all_pubkeys.get_or_insert(&node_pubkey);
// Add a vote pubkey, the number of references in all_pubkeys
@@ -481,7 +481,7 @@ mod test {
// Adding another pubkey with same vote accounts should succeed, but stake
// shouldn't increase
node_pubkey = Pubkey::new_rand();
node_pubkey = solana_sdk::pubkey::new_rand();
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
@@ -500,8 +500,8 @@ mod test {
// Adding another pubkey with different vote accounts should succeed
// and increase stake
node_pubkey = Pubkey::new_rand();
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand)
node_pubkey = solana_sdk::pubkey::new_rand();
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
.take(num_vote_accounts)
.collect();
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys

View File

@@ -291,7 +291,6 @@ impl ReplayStage {
&bank_forks,
&leader_schedule_cache,
&subscriptions,
rewards_recorder_sender.clone(),
&mut progress,
&mut all_pubkeys,
);
@@ -313,6 +312,7 @@ impl ReplayStage {
&mut heaviest_subtree_fork_choice,
&replay_vote_sender,
&bank_notification_sender,
&rewards_recorder_sender,
);
replay_active_banks_time.stop();
Self::report_memory(&allocated, "replay_active_banks", start);
@@ -408,7 +408,7 @@ impl ReplayStage {
&ancestors,
&descendants,
&progress,
&tower,
&mut tower,
);
select_vote_and_reset_forks_time.stop();
@@ -554,7 +554,6 @@ impl ReplayStage {
&poh_recorder,
&leader_schedule_cache,
&subscriptions,
rewards_recorder_sender.clone(),
&progress,
&retransmit_slots_sender,
&mut skipped_slots_info,
@@ -854,7 +853,6 @@ impl ReplayStage {
poh_recorder: &Arc<Mutex<PohRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
subscriptions: &Arc<RpcSubscriptions>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
progress_map: &ProgressMap,
retransmit_slots_sender: &RetransmitSlotsSender,
skipped_slots_info: &mut SkippedSlotsInfo,
@@ -953,7 +951,6 @@ impl ReplayStage {
poh_slot,
root_slot,
my_pubkey,
&rewards_recorder_sender,
subscriptions,
);
@@ -1265,6 +1262,7 @@ impl ReplayStage {
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
replay_vote_sender: &ReplayVoteSender,
bank_notification_sender: &Option<BankNotificationSender>,
rewards_recorder_sender: &Option<RewardsRecorderSender>,
) -> bool {
let mut did_complete_bank = false;
let mut tx_count = 0;
@@ -1340,6 +1338,8 @@ impl ReplayStage {
.send(BankNotification::Frozen(bank.clone()))
.unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
}
Self::record_rewards(&bank, &rewards_recorder_sender);
} else {
trace!(
"bank {} not completed tick_height: {}, max_tick_height: {}",
@@ -1525,7 +1525,7 @@ impl ReplayStage {
ancestors: &HashMap<u64, HashSet<u64>>,
descendants: &HashMap<u64, HashSet<u64>>,
progress: &ProgressMap,
tower: &Tower,
tower: &mut Tower,
) -> SelectVoteAndResetForkResult {
// Try to vote on the actual heaviest fork. If the heaviest bank is
// locked out or fails the threshold check, the validator will:
@@ -1552,7 +1552,7 @@ impl ReplayStage {
.epoch_vote_accounts(heaviest_bank.epoch())
.expect("Bank epoch vote accounts must contain entry for the bank's own epoch"),
);
if switch_fork_decision == SwitchForkDecision::FailedSwitchThreshold {
if let SwitchForkDecision::FailedSwitchThreshold(_, _) = switch_fork_decision {
// If we can't switch, then reset to the next votable
// bank on the same fork as our last vote, but don't vote
info!(
@@ -1601,7 +1601,7 @@ impl ReplayStage {
if !is_locked_out
&& vote_threshold
&& propagation_confirmed
&& switch_fork_decision != SwitchForkDecision::FailedSwitchThreshold
&& switch_fork_decision.can_vote()
{
info!("voting: {} {}", bank.slot(), fork_weight);
SelectVoteAndResetForkResult {
@@ -1817,7 +1817,6 @@ impl ReplayStage {
bank_forks: &RwLock<BankForks>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
subscriptions: &Arc<RpcSubscriptions>,
rewards_recorder_sender: Option<RewardsRecorderSender>,
progress: &mut ProgressMap,
all_pubkeys: &mut PubkeyReferences,
) {
@@ -1863,7 +1862,6 @@ impl ReplayStage {
child_slot,
forks.root(),
&leader,
&rewards_recorder_sender,
subscriptions,
);
let empty: Vec<&Pubkey> = vec![];
@@ -1891,21 +1889,18 @@ impl ReplayStage {
slot: u64,
root_slot: u64,
leader: &Pubkey,
rewards_recorder_sender: &Option<RewardsRecorderSender>,
subscriptions: &Arc<RpcSubscriptions>,
) -> Bank {
subscriptions.notify_slot(slot, parent.slot(), root_slot);
let child_bank = Bank::new_from_parent(parent, leader, slot);
Self::record_rewards(&child_bank, &rewards_recorder_sender);
child_bank
Bank::new_from_parent(parent, leader, slot)
}
fn record_rewards(bank: &Bank, rewards_recorder_sender: &Option<RewardsRecorderSender>) {
if let Some(rewards_recorder_sender) = rewards_recorder_sender {
if let Some(ref rewards) = bank.rewards {
let rewards = bank.rewards.read().unwrap();
if !rewards.is_empty() {
rewards_recorder_sender
.send((bank.slot(), rewards.iter().copied().collect()))
.send((bank.slot(), rewards.clone()))
.unwrap_or_else(|err| warn!("rewards_recorder_sender failed: {:?}", err));
}
}
@@ -2155,7 +2150,6 @@ pub(crate) mod tests {
&bank_forks,
&leader_schedule_cache,
&rpc_subscriptions,
None,
&mut progress,
&mut PubkeyReferences::default(),
);
@@ -2179,7 +2173,6 @@ pub(crate) mod tests {
&bank_forks,
&leader_schedule_cache,
&rpc_subscriptions,
None,
&mut progress,
&mut PubkeyReferences::default(),
);
@@ -2561,7 +2554,7 @@ pub(crate) mod tests {
bank.store_account(&pubkey, &leader_vote_account);
}
let leader_pubkey = Pubkey::new_rand();
let leader_pubkey = solana_sdk::pubkey::new_rand();
let leader_lamports = 3;
let genesis_config_info =
create_genesis_config_with_leader(50, &leader_pubkey, leader_lamports);
@@ -2603,7 +2596,11 @@ pub(crate) mod tests {
.is_none());
let bank1 = Bank::new_from_parent(&arc_bank0, &Pubkey::default(), arc_bank0.slot() + 1);
let _res = bank1.transfer(10, &genesis_config_info.mint_keypair, &Pubkey::new_rand());
let _res = bank1.transfer(
10,
&genesis_config_info.mint_keypair,
&solana_sdk::pubkey::new_rand(),
);
for _ in 0..genesis_config.ticks_per_slot {
bank1.register_tick(&Hash::default());
}
@@ -2619,7 +2616,11 @@ pub(crate) mod tests {
);
let bank2 = Bank::new_from_parent(&arc_bank1, &Pubkey::default(), arc_bank1.slot() + 1);
let _res = bank2.transfer(10, &genesis_config_info.mint_keypair, &Pubkey::new_rand());
let _res = bank2.transfer(
10,
&genesis_config_info.mint_keypair,
&solana_sdk::pubkey::new_rand(),
);
for _ in 0..genesis_config.ticks_per_slot {
bank2.register_tick(&Hash::default());
}

View File

@@ -504,7 +504,6 @@ mod tests {
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_net_utils::find_available_port_in_range;
use solana_perf::packet::{Meta, Packet, Packets};
use solana_sdk::pubkey::Pubkey;
use std::net::{IpAddr, Ipv4Addr};
#[test]
@@ -521,7 +520,7 @@ mod tests {
let leader_schedule_cache = Arc::new(cached_leader_schedule);
let bank_forks = Arc::new(RwLock::new(bank_forks));
let mut me = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let port = find_available_port_in_range(ip_addr, (8000, 10000)).unwrap();
let me_retransmit = UdpSocket::bind(format!("127.0.0.1:{}", port)).unwrap();
@@ -533,7 +532,7 @@ mod tests {
.local_addr()
.unwrap();
let other = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let other = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(other);
cluster_info.insert_info(me);

View File

@@ -54,6 +54,7 @@ impl RewardsRecorderService {
pubkey: pubkey.to_string(),
lamports: reward_info.lamports,
post_balance: reward_info.post_balance,
reward_type: Some(reward_info.reward_type),
})
.collect();

View File

@@ -5,7 +5,6 @@ use crate::{
contact_info::ContactInfo,
non_circulating_supply::calculate_non_circulating_supply,
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
rpc_error::RpcCustomError,
rpc_health::*,
send_transaction_service::{SendTransactionService, TransactionInfo},
validator::ValidatorExit,
@@ -23,6 +22,7 @@ use solana_account_decoder::{
};
use solana_client::{
rpc_config::*,
rpc_custom_error::RpcCustomError,
rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType},
rpc_request::{
TokenAccountsFilter, DELINQUENT_VALIDATOR_SLOT_DISTANCE, MAX_GET_CONFIRMED_BLOCKS_RANGE,
@@ -35,6 +35,7 @@ use solana_client::{
};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_ledger::{blockstore::Blockstore, blockstore_db::BlockstoreError, get_tmp_ledger_path};
use solana_metrics::inc_new_counter_info;
use solana_perf::packet::PACKET_DATA_SIZE;
use solana_runtime::{
accounts::AccountAddressFilter,
@@ -54,7 +55,7 @@ use solana_sdk::{
signature::Signature,
stake_history::StakeHistory,
system_instruction,
sysvar::{stake_history, Sysvar},
sysvar::stake_history,
transaction::{self, Transaction},
};
use solana_stake_program::stake_state::StakeState;
@@ -548,9 +549,6 @@ impl JsonRpcRequestProcessor {
} else {
0
};
let epoch_vote_account = epoch_vote_accounts
.iter()
.any(|(epoch_vote_pubkey, _)| epoch_vote_pubkey == pubkey);
RpcVoteAccountInfo {
vote_pubkey: (pubkey).to_string(),
node_pubkey: vote_state.node_pubkey.to_string(),
@@ -558,7 +556,7 @@ impl JsonRpcRequestProcessor {
commission: vote_state.commission,
root_slot: vote_state.root_slot.unwrap_or(0),
epoch_credits: vote_state.epoch_credits().clone(),
epoch_vote_account,
epoch_vote_account: epoch_vote_accounts.contains_key(pubkey),
last_vote,
}
})
@@ -1038,7 +1036,8 @@ impl JsonRpcRequestProcessor {
.get_account(&stake_history::id())
.ok_or_else(Error::internal_error)?;
let stake_history =
StakeHistory::from_account(&stake_history_account).ok_or_else(Error::internal_error)?;
solana_sdk::account::from_account::<StakeHistory>(&stake_history_account)
.ok_or_else(Error::internal_error)?;
let (active, activating, deactivating) =
delegation.stake_activating_and_deactivating(epoch, Some(&stake_history));
@@ -1269,6 +1268,18 @@ impl JsonRpcRequestProcessor {
}
}
fn verify_transaction(transaction: &Transaction) -> Result<()> {
if transaction.verify().is_err() {
return Err(RpcCustomError::TransactionSignatureVerificationFailure.into());
}
if let Err(e) = transaction.verify_precompiles() {
return Err(RpcCustomError::TransactionPrecompileVerificationFailure(e).into());
}
Ok(())
}
fn verify_filter(input: &RpcFilterType) -> Result<()> {
input
.verify()
@@ -2276,12 +2287,8 @@ impl RpcSol for RpcSolImpl {
.unwrap_or(0);
if !config.skip_preflight {
if transaction.verify().is_err() {
return Err(RpcCustomError::TransactionSignatureVerificationFailure.into());
}
if let Err(e) = transaction.verify_precompiles() {
return Err(RpcCustomError::TransactionPrecompileVerificationFailure(e).into());
if let Err(e) = verify_transaction(&transaction) {
return Err(e);
}
if meta.health.check() != RpcHealthStatus::Ok {
@@ -2318,26 +2325,20 @@ impl RpcSol for RpcSolImpl {
let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58);
let (_, transaction) = deserialize_transaction(data, encoding)?;
let mut result = if config.sig_verify {
transaction.verify()
} else {
Ok(())
};
if config.sig_verify {
if let Err(e) = verify_transaction(&transaction) {
return Err(e);
}
}
let bank = &*meta.bank(config.commitment);
let logs = if result.is_ok() {
let (transaction_result, log_messages) = bank.simulate_transaction(transaction);
result = transaction_result;
Some(log_messages)
} else {
None
};
let (result, logs) = bank.simulate_transaction(transaction);
Ok(new_response(
&bank,
RpcSimulateTransactionResult {
err: result.err(),
logs,
logs: Some(logs),
},
))
}
@@ -2609,6 +2610,7 @@ fn deserialize_transaction(
) -> Result<(Vec<u8>, Transaction)> {
let wire_transaction = match encoding {
UiTransactionEncoding::Base58 => {
inc_new_counter_info!("rpc-base58_encoded_tx", 1);
if encoded_transaction.len() > WORST_CASE_BASE58_TX {
return Err(Error::invalid_params(format!(
"encoded transaction too large: {} bytes (max: encoded/raw {}/{})",
@@ -2622,6 +2624,7 @@ fn deserialize_transaction(
.map_err(|e| Error::invalid_params(format!("{:?}", e)))?
}
UiTransactionEncoding::Base64 => {
inc_new_counter_info!("rpc-base64_encoded_tx", 1);
if encoded_transaction.len() > WORST_CASE_BASE64_TX {
return Err(Error::invalid_params(format!(
"encoded transaction too large: {} bytes (max: encoded/raw {}/{})",
@@ -2687,10 +2690,8 @@ pub mod tests {
use jsonrpc_core_client::transports::local;
use solana_client::rpc_filter::{Memcmp, MemcmpEncodedBytes};
use solana_ledger::{
blockstore::entries_to_test_shreds,
blockstore_meta::PerfSample,
blockstore_processor::fill_blockstore_slot_with_ticks,
entry::next_entry_mut,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
};
use solana_runtime::commitment::BlockCommitment;
@@ -2718,7 +2719,7 @@ pub mod tests {
state::AccountState as TokenAccountState,
state::Mint,
};
use std::{collections::HashMap, time::Duration};
use std::collections::HashMap;
const TEST_MINT_LAMPORTS: u64 = 1_000_000;
const TEST_SLOTS_PER_EPOCH: u64 = DELINQUENT_VALIDATOR_SLOT_DISTANCE + 1;
@@ -2737,13 +2738,12 @@ pub mod tests {
}
fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler {
start_rpc_handler_with_tx_and_blockstore(pubkey, vec![], 0)
start_rpc_handler_with_tx_and_blockstore(pubkey, vec![])
}
fn start_rpc_handler_with_tx_and_blockstore(
pubkey: &Pubkey,
blockstore_roots: Vec<Slot>,
default_timestamp: i64,
) -> RpcHandler {
let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
let bank = bank_forks.read().unwrap().working_bank();
@@ -2775,29 +2775,6 @@ pub mod tests {
CommitmentSlots::new_from_slot(bank.slot()),
)));
// Add timestamp vote to blockstore
let vote = Vote {
slots: vec![1],
hash: Hash::default(),
timestamp: Some(default_timestamp),
};
let vote_ix = vote_instruction::vote(
&leader_vote_keypair.pubkey(),
&leader_vote_keypair.pubkey(),
vote,
);
let vote_msg = Message::new(&[vote_ix], Some(&leader_vote_keypair.pubkey()));
let vote_tx = Transaction::new(&[&*leader_vote_keypair], vote_msg, Hash::default());
let shreds = entries_to_test_shreds(
vec![next_entry_mut(&mut Hash::default(), 0, vec![vote_tx])],
1,
0,
true,
0,
);
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(&[1]).unwrap();
let mut roots = blockstore_roots;
if !roots.is_empty() {
roots.retain(|&x| x > 0);
@@ -2821,9 +2798,14 @@ pub mod tests {
bank_forks.write().unwrap().set_root(*root, &None, Some(0));
let mut stakes = HashMap::new();
stakes.insert(leader_vote_keypair.pubkey(), (1, Account::default()));
blockstore
.cache_block_time(*root, Duration::from_millis(400), &stakes)
.unwrap();
let block_time = bank_forks
.read()
.unwrap()
.get(*root)
.unwrap()
.clock()
.unix_timestamp;
blockstore.cache_block_time(*root, block_time).unwrap();
}
}
@@ -2904,7 +2886,7 @@ pub mod tests {
#[test]
fn test_rpc_request_processor_new() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let genesis = create_genesis_config(100);
let bank = Arc::new(Bank::new(&genesis.genesis_config));
bank.transfer(20, &genesis.mint_keypair, &bob_pubkey)
@@ -2964,7 +2946,7 @@ pub mod tests {
#[test]
fn test_rpc_get_cluster_nodes() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -2991,7 +2973,7 @@ pub mod tests {
#[test]
fn test_rpc_get_recent_performance_samples() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentPerformanceSamples"}"#;
@@ -3020,7 +3002,7 @@ pub mod tests {
#[test]
fn test_rpc_get_recent_performance_samples_invalid_limit() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req =
@@ -3046,7 +3028,7 @@ pub mod tests {
#[test]
fn test_rpc_get_slot_leader() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -3066,7 +3048,7 @@ pub mod tests {
#[test]
fn test_rpc_get_tx_count() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let genesis = create_genesis_config(10);
let bank = Arc::new(Bank::new(&genesis.genesis_config));
// Add 4 transactions
@@ -3096,7 +3078,7 @@ pub mod tests {
#[test]
fn test_rpc_minimum_ledger_slot() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = r#"{"jsonrpc":"2.0","id":1,"method":"minimumLedgerSlot"}"#;
@@ -3111,7 +3093,7 @@ pub mod tests {
#[test]
fn test_rpc_get_total_supply() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = r#"{"jsonrpc":"2.0","id":1,"method":"getTotalSupply"}"#;
@@ -3136,7 +3118,7 @@ pub mod tests {
#[test]
fn test_get_supply() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSupply"}"#;
let res = io.handle_request_sync(&req, meta);
@@ -3161,7 +3143,7 @@ pub mod tests {
#[test]
fn test_get_largest_accounts() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io, meta, alice, ..
} = start_rpc_handler_with_tx(&bob_pubkey);
@@ -3220,7 +3202,7 @@ pub mod tests {
#[test]
fn test_rpc_get_minimum_balance_for_rent_exemption() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let data_len = 50;
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
@@ -3252,7 +3234,7 @@ pub mod tests {
#[test]
fn test_rpc_get_inflation() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = r#"{"jsonrpc":"2.0","id":1,"method":"getInflationGovernor"}"#;
@@ -3299,7 +3281,7 @@ pub mod tests {
#[test]
fn test_rpc_get_epoch_schedule() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = r#"{"jsonrpc":"2.0","id":1,"method":"getEpochSchedule"}"#;
@@ -3321,7 +3303,7 @@ pub mod tests {
#[test]
fn test_rpc_get_leader_schedule() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
for req in [
@@ -3377,7 +3359,7 @@ pub mod tests {
#[test]
fn test_rpc_get_account_info() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(
@@ -3405,7 +3387,7 @@ pub mod tests {
.expect("actual response deserialization");
assert_eq!(expected, result);
let address = Pubkey::new_rand();
let address = solana_sdk::pubkey::new_rand();
let data = vec![1, 2, 3, 4, 5];
let mut account = Account::new(42, 5, &Pubkey::default());
account.data = data.clone();
@@ -3459,7 +3441,7 @@ pub mod tests {
#[test]
fn test_rpc_get_multiple_accounts() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let address = Pubkey::new(&[9; 32]);
@@ -3586,7 +3568,7 @@ pub mod tests {
..
} = start_rpc_handler_with_tx(&bob.pubkey());
let new_program_id = Pubkey::new_rand();
let new_program_id = solana_sdk::pubkey::new_rand();
let tx = system_transaction::assign(&bob, blockhash, &new_program_id);
bank.process_transaction(&tx).unwrap();
let req = format!(
@@ -3633,7 +3615,7 @@ pub mod tests {
bank.process_transaction(&tx).unwrap();
let nonce_keypair1 = Keypair::new();
let authority = Pubkey::new_rand();
let authority = solana_sdk::pubkey::new_rand();
let instruction = system_instruction::create_nonce_account(
&alice.pubkey(),
&nonce_keypair1.pubkey(),
@@ -3776,7 +3758,7 @@ pub mod tests {
#[test]
fn test_rpc_simulate_transaction() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -3820,13 +3802,14 @@ pub mod tests {
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = json!({
"jsonrpc": "2.0",
"result": {
"context":{"slot":0},
"value":{"err":"SignatureFailure", "logs":null}
"jsonrpc":"2.0",
"error": {
"code": -32003,
"message": "Transaction signature verification failure"
},
"id": 1,
"id":1
});
let expected: Response =
serde_json::from_value(expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
@@ -3877,7 +3860,7 @@ pub mod tests {
#[test]
#[should_panic]
fn test_rpc_simulate_transaction_panic_on_unfrozen_bank() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -3903,7 +3886,7 @@ pub mod tests {
#[test]
fn test_rpc_confirm_tx() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -3935,7 +3918,7 @@ pub mod tests {
#[test]
fn test_rpc_get_signature_status() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -4005,7 +3988,7 @@ pub mod tests {
#[test]
fn test_rpc_get_signature_statuses() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -4061,7 +4044,7 @@ pub mod tests {
#[test]
fn test_rpc_get_recent_blockhash() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -4092,7 +4075,7 @@ pub mod tests {
#[test]
fn test_rpc_get_fees() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -4124,7 +4107,7 @@ pub mod tests {
#[test]
fn test_rpc_get_fee_calculator_for_blockhash() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
@@ -4172,7 +4155,7 @@ pub mod tests {
#[test]
fn test_rpc_get_fee_rate_governor() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFeeRateGovernor"}"#;
@@ -4201,7 +4184,7 @@ pub mod tests {
#[test]
fn test_rpc_fail_request_airdrop() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
// Expect internal error because no faucet is available
@@ -4271,8 +4254,12 @@ pub mod tests {
);
SendTransactionService::new(tpu_address, &bank_forks, None, receiver);
let mut bad_transaction =
system_transaction::transfer(&mint_keypair, &Pubkey::new_rand(), 42, Hash::default());
let mut bad_transaction = system_transaction::transfer(
&mint_keypair,
&solana_sdk::pubkey::new_rand(),
42,
Hash::default(),
);
// sendTransaction will fail because the blockhash is invalid
let req = format!(
@@ -4302,8 +4289,12 @@ pub mod tests {
r#"{"jsonrpc":"2.0","error":{"code":-32002,"message":"Transaction simulation failed: Transaction failed to sanitize accounts offsets correctly","data":{"err":"SanitizeFailure","logs":[]}},"id":1}"#.to_string(),
)
);
let mut bad_transaction =
system_transaction::transfer(&mint_keypair, &Pubkey::new_rand(), 42, recent_blockhash);
let mut bad_transaction = system_transaction::transfer(
&mint_keypair,
&solana_sdk::pubkey::new_rand(),
42,
recent_blockhash,
);
// sendTransaction will fail due to poor node health
health.stub_set_health_status(Some(RpcHealthStatus::Behind));
@@ -4386,7 +4377,7 @@ pub mod tests {
#[test]
fn test_rpc_verify_pubkey() {
let pubkey = Pubkey::new_rand();
let pubkey = solana_sdk::pubkey::new_rand();
assert_eq!(verify_pubkey(pubkey.to_string()).unwrap(), pubkey);
let bad_pubkey = "a1b2c3d4";
assert_eq!(
@@ -4397,7 +4388,12 @@ pub mod tests {
#[test]
fn test_rpc_verify_signature() {
let tx = system_transaction::transfer(&Keypair::new(), &Pubkey::new_rand(), 20, hash(&[0]));
let tx = system_transaction::transfer(
&Keypair::new(),
&solana_sdk::pubkey::new_rand(),
20,
hash(&[0]),
);
assert_eq!(
verify_signature(&tx.signatures[0].to_string()).unwrap(),
tx.signatures[0]
@@ -4489,7 +4485,7 @@ pub mod tests {
#[test]
fn test_rpc_get_identity() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = r#"{"jsonrpc":"2.0","id":1,"method":"getIdentity"}"#;
@@ -4510,7 +4506,7 @@ pub mod tests {
#[test]
fn test_rpc_get_version() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = r#"{"jsonrpc":"2.0","id":1,"method":"getVersion"}"#;
@@ -4597,7 +4593,7 @@ pub mod tests {
#[test]
fn test_rpc_get_block_commitment() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -4651,7 +4647,7 @@ pub mod tests {
#[test]
fn test_get_confirmed_block() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
@@ -4749,14 +4745,14 @@ pub mod tests {
#[test]
fn test_get_confirmed_blocks() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let roots = vec![0, 1, 3, 4, 8];
let RpcHandler {
io,
meta,
block_commitment_cache,
..
} = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots.clone(), 0);
} = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots.clone());
block_commitment_cache
.write()
.unwrap()
@@ -4826,14 +4822,14 @@ pub mod tests {
#[test]
fn test_get_confirmed_blocks_with_limit() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let roots = vec![0, 1, 3, 4, 8];
let RpcHandler {
io,
meta,
block_commitment_cache,
..
} = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots, 0);
} = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots);
block_commitment_cache
.write()
.unwrap()
@@ -4889,19 +4885,21 @@ pub mod tests {
#[test]
fn test_get_block_time() {
let bob_pubkey = Pubkey::new_rand();
let base_timestamp = 1_576_183_541;
let bob_pubkey = solana_sdk::pubkey::new_rand();
let RpcHandler {
io,
meta,
bank,
block_commitment_cache,
bank_forks,
..
} = start_rpc_handler_with_tx_and_blockstore(
&bob_pubkey,
vec![1, 2, 3, 4, 5, 6, 7],
base_timestamp,
);
} = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, vec![1, 2, 3, 4, 5, 6, 7]);
let base_timestamp = bank_forks
.read()
.unwrap()
.get(0)
.unwrap()
.unix_timestamp_from_genesis();
block_commitment_cache
.write()
.unwrap()
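Note: test_get_block_time no longer threads a hard-coded base_timestamp through the fixture; the reference time is read back from the root bank, so the assertion stays in sync with the bank's own genesis-derived clock. A rough sketch of that lookup, assuming the BankForks handle used by the fixture:

// Sketch: the reference timestamp is now read from the root bank (slot 0)
// via unix_timestamp_from_genesis(), instead of being passed into the
// test fixture as a separate base_timestamp argument.
use std::sync::{Arc, RwLock};
use solana_runtime::bank_forks::BankForks;

fn root_bank_timestamp(bank_forks: &Arc<RwLock<BankForks>>) -> i64 {
    bank_forks
        .read()
        .unwrap()
        .get(0) // root bank
        .unwrap()
        .unix_timestamp_from_genesis()
}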
@@ -4976,7 +4974,7 @@ pub mod tests {
leader_vote_keypair,
block_commitment_cache,
..
} = start_rpc_handler_with_tx(&Pubkey::new_rand());
} = start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand());
assert_eq!(bank.vote_accounts().len(), 1);
@@ -5192,7 +5190,8 @@ pub mod tests {
#[test]
fn test_token_rpcs() {
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&Pubkey::new_rand());
let RpcHandler { io, meta, bank, .. } =
start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand());
let mut account_data = vec![0; TokenAccount::get_packed_len()];
let mint = SplTokenPubkey::new(&[2; 32]);
@@ -5215,7 +5214,7 @@ pub mod tests {
owner: spl_token_id_v2_0(),
..Account::default()
};
let token_account_pubkey = Pubkey::new_rand();
let token_account_pubkey = solana_sdk::pubkey::new_rand();
bank.store_account(&token_account_pubkey, &token_account);
// Add the mint
@@ -5253,7 +5252,7 @@ pub mod tests {
// Test non-existent token account
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenAccountBalance","params":["{}"]}}"#,
Pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
@@ -5278,7 +5277,7 @@ pub mod tests {
// Test non-existent mint address
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenSupply","params":["{}"]}}"#,
Pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
@@ -5286,7 +5285,7 @@ pub mod tests {
assert!(result.get("error").is_some());
// Add another token account with the same owner, delegate, and mint
let other_token_account_pubkey = Pubkey::new_rand();
let other_token_account_pubkey = solana_sdk::pubkey::new_rand();
bank.store_account(&other_token_account_pubkey, &token_account);
// Add another token account with the same owner and delegate but different mint
@@ -5309,7 +5308,7 @@ pub mod tests {
owner: spl_token_id_v2_0(),
..Account::default()
};
let token_with_different_mint_pubkey = Pubkey::new_rand();
let token_with_different_mint_pubkey = solana_sdk::pubkey::new_rand();
bank.store_account(&token_with_different_mint_pubkey, &token_account);
// Test getTokenAccountsByOwner with Token program id returns all accounts, regardless of Mint address
@@ -5390,7 +5389,7 @@ pub mod tests {
"params":["{}", {{"programId": "{}"}}]
}}"#,
owner,
Pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
@@ -5404,7 +5403,7 @@ pub mod tests {
"params":["{}", {{"mint": "{}"}}]
}}"#,
owner,
Pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
@@ -5419,7 +5418,7 @@ pub mod tests {
"method":"getTokenAccountsByOwner",
"params":["{}", {{"programId": "{}"}}]
}}"#,
Pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
spl_token_id_v2_0(),
);
let res = io.handle_request_sync(&req, meta.clone());
@@ -5473,7 +5472,7 @@ pub mod tests {
"params":["{}", {{"programId": "{}"}}]
}}"#,
delegate,
Pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
@@ -5487,7 +5486,7 @@ pub mod tests {
"params":["{}", {{"mint": "{}"}}]
}}"#,
delegate,
Pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
@@ -5502,7 +5501,7 @@ pub mod tests {
"method":"getTokenAccountsByDelegate",
"params":["{}", {{"programId": "{}"}}]
}}"#,
Pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
spl_token_id_v2_0(),
);
let res = io.handle_request_sync(&req, meta.clone());
@@ -5550,7 +5549,7 @@ pub mod tests {
owner: spl_token_id_v2_0(),
..Account::default()
};
let token_with_smaller_balance = Pubkey::new_rand();
let token_with_smaller_balance = solana_sdk::pubkey::new_rand();
bank.store_account(&token_with_smaller_balance, &token_account);
// Test largest token accounts
@@ -5588,7 +5587,8 @@ pub mod tests {
#[test]
fn test_token_parsing() {
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&Pubkey::new_rand());
let RpcHandler { io, meta, bank, .. } =
start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand());
let mut account_data = vec![0; TokenAccount::get_packed_len()];
let mint = SplTokenPubkey::new(&[2; 32]);
@@ -5611,7 +5611,7 @@ pub mod tests {
owner: spl_token_id_v2_0(),
..Account::default()
};
let token_account_pubkey = Pubkey::new_rand();
let token_account_pubkey = solana_sdk::pubkey::new_rand();
bank.store_account(&token_account_pubkey, &token_account);
// Add the mint


@@ -505,7 +505,7 @@ mod tests {
mint_keypair: alice,
..
} = create_genesis_config(10_000);
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
@@ -551,7 +551,7 @@ mod tests {
..
} = create_genesis_config(10_000);
let new_stake_authority = Pubkey::new_rand();
let new_stake_authority = solana_sdk::pubkey::new_rand();
let stake_authority = Keypair::new();
let from = Keypair::new();
let stake_account = Keypair::new();
@@ -748,7 +748,7 @@ mod tests {
#[test]
#[serial]
fn test_account_unsubscribe() {
let bob_pubkey = Pubkey::new_rand();
let bob_pubkey = solana_sdk::pubkey::new_rand();
let session = create_session();
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::new(&genesis_config))));


@@ -550,7 +550,11 @@ mod tests {
let cluster_info = Arc::new(ClusterInfo::default());
let health_check_slot_distance = 123;
let override_health_check = Arc::new(AtomicBool::new(false));
let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];
let trusted_validators = vec![
solana_sdk::pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
solana_sdk::pubkey::new_rand(),
];
let health = Arc::new(RpcHealth::new(
cluster_info.clone(),
@@ -566,6 +570,7 @@ mod tests {
// No account hashes for any trusted validators == "behind"
cluster_info.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]);
cluster_info.flush_push_queue();
assert_eq!(rm.health_check(), "behind");
override_health_check.store(true, Ordering::Relaxed);
assert_eq!(rm.health_check(), "ok");
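Note: this hunk adds a third random trusted validator and an extra gossip flush plus assertion: while the trusted set has only gossiped stale account hashes the node reports "behind", and flipping the override flag forces "ok". A simplified, hypothetical sketch of that decision, not the actual RpcHealth implementation:

// Simplified illustration of the health-check decision exercised above.
// NOT the RpcHealth implementation; names and thresholds are hypothetical.
fn health_status(
    override_health_check: bool,
    my_latest_slot: u64,
    best_trusted_slot: Option<u64>,
    health_check_slot_distance: u64,
) -> &'static str {
    if override_health_check {
        return "ok";
    }
    match best_trusted_slot {
        Some(trusted) if my_latest_slot + health_check_slot_distance >= trusted => "ok",
        _ => "behind", // no trusted hashes seen, or we trail the trusted set too far
    }
}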


@@ -1249,8 +1249,11 @@ pub(crate) mod tests {
.process_transaction(&past_bank_tx)
.unwrap();
let next_bank =
Bank::new_from_parent(&bank_forks.banks[&0].clone(), &Pubkey::new_rand(), 1);
let next_bank = Bank::new_from_parent(
&bank_forks.banks[&0].clone(),
&solana_sdk::pubkey::new_rand(),
1,
);
bank_forks.insert(next_bank);
bank_forks
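Note: aside from the pubkey-helper rename, this hunk only reflows the Bank::new_from_parent call across several lines. For reference, a minimal sketch of spawning a child bank in a test fork, assuming the solana-runtime API shown above:

// Sketch: spawn a child bank one slot past the root with a random collector
// pubkey and register it, mirroring the reflowed call in the hunk above.
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::clock::Slot;

fn add_child_bank(bank_forks: &mut BankForks, slot: Slot) {
    let parent = bank_forks.banks[&0].clone();
    let child = Bank::new_from_parent(&parent, &solana_sdk::pubkey::new_rand(), slot);
    bank_forks.insert(child);
}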

Some files were not shown because too many files have changed in this diff.