Compare commits


230 Commits

Author SHA1 Message Date
437c356626 Discard pre hard fork persisted tower if hard-forking (#13536) (#13550)
* Discard pre hard fork persisted tower if hard-forking

* Relax config.require_tower

* Add cluster test

* nits

* Remove unnecessary check

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit 9821a7754c)

Co-authored-by: carllin <wumu727@gmail.com>
2020-11-12 15:43:43 +00:00
fd68f8ba2e program-test now generates new blockhashes for test usage 2020-11-11 20:29:20 -08:00
2374664e95 Custom heap is BPF only (#13537) 2020-11-11 16:37:18 -08:00
2cb9ca5966 Fix slow/stuck unstaking due to toggling in epoch (#13501) (#13535)
* Fix slow/stuck unstaking due to toggling in epoch

* nits

* nits

* Add stake_program_v2 feature status check to cli

Co-authored-by: Tyera Eulberg <tyera@solana.com>
(cherry picked from commit 89b474e192)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-11 22:27:10 +00:00
4f247a232f Validator: Periodically log what we're waiting for during --wait-for-supermajority (#13531)
(cherry picked from commit 38f15e41b5)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-11 21:29:50 +00:00
15a2c73826 Use a non-zero fee_calculator for a more realistic test environment (#13525)
(cherry picked from commit 58724cb687)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-11 09:59:57 +00:00
d23f1436c5 docs: add log messages to json-rpc docs (#13317) 2020-11-11 00:43:32 -08:00
70c87d1a23 Add stubs and heap region definitions (#13521) (#13523)
* Add stubs and heap region definitions

* nudge

(cherry picked from commit e390c8cb7f)

Co-authored-by: Jack May <jack@solana.com>
2020-11-11 06:48:02 +00:00
053ce10ce5 Refactor function (#13294) (#13520)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit 2c2432fddc)

Co-authored-by: carllin <wumu727@gmail.com>
2020-11-11 04:07:51 +00:00
055eb360c2 Add printf declaration (#13514) (#13518)
(cherry picked from commit 9ca8e98525)

Co-authored-by: Jack May <jack@solana.com>
2020-11-11 01:50:42 +00:00
25cd1ceeeb Fix parsing CreateAccountWithSeed instructions (#13513) (#13517)
* Reduce required num_system_accounts and handle 2-account instructions properly

* Update CreateAccountWithSeed account docs to be correct

* Add CreateAccountWithSeed test

(cherry picked from commit 91f4e99b4c)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-11 01:13:12 +00:00
52ee3b1cee watchtower: Fix all clear duration message (#13510)
(cherry picked from commit 2a96e722b4)

Co-authored-by: Justin Starry <justin@solana.com>
2020-11-10 19:11:51 +00:00
bbadcca414 Bump token version fetched for localnet (#13490) (#13506)
(cherry picked from commit 3282334741)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-10 18:04:15 +00:00
e9eba97299 Fix signature access (#13491) (#13503)
(cherry picked from commit 70c4626efe)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-11-10 17:53:43 +00:00
920b63944e Make testnet section less ambiguous (#13504) (#13508)
(cherry picked from commit 599dae8f09)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-10 17:12:57 +00:00
8104895a07 Send RPC notification when account is deleted (#13440) (#13496)
* Send RPC notification when account is deleted

* Remove unwrap

(cherry picked from commit a97c04b400)

Co-authored-by: Justin Starry <justin@solana.com>
2020-11-10 13:07:51 +00:00
c9e646b86b Bump version to v1.4.7 (#13488) 2020-11-10 05:55:26 +00:00
7c47db1e3d align cluster and sim logging as hex (#13484) (#13486)
(cherry picked from commit c280d40b40)

Co-authored-by: Jack May <jack@solana.com>
2020-11-10 03:58:19 +00:00
c619e9b560 Docs update rpc getaccountinfo (bp #13483) (#13487)
* docs: Wrap RPC `getAccountInfo` at 80 char

(cherry picked from commit 1d7c00c915)

* docs: Consistently use "jsonParsed" param for RPC `getAccountInfo`

(cherry picked from commit 87924c7111)

* docs: Consistent use of "jsonParsed" throughout RPC reference

(cherry picked from commit fb815294b3)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-10 03:41:26 +00:00
ccd48923a0 Fix Bank accounts hash mismatch related to Clock::unix_timestamp (#13477) (#13485)
* Test for different ancestors with mismatch bank hash

* Test cleanup

* Remove nondeterministic ancestor check

* Update timestamp bounding feature key

* Update design doc

* Filter recent_timestamps to nodes voting within the last epoch

Co-authored-by: Stephen Akridge <sakridge@gmail.com>

Co-authored-by: Stephen Akridge <sakridge@gmail.com>
2020-11-10 03:35:22 +00:00
4e797cc867 Clean up Delegation::stake_activating_and_deactivating (#13471) (#13473)
(cherry picked from commit 5306eb93cc)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-09 09:57:32 +00:00
9627bfced3 propagates errors out of Packet::from_data (#13445) (#13470)
Packet::from_data is ignoring serialization errors:
https://github.com/solana-labs/solana/blob/d08c3232e/sdk/src/packet.rs#L42-L48
This is likely never useful, as the packet will still be sent over the wire,
consuming bandwidth, but at the receiving end it will either fail to
deserialize or be invalid.
This commit propagates the errors out of the function to the
call-site, allowing the call-site to handle them.

(cherry picked from commit 73ac104df2)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-11-08 16:25:36 +00:00
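
As an illustration of the error-propagation change described in the commit above, here is a minimal sketch using a simplified stand-in for `Packet` (the real type lives in `sdk/src/packet.rs`); bincode is assumed as the serializer, matching the linked code, and the constant and fields here are illustrative only:

```rust
use bincode::ErrorKind;
use serde::Serialize;

const PACKET_DATA_SIZE: usize = 1232; // illustrative; the real constant is defined in packet.rs

struct Packet {
    data: [u8; PACKET_DATA_SIZE],
    size: usize,
}

impl Packet {
    // Before: serialization failures were ignored and a useless packet was still produced.
    // After: the error is returned so the call-site can decide what to do with it.
    fn from_data<T: Serialize>(value: &T) -> Result<Self, bincode::Error> {
        let bytes = bincode::serialize(value)?; // propagate instead of swallowing the error
        if bytes.len() > PACKET_DATA_SIZE {
            return Err(Box::new(ErrorKind::SizeLimit));
        }
        let mut packet = Packet {
            data: [0u8; PACKET_DATA_SIZE],
            size: bytes.len(),
        };
        packet.data[..bytes.len()].copy_from_slice(&bytes);
        Ok(packet)
    }
}
```
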
f823b10597 Clean up Delegation::stake_and_activating (#13460) (#13469)
(cherry picked from commit 737d3e376d)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-08 09:47:47 +00:00
c9e56c9749 Output more inflation calc details in ledger-tool (#13345) (#13467)
* Output more inflation calc details in ledger-tool

* Fix broken ci...

* Rename confusing variables

* Fix panic by wrapping PointValue with Option...

* Minor modifications

* Remove needless explicit flush; Drop already does it

* Yet more csv field adjustments

* Add data_size and rename epochs to earned_epochs

* Introduce null_tracer

* Unwrap Option in new_from_parent_with_tracer

* Don't shorten identifiers

* Allow irrefutable_let_patterns temporarily

* More null_tracer

* More field adjustments

(cherry picked from commit a81e7e7749)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-08 09:01:36 +00:00
da7482d631 Add PATH to GNU readlink 2020-11-07 11:28:28 -08:00
97650c7f37 Install coreutils on osx for 'readlink -f' support 2020-11-06 23:01:32 -08:00
e738bf1c9a Bump version to v1.4.6 2020-11-07 02:49:14 +00:00
afebb2a8a5 CLI: Make clear that nonce account 'Nonce' field is a blockhash
(cherry picked from commit b4790120cb)
2020-11-06 17:19:52 -08:00
4e4fd03b65 Add builtin mem tests (bp #13429) (#13437)
* Add builtin mem tests (#13429)


(cherry picked from commit 84b139cc94)

* resolve crate version

* nudge

Co-authored-by: Jack May <jack@solana.com>
2020-11-07 01:15:35 +00:00
049ca18dc5 Fix stake split rent-exempt adjustment (#13357) (#13453)
* Add failing tests

* Fix stake split

* Calculate split rent-exempt-reserve and use

* Add comment in rent.rs

* Add tests for edge cases when splitting to larger accounts, and reject overflow splits

* Reframe InsufficientFunds checks in terms of lamports var

* Test hardening review comments

(cherry picked from commit 4c5f345798)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-06 22:05:57 +00:00
495c64556e cargo-build-bpf/cargo-test-bpf now support --workspace/--all (#13451)
(cherry picked from commit 0ea795caa8)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-06 21:43:12 +00:00
747e91d434 Fix stake redelegate (bp #13358) (#13450)
* stake: Add redelegation failing test

(cherry picked from commit 491ad59d2e)

* stake: Consider withdraws when redelegating

(cherry picked from commit fe1e08b9ad)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-06 20:07:46 +00:00
6d4f6e79b0 cargo-test-bpf now sets the "test-bpf" feature for crate tests (#13447)
The feature allows tests to distinguish between `cargo test` and
`cargo test-bpf`, primarily to exclude CPI tests that
require the system program under `cargo test`, as the path to enabling
CPI in `cargo test`-based testing is unclear.

(cherry picked from commit 1a70a2a25b)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-06 19:39:18 +00:00
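
For illustration, a hedged sketch of how a program crate's tests might use the `test-bpf` feature this commit introduces to gate CPI tests; the feature name comes from the commit message, while the test bodies and crate layout are hypothetical:

```rust
// The program crate's Cargo.toml would declare an empty feature, e.g.:
//   [features]
//   test-bpf = []
// `cargo test-bpf` enables it; plain `cargo test` does not.

// Runs under both `cargo test` and `cargo test-bpf`.
#[test]
fn test_instruction_encoding() {
    assert_eq!(1 + 1, 2);
}

// Compiled and run only under `cargo test-bpf`, e.g. a CPI test that
// needs the system program to be available.
#[cfg(feature = "test-bpf")]
#[test]
fn test_cpi_into_system_program() {
    // ...cross-program invocation exercised here...
}
```
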
98e9f34704 Feature-gate stake program (#13394) (#13439)
* Add legacy stake-program handling

* Strip out duplicative legacy code

* Add feature for stake-program-fix

* Feature-deploy new stake program

* Expand comment

(cherry picked from commit 1b1d9f6b0c)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-06 09:25:34 +00:00
70f74174e8 program-test: Remove special case for Rent sysvar (bp #13434) (#13435)
* Revert "Include Rent in ProgramTest::start() output"

This reverts commit c3d2d2134c.

(cherry picked from commit 920cd5285a)

* Add get_rent()

(cherry picked from commit 9a1c1fbab8)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-06 05:24:44 +00:00
70985f82f1 CI: Check monorepo for consistent crate versions (bp #13431) (#13433)
* increment-cargo-version.sh: Add check subcommand

(cherry picked from commit 5d4015358a)

* CI: Check monorepo for consistent crate versions

(cherry picked from commit 7a4e293b3b)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-06 02:29:26 +00:00
3b2bdd9f8a Fix duplicate records of inner instructions (#13380) (#13413)
* Fix duplicate records of inner instructions

* fix tests

* fix clippy

* Remove bad_inner_instructions

(cherry picked from commit c24fbb6f8b)

Co-authored-by: Justin Starry <justin@solana.com>
2020-11-06 01:30:24 +00:00
d33ae59fbf SPL Associated Token Account plumbing (bp #13398) (#13430)
* Fetch associated-program-account

(cherry picked from commit 4d553f4879)

* Add SPL Associated Token Account Program label to explorer

(cherry picked from commit 2d24160376)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-06 00:01:31 +00:00
9ead7ca11a Define BPF_OUT_DIR for program-test, also handle -- arguments better 2020-11-05 22:42:35 +00:00
dbcef35f7d Search BPF_OUT_DIR for programs 2020-11-05 22:42:35 +00:00
9e733d7d9b Check file modification times before strip and dump 2020-11-05 22:42:35 +00:00
39f1240ec2 Cargo.lock 2020-11-05 22:42:35 +00:00
fa249721fa Search for program files in 'target/deploy' 2020-11-05 22:42:35 +00:00
137793cd4c Add cargo-test-bpf 2020-11-05 22:42:35 +00:00
47d8608aee Remove unneeded .gitignore 2020-11-05 22:42:35 +00:00
ed410aea10 adds the missing slash in cargo path (#13424) (#13427)
(cherry picked from commit 44b12a1594)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-11-05 22:10:06 +00:00
957dfa8f73 docs: Clarify the commitment levels based on questions (#13387)
* Clarify the commitment levels based on questions

Many people have asked about what commitment levels mean, and which to
choose.  This update includes some of the language at
`sdk/src/commitment_config.rs` and a recommendation for different use
cases.

Additionally, the preflight commitment documentation was out of date,
specifying that "max" was always used, and this is no longer the case.

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Update docs/src/developing/clients/jsonrpc-api.md

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* Fix typo

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
(cherry picked from commit ede891a6c6)
2020-11-05 13:25:23 -08:00
98095b6f8d drops older gossip packets when load shedding (#13364) (#13423)
Gossip drops incoming packets when overloaded:
https://github.com/solana-labs/solana/blob/f6a73098a/core/src/cluster_info.rs#L2462-L2475
However, newer packets are dropped in favor of the older ones.
This is probably not ideal as newer packets are more likely to contain
more recent data, so dropping them will keep the validator state
lagging.

(cherry picked from commit 7f4debdad5)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-11-05 18:30:00 +00:00
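
A toy sketch of the load-shedding concern raised in the commit above: with a bounded queue, evicting from the front keeps the newest packets (which the commit argues carry the most recent data), rather than dropping the new arrivals. The packet type and capacity are illustrative only:

```rust
use std::collections::VecDeque;

const MAX_PENDING_PACKETS: usize = 1024; // illustrative bound

// Keep the newest packets: when the queue is full, evict the oldest entry
// instead of dropping the incoming one.
fn push_with_shedding(queue: &mut VecDeque<Vec<u8>>, packet: Vec<u8>) {
    if queue.len() >= MAX_PENDING_PACKETS {
        queue.pop_front(); // shed the oldest, likely stale, packet
    }
    queue.push_back(packet);
}
```
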
a2c32d7d0e shares the lock on gossip when processing prune messages (#13339) (#13422)
Processing prune messages acquires an exclusive lock on gossip:
https://github.com/solana-labs/solana/blob/55b0428ff/core/src/cluster_info.rs#L1824-L1825
This can be reduced to a shared lock if active-sets are changed to use
atomic bloom filters:
https://github.com/solana-labs/solana/blob/55b0428ff/core/src/crds_gossip_push.rs#L50

(cherry picked from commit 8f0796436a)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-11-05 17:18:22 +00:00
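
A rough sketch of the idea referenced above: if the active-set filter's bit array is built from atomics, inserting into it needs only `&self`, so prune handling can hold a shared (read) lock instead of an exclusive (write) lock. The bloom-filter details here are deliberately simplified and are not the real `Bloom` implementation:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::RwLock;

// A simplified "atomic bloom filter": bits can be set through a shared reference.
struct AtomicBloom {
    bits: Vec<AtomicU64>,
}

impl AtomicBloom {
    fn new(num_bits: usize) -> Self {
        let words = (num_bits + 63) / 64;
        Self {
            bits: (0..words).map(|_| AtomicU64::new(0)).collect(),
        }
    }

    // Note: &self, not &mut self -- no exclusive access required to insert.
    fn set(&self, bit: usize) {
        let word = (bit / 64) % self.bits.len();
        self.bits[word].fetch_or(1 << (bit % 64), Ordering::Relaxed);
    }
}

fn main() {
    let active_set = RwLock::new(AtomicBloom::new(1 << 16));
    // Processing a prune message can now take only a shared lock.
    let guard = active_set.read().unwrap();
    guard.set(42);
}
```
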
b15d826476 Allow feature builtins to overwrite existing builtins (#13403) (#13420)
* Allow feature builtins to overwrite existing builtins

* Add feature_builtin ActivationType

* Correctly retain idempotent for replacing case

* Fix test

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
(cherry picked from commit bc62313c66)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-05 16:51:25 +00:00
ed97a2578d measures processing time of each kind of gossip packets (#13366) (#13418)
(cherry picked from commit 118ce47b97)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-11-05 16:46:25 +00:00
89f61f0b41 Bump low end validator RAM requirement (#13406) (#13408) 2020-11-05 15:24:16 +08:00
04cc9c1148 CI: Use branch-versioned cargo throughout (#13411)
(cherry picked from commit 66c3c6c2b3)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-05 06:25:11 +00:00
8314ab4508 Improve invoke_signed() docs (#13405)
(cherry picked from commit 8c4995b22b)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-05 01:07:24 +00:00
3a98042753 Final program ELF is now placed in ./target/deploy/ instead of . (#13404)
(cherry picked from commit 661a935075)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-05 00:19:19 +00:00
60d316c9fd Bump spl-token and spl-memo crate versions (#13400)
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-04 23:16:41 +00:00
e324c221a6 uses thread-pool when handling push messages (#13338) (#13395)
From runtime profiles, the majority of the solana-listen thread's time:
https://github.com/solana-labs/solana/blob/55b0428ff/core/src/cluster_info.rs#L2720
is spent handling push messages. The code here:
https://github.com/solana-labs/solana/blob/55b0428ff/core/src/cluster_info.rs#L2272-L2364
may utilize the idle gossip thread-pool.

(cherry picked from commit 10fa4f45ab)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-11-04 20:28:56 +00:00
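
A minimal sketch of offloading per-message work onto an already existing rayon thread pool, as the commit above describes for push-message handling; the message type and handler body are placeholders:

```rust
use rayon::prelude::*;
use rayon::ThreadPoolBuilder;

// Placeholder for the real per-message processing (verification, crds insertion, ...).
fn handle_push_message(msg: &Vec<u8>) {
    let _ = msg.len();
}

fn main() {
    // In the validator this would be the existing, otherwise idle, gossip thread pool.
    let pool = ThreadPoolBuilder::new().num_threads(4).build().unwrap();
    let messages: Vec<Vec<u8>> = (0..100).map(|i| vec![i as u8; 32]).collect();

    pool.install(|| {
        messages.par_iter().for_each(handle_push_message);
    });
}
```
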
61246999ac Update SPL Token exchange documentation to include associated-token-account workflow (#13397)
(cherry picked from commit 97284adabb)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-04 19:57:02 +00:00
e476dc4eaa Comment Stakes::clone_with_epoch (#13388) (#13390)
(cherry picked from commit b0d1ae1d8b)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-04 12:29:13 +00:00
ee18e7668b Refine transaction log count message (#13378)
(cherry picked from commit b5ef319038)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-04 06:44:49 +00:00
62db7f6562 Surface transaction logs in rpc client (#13376)
(cherry picked from commit 6d9ca0ae15)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-04 02:12:46 +00:00
2e9b501355 more informative feature error message (#13373) (#13375)
(cherry picked from commit 04c5e6cc48)

Co-authored-by: Jack May <jack@solana.com>
2020-11-04 01:35:22 +00:00
089a99f1e3 docs: Mainnet-beta totally has smart contract enabled (#13369)
(cherry picked from commit fefa297877)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-11-03 19:21:18 +00:00
57961b1d17 Update sol_log_compute_units (#13360) (#13363)
(cherry picked from commit f6a73098a4)

Co-authored-by: Jack May <jack@solana.com>
2020-11-03 17:28:44 +00:00
fe8b2b7850 Include Rent in ProgramTest::start() output (#13356)
(cherry picked from commit c3d2d2134c)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-03 03:12:17 +00:00
0bf45cbab6 Small code cleanup and typo fixes (#13325) (#13341)
* Small code cleanup and typo fixes

* Clean up calculate_points_and_credits

(cherry picked from commit 0e4509c497)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-11-02 02:42:38 +00:00
5877427389 Bump version to v1.4.5 2020-11-01 17:05:45 +00:00
25141288f4 Fix typos (#13334) (#13335)
(cherry picked from commit af9a3f004e)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-11-01 07:06:46 +00:00
b28d10d46f Add bank timestamp bounding (bp #13120) (#13331)
* Add bounding feature

(cherry picked from commit 96b8aa8bd1)

* Repurpose unused as Clock::epoch_start_timestamp; add gated update

(cherry picked from commit 0049ab69fb)

* Add bounded timestamp-estimation method

(cherry picked from commit 80db6c0980)

* Use bounded timestamp-correction when feature enabled

(cherry picked from commit 90778615f6)

* Prevent block times from ever going backward

(cherry picked from commit eb2560e782)

* Sample votes from ancestors back to root

(cherry picked from commit 4260b3b416)

* Add Clock sysvar details, update struct docs

(cherry picked from commit 3a1e125ce3)

* Add design proposal and update validator-timestamp-oracle

(cherry picked from commit a3912bc084)

* Adapt to feature::create_account

Co-authored-by: Tyera Eulberg <tyera@solana.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-01 06:02:34 +00:00
b6dc48da75 Add solana-program-test crate (bp #13324) (#13329)
* MockInvokeContext::get_programs() implementation

(cherry picked from commit 8acc47ee1b)

* start_local_server() now works with Banks > 0

(cherry picked from commit fa4bab4608)

* Add solana-program-test crate

(cherry picked from commit 52a292a75b)

* rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-11-01 05:43:43 +00:00
f2d929c12d Move Feature struct to solana-program (#13321)
(cherry picked from commit 4b65e32f22)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-31 20:33:35 +00:00
c49b89091a cargo update -p futures-task / cargo update -p futures-util 2020-10-31 18:50:51 +00:00
23fe3a86d9 Switch to dirs-next 2020-10-31 18:50:51 +00:00
2f778725d6 Ignore stdweb 2020-10-31 18:50:51 +00:00
93a119a51e Print the entry type as well when checking archive (#13312) (#13314)
(cherry picked from commit bc7133d752)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-30 17:48:23 +00:00
65a7b536c9 Update AccountInfo comments (#13302)
(cherry picked from commit 72d41e5801)
2020-10-30 08:09:37 -07:00
1281483a8c Fix tower/blockstore unsync due to external causes (#12671) (#13310)
* Fix tower/blockstore unsync due to external causes

* Add and clean up long comments

* Clean up test

* Comment about warped_slot_history

* Run test_future_tower with master-only/master-slave

* Update comments about false leader condition

(cherry picked from commit 1df15d85c3)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-30 11:47:22 +00:00
4312841433 de-mut some InvokeContext methods (bp #13301) (#13309)
* de-mut some InvokeContext methods

(cherry picked from commit da9548fd12)

* Simplify CPI interface into MessageProcessor

(cherry picked from commit 9263ae1c60)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-30 10:35:59 +00:00
b859acbfea Upgrade tarpc and tokio (bp #13293) (#13300)
* Upgrade tarpc and tokio (#13293)

(cherry picked from commit ca00197009)

# Conflicts:
#	banks-client/Cargo.toml
#	banks-interface/Cargo.toml
#	banks-server/Cargo.toml

* rebase

Co-authored-by: Greg Fitzgerald <greg@solana.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-30 08:05:27 +00:00
40a3885d3b Native/builtin programs now receive an InvokeContext (bp #13286) (#13298)
* Native/builtin programs now receive an InvokeContext

(cherry picked from commit df8dab9d2b)

* Remove MessageProcessor::loaders

(cherry picked from commit 2664a1f7ef)

* Remove Entrypoint type

(cherry picked from commit 225bed11c7)

* Remove programs clone()

(cherry picked from commit 33884d847a)

* Add sol_log_compute_units syscall

(cherry picked from commit 66e51a7363)

* Add Bank::set_bpf_compute_budget()

(cherry picked from commit 7d686b72a0)

* Rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-30 07:47:17 +00:00
36b7c2ea97 Refactors the common code of test and bench targets into the solana_runtime::bpf_test_utils module. (#13203)
(cherry picked from commit 65ee3a6bdd)
2020-10-29 22:03:09 -07:00
24bd4ff6d4 clarify comment (#13289) (#13292)
(cherry picked from commit b5c8b86e7c)

Co-authored-by: Jack May <jack@solana.com>
2020-10-29 22:38:26 +00:00
69b3f10207 move Account to solana-sdk (bp #13198) (#13269)
* move Account to solana-sdk (#13198)

(cherry picked from commit c458d4b213)

# Conflicts:
#	programs/bpf/benches/bpf_loader.rs

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-29 19:16:52 +00:00
9922f09a1d adds more parallel processing to gossip packets handling (#12988) (#13282)
(cherry picked from commit 3738611f5c)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-29 16:47:37 +00:00
38a99c0c25 Disable eager rent collection for less noise (#13275) (#13280)
(cherry picked from commit 363c148dbe)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-29 15:49:02 +00:00
7031235714 excludes origin from prune set (#13204) (#13276)
On the receiving end, prune messages are ignored if the origin points to
the node itself:
https://github.com/solana-labs/solana/blob/631f029fe/core/src/crds_gossip_push.rs#L285-L295
So to avoid sending these over the wire, the requester can exclude
origin from the prune set.

(cherry picked from commit be80f6d5c5)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-29 14:19:06 +00:00
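
A small sketch of the filtering described above: when building the prune list to send back to a peer, that peer's own origin is dropped, since the receiver would ignore it anyway. `Pubkey` is reduced to a string stand-in for brevity:

```rust
type Pubkey = String; // stand-in for solana_sdk::pubkey::Pubkey

// Build the prune set to send to `destination`, excluding `destination` itself.
fn prune_set_for(origins: &[Pubkey], destination: &Pubkey) -> Vec<Pubkey> {
    origins
        .iter()
        .filter(|origin| *origin != destination) // never ask a node to prune itself
        .cloned()
        .collect()
}

fn main() {
    let origins = vec!["A".to_string(), "B".to_string(), "C".to_string()];
    let destination = "B".to_string();
    assert_eq!(
        prune_set_for(&origins, &destination),
        vec!["A".to_string(), "C".to_string()]
    );
}
```
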
dfb2356a9a Update FeatureSet::active to include slot-activated (#13256) (#13263)
* Update FeatureSet::active to include slot-activated

* Clippy suggestion

(cherry picked from commit c2dbf53d76)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-29 03:42:42 +00:00
010794806a Finer grained AccountsIndex locking (#12787) (#13240)
Co-authored-by: Carl Lin <carl@solana.com>

Co-authored-by: carllin <wumu727@gmail.com>
Co-authored-by: Carl Lin <carl@solana.com>
2020-10-28 23:46:54 +00:00
6f95d5f72a Update links from sdk to program (#13248) (#13249)
(cherry picked from commit db9ddc7e5b)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-28 21:32:12 +00:00
2720b939fd Calculate accounts hash async in accounts background service (#12852) (#13244)
(cherry picked from commit 456eae6ccb)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-28 21:20:07 +00:00
a25c3fcf7d Add doc page on sysvar accounts (#13237) (#13246)
* Add doc page on sysvar accounts

* Update with suggestions

(cherry picked from commit 664b6125b6)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-28 21:16:10 +00:00
7cc4810174 docs: Metrics update (bp #13239) (#13241)
* docs: Remove stale metrics steps

(cherry picked from commit 4dc4fefee2)

* docs: Reference metrics envvars for each cluster

(cherry picked from commit eb597cd60f)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-28 19:46:59 +00:00
c1a55bf249 Improve final report of ledger-tool capitalization (#13232) (#13236)
(cherry picked from commit 4698ee5e4a)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-28 18:45:19 +00:00
f19778b7d9 implements ping-pong packets between nodes (#12794) (#13234)
https://hackerone.com/reports/991106

> It’s possible to use UDP gossip protocol to amplify DDoS attacks. An attacker
> can spoof IP address in UDP packet when sending PullRequest to the node.
> There's no any validation if provided source IP address is not spoofed and
> the node can send much larger PullResponse to victim's IP. As I checked,
> PullRequest is about 290 bytes, while PullResponse is about 10 kB. It means
> that amplification is about 34x. This way an attacker can easily perform DDoS
> attack both on Solana node and third-party server.
>
> To prevent it, need for example to implement ping-pong mechanism similar as
> in Ethereum: Before accepting requests from remote client needs to validate
> his IP. Local node sends Ping packet to the remote node and it needs to reply
> with Pong packet that contains hash of matching Ping packet. Content of Ping
> packet is unpredictable. If hash from Pong packet matches, local node can
> remember IP where Ping packet was sent as correct and allow further
> communication.
>
> More info:
> https://github.com/ethereum/devp2p/blob/master/discv4.md#endpoint-proof
> https://github.com/ethereum/devp2p/blob/master/discv4.md#wire-protocol

The commit adds a PingCache, which maintains records of remote nodes
which have returned a valid response to a ping message, and on-the-fly
ping messages pending a pong response from the remote node.

When handling pull-requests, those from addresses which have not passed
the ping-pong check are filtered out, and additionally ping packets are
added for addresses which need to be (re)verified.

(cherry picked from commit ae91270961)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-28 18:36:28 +00:00
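
A heavily simplified sketch of the bookkeeping described above: a cache of addresses that have answered a ping, plus pending pings keyed by the hash the pong must echo. Token generation, expiry, and the real hashing are reduced to toy versions and do not reflect the actual `PingCache` API:

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::{HashMap, HashSet};
use std::hash::{Hash, Hasher};
use std::net::SocketAddr;

struct PingCache {
    verified: HashSet<SocketAddr>,     // peers that returned a valid pong
    pending: HashMap<u64, SocketAddr>, // hash of ping token -> peer we pinged
}

impl PingCache {
    fn new() -> Self {
        Self {
            verified: HashSet::new(),
            pending: HashMap::new(),
        }
    }

    // Record an outgoing ping carrying an unpredictable token; the pong must echo its hash.
    fn record_ping(&mut self, peer: SocketAddr, token: u64) {
        let mut hasher = DefaultHasher::new();
        token.hash(&mut hasher);
        self.pending.insert(hasher.finish(), peer);
    }

    // A pong is accepted only if it matches a pending ping for that exact peer.
    fn record_pong(&mut self, peer: SocketAddr, echoed_hash: u64) -> bool {
        if self.pending.remove(&echoed_hash) == Some(peer) {
            self.verified.insert(peer);
            true
        } else {
            false
        }
    }

    // Pull requests from addresses that never passed the check get filtered out.
    fn is_verified(&self, peer: &SocketAddr) -> bool {
        self.verified.contains(peer)
    }
}
```
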
eecdacac42 Don't hold dashmap write lock in store create (#13007) (#13230)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit c8fc0a6ba1)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-28 11:36:28 +00:00
429f130532 Switch accounts storage lock to DashMap (#12126) (#13223)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit f8d338c9cb)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-28 08:07:28 +00:00
19b9839dfc Use pico inflation for ledger-tool capitalization --enable-inflation (#13215) (#13222)
* Use pico inflation for ledger-tool capitalization --enable-inflation

* rust fmt

(cherry picked from commit 7d2962135d)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-28 07:40:21 +00:00
ad2bf3afa6 more portable install.sh (#13114) (#13220)
(cherry picked from commit 4e0d1b1d4a)

Co-authored-by: Jack May <jack@solana.com>
2020-10-28 06:45:32 +00:00
5c739ba236 Use zstd for create-snapshot (#13214) (#13218)
(cherry picked from commit 6d4c69b7c3)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-28 06:38:20 +00:00
9fac507606 Fix log (#13207) (#13211)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit f96ab5a818)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-28 03:21:01 +00:00
d5a37cb06e Parse vote instructions (#13202) (#13209)
(cherry picked from commit c4962af9eb)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-28 01:19:29 +00:00
86eb0157c0 Account for forward delay in transaction simulation (#13199) (#13201)
(cherry picked from commit 631f029fe9)

Co-authored-by: Justin Starry <justin@solana.com>
2020-10-27 18:35:43 +00:00
072dab0948 Fix pr crossing for sysvar keyed-accounts (#13189) (#13191)
(cherry picked from commit 26eba5ac7d)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 08:33:13 +00:00
e20e79f412 ignore .so files (#13188) (#13192)
(cherry picked from commit bb6ab3a62d)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 07:23:50 +00:00
f118db81ce check sysvar id for AccountInfo (#13175) (#13185)
(cherry picked from commit 322c667655)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 00:22:27 -07:00
4ecb78d303 Move KeyedAccount out of solana-program. Native programs are not supported by solana-program (bp #13159) (#13181)
* Move KeyedAccount out of solana-program.  Native programs are not supported by solana-program

(cherry picked from commit 1b343665a1)

# Conflicts:
#	programs/bpf/benches/bpf_loader.rs

* rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-27 05:43:14 +00:00
0a28e40606 fix .gitignore (#13177) (#13190)
(cherry picked from commit e3c0cc980b)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 05:26:59 +00:00
4d7a5a9daf macos portable rust-bpf (#13176) (#13187)
(cherry picked from commit fc83a666fc)

Co-authored-by: Jack May <jack@solana.com>
2020-10-27 04:51:14 +00:00
64cf6b4388 Add SSH key for buildkite-agent on achille (#13183)
(cherry picked from commit ff4b34202c)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-27 03:19:32 +00:00
f334c3b895 Add Bank::get_signature_status_with_blockhash() (#13167) (#13178)
Get the signature status in O(1) time, instead of O(n) where
n is the number of blockhashes in the StatusCache.

(cherry picked from commit f58bc8589d)

Co-authored-by: Greg Fitzgerald <greg@solana.com>
2020-10-27 01:29:16 +00:00
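
A sketch of why supplying the blockhash makes the lookup O(1): if statuses are stored in a map keyed first by blockhash, the caller can index straight into the right entry instead of scanning every blockhash. The types are simplified stand-ins, not the real `StatusCache`:

```rust
use std::collections::HashMap;

type Blockhash = String; // stand-in for solana_sdk::hash::Hash
type Signature = String; // stand-in for solana_sdk::signature::Signature
type Status = Result<(), String>;

struct StatusCache {
    // blockhash -> (signature -> status)
    statuses: HashMap<Blockhash, HashMap<Signature, Status>>,
}

impl StatusCache {
    // O(n) in the number of blockhashes: scan every entry until the signature is found.
    fn get_signature_status(&self, signature: &Signature) -> Option<&Status> {
        self.statuses.values().find_map(|map| map.get(signature))
    }

    // O(1): the caller already knows which recent blockhash the transaction used.
    fn get_signature_status_with_blockhash(
        &self,
        signature: &Signature,
        blockhash: &Blockhash,
    ) -> Option<&Status> {
        self.statuses.get(blockhash)?.get(signature)
    }
}
```
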
15a7bcd4fe Delete .lib.rs.swo 2020-10-26 16:01:43 -07:00
8d6636d02a CLI: Surface deploy transaction errors (#13170)
(cherry picked from commit a82971879f)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-26 22:43:32 +00:00
cf896dbeee Use bank timestamp to populate Blockstore::blocktime_cf when correction active (#13158) (#13160)
(cherry picked from commit 39686ef098)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-26 20:34:15 +00:00
e5b60b75f8 Docs: Testnet has a faucet now (#13165)
(cherry picked from commit 8b1638f026)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-26 20:28:59 +00:00
0e155fdbd9 update call depth docs (#13155) (#13162)
(cherry picked from commit 35f77ccc73)

Co-authored-by: Jack May <jack@solana.com>
2020-10-26 19:58:55 +00:00
b79a337ddd Don't reuse BPF target build artifacts
(cherry picked from commit 41a56e14fc)
2020-10-26 12:01:38 -07:00
c4050f541d Fix reward type encoding
(cherry picked from commit 0a89bb4d3c)
2020-10-26 12:01:38 -07:00
f0b74a4ecf marks pull request creation time only once per peer (#13113) (#13156)
mark_pull_request_creation_time requires an exclusive lock on gossip:
https://github.com/solana-labs/solana/blob/16944e218/core/src/cluster_info.rs#L1547-L1548
The current code redundantly marks each peer once for each request.
There are at most only 2 unique peers, whereas there are hundreds of
requests for each, so the lock is acquired hundreds of times longer than
necessary.

(cherry picked from commit 4bfda3e766)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-26 18:27:26 +00:00
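
A sketch of the dedup described above: collect the unique peers first, then take the exclusive gossip lock and mark each peer once, rather than once per request. The gossip structure and the mark call are placeholders:

```rust
use std::collections::HashSet;
use std::sync::RwLock;

type Pubkey = String; // stand-in

struct CrdsGossip;

impl CrdsGossip {
    fn mark_pull_request_creation_time(&mut self, _peer: &Pubkey, _now: u64) {
        // placeholder for the real bookkeeping
    }
}

fn mark_peers(gossip: &RwLock<CrdsGossip>, requests: &[(Pubkey, Vec<u8>)], now: u64) {
    // Hundreds of requests, but typically only a couple of unique peers:
    let peers: HashSet<&Pubkey> = requests.iter().map(|(peer, _)| peer).collect();
    let mut guard = gossip.write().unwrap(); // exclusive lock
    for peer in peers {
        guard.mark_pull_request_creation_time(peer, now); // once per unique peer
    }
}
```
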
f7979378fd Fix test_optimistic_confirmation_violation_without_tower() (#13043) (#13145)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit dd6cccaf7e)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-26 06:33:20 +00:00
d7c5607982 Hide noisy specialization warnings for frozen abi (#13141) (#13144)
(cherry picked from commit 5caf81dbf8)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-26 06:19:58 +00:00
91ab5ae990 Remove program feature from SPL builds 2020-10-25 21:08:53 -07:00
605e767259 Allow existence of vote on root in saved tower (#13135) (#13139)
(cherry picked from commit 66c7a98009)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-26 12:34:58 +09:00
597618846b Bump version to v1.4.4 2020-10-24 22:10:32 +00:00
712267bf51 Rename "everything" feature to "full"
(cherry picked from commit 0cc9c94c43)
2020-10-24 13:21:11 -07:00
eb9cef0cd4 Separate the "program" feature of solana-sdk into a new crate called solana-program (bp #12989) (#13131)
* Add solana-program-sdk boilerplate

(cherry picked from commit 3718771ffb)

# Conflicts:
#	sdk/Cargo.toml

* Initial population of solana-program-sdk

(cherry picked from commit 63db324204)

# Conflicts:
#	Cargo.lock

* Port programs to solana-program-sdk

(cherry picked from commit fe68f7f786)

# Conflicts:
#	programs/bpf/Cargo.lock
#	programs/bpf/rust/128bit/Cargo.toml
#	programs/bpf/rust/128bit_dep/Cargo.toml
#	programs/bpf/rust/alloc/Cargo.toml
#	programs/bpf/rust/call_depth/Cargo.toml
#	programs/bpf/rust/custom_heap/Cargo.toml
#	programs/bpf/rust/dep_crate/Cargo.toml
#	programs/bpf/rust/deprecated_loader/Cargo.toml
#	programs/bpf/rust/dup_accounts/Cargo.toml
#	programs/bpf/rust/error_handling/Cargo.toml
#	programs/bpf/rust/external_spend/Cargo.toml
#	programs/bpf/rust/instruction_introspection/Cargo.toml
#	programs/bpf/rust/invoke/Cargo.toml
#	programs/bpf/rust/invoked/Cargo.toml
#	programs/bpf/rust/iter/Cargo.toml
#	programs/bpf/rust/many_args/Cargo.toml
#	programs/bpf/rust/many_args_dep/Cargo.toml
#	programs/bpf/rust/noop/Cargo.toml
#	programs/bpf/rust/panic/Cargo.toml
#	programs/bpf/rust/param_passing/Cargo.toml
#	programs/bpf/rust/param_passing_dep/Cargo.toml
#	programs/bpf/rust/rand/Cargo.toml
#	programs/bpf/rust/ristretto/Cargo.toml
#	programs/bpf/rust/sanity/Cargo.toml
#	programs/bpf/rust/sha256/Cargo.toml
#	programs/bpf/rust/sysval/Cargo.toml

* Only activate legacy program feature for the solana-sdk crate

(cherry picked from commit 85c51f5787)

* Run serum-dex unit tests

(cherry picked from commit 92ce381d60)

* Rename solana-program-sdk to solana-program

(cherry picked from commit dd711ab5fb)

# Conflicts:
#	programs/bpf/rust/128bit/Cargo.toml
#	programs/bpf/rust/128bit_dep/Cargo.toml
#	programs/bpf/rust/alloc/Cargo.toml
#	programs/bpf/rust/call_depth/Cargo.toml
#	programs/bpf/rust/custom_heap/Cargo.toml
#	programs/bpf/rust/dep_crate/Cargo.toml
#	programs/bpf/rust/deprecated_loader/Cargo.toml
#	programs/bpf/rust/dup_accounts/Cargo.toml
#	programs/bpf/rust/error_handling/Cargo.toml
#	programs/bpf/rust/external_spend/Cargo.toml
#	programs/bpf/rust/instruction_introspection/Cargo.toml
#	programs/bpf/rust/invoke/Cargo.toml
#	programs/bpf/rust/invoked/Cargo.toml
#	programs/bpf/rust/iter/Cargo.toml
#	programs/bpf/rust/many_args/Cargo.toml
#	programs/bpf/rust/many_args_dep/Cargo.toml
#	programs/bpf/rust/noop/Cargo.toml
#	programs/bpf/rust/panic/Cargo.toml
#	programs/bpf/rust/param_passing/Cargo.toml
#	programs/bpf/rust/param_passing_dep/Cargo.toml
#	programs/bpf/rust/rand/Cargo.toml
#	programs/bpf/rust/ristretto/Cargo.toml
#	programs/bpf/rust/sanity/Cargo.toml
#	programs/bpf/rust/sha256/Cargo.toml
#	programs/bpf/rust/sysval/Cargo.toml

* Update frozen_abi hashes

The movement of files in sdk/ caused ABI hashes to change

(cherry picked from commit a4956844bd)

* Resolve merge conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-24 17:25:22 +00:00
62e0e19961 add precompile verification to simulate_transaction (#13080) (#13126)
(cherry picked from commit 766406fd23)

Co-authored-by: Josh <josh.hundley@gmail.com>
2020-10-24 05:02:41 +00:00
9aee9cb867 Clean up opt conf verifier and vote state tracker (#13081) (#13124)
* Clean up opt conf verifier and vote state tracker

* Update test to follow new message and some knob

* Rename

(cherry picked from commit 0264147d42)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-24 04:12:02 +00:00
2b11558b36 Shorten magic install URL (#13122)
(cherry picked from commit b5170b993e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-23 23:28:19 +00:00
18c4e1b023 ci: Add downstream project build testing (bp #13112) (#13119)
* Use local cargo for CI

(cherry picked from commit c7c50bd32c)

* Add downstream project build testing

(cherry picked from commit c7f4f15e60)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-23 22:13:51 +00:00
6bac44ed92 Move bpf sdk packaging from publish-tarball to cargo-install-all (#13117)
(cherry picked from commit 965ea97b56)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-23 21:39:41 +00:00
8cb622084f Cli: deploy programs via TPU (#13090) (#13111)
* Deploy: send write transactions to leader tpu

* Less apparent stalling during confirmation

* Add EpochInfo mock

* Only get cluster nodes once

* Send deploy writes to next leader

(cherry picked from commit 16944e218f)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-23 17:29:41 +00:00
38f7e9a979 shrink debug (#13089) (#13109)
(cherry picked from commit 7d2729f6bd)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-23 17:02:00 +00:00
a536f779ee scans crds table in parallel for finding old labels (#13073) (#13107)
From runtime profiles, the majority of the time in ClusterInfo::handle_purge
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/cluster_info.rs#L1605-L1626
is spent scanning the crds table for old labels:
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/crds.rs#L175-L197

This can be done in parallel, given that the gossip thread-pool:
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/cluster_info.rs#L1637-L1641
is idle when handle_purge is invoked:
https://github.com/solana-labs/solana/blob/0776fa05c/core/src/cluster_info.rs#L1681

(cherry picked from commit 37c8842bcb)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-23 15:36:06 +00:00
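
A rough sketch of the parallel scan described above, using rayon to find entries whose wallclock is older than a cutoff; the table layout and label type are simplified placeholders rather than the real crds structures:

```rust
use rayon::prelude::*;
use std::collections::HashMap;

type Label = String; // stand-in for CrdsValueLabel

// Find labels that have not been updated since `cutoff_ms`, scanning the table in parallel.
fn find_old_labels(table: &HashMap<Label, u64 /* wallclock ms */>, cutoff_ms: u64) -> Vec<Label> {
    table
        .par_iter() // rayon parallel iterator over the map entries
        .filter(|&(_, &wallclock)| wallclock < cutoff_ms)
        .map(|(label, _)| label.clone())
        .collect()
}
```
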
84a5e5ec97 Remove spammy invalid rpc log (#13100) (#13102)
(cherry picked from commit c95f6c4b83)

Co-authored-by: Justin Starry <justin@solana.com>
2020-10-23 08:32:46 +00:00
dd33aae3cf Add --bpf-out-dir argument to control where the final build products land (#13099)
(cherry picked from commit b169d9cfbe)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-23 07:56:35 +00:00
be2ace47e3 Add deploy err if program-account balance is too high (#13091) (#13098)
* Add deploy err if program-account balance is too high

* Review comments

* Add system-program check

* Rename and unhide flag

(cherry picked from commit 4669fa0f98)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-23 07:02:16 +00:00
53b074aa35 Bump version to 1.4.3 2020-10-23 04:20:28 +00:00
a4ad2925a2 Allow nodes to advertise a different rpc address over gossip (#13053) (#13078)
* Allow nodes to advertise a different rpc address over gossip

* Feedback

(cherry picked from commit 8b0242a5d8)

Co-authored-by: Justin Starry <justin@solana.com>
2020-10-22 07:06:27 +00:00
edfbd8d65a Add replacements for Pubkey::new_rand()/Hash::new_rand() (bp #12987) (#13076)
* Add pubkey_new_rand(), mark Pubkey::new_rand() deprecated

(cherry picked from commit 0e68ed6a8d)

* Add hash_new_rand(), mark Hash::new_rand() as deprecated

(cherry picked from commit 76f11c7dae)

* Run `codemod --extensions rs Pubkey::new_rand solana_sdk::pubkey::new_rand`

(cherry picked from commit 7bc073defe)

# Conflicts:
#	programs/bpf/benches/bpf_loader.rs
#	runtime/benches/accounts.rs
#	runtime/src/accounts.rs

* Run `codemod --extensions rs Hash::new_rand solana_sdk::hash::new_rand`

(cherry picked from commit 17c391121a)

* Remove unused pubkey::Pubkey imports

(cherry picked from commit 959880db60)

# Conflicts:
#	runtime/src/accounts_index.rs

* Resolve conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-22 05:08:01 +00:00
e0ae54fd7e Add cargo-build-bpf (bp #13040) (#13075)
* Add cargo-build-bpf

(cherry picked from commit 07a853d6cc)

* Remove do.sh

(cherry picked from commit 61be155413)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-22 03:09:52 +00:00
60297951ec CLI: Print address ephemeral keypair seed phrase to stderr on deploy failure (bp #13046) (#13055)
* CLI: Print address ephemeral keypair seed phrase to stderr on deploy failure

(cherry picked from commit 2905ccc7ec)

# Conflicts:
#	cli/Cargo.toml

* Fix conflicts

Co-authored-by: Trent Nelson <trent@solana.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-10-21 15:29:56 -06:00
e0f9f72a2c RPC: Don't send base64 TXs to old clusters (#13072)
Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-21 20:25:36 +00:00
5236acf4b0 Add ledger-tool dead-slots and improve purge a lot (#13065) (#13071)
* Add ledger-tool dead-slots and improve purge a lot

* Reduce batch size...

* Add --dead-slots-only and fixed purge ordering

(cherry picked from commit 0776fa05c7)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-21 18:57:22 +00:00
5dd61b5db2 Port various rent fixes to runtime feature (#12842) (#13068)
* Port various rent fixes to runtime feature

* Fix CI

* Use more consistent naming...

(cherry picked from commit 608b81b412)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-21 14:47:48 +00:00
8752bf0826 Skip 'Stake by Feature Set' output when showing status of a single feature (#13052)
(cherry picked from commit ad65d4785e)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-21 07:14:49 +00:00
b1712e80ec Parse stake and system instructions (#13035) (#13045)
* Fix token account check

* Add helper to check num accounts

* Add parse_stake

* Add parse_system

* Fix AuthorizeNonce docs

* Remove jsonParsed unstable markers

* Clippy

(cherry picked from commit 46d0019955)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-21 04:35:05 +00:00
2fe1a4677c Ignore more paths in increment-cargo-version.sh
(cherry picked from commit c1c69ecc34)
2020-10-20 20:55:34 -07:00
f76c128f4f Various clean-ups before assert adjustment (#13006) (#13041)
* Various clean-ups before assert adjustment

* oops

(cherry picked from commit efdb560e97)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-21 02:41:20 +00:00
b143b9c3c2 Remove frozen ABI modules from solana-sdk (bp #13008) (#13036)
* Remove frozen ABI modules from solana-sdk

(cherry picked from commit 6858950f76)

# Conflicts:
#	Cargo.lock
#	core/Cargo.toml
#	frozen-abi/macro/Cargo.toml
#	programs/bpf/Cargo.lock
#	programs/stake/Cargo.toml
#	programs/vote/Cargo.toml
#	runtime/Cargo.toml
#	sdk/Cargo.toml
#	version/Cargo.toml

* rebase

* fix broken ci (#13039)

Co-authored-by: Michael Vines <mvines@gmail.com>
Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-21 02:00:45 +00:00
b4178b75e7 Add --eval flag to solana-install info (#13038)
(cherry picked from commit 6f930351d2)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-21 01:12:48 +00:00
c54b751df7 Include sdk/bpf in the main release tarball
(cherry picked from commit f71677164f)
2020-10-20 16:25:04 -07:00
0fde9e893f Force unset CARGO to use correct version of cargo (#13027) (#13034)
(cherry picked from commit 81d0c8ae7f)

Co-authored-by: Jon Cinque <jon.cinque@gmail.com>
2020-10-20 23:06:57 +00:00
d24abbdac9 Fix secp256k1 instruction indexing and add tests (#13026) (#13032)
(cherry picked from commit 83c53ae4b5)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-20 22:25:59 +00:00
3b03985f28 Remove unsupported metrics tarball from release artifacts
(cherry picked from commit 62f20bc170)
2020-10-20 13:16:35 -07:00
d05bfa08c7 improves threads' utilization in processing gossip packets (#12962) (#13023)
ClusterInfo::process_packets handles incoming packets in a thread_pool:
https://github.com/solana-labs/solana/blob/87311cce7/core/src/cluster_info.rs#L2118-L2134

However, runtime profiling shows that threads are not well utilized and
a lot of the processing is done sequentially.

This commit redistributes the work done in parallel. Testing on a gce
cluster shows 20%+ improvement in processing gossip packets with much
smaller variations.

(cherry picked from commit 75d62ca095)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-20 19:59:35 +00:00
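
One way to picture the redistribution mentioned above: instead of walking the whole batch on a single thread, split it into roughly equal chunks and hand each chunk to the thread pool. This is only a schematic sketch, not the actual `process_packets` change:

```rust
use rayon::prelude::*;

// Placeholder for the real per-packet work (sanitization, crds updates, ...).
fn process_packet(packet: &[u8]) {
    let _ = packet.len();
}

fn process_packets(packets: &[Vec<u8>], num_threads: usize) {
    // Split the batch into roughly equal chunks so every worker thread gets a share,
    // instead of most of the batch being handled sequentially on one thread.
    let chunk_size = (packets.len() / num_threads.max(1)).max(1);
    packets
        .par_chunks(chunk_size)
        .for_each(|chunk| chunk.iter().for_each(|packet| process_packet(packet)));
}
```
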
9da2ac7a44 passes through feature-set to gossip requests handling (#12878) (#12991)
* passes through feature-set down to gossip requests handling
* takes the feature-set from root_bank instead of working_bank

(cherry picked from commit 48283161c3)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-20 18:25:44 +00:00
9e95d0fb58 Add more info for --limit-ledger-size (#13021)
(cherry picked from commit de04a208c7)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-20 17:50:11 +00:00
94cad9873c Support Debug Bank (#13017) (#13019)
(cherry picked from commit c0675968b1)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-20 17:20:40 +00:00
f33171b32f Remove errant print 2020-10-20 09:02:51 -06:00
aa6406f263 implements DataBudget using atomics (#12856) (#12990)
(cherry picked from commit 05cf15a382)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
2020-10-20 12:51:42 +00:00
77864a6bee Parse bpf loader instructions (#12998) (#13005)
* Add parsing for BpfLoader2 instructions

* Skip info if null

* Return account address in info map

(cherry picked from commit 942e4273ba)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-20 06:25:56 +00:00
b51715d33c validator: Activate RPC before halting on slot (#13002)
(cherry picked from commit 3b3f7341fa)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-20 03:31:04 +00:00
7d395177d4 Add everything feature (#12999)
(cherry picked from commit c5e16383b0)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-20 01:52:21 +00:00
77ba6d6784 sdk: Add SyscallStubs to enable syscall interception when building programs for non-BPF (bp #12984) (#12993)
* Add SyscallStubs to enable syscall interception when building programs for non-BPF

(cherry picked from commit 9c53e1dfb2)

* Remove program_stubs!()

(cherry picked from commit 6d5889bdb5)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-19 22:28:44 +00:00
4bf0a54ed7 Revert "CLI: Put deploy ephemeral keypair behind a flag (#12942)" (#12982)
This reverts commit 8cac6835c0.
2020-10-19 17:41:10 +00:00
8a526f2f53 Follow up to persistent tower with tests and API cleaning (#12350) (#12972)
* Follow up to persistent tower

* Ignore for now...

* Hard-code validator identities for easy reasoning

* Add a test for opt. conf violation without tower

* Fix compile with rust < 1.47

* Remove unused method

* Move more of the assert tweaks to the assert PR

* Add comments

* Clean up

* Clean the test addressing various review comments

* Clean up a bit

(cherry picked from commit 54517ea454)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-19 08:47:58 +00:00
43f99bdb31 Improve vote-account "Recent Timestamp" output (#12971)
(cherry picked from commit 2cc3d7511a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-19 07:30:29 +00:00
0008dc62e4 Fix zero-lamport accounts preventing slot cleanup (#12606) (#12969)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit 16d45b8480)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-19 07:07:08 +00:00
7e8174fb79 Minor doc typo (#12966)
(cherry picked from commit 6123d71489)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-19 03:23:24 +00:00
4ad2ebcde9 Mention monitoring and updating for exchanges (#12953) (#12959)
* Mention monitoring and updating for exchanges

* Fix link syntax...

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>

* Apply suggestions from code review

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>

* More review comments and word-wrapping

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit 87311cce7f)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-17 06:49:59 +00:00
da183d655a keygen: add more mnemonic language support (#12944) (#12957)
(cherry picked from commit 4451042c76)

Co-authored-by: guanqun <guanqun.lu@gmail.com>
2020-10-17 04:00:29 +00:00
2e449276be Check payer balance for program account rent as needed (#12952) (#12955)
(cherry picked from commit b6bfed64cb)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-16 19:14:26 +00:00
8cac6835c0 CLI: Put deploy ephemeral keypair behind a flag (#12942)
(cherry picked from commit 5a5b7f39c1)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-16 16:56:50 +00:00
677c184e47 Another some tower logging improvements (#12940) (#12943)
(cherry picked from commit fd8ec27fe8)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-16 07:00:27 +00:00
f36cfb92f7 Convert Blockstore Rewards cf to protobuf (bp #12860) (#12935)
* Convert Blockstore Rewards cf to protobuf (#12860)

* Add Blockstore protobuf cf type

* Add Rewards message to proto and make generated pub

* Convert Rewards cf to ProtobufColumn

* Add bench

* Adjust tags

* Move solana proto definitions and conversion methods to new crate

(cherry picked from commit 359707c85e)

# Conflicts:
#	Cargo.lock
#	ledger/Cargo.toml
#	storage-bigtable/Cargo.toml

* v1.4-ify

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
Co-authored-by: Tyera Eulberg <tyera@solana.com>
2020-10-16 01:27:37 +00:00
e7062de05f Report compute budget usage (#12931) (#12934)
(cherry picked from commit b510474dcb)

Co-authored-by: Jack May <jack@solana.com>
2020-10-16 00:05:18 +00:00
a443e2e773 Update get-block method in get_confirmed_transaction (#12923) (#12930)
* Update get-block method in get_confirmed_transaction

* Remove superfluous into()

(cherry picked from commit 42943ab86d)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-15 21:38:38 +00:00
3a6db787e2 Support arbitrary toolchains with cargo wrapper script (#12926)
(cherry picked from commit 99aecdaf65)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 20:26:35 +00:00
f3c986385f Bump version to 1.4.2 2020-10-15 20:26:29 +00:00
3df811348f program log pubkey as base58 (bp #12901) (#12911)
* program log pubkey as base58 (#12901)

(cherry picked from commit 3f9e6a600b)

# Conflicts:
#	programs/bpf/benches/bpf_loader.rs
#	programs/bpf/c/src/tuner/tuner.c

* resolve conflicts

* fix bench conflict

Co-authored-by: Jack May <jack@solana.com>
2020-10-15 19:40:20 +00:00
e8c86ed3e5 Drop 'Pubkey' in 'solana validators' header (#12919)
(cherry picked from commit 3073dc9801)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-15 19:02:10 +00:00
489a7bb576 Bump spl-memo and spl-token versions (#12914) 2020-10-15 18:05:41 +00:00
688dd85e61 Release: Use pinned cargo version to install spl-token-cli (#12916)
(cherry picked from commit bb2f0df9e1)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 17:59:24 +00:00
fe54a30084 Docs: Clarify validator disk requirements (#12921)
(cherry picked from commit cc0781e0ac)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 17:56:46 +00:00
80942841a2 Surface 'Program account allocation failed' error details (#12904)
(cherry picked from commit eec3d25ab9)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-15 16:22:12 +00:00
d2808a8e29 docs: Rework JSON RPC curl examples to be more readable (bp #12893) (#12899)
* Rework curl examples to be more readable

(cherry picked from commit f0d0bdc572)

# Conflicts:
#	docs/src/apps/jsonrpc-api.md

* rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-15 15:47:49 +00:00
f8413a28b5 Better tower logs for SwitchForkDecision and etc (#12875) (#12905)
* Better tower logs for SwitchForkDecision and etc

* nits

* Update comment

(cherry picked from commit a44e4d386f)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-15 10:41:03 +00:00
bc96332899 Respect RefCell when calling invoke (#12858) (#12891)
* Respect RefCell when calling invoke

* nudge

(cherry picked from commit 969f7b015b)

Co-authored-by: Jack May <jack@solana.com>
2020-10-15 02:15:36 +00:00
ceeeb3c9dd Change developer CTA (#12857) (#12892)
* change `index.js`

(cherry picked from commit 9e7fad1fd2)

Co-authored-by: R. M. Shea <8948187+rmshea@users.noreply.github.com>
2020-10-15 01:45:06 +00:00
bd058ec8f1 Release: Include SPL Token in release tarballs (#12889)
(cherry picked from commit f70762913c)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-15 01:32:34 +00:00
4b5ac44fc8 RPC: Add metrics for TX encoding (#12880)
(cherry picked from commit c26512255d)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-14 19:39:08 +00:00
fef979f0e5 Don't report RewardType::Fee when none was awarded (#12877)
(cherry picked from commit 4b04ed86b6)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-14 17:15:41 +00:00
cca2cdf39b Expose program error constants (#12861) (#12871)
(cherry picked from commit d4e953277e)

Co-authored-by: Jack May <jack@solana.com>
2020-10-14 08:48:50 +00:00
6e91996606 Bump version to 1.4.1 2020-10-14 03:05:04 +00:00
99be00d61f Add separate push queue to reduce push lock contention (#12713) (#12867)
(cherry picked from commit 1f1eb9f26e)

Co-authored-by: sakridge <sakridge@gmail.com>
2020-10-14 02:46:13 +00:00
68f808026e Add log_messages to proto file (#12859) (#12863)
(cherry picked from commit 67ed44c007)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-14 01:50:55 +00:00
0c7ab0a1bb Update programming-faq.md (#12864) (#12865)
Fix typo

(cherry picked from commit b8f03c9b0f)

Co-authored-by: kemargrant <kemargrant@gmail.com>
2020-10-14 01:15:52 +00:00
3d8ccbc079 terminology update, nonce to bump seed (#12840) (#12851)
(cherry picked from commit 56211378d3)

Co-authored-by: Jack May <jack@solana.com>
2020-10-13 18:31:48 +00:00
275d096a46 solana vote-account/solana stake-account now works with RPC servers without --enable-rpc-transaction-history (bp #12826) (#12849)
* Implementation-defined RPC server errors are now accessible to client/ users

(cherry picked from commit 247228ee61)

* Cleanly handle RPC servers that don't have --enable-rpc-transaction-history enabled

(cherry picked from commit 14d793b22c)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 18:28:15 +00:00
6d70a06b23 Add nop feature set for upcoming ported rent fixes (#12841) (#12847)
(cherry picked from commit 7de7efe96c)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-13 17:47:59 +00:00
7e68b2e1bd Add transaction log messages to |solana confirm -v| output (#12836)
(cherry picked from commit e9dbbdeb81)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 06:57:21 +00:00
f0d761630e get_vote_accounts: access HashMap directly instead of turning it into an iterator (#12829)
(cherry picked from commit 649fe6d3b6)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 05:25:31 +00:00
1986927eb6 Check ELF file for errors before deploy (bp #12741) (#12801)
* Check ELF file for errors before deploy (#12741)

* Check ELF file for errors before deploy

* Update cli/src/cli.rs

Co-authored-by: Michael Vines <mvines@gmail.com>

* Fix formatting

* Bump solana_rbpf

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit 6bbf6a79b7)

# Conflicts:
#	cli/Cargo.toml

* rebase

Co-authored-by: Alexandre Esteves <2335822+alexfmpe@users.noreply.github.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 04:41:51 +00:00
9a0ea61007 Add docs on vote account key rotation (bp #12815) (#12831)
* Add docs on vote account key rotation

(cherry picked from commit 253114ca20)

* Update docs/src/running-validator/vote-accounts.md

Co-authored-by: Trent Nelson <trent.a.b.nelson@gmail.com>
(cherry picked from commit d83027c0cd)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-13 04:32:03 +00:00
51a70e52f2 CI: Fix crate publication (#12825)
(cherry picked from commit c38021502e)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-12 23:15:46 +00:00
9797c93db3 fix native_loader behavior for invalid accounts (#12814) (#12819)
(cherry picked from commit c24da1ee16)

Co-authored-by: Jack May <jack@solana.com>
2020-10-12 22:14:56 +00:00
9598114658 Use latest stable channel release if there's no beta release (#12823)
(cherry picked from commit 65213a1782)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-12 22:12:16 +00:00
d3ef061044 RpcClient: Encode TXs as base64 by default (#12817)
(cherry picked from commit efbe37ba20)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-12 21:56:18 +00:00
1f102d2617 Move no-0-rent rent dist. behavior under feature (#12804) (#12811)
(cherry picked from commit 2f5bb7e507)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-12 09:57:32 +00:00
5e97bd3d8a simulate_transaction_with_config() now passes full config to server (#12803)
(cherry picked from commit b3c2752bb0)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-12 05:58:30 +00:00
ed06e8b85d Pacify cargo audit (bp #12797) (#12799)
* Bump reqwest/rayon to get past `cargo audit`

(cherry picked from commit 8a119c1483)

# Conflicts:
#	dos/Cargo.toml
#	download-utils/Cargo.toml
#	metrics/Cargo.toml

* Switch to tempfile

(cherry picked from commit d3b0f87a49)

* Rework cargo audit ignores

(cherry picked from commit 2301dcf973)

* Cargo.lock

(cherry picked from commit 859eb606da)

* rebase

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-12 04:29:55 +00:00
10b9225edb Don't bother paying 0 rent (#12793)
(cherry picked from commit 1fc7c1ecee)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-10 18:12:14 +00:00
b1b5ddd2b9 Update gossip entrypoints 2020-10-10 08:39:38 -07:00
6b9b107ead Fix various ledger-tool errors due to no builtins (bp #12759) (#12766)
* Fix various ledger-tool errors due to no builtins (#12759)

* Fix various ledger-tool errors due to no builtins

* Add missing file...

(cherry picked from commit 1f4bcf70b0)

# Conflicts:
#	core/Cargo.toml
#	ledger/Cargo.toml

* Rebase

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-10 07:10:38 +00:00
3fef98fd1e Expose all rewards (fees, rent, voting and staking) in RPC getConfirmedBlock and the cli (bp #12768) (#12790)
* Expose all rewards (fees, rent, voting and staking) in RPC getConfirmedBlock and the cli

(cherry picked from commit c5c8da1ac0)

# Conflicts:
#	Cargo.lock
#	transaction-status/Cargo.toml

* fix: surface full block rewards type

(cherry picked from commit 1b16790325)

* resolve conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-10 06:11:42 +00:00
e999823b4b document program address collisions (#12774)
(cherry picked from commit 9ac8db3533)
2020-10-09 22:35:47 -07:00
1e46a5b147 Fix typo (#12780) (#12784)
(cherry picked from commit 5800217998)

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-10 02:09:09 +00:00
567a1cb944 Correct Bank timestamp drift every slot (#12737) (#12777)
* Move timestamp helper to sdk

* Add Bank method for getting timestamp estimate

* Return sysvar info from Bank::clock

* Add feature-gated timestamp correction

* Rename unix_timestamp method to be more descriptive

* Review comments

* Add timestamp metric

(cherry picked from commit b028c47d2b)

# Conflicts:
#	runtime/src/feature_set.rs

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-10-10 00:42:44 +00:00
2996cebfaa Add convenience script for working in stability branches (#12765) (#12773)
* Add convenience script for working in stability branches

* Update scripts/curgo.sh

Co-authored-by: Michael Vines <mvines@gmail.com>

* re{locate,name} to /cargo

Co-authored-by: Michael Vines <mvines@gmail.com>
(cherry picked from commit ed95071c27)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-09 23:02:21 +00:00
7a1889aaf9 Add adjustable stack size and call depth (bp #12728) (#12770)
* Add adjustable stack size and call depth (#12728)

(cherry picked from commit c3907be623)

# Conflicts:
#	programs/bpf/Cargo.toml
#	programs/bpf_loader/Cargo.toml

* resolve conflicts

Co-authored-by: Jack May <jack@solana.com>
2020-10-09 22:08:01 +00:00
9188153b7d Fix fee mismatch on snapshot deserialize (#12697) (#12754)
Co-authored-by: Carl Lin <carl@solana.com>
(cherry picked from commit c879e7c1ad)

Co-authored-by: carllin <wumu727@gmail.com>
2020-10-09 20:21:50 +00:00
4b9f2e987a Bump max invoke depth to 4 (#12742) (#12764)
(cherry picked from commit 2cd7cd3149)

Co-authored-by: Jack May <jack@solana.com>
2020-10-09 18:49:44 +00:00
bb5c76483a Advise setting --wal-recovery-mode, and using --private-rpc for mainnet-beta (#12761)
(cherry picked from commit 3fedcdc6bc)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-09 15:56:56 +00:00
aafbb251b9 Only fetch snapshot if it's newer than local (#12663) (#12752)
* Only fetch snapshot if it's newer than local

* Prefer as_ref over clone

* More nits

* Don't wait forever for newer snapshot

(cherry picked from commit 81489ccb76)

Co-authored-by: Ryo Onodera <ryoqun@gmail.com>
2020-10-09 07:15:34 +00:00
dd32540ceb Add inflation_kill_switch feature (#12749)
(cherry picked from commit c8807d227a)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-09 05:55:51 +00:00
e1a9cbaf3c Add new internal accounts (#12740) (#12747)
Co-authored-by: publish-docs.sh <maintainers@solana.com>
(cherry picked from commit 2c5f83c264)

Co-authored-by: Dan Albert <dan@solana.com>
2020-10-09 02:19:55 +00:00
83740246fc Minor variable name cleanup (#12745)
(cherry picked from commit 3a04026599)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-09 02:12:28 +00:00
7a53ca18a6 Store program logs in blockstore / bigtable (TransactionWithStatusMeta) (#12678) (#12735)
* introduce store program logs in blockstore / bigtable

* fix test, transaction logs created for successful transactions

* fix test for legacy bincode implementation around log_messages

* only api nodes should record logs

* truncate transaction logs to 100KB

* refactor log truncate for improved coverage

(cherry picked from commit 8f5431551e)

Co-authored-by: Josh <josh.hundley@gmail.com>
2020-10-08 20:19:26 +00:00
c1a8637cb5 Support multiple connected HW wallets configured with the same seed phrase (bp #12716) (#12720)
* remote-wallet: Select hardware wallets based on host device path

(cherry picked from commit 8e3353d9ef)

* remote-wallet: Append wallet "name" to entries in selector UI

(cherry picked from commit f1a2ad1b7d)

Co-authored-by: Trent Nelson <trent@solana.com>
2020-10-08 05:32:48 +00:00
d6831309cd Revert "Restore --expected-shred-version argument for mainnet-beta" (#12723)
This reverts commit 9410eab2af.

(cherry picked from commit dadc84fa8c)

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-10-08 05:00:44 +00:00
519 changed files with 26724 additions and 9039 deletions

View File

@ -31,4 +31,9 @@ export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
mkdir -p "$CARGO_TARGET_CACHE"/target mkdir -p "$CARGO_TARGET_CACHE"/target
rsync -a --delete --link-dest="$CARGO_TARGET_CACHE" "$CARGO_TARGET_CACHE"/target . rsync -a --delete --link-dest="$CARGO_TARGET_CACHE" "$CARGO_TARGET_CACHE"/target .
# Don't reuse BPF target build artifacts due to incremental build issues with
# `std:
# "found possibly newer version of crate `std` which `xyz` depends on
rm -rf target/bpfel-unknown-unknown
) )

View File

@ -34,6 +34,8 @@ jobs:
- stable - stable
install: install:
- source ci/rust-version.sh - source ci/rust-version.sh
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
- readlink -f .
script: script:
- source ci/env.sh - source ci/env.sh
- ci/publish-tarball.sh - ci/publish-tarball.sh

Cargo.lock (generated, 1386 changed lines)

File diff suppressed because it is too large.

View File

@ -16,6 +16,7 @@ members = [
"dos", "dos",
"download-utils", "download-utils",
"faucet", "faucet",
"frozen-abi",
"perf", "perf",
"validator", "validator",
"genesis", "genesis",
@ -30,12 +31,14 @@ members = [
"merkle-tree", "merkle-tree",
"stake-o-matic", "stake-o-matic",
"storage-bigtable", "storage-bigtable",
"storage-proto",
"streamer", "streamer",
"measure", "measure",
"metrics", "metrics",
"net-shaper", "net-shaper",
"notifier", "notifier",
"poh-bench", "poh-bench",
"program-test",
"programs/secp256k1", "programs/secp256k1",
"programs/bpf_loader", "programs/bpf_loader",
"programs/budget", "programs/budget",
@ -51,6 +54,8 @@ members = [
"ramp-tps", "ramp-tps",
"runtime", "runtime",
"sdk", "sdk",
"sdk/cargo-build-bpf",
"sdk/cargo-test-bpf",
"scripts", "scripts",
"stake-accounts", "stake-accounts",
"stake-monitor", "stake-monitor",

View File

@ -61,8 +61,9 @@ $ cargo test
### Starting a local testnet ### Starting a local testnet
Start your own testnet locally, instructions are in the [online docs](https://docs.solana.com/cluster/bench-tps). Start your own testnet locally, instructions are in the [online docs](https://docs.solana.com/cluster/bench-tps).
### Accessing the remote testnet ### Accessing the remote development cluster
* `testnet` - public stable testnet accessible via devnet.solana.com. Runs 24/7 * `devnet` - stable public cluster for development accessible via
devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solana.com/clusters)
# Benchmarking # Benchmarking

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-account-decoder" name = "solana-account-decoder"
version = "1.4.0" version = "1.4.7"
description = "Solana account decoder" description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -18,11 +18,11 @@ lazy_static = "1.4.0"
serde = "1.0.112" serde = "1.0.112"
serde_derive = "1.0.103" serde_derive = "1.0.103"
serde_json = "1.0.56" serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "1.4.0" } solana-config-program = { path = "../programs/config", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
solana-stake-program = { path = "../programs/stake", version = "1.4.0" } solana-stake-program = { path = "../programs/stake", version = "1.4.7" }
solana-vote-program = { path = "../programs/vote", version = "1.4.0" } solana-vote-program = { path = "../programs/vote", version = "1.4.7" }
spl-token-v2-0 = { package = "spl-token", version = "=2.0.6", features = ["skip-no-mangle"] } spl-token-v2-0 = { package = "spl-token", version = "=3.0.0", features = ["no-entrypoint"] }
thiserror = "1.0" thiserror = "1.0"
[package.metadata.docs.rs] [package.metadata.docs.rs]

View File

@ -111,8 +111,8 @@ mod test {
#[test] #[test]
fn test_parse_account_data() { fn test_parse_account_data() {
let account_pubkey = Pubkey::new_rand(); let account_pubkey = solana_sdk::pubkey::new_rand();
let other_program = Pubkey::new_rand(); let other_program = solana_sdk::pubkey::new_rand();
let data = vec![0; 4]; let data = vec![0; 4];
assert!(parse_account_data(&account_pubkey, &other_program, &data, None).is_err()); assert!(parse_account_data(&account_pubkey, &other_program, &data, None).is_err());

View File

@ -117,7 +117,7 @@ mod test {
})) }))
.unwrap(), .unwrap(),
}; };
let info_pubkey = Pubkey::new_rand(); let info_pubkey = solana_sdk::pubkey::new_rand();
let validator_info_config_account = create_config_account( let validator_info_config_account = create_config_account(
vec![(validator_info::id(), false), (info_pubkey, true)], vec![(validator_info::id(), false), (info_pubkey, true)],
&validator_info, &validator_info,

View File

@ -134,7 +134,6 @@ impl From<Delegation> for UiDelegation {
mod test { mod test {
use super::*; use super::*;
use bincode::serialize; use bincode::serialize;
use solana_sdk::pubkey::Pubkey;
#[test] #[test]
fn test_parse_stake() { fn test_parse_stake() {
@ -145,8 +144,8 @@ mod test {
StakeAccountType::Uninitialized StakeAccountType::Uninitialized
); );
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let custodian = Pubkey::new_rand(); let custodian = solana_sdk::pubkey::new_rand();
let authorized = Authorized::auto(&pubkey); let authorized = Authorized::auto(&pubkey);
let lockup = Lockup { let lockup = Lockup {
unix_timestamp: 0, unix_timestamp: 0,
@ -180,7 +179,7 @@ mod test {
}) })
); );
let voter_pubkey = Pubkey::new_rand(); let voter_pubkey = solana_sdk::pubkey::new_rand();
let stake = Stake { let stake = Stake {
delegation: Delegation { delegation: Delegation {
voter_pubkey, voter_pubkey,

View File

@ -212,15 +212,14 @@ pub struct UiStakeHistoryEntry {
mod test { mod test {
use super::*; use super::*;
use solana_sdk::{ use solana_sdk::{
fee_calculator::FeeCalculator, account::create_account, fee_calculator::FeeCalculator, hash::Hash,
hash::Hash, sysvar::recent_blockhashes::IterItem,
sysvar::{recent_blockhashes::IterItem, Sysvar},
}; };
use std::iter::FromIterator; use std::iter::FromIterator;
#[test] #[test]
fn test_parse_sysvars() { fn test_parse_sysvars() {
let clock_sysvar = Clock::default().create_account(1); let clock_sysvar = create_account(&Clock::default(), 1);
assert_eq!( assert_eq!(
parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(), parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
SysvarAccountType::Clock(UiClock::default()), SysvarAccountType::Clock(UiClock::default()),
@ -233,13 +232,13 @@ mod test {
first_normal_epoch: 1, first_normal_epoch: 1,
first_normal_slot: 12, first_normal_slot: 12,
}; };
let epoch_schedule_sysvar = epoch_schedule.create_account(1); let epoch_schedule_sysvar = create_account(&epoch_schedule, 1);
assert_eq!( assert_eq!(
parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(), parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
SysvarAccountType::EpochSchedule(epoch_schedule), SysvarAccountType::EpochSchedule(epoch_schedule),
); );
let fees_sysvar = Fees::default().create_account(1); let fees_sysvar = create_account(&Fees::default(), 1);
assert_eq!( assert_eq!(
parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(), parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
SysvarAccountType::Fees(UiFees::default()), SysvarAccountType::Fees(UiFees::default()),
@ -251,7 +250,7 @@ mod test {
}; };
let recent_blockhashes = let recent_blockhashes =
RecentBlockhashes::from_iter(vec![IterItem(0, &hash, &fee_calculator)].into_iter()); RecentBlockhashes::from_iter(vec![IterItem(0, &hash, &fee_calculator)].into_iter());
let recent_blockhashes_sysvar = recent_blockhashes.create_account(1); let recent_blockhashes_sysvar = create_account(&recent_blockhashes, 1);
assert_eq!( assert_eq!(
parse_sysvar( parse_sysvar(
&recent_blockhashes_sysvar.data, &recent_blockhashes_sysvar.data,
@ -269,13 +268,13 @@ mod test {
exemption_threshold: 2.0, exemption_threshold: 2.0,
burn_percent: 5, burn_percent: 5,
}; };
let rent_sysvar = rent.create_account(1); let rent_sysvar = create_account(&rent, 1);
assert_eq!( assert_eq!(
parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(), parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
SysvarAccountType::Rent(rent.into()), SysvarAccountType::Rent(rent.into()),
); );
let rewards_sysvar = Rewards::default().create_account(1); let rewards_sysvar = create_account(&Rewards::default(), 1);
assert_eq!( assert_eq!(
parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(), parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
SysvarAccountType::Rewards(UiRewards::default()), SysvarAccountType::Rewards(UiRewards::default()),
@ -283,7 +282,7 @@ mod test {
let mut slot_hashes = SlotHashes::default(); let mut slot_hashes = SlotHashes::default();
slot_hashes.add(1, hash); slot_hashes.add(1, hash);
let slot_hashes_sysvar = slot_hashes.create_account(1); let slot_hashes_sysvar = create_account(&slot_hashes, 1);
assert_eq!( assert_eq!(
parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(), parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
SysvarAccountType::SlotHashes(vec![UiSlotHashEntry { SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
@ -294,7 +293,7 @@ mod test {
let mut slot_history = SlotHistory::default(); let mut slot_history = SlotHistory::default();
slot_history.add(42); slot_history.add(42);
let slot_history_sysvar = slot_history.create_account(1); let slot_history_sysvar = create_account(&slot_history, 1);
assert_eq!( assert_eq!(
parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(), parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
SysvarAccountType::SlotHistory(UiSlotHistory { SysvarAccountType::SlotHistory(UiSlotHistory {
@ -310,7 +309,7 @@ mod test {
deactivating: 3, deactivating: 3,
}; };
stake_history.add(1, stake_history_entry.clone()); stake_history.add(1, stake_history_entry.clone());
let stake_history_sysvar = stake_history.create_account(1); let stake_history_sysvar = create_account(&stake_history, 1);
assert_eq!( assert_eq!(
parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(), parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry { SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {
@ -319,7 +318,7 @@ mod test {
}]), }]),
); );
let bad_pubkey = Pubkey::new_rand(); let bad_pubkey = solana_sdk::pubkey::new_rand();
assert!(parse_sysvar(&stake_history_sysvar.data, &bad_pubkey).is_err()); assert!(parse_sysvar(&stake_history_sysvar.data, &bad_pubkey).is_err());
let bad_data = vec![0; 4]; let bad_data = vec![0; 4];

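The hunk above moves the tests off the sysvar objects' own `create_account(lamports)` method and onto the free helper `solana_sdk::account::create_account(&sysvar, lamports)`. A minimal sketch of the new round-trip pattern, reading the value back with `from_account`; the `roundtrip_clock` helper name is illustrative and not part of the diff:

use solana_sdk::{
    account::{create_account, from_account},
    clock::Clock,
};

// Serialize a sysvar value into an Account with the free helper, then read it back.
// Mirrors the pattern the tests above now use; the lamports value is arbitrary here.
fn roundtrip_clock() -> Option<Clock> {
    let clock_account = create_account(&Clock::default(), 1);
    from_account::<Clock>(&clock_account)
}
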
View File

@ -4,7 +4,9 @@ use crate::{
}; };
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use spl_token_v2_0::{ use spl_token_v2_0::{
solana_sdk::{program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey}, solana_program::{
program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
},
state::{Account, AccountState, Mint, Multisig}, state::{Account, AccountState, Mint, Multisig},
}; };
use std::str::FromStr; use std::str::FromStr;

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018" edition = "2018"
name = "solana-accounts-bench" name = "solana-accounts-bench"
version = "1.4.0" version = "1.4.7"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -11,11 +11,11 @@ publish = false
[dependencies] [dependencies]
log = "0.4.6" log = "0.4.6"
rayon = "1.4.0" rayon = "1.4.0"
solana-logger = { path = "../logger", version = "1.4.0" } solana-logger = { path = "../logger", version = "1.4.7" }
solana-runtime = { path = "../runtime", version = "1.4.0" } solana-runtime = { path = "../runtime", version = "1.4.7" }
solana-measure = { path = "../measure", version = "1.4.0" } solana-measure = { path = "../measure", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
solana-version = { path = "../version", version = "1.4.0" } solana-version = { path = "../version", version = "1.4.7" }
rand = "0.7.0" rand = "0.7.0"
clap = "2.33.1" clap = "2.33.1"
crossbeam-channel = "0.4" crossbeam-channel = "0.4"

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018" edition = "2018"
name = "solana-banking-bench" name = "solana-banking-bench"
version = "1.4.0" version = "1.4.7"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -14,16 +14,16 @@ crossbeam-channel = "0.4"
log = "0.4.6" log = "0.4.6"
rand = "0.7.0" rand = "0.7.0"
rayon = "1.4.0" rayon = "1.4.0"
solana-core = { path = "../core", version = "1.4.0" } solana-core = { path = "../core", version = "1.4.7" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" } solana-clap-utils = { path = "../clap-utils", version = "1.4.7" }
solana-streamer = { path = "../streamer", version = "1.4.0" } solana-streamer = { path = "../streamer", version = "1.4.7" }
solana-perf = { path = "../perf", version = "1.4.0" } solana-perf = { path = "../perf", version = "1.4.7" }
solana-ledger = { path = "../ledger", version = "1.4.0" } solana-ledger = { path = "../ledger", version = "1.4.7" }
solana-logger = { path = "../logger", version = "1.4.0" } solana-logger = { path = "../logger", version = "1.4.7" }
solana-runtime = { path = "../runtime", version = "1.4.0" } solana-runtime = { path = "../runtime", version = "1.4.7" }
solana-measure = { path = "../measure", version = "1.4.0" } solana-measure = { path = "../measure", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
solana-version = { path = "../version", version = "1.4.0" } solana-version = { path = "../version", version = "1.4.7" }
[package.metadata.docs.rs] [package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"] targets = ["x86_64-unknown-linux-gnu"]

View File

@ -20,7 +20,6 @@ use solana_perf::packet::to_packets_chunked;
use solana_runtime::{bank::Bank, bank_forks::BankForks}; use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{ use solana_sdk::{
hash::Hash, hash::Hash,
pubkey::Pubkey,
signature::Keypair, signature::Keypair,
signature::Signature, signature::Signature,
system_transaction, system_transaction,
@ -69,7 +68,7 @@ fn make_accounts_txs(
hash: Hash, hash: Hash,
same_payer: bool, same_payer: bool,
) -> Vec<Transaction> { ) -> Vec<Transaction> {
let to_pubkey = Pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand();
let payer_key = Keypair::new(); let payer_key = Keypair::new();
let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash); let dummy = system_transaction::transfer(&payer_key, &to_pubkey, 1, hash);
(0..total_num_transactions) (0..total_num_transactions)
@ -78,9 +77,9 @@ fn make_accounts_txs(
let mut new = dummy.clone(); let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect(); let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
if !same_payer { if !same_payer {
new.message.account_keys[0] = Pubkey::new_rand(); new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
} }
new.message.account_keys[1] = Pubkey::new_rand(); new.message.account_keys[1] = solana_sdk::pubkey::new_rand();
new.signatures = vec![Signature::new(&sig[0..64])]; new.signatures = vec![Signature::new(&sig[0..64])];
new new
}) })
@ -241,7 +240,7 @@ fn main() {
let base_tx_count = bank.transaction_count(); let base_tx_count = bank.transaction_count();
let mut txs_processed = 0; let mut txs_processed = 0;
let mut root = 1; let mut root = 1;
let collector = Pubkey::new_rand(); let collector = solana_sdk::pubkey::new_rand();
let config = Config { let config = Config {
packets_per_batch: packets_per_chunk, packets_per_batch: packets_per_chunk,
chunk_len, chunk_len,

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-banks-client" name = "solana-banks-client"
version = "1.4.0" version = "1.4.7"
description = "Solana banks client" description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,15 +12,15 @@ edition = "2018"
async-trait = "0.1.36" async-trait = "0.1.36"
bincode = "1.3.1" bincode = "1.3.1"
futures = "0.3" futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "1.4.0" } solana-banks-interface = { path = "../banks-interface", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
tarpc = { version = "0.22.0", features = ["full"] } tarpc = { version = "0.23.0", features = ["full"] }
tokio = "0.2" tokio = { version = "0.3", features = ["full"] }
tokio-serde = { version = "0.6", features = ["bincode"] } tokio-serde = { version = "0.6", features = ["bincode"] }
[dev-dependencies] [dev-dependencies]
solana-runtime = { path = "../runtime", version = "1.4.0" } solana-runtime = { path = "../runtime", version = "1.4.7" }
solana-banks-server = { path = "../banks-server", version = "1.4.0" } solana-banks-server = { path = "../banks-server", version = "1.4.7" }
[lib] [lib]
crate-type = ["lib"] crate-type = ["lib"]

View File

@ -10,9 +10,17 @@ use futures::future::join_all;
pub use solana_banks_interface::{BanksClient, TransactionStatus}; pub use solana_banks_interface::{BanksClient, TransactionStatus};
use solana_banks_interface::{BanksRequest, BanksResponse}; use solana_banks_interface::{BanksRequest, BanksResponse};
use solana_sdk::{ use solana_sdk::{
account::Account, clock::Slot, commitment_config::CommitmentLevel, account::{from_account, Account},
fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, signature::Signature, clock::Slot,
transaction::Transaction, transport, commitment_config::CommitmentLevel,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
rent::Rent,
signature::Signature,
sysvar,
transaction::Transaction,
transport,
}; };
use std::io::{self, Error, ErrorKind}; use std::io::{self, Error, ErrorKind};
use tarpc::{ use tarpc::{
@ -40,6 +48,9 @@ pub trait BanksClientExt {
/// use them to calculate the transaction fee. /// use them to calculate the transaction fee.
async fn get_fees(&mut self) -> io::Result<(FeeCalculator, Hash, Slot)>; async fn get_fees(&mut self) -> io::Result<(FeeCalculator, Hash, Slot)>;
/// Return the cluster rent
async fn get_rent(&mut self) -> io::Result<Rent>;
/// Send a transaction and return after the transaction has been rejected or /// Send a transaction and return after the transaction has been rejected or
/// reached the given level of commitment. /// reached the given level of commitment.
async fn process_transaction_with_commitment( async fn process_transaction_with_commitment(
@ -108,6 +119,17 @@ impl BanksClientExt for BanksClient {
.await .await
} }
async fn get_rent(&mut self) -> io::Result<Rent> {
let rent_sysvar = self
.get_account(sysvar::rent::id())
.await?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;
from_account::<Rent>(&rent_sysvar).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
})
}
async fn get_recent_blockhash(&mut self) -> io::Result<Hash> { async fn get_recent_blockhash(&mut self) -> io::Result<Hash> {
Ok(self.get_fees().await?.1) Ok(self.get_fees().await?.1)
} }
@ -213,10 +235,10 @@ mod tests {
use super::*; use super::*;
use solana_banks_server::banks_server::start_local_server; use solana_banks_server::banks_server::start_local_server;
use solana_runtime::{bank::Bank, bank_forks::BankForks, genesis_utils::create_genesis_config}; use solana_runtime::{bank::Bank, bank_forks::BankForks, genesis_utils::create_genesis_config};
use solana_sdk::{message::Message, pubkey::Pubkey, signature::Signer, system_instruction}; use solana_sdk::{message::Message, signature::Signer, system_instruction};
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use tarpc::transport; use tarpc::transport;
use tokio::{runtime::Runtime, time::delay_for}; use tokio::{runtime::Runtime, time::sleep};
#[test] #[test]
fn test_banks_client_new() { fn test_banks_client_new() {
@ -235,7 +257,7 @@ mod tests {
&genesis.genesis_config, &genesis.genesis_config,
)))); ))));
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = solana_sdk::pubkey::new_rand();
let mint_pubkey = genesis.mint_keypair.pubkey(); let mint_pubkey = genesis.mint_keypair.pubkey();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1); let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey)); let message = Message::new(&[instruction], Some(&mint_pubkey));
@ -265,7 +287,7 @@ mod tests {
)))); ))));
let mint_pubkey = &genesis.mint_keypair.pubkey(); let mint_pubkey = &genesis.mint_keypair.pubkey();
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = solana_sdk::pubkey::new_rand();
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1); let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
let message = Message::new(&[instruction], Some(&mint_pubkey)); let message = Message::new(&[instruction], Some(&mint_pubkey));
@ -285,7 +307,7 @@ mod tests {
if root_slot > last_valid_slot { if root_slot > last_valid_slot {
break; break;
} }
delay_for(Duration::from_millis(100)).await; sleep(Duration::from_millis(100)).await;
status = banks_client.get_transaction_status(signature).await?; status = banks_client.get_transaction_status(signature).await?;
} }
assert!(status.unwrap().err.is_none()); assert!(status.unwrap().err.is_none());

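The banks-client diff above adds a `get_rent` method to `BanksClientExt` that fetches the Rent sysvar account and deserializes it with `from_account`. A short hedged sketch of how a caller might use it once a client is connected; the helper name and the data length parameter are illustrative, not from the diff:

use solana_banks_client::{BanksClient, BanksClientExt};

// Given an already-connected BanksClient, read the cluster Rent sysvar via the new
// get_rent() helper and compute the rent-exempt minimum for an account of `data_len` bytes.
async fn rent_exempt_minimum(
    banks_client: &mut BanksClient,
    data_len: usize,
) -> std::io::Result<u64> {
    let rent = banks_client.get_rent().await?;
    Ok(rent.minimum_balance(data_len))
}
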
View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-banks-interface" name = "solana-banks-interface"
version = "1.4.0" version = "1.4.7"
description = "Solana banks RPC interface" description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -10,8 +10,8 @@ edition = "2018"
[dependencies] [dependencies]
serde = { version = "1.0.112", features = ["derive"] } serde = { version = "1.0.112", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
tarpc = { version = "0.22.0", features = ["full"] } tarpc = { version = "0.23.0", features = ["full"] }
[lib] [lib]
crate-type = ["lib"] crate-type = ["lib"]

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-banks-server" name = "solana-banks-server"
version = "1.4.0" version = "1.4.7"
description = "Solana banks server" description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,12 +12,12 @@ edition = "2018"
bincode = "1.3.1" bincode = "1.3.1"
futures = "0.3" futures = "0.3"
log = "0.4.8" log = "0.4.8"
solana-banks-interface = { path = "../banks-interface", version = "1.4.0" } solana-banks-interface = { path = "../banks-interface", version = "1.4.7" }
solana-runtime = { path = "../runtime", version = "1.4.0" } solana-runtime = { path = "../runtime", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
solana-metrics = { path = "../metrics", version = "1.4.0" } solana-metrics = { path = "../metrics", version = "1.4.7" }
tarpc = { version = "0.22.0", features = ["full"] } tarpc = { version = "0.23.0", features = ["full"] }
tokio = "0.2" tokio = { version = "0.3", features = ["full"] }
tokio-serde = { version = "0.6", features = ["bincode"] } tokio-serde = { version = "0.6", features = ["bincode"] }
[lib] [lib]

View File

@ -5,11 +5,7 @@ use futures::{
prelude::stream::{self, StreamExt}, prelude::stream::{self, StreamExt},
}; };
use solana_banks_interface::{Banks, BanksRequest, BanksResponse, TransactionStatus}; use solana_banks_interface::{Banks, BanksRequest, BanksResponse, TransactionStatus};
use solana_runtime::{ use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
bank::Bank,
bank_forks::BankForks,
commitment::{BlockCommitmentCache, CommitmentSlots},
};
use solana_sdk::{ use solana_sdk::{
account::Account, account::Account,
clock::Slot, clock::Slot,
@ -21,7 +17,6 @@ use solana_sdk::{
transaction::{self, Transaction}, transaction::{self, Transaction},
}; };
use std::{ use std::{
collections::HashMap,
io, io,
net::{Ipv4Addr, SocketAddr}, net::{Ipv4Addr, SocketAddr},
sync::{ sync::{
@ -38,7 +33,7 @@ use tarpc::{
server::{self, Channel, Handler}, server::{self, Channel, Handler},
transport, transport,
}; };
use tokio::time::delay_for; use tokio::time::sleep;
use tokio_serde::formats::Bincode; use tokio_serde::formats::Bincode;
#[derive(Clone)] #[derive(Clone)]
@ -84,11 +79,9 @@ impl BanksServer {
let (transaction_sender, transaction_receiver) = channel(); let (transaction_sender, transaction_receiver) = channel();
let bank = bank_forks.read().unwrap().working_bank(); let bank = bank_forks.read().unwrap().working_bank();
let slot = bank.slot(); let slot = bank.slot();
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new( let block_commitment_cache = Arc::new(RwLock::new(
HashMap::default(), BlockCommitmentCache::new_for_tests_with_slots(slot, slot),
0, ));
CommitmentSlots::new_from_slot(slot),
)));
Builder::new() Builder::new()
.name("solana-bank-forks-client".to_string()) .name("solana-bank-forks-client".to_string())
.spawn(move || Self::run(&bank, transaction_receiver)) .spawn(move || Self::run(&bank, transaction_receiver))
@ -109,18 +102,21 @@ impl BanksServer {
async fn poll_signature_status( async fn poll_signature_status(
self, self,
signature: Signature, signature: &Signature,
blockhash: &Hash,
last_valid_slot: Slot, last_valid_slot: Slot,
commitment: CommitmentLevel, commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> { ) -> Option<transaction::Result<()>> {
let mut status = self.bank(commitment).get_signature_status(&signature); let mut status = self
.bank(commitment)
.get_signature_status_with_blockhash(signature, blockhash);
while status.is_none() { while status.is_none() {
delay_for(Duration::from_millis(200)).await; sleep(Duration::from_millis(200)).await;
let bank = self.bank(commitment); let bank = self.bank(commitment);
if bank.slot() > last_valid_slot { if bank.slot() > last_valid_slot {
break; break;
} }
status = bank.get_signature_status(&signature); status = bank.get_signature_status_with_blockhash(signature, blockhash);
} }
status status
} }
@ -193,13 +189,13 @@ impl Banks for BanksServer {
.read() .read()
.unwrap() .unwrap()
.root_bank() .root_bank()
.get_blockhash_last_valid_slot(&blockhash) .get_blockhash_last_valid_slot(blockhash)
.unwrap(); .unwrap();
let signature = transaction.signatures.get(0).cloned().unwrap_or_default(); let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let info = let info =
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot); TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
self.transaction_sender.send(info).unwrap(); self.transaction_sender.send(info).unwrap();
self.poll_signature_status(signature, last_valid_slot, commitment) self.poll_signature_status(&signature, blockhash, last_valid_slot, commitment)
.await .await
} }

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018" edition = "2018"
name = "solana-bench-exchange" name = "solana-bench-exchange"
version = "1.4.0" version = "1.4.7"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -18,21 +18,21 @@ rand = "0.7.0"
rayon = "1.4.0" rayon = "1.4.0"
serde_json = "1.0.56" serde_json = "1.0.56"
serde_yaml = "0.8.13" serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" } solana-clap-utils = { path = "../clap-utils", version = "1.4.7" }
solana-core = { path = "../core", version = "1.4.0" } solana-core = { path = "../core", version = "1.4.7" }
solana-genesis = { path = "../genesis", version = "1.4.0" } solana-genesis = { path = "../genesis", version = "1.4.7" }
solana-client = { path = "../client", version = "1.4.0" } solana-client = { path = "../client", version = "1.4.7" }
solana-faucet = { path = "../faucet", version = "1.4.0" } solana-faucet = { path = "../faucet", version = "1.4.7" }
solana-exchange-program = { path = "../programs/exchange", version = "1.4.0" } solana-exchange-program = { path = "../programs/exchange", version = "1.4.7" }
solana-logger = { path = "../logger", version = "1.4.0" } solana-logger = { path = "../logger", version = "1.4.7" }
solana-metrics = { path = "../metrics", version = "1.4.0" } solana-metrics = { path = "../metrics", version = "1.4.7" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" } solana-net-utils = { path = "../net-utils", version = "1.4.7" }
solana-runtime = { path = "../runtime", version = "1.4.0" } solana-runtime = { path = "../runtime", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
solana-version = { path = "../version", version = "1.4.0" } solana-version = { path = "../version", version = "1.4.7" }
[dev-dependencies] [dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.4.0" } solana-local-cluster = { path = "../local-cluster", version = "1.4.7" }
[package.metadata.docs.rs] [package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"] targets = ["x86_64-unknown-linux-gnu"]

View File

@ -86,7 +86,7 @@ fn test_exchange_bank_client() {
solana_logger::setup(); solana_logger::setup();
let (genesis_config, identity) = create_genesis_config(100_000_000_000_000); let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
let mut bank = Bank::new(&genesis_config); let mut bank = Bank::new(&genesis_config);
bank.add_builtin_program("exchange_program", id(), process_instruction); bank.add_builtin("exchange_program", id(), process_instruction);
let clients = vec![BankClient::new(bank)]; let clients = vec![BankClient::new(bank)];
let mut config = Config::default(); let mut config = Config::default();

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018" edition = "2018"
name = "solana-bench-streamer" name = "solana-bench-streamer"
version = "1.4.0" version = "1.4.7"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -10,11 +10,11 @@ publish = false
[dependencies] [dependencies]
clap = "2.33.1" clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" } solana-clap-utils = { path = "../clap-utils", version = "1.4.7" }
solana-streamer = { path = "../streamer", version = "1.4.0" } solana-streamer = { path = "../streamer", version = "1.4.7" }
solana-logger = { path = "../logger", version = "1.4.0" } solana-logger = { path = "../logger", version = "1.4.7" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" } solana-net-utils = { path = "../net-utils", version = "1.4.7" }
solana-version = { path = "../version", version = "1.4.0" } solana-version = { path = "../version", version = "1.4.7" }
[package.metadata.docs.rs] [package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"] targets = ["x86_64-unknown-linux-gnu"]

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018" edition = "2018"
name = "solana-bench-tps" name = "solana-bench-tps"
version = "1.4.0" version = "1.4.7"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -15,23 +15,23 @@ log = "0.4.8"
rayon = "1.4.0" rayon = "1.4.0"
serde_json = "1.0.56" serde_json = "1.0.56"
serde_yaml = "0.8.13" serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" } solana-clap-utils = { path = "../clap-utils", version = "1.4.7" }
solana-core = { path = "../core", version = "1.4.0" } solana-core = { path = "../core", version = "1.4.7" }
solana-genesis = { path = "../genesis", version = "1.4.0" } solana-genesis = { path = "../genesis", version = "1.4.7" }
solana-client = { path = "../client", version = "1.4.0" } solana-client = { path = "../client", version = "1.4.7" }
solana-faucet = { path = "../faucet", version = "1.4.0" } solana-faucet = { path = "../faucet", version = "1.4.7" }
solana-logger = { path = "../logger", version = "1.4.0" } solana-logger = { path = "../logger", version = "1.4.7" }
solana-metrics = { path = "../metrics", version = "1.4.0" } solana-metrics = { path = "../metrics", version = "1.4.7" }
solana-measure = { path = "../measure", version = "1.4.0" } solana-measure = { path = "../measure", version = "1.4.7" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" } solana-net-utils = { path = "../net-utils", version = "1.4.7" }
solana-runtime = { path = "../runtime", version = "1.4.0" } solana-runtime = { path = "../runtime", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
solana-version = { path = "../version", version = "1.4.0" } solana-version = { path = "../version", version = "1.4.7" }
[dev-dependencies] [dev-dependencies]
serial_test = "0.4.0" serial_test = "0.4.0"
serial_test_derive = "0.4.0" serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.4.0" } solana-local-cluster = { path = "../local-cluster", version = "1.4.7" }
[package.metadata.docs.rs] [package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"] targets = ["x86_64-unknown-linux-gnu"]

cargo (new executable file, 31 lines)
View File

@ -0,0 +1,31 @@
#!/usr/bin/env bash
# shellcheck source=ci/rust-version.sh
here=$(dirname "$0")
source "${here}"/ci/rust-version.sh all
toolchain=
case "$1" in
stable)
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
toolchain="$rust_stable"
shift
;;
nightly)
# shellcheck disable=SC2054 # rust_nightly is sourced from rust-version.sh
toolchain="$rust_nightly"
shift
;;
+*)
toolchain="${1#+}"
shift
;;
*)
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
toolchain="$rust_stable"
;;
esac
set -x
exec cargo "+${toolchain}" "${@}"

cargo-build-bpf (new executable file, 13 lines)
View File

@ -0,0 +1,13 @@
#!/usr/bin/env bash
here=$(dirname "$0")
maybe_bpf_sdk="--bpf-sdk $here/sdk/bpf"
for a in "$@"; do
if [[ $a = --bpf-sdk ]]; then
maybe_bpf_sdk=
fi
done
set -x
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"

cargo-test-bpf (new executable file, 14 lines)
View File

@ -0,0 +1,14 @@
#!/usr/bin/env bash
here=$(dirname "$0")
maybe_bpf_sdk="--bpf-sdk $here/sdk/bpf"
for a in "$@"; do
if [[ $a = --bpf-sdk ]]; then
maybe_bpf_sdk=
fi
done
export CARGO_BUILD_BPF="$here"/cargo-build-bpf
set -x
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-test-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"

View File

@ -175,6 +175,30 @@ EOF
"Stable-perf skipped as no relevant files were modified" "Stable-perf skipped as no relevant files were modified"
fi fi
# Downstream backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
EOF
else
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi
# Benches... # Benches...
if affects \ if affects \
.rs$ \ .rs$ \

View File

@ -26,8 +26,9 @@ declare print_free_tree=(
':runtime/src/**.rs' ':runtime/src/**.rs'
':sdk/bpf/rust/rust-utils/**.rs' ':sdk/bpf/rust/rust-utils/**.rs'
':sdk/**.rs' ':sdk/**.rs'
':^sdk/src/program_option.rs' ':^sdk/cargo-build-bpf/**.rs'
':^sdk/src/program_stubs.rs' ':^sdk/program/src/program_option.rs'
':^sdk/program/src/program_stubs.rs'
':programs/**.rs' ':programs/**.rs'
':^**bin**.rs' ':^**bin**.rs'
':^**bench**.rs' ':^**bench**.rs'

View File

@ -42,10 +42,10 @@ def get_packages():
sys.exit(1) sys.exit(1)
# Order dependencies # Order dependencies
deleted_dependencies = []
sorted_dependency_graph = [] sorted_dependency_graph = []
max_iterations = pow(len(dependency_graph),2) max_iterations = pow(len(dependency_graph),2)
while len(deleted_dependencies) < len(dependency_graph): while dependency_graph:
deleted_packages = []
if max_iterations == 0: if max_iterations == 0:
# One day be more helpful and find the actual cycle for the user... # One day be more helpful and find the actual cycle for the user...
sys.exit('Error: Circular dependency suspected between these packages: \n {}\n'.format('\n '.join(dependency_graph.keys()))) sys.exit('Error: Circular dependency suspected between these packages: \n {}\n'.format('\n '.join(dependency_graph.keys())))
@ -53,13 +53,17 @@ def get_packages():
max_iterations -= 1 max_iterations -= 1
for package, dependencies in dependency_graph.items(): for package, dependencies in dependency_graph.items():
if package in deleted_packages:
continue
for dependency in dependencies: for dependency in dependencies:
if dependency in dependency_graph: if dependency in dependency_graph:
break break
else: else:
deleted_dependencies.append(package) deleted_packages.append(package)
sorted_dependency_graph.append((package, manifest_path[package])) sorted_dependency_graph.append((package, manifest_path[package]))
dependency_graph = {p: d for p, d in dependency_graph.items() if not p in deleted_packages }
return sorted_dependency_graph return sorted_dependency_graph

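The order-crates-for-publishing.py fix above makes each pass drop the packages it has already resolved from the dependency graph, so later passes cannot append them to the publish order again. The same ordering pass, sketched in Rust rather than the script's Python; this is a simplified illustration, not the repository's code:

use std::collections::HashMap;

// Repeatedly peel off packages whose in-workspace dependencies are all resolved,
// removing them from the graph so they are not re-added on a later pass.
fn publish_order(mut graph: HashMap<String, Vec<String>>) -> Result<Vec<String>, String> {
    let mut sorted = Vec::new();
    let mut max_iterations = graph.len() * graph.len();
    while !graph.is_empty() {
        if max_iterations == 0 {
            return Err(format!(
                "circular dependency suspected between: {:?}",
                graph.keys()
            ));
        }
        max_iterations -= 1;
        let ready: Vec<String> = graph
            .iter()
            .filter(|(_, deps)| deps.iter().all(|dep| !graph.contains_key(dep)))
            .map(|(name, _)| name.clone())
            .collect();
        for name in &ready {
            graph.remove(name);
            sorted.push(name.clone());
        }
    }
    Ok(sorted)
}
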
View File

@ -4,6 +4,8 @@ cd "$(dirname "$0")/.."
source ci/semver_bash/semver.sh source ci/semver_bash/semver.sh
source ci/rust-version.sh stable source ci/rust-version.sh stable
cargo="$(readlink -f ./cargo)"
# shellcheck disable=SC2086 # shellcheck disable=SC2086
is_crate_version_uploaded() { is_crate_version_uploaded() {
name=$1 name=$1
@ -66,11 +68,11 @@ for Cargo_toml in $Cargo_tomls; do
( (
set -x set -x
rm -rf crate-test rm -rf crate-test
cargo +"$rust_stable" init crate-test "$cargo" stable init crate-test
cd crate-test/ cd crate-test/
echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
echo "[workspace]" >> Cargo.toml echo "[workspace]" >> Cargo.toml
cargo +"$rust_stable" check "$cargo" stable check
) && really_uploaded=1 ) && really_uploaded=1
if ((really_uploaded)); then if ((really_uploaded)); then
break; break;

View File

@ -91,17 +91,15 @@ echo --- Creating release tarball
cp "${RELEASE_BASENAME}"/version.yml "${TARBALL_BASENAME}"-$TARGET.yml cp "${RELEASE_BASENAME}"/version.yml "${TARBALL_BASENAME}"-$TARGET.yml
) )
# Metrics tarball is platform agnostic, only publish it from Linux # Maybe tarballs are platform agnostic, only publish them from the Linux build
MAYBE_TARBALLS= MAYBE_TARBALLS=
if [[ "$CI_OS_NAME" = linux ]]; then if [[ "$CI_OS_NAME" = linux ]]; then
metrics/create-metrics-tarball.sh
( (
set -x set -x
sdk/bpf/scripts/package.sh sdk/bpf/scripts/package.sh
[[ -f bpf-sdk.tar.bz2 ]] [[ -f bpf-sdk.tar.bz2 ]]
) )
MAYBE_TARBALLS="bpf-sdk.tar.bz2 solana-metrics.tar.bz2" MAYBE_TARBALLS="bpf-sdk.tar.bz2"
fi fi
source ci/upload-ci-artifact.sh source ci/upload-ci-artifact.sh
@ -126,7 +124,7 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.
/usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file" /usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
echo Published to: echo Published to:
$DRYRUN ci/format-url.sh http://release.solana.com/"$CHANNEL_OR_TAG"/"$file" $DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
) )
if [[ -n $TAG ]]; then if [[ -n $TAG ]]; then
@ -149,4 +147,30 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.
fi fi
done done
# Create install wrapper for release.solana.com
if [[ -n $BUILDKITE ]]; then
cat > release.solana.com-install <<EOF
SOLANA_RELEASE=$CHANNEL_OR_TAG
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
EOF
cat install/solana-install-init.sh >> release.solana.com-install
echo --- AWS S3 Store: "install"
(
set -x
$DRYRUN docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/release.solana.com-install s3://release.solana.com/"$CHANNEL_OR_TAG"/install
echo Published to:
$DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/install
)
fi
echo --- ok echo --- ok

View File

@ -6,7 +6,8 @@ source ci/_
source ci/upload-ci-artifact.sh source ci/upload-ci-artifact.sh
eval "$(ci/channel-info.sh)" eval "$(ci/channel-info.sh)"
source ci/rust-version.sh all
cargo="$(readlink -f "./cargo")"
set -o pipefail set -o pipefail
export RUST_BACKTRACE=1 export RUST_BACKTRACE=1
@ -27,35 +28,35 @@ test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Ensure all dependencies are built # Ensure all dependencies are built
_ cargo +$rust_nightly build --release _ "$cargo" nightly build --release
# Remove "BENCH_FILE", if it exists so that the following commands can append # Remove "BENCH_FILE", if it exists so that the following commands can append
rm -f "$BENCH_FILE" rm -f "$BENCH_FILE"
# Run sdk benches # Run sdk benches
_ cargo +$rust_nightly bench --manifest-path sdk/Cargo.toml ${V:+--verbose} \ _ "$cargo" nightly bench --manifest-path sdk/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE" -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run runtime benches # Run runtime benches
_ cargo +$rust_nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \ _ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE" -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run core benches # Run core benches
_ cargo +$rust_nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \ _ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE" -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
# Run bpf benches # Run bpf benches
_ cargo +$rust_nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verbose} --features=bpf_c \ _ "$cargo" nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verbose} --features=bpf_c \
-- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE" -- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE"
# Run banking/accounts bench. Doesn't require nightly, but use since it is already built. # Run banking/accounts bench. Doesn't require nightly, but use since it is already built.
_ cargo +$rust_nightly run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE" _ "$cargo" nightly run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
_ cargo +$rust_nightly run --release --manifest-path accounts-bench/Cargo.toml ${V:+--verbose} -- --num_accounts 10000 --num_slots 4 | tee -a "$BENCH_FILE" _ "$cargo" nightly run --release --manifest-path accounts-bench/Cargo.toml ${V:+--verbose} -- --num_accounts 10000 --num_slots 4 | tee -a "$BENCH_FILE"
# `solana-upload-perf` disabled as it can take over 30 minutes to complete for some # `solana-upload-perf` disabled as it can take over 30 minutes to complete for some
# reason # reason
exit 0 exit 0
_ cargo +$rust_nightly run --release --package solana-upload-perf \ _ "$cargo" nightly run --release --package solana-upload-perf \
-- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" | tee "$BENCH_ARTIFACT" -- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" | tee "$BENCH_ARTIFACT"
upload-ci-artifact "$BENCH_FILE" upload-ci-artifact "$BENCH_FILE"

View File

@ -8,6 +8,9 @@ source ci/_
source ci/rust-version.sh stable source ci/rust-version.sh stable
source ci/rust-version.sh nightly source ci/rust-version.sh nightly
eval "$(ci/channel-info.sh)" eval "$(ci/channel-info.sh)"
cargo="$(readlink -f "./cargo")"
scripts/increment-cargo-version.sh check
echo --- build environment echo --- build environment
( (
@ -16,14 +19,14 @@ echo --- build environment
rustup run "$rust_stable" rustc --version --verbose rustup run "$rust_stable" rustc --version --verbose
rustup run "$rust_nightly" rustc --version --verbose rustup run "$rust_nightly" rustc --version --verbose
cargo +"$rust_stable" --version --verbose "$cargo" stable --version --verbose
cargo +"$rust_nightly" --version --verbose "$cargo" nightly --version --verbose
cargo +"$rust_stable" clippy --version --verbose "$cargo" stable clippy --version --verbose
cargo +"$rust_nightly" clippy --version --verbose "$cargo" nightly clippy --version --verbose
# audit is done only with stable # audit is done only with stable
cargo +"$rust_stable" audit --version "$cargo" stable audit --version
) )
export RUST_BACKTRACE=1 export RUST_BACKTRACE=1
@ -41,7 +44,7 @@ if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
echo "$0: [tree (for outdated Cargo.lock sync)|check (for compilation error)|update -p foo --precise x.y.z (for your Cargo.toml update)] ..." >&2 echo "$0: [tree (for outdated Cargo.lock sync)|check (for compilation error)|update -p foo --precise x.y.z (for your Cargo.toml update)] ..." >&2
exit "$check_status" exit "$check_status"
fi fi
# Ensure nightly and --benches # Ensure nightly and --benches
_ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets _ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets
else else
@ -49,26 +52,44 @@ else
fi fi
_ ci/order-crates-for-publishing.py _ ci/order-crates-for-publishing.py
_ cargo +"$rust_stable" fmt --all -- --check _ "$cargo" stable fmt --all -- --check
# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612 # -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
_ cargo +"$rust_nightly" clippy \ _ "$cargo" nightly clippy \
-Zunstable-options --workspace --all-targets \ -Zunstable-options --workspace --all-targets \
-- --deny=warnings --allow=clippy::stable_sort_primitive -- --deny=warnings --allow=clippy::stable_sort_primitive
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008 cargo_audit_ignores=(
# failure is officially deprecated/unmaintained
#
# Blocked on multiple upstream crates removing their `failure` dependency.
--ignore RUSTSEC-2020-0036
# `net2` crate has been deprecated; use `socket2` instead
#
# Blocked on https://github.com/paritytech/jsonrpc/issues/575
--ignore RUSTSEC-2020-0016
# stdweb is unmaintained
#
# Blocked on multiple upstream crates removing their `stdweb` dependency.
--ignore RUSTSEC-2020-0056
)
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignores[@]}"
{ {
cd programs/bpf cd programs/bpf
_ cargo +"$rust_stable" audit _ "$cargo" stable audit
for project in rust/*/ ; do for project in rust/*/ ; do
echo "+++ do_bpf_checks $project" echo "+++ do_bpf_checks $project"
( (
cd "$project" cd "$project"
_ cargo +"$rust_stable" fmt -- --check _ "$cargo" stable fmt -- --check
_ cargo +"$rust_nightly" test _ "$cargo" nightly test
_ cargo +"$rust_nightly" clippy -- --deny=warnings --allow=clippy::missing_safety_doc _ "$cargo" nightly clippy -- --deny=warnings \
--allow=clippy::missing_safety_doc \
--allow=clippy::stable_sort_primitive
) )
done done
} }

View File

@ -2,6 +2,8 @@
set -e set -e
cd "$(dirname "$0")/.." cd "$(dirname "$0")/.."
cargo="$(readlink -f "./cargo")"
source ci/_ source ci/_
annotate() { annotate() {
@ -37,12 +39,15 @@ NPROC=$((NPROC>14 ? 14 : NPROC))
echo "Executing $testName" echo "Executing $testName"
case $testName in case $testName in
test-stable) test-stable)
_ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture _ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
;; ;;
test-stable-perf) test-stable-perf)
# BPF solana-sdk legacy compile test
./cargo-build-bpf --manifest-path sdk/Cargo.toml
# BPF program tests # BPF program tests
_ make -C programs/bpf/c tests _ make -C programs/bpf/c tests
_ cargo +"$rust_stable" test \ _ "$cargo" stable test \
--manifest-path programs/bpf/Cargo.toml \ --manifest-path programs/bpf/Cargo.toml \
--no-default-features --features=bpf_c,bpf_rust -- --nocapture --no-default-features --features=bpf_c,bpf_rust -- --nocapture
@ -62,13 +67,13 @@ test-stable-perf)
export SOLANA_CUDA=1 export SOLANA_CUDA=1
fi fi
_ cargo +"$rust_stable" build --bins ${V:+--verbose} _ "$cargo" stable build --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture _ "$cargo" stable test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" run --manifest-path poh-bench/Cargo.toml ${V:+--verbose} -- --hashes-per-tick 10 _ "$cargo" stable run --manifest-path poh-bench/Cargo.toml ${V:+--verbose} -- --hashes-per-tick 10
;; ;;
test-local-cluster) test-local-cluster)
_ cargo +"$rust_stable" build --release --bins ${V:+--verbose} _ "$cargo" stable build --release --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1 _ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
exit 0 exit 0
;; ;;
*) *)

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-clap-utils" name = "solana-clap-utils"
version = "1.4.0" version = "1.4.7"
description = "Solana utilities for the clap" description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -11,8 +11,8 @@ edition = "2018"
[dependencies] [dependencies]
clap = "2.33.0" clap = "2.33.0"
rpassword = "4.0" rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.0" } solana-remote-wallet = { path = "../remote-wallet", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
thiserror = "1.0.20" thiserror = "1.0.20"
tiny-bip39 = "0.7.0" tiny-bip39 = "0.7.0"
url = "2.1.0" url = "2.1.0"


@ -228,8 +228,8 @@ mod tests {
assert_eq!(values_of(&matches, "multiple"), Some(vec![50, 39])); assert_eq!(values_of(&matches, "multiple"), Some(vec![50, 39]));
assert_eq!(values_of::<u64>(&matches, "single"), None); assert_eq!(values_of::<u64>(&matches, "single"), None);
let pubkey0 = Pubkey::new_rand(); let pubkey0 = solana_sdk::pubkey::new_rand();
let pubkey1 = Pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand();
let matches = app().clone().get_matches_from(vec![ let matches = app().clone().get_matches_from(vec![
"test", "test",
"--multiple", "--multiple",
@ -251,7 +251,7 @@ mod tests {
assert_eq!(value_of(&matches, "single"), Some(50)); assert_eq!(value_of(&matches, "single"), Some(50));
assert_eq!(value_of::<u64>(&matches, "multiple"), None); assert_eq!(value_of::<u64>(&matches, "multiple"), None);
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let matches = app() let matches = app()
.clone() .clone()
.get_matches_from(vec!["test", "--single", &pubkey.to_string()]); .get_matches_from(vec!["test", "--single", &pubkey.to_string()]);
@ -331,8 +331,8 @@ mod tests {
#[test] #[test]
fn test_pubkeys_sigs_of() { fn test_pubkeys_sigs_of() {
let key1 = Pubkey::new_rand(); let key1 = solana_sdk::pubkey::new_rand();
let key2 = Pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]); let sig1 = Keypair::new().sign_message(&[0u8]);
let sig2 = Keypair::new().sign_message(&[1u8]); let sig2 = Keypair::new().sign_message(&[1u8]);
let signer1 = format!("{}={}", key1, sig1); let signer1 = format!("{}={}", key1, sig1);


@ -298,7 +298,24 @@ pub fn keypair_from_seed_phrase(
keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)? keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)?
} else { } else {
let sanitized = sanitize_seed_phrase(seed_phrase); let sanitized = sanitize_seed_phrase(seed_phrase);
let mnemonic = Mnemonic::from_phrase(&sanitized, Language::English)?; let parse_language_fn = || {
for language in &[
Language::English,
Language::ChineseSimplified,
Language::ChineseTraditional,
Language::Japanese,
Language::Spanish,
Language::Korean,
Language::French,
Language::Italian,
] {
if let Ok(mnemonic) = Mnemonic::from_phrase(&sanitized, *language) {
return Ok(mnemonic);
}
}
Err("Can't get mnemonic from seed phrases")
};
let mnemonic = parse_language_fn()?;
let passphrase = prompt_passphrase(&passphrase_prompt)?; let passphrase = prompt_passphrase(&passphrase_prompt)?;
let seed = Seed::new(&mnemonic, &passphrase); let seed = Seed::new(&mnemonic, &passphrase);
keypair_from_seed(seed.as_bytes())? keypair_from_seed(seed.as_bytes())?
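The block above makes seed-phrase recovery wordlist-agnostic by trying each supported BIP-39 language in turn. A minimal standalone sketch of the same fallback, assuming the tiny-bip39 0.7 crate (imported as bip39) that this changeset already depends on; this is an illustration, not the CLI function itself:

use bip39::{Language, Mnemonic};

fn mnemonic_from_any_language(phrase: &str) -> Result<Mnemonic, &'static str> {
    // Try each supported wordlist and keep the first one that parses.
    for language in &[
        Language::English,
        Language::ChineseSimplified,
        Language::ChineseTraditional,
        Language::Japanese,
        Language::Spanish,
        Language::Korean,
        Language::French,
        Language::Italian,
    ] {
        if let Ok(mnemonic) = Mnemonic::from_phrase(phrase, *language) {
            return Ok(mnemonic);
        }
    }
    Err("phrase does not match any supported BIP-39 wordlist")
}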


@ -3,13 +3,13 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018" edition = "2018"
name = "solana-cli-config" name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "1.4.0" version = "1.4.7"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
[dependencies] [dependencies]
dirs = "2.0.2" dirs-next = "2.0.0"
lazy_static = "1.4.0" lazy_static = "1.4.0"
serde = "1.0.112" serde = "1.0.112"
serde_derive = "1.0.103" serde_derive = "1.0.103"


@ -5,7 +5,7 @@ use url::Url;
lazy_static! { lazy_static! {
pub static ref CONFIG_FILE: Option<String> = { pub static ref CONFIG_FILE: Option<String> = {
dirs::home_dir().map(|mut path| { dirs_next::home_dir().map(|mut path| {
path.extend(&[".config", "solana", "cli", "config.yml"]); path.extend(&[".config", "solana", "cli", "config.yml"]);
path.to_str().unwrap().to_string() path.to_str().unwrap().to_string()
}) })
@ -25,7 +25,7 @@ pub struct Config {
impl Default for Config { impl Default for Config {
fn default() -> Self { fn default() -> Self {
let keypair_path = { let keypair_path = {
let mut keypair_path = dirs::home_dir().expect("home directory"); let mut keypair_path = dirs_next::home_dir().expect("home directory");
keypair_path.extend(&[".config", "solana", "id.json"]); keypair_path.extend(&[".config", "solana", "id.json"]);
keypair_path.to_str().unwrap().to_string() keypair_path.to_str().unwrap().to_string()
}; };


@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018" edition = "2018"
name = "solana-cli-output" name = "solana-cli-output"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "1.4.0" version = "1.4.7"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -17,13 +17,13 @@ indicatif = "0.15.0"
serde = "1.0.112" serde = "1.0.112"
serde_derive = "1.0.103" serde_derive = "1.0.103"
serde_json = "1.0.56" serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.4.0" } solana-account-decoder = { path = "../account-decoder", version = "1.4.7" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" } solana-clap-utils = { path = "../clap-utils", version = "1.4.7" }
solana-client = { path = "../client", version = "1.4.0" } solana-client = { path = "../client", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
solana-stake-program = { path = "../programs/stake", version = "1.4.0" } solana-stake-program = { path = "../programs/stake", version = "1.4.7" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.0" } solana-transaction-status = { path = "../transaction-status", version = "1.4.7" }
solana-vote-program = { path = "../programs/vote", version = "1.4.0" } solana-vote-program = { path = "../programs/vote", version = "1.4.7" }
[package.metadata.docs.rs] [package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"] targets = ["x86_64-unknown-linux-gnu"]


@ -132,7 +132,7 @@ impl fmt::Display for CliBlockProduction {
"{}", "{}",
style(format!( style(format!(
" {:<44} {:>15} {:>15} {:>15} {:>23}", " {:<44} {:>15} {:>15} {:>15} {:>23}",
"Identity Pubkey", "Identity",
"Leader Slots", "Leader Slots",
"Blocks Produced", "Blocks Produced",
"Skipped Slots", "Skipped Slots",
@ -301,7 +301,7 @@ pub struct CliValidatorsStakeByVersion {
pub struct CliValidators { pub struct CliValidators {
pub total_active_stake: u64, pub total_active_stake: u64,
pub total_current_stake: u64, pub total_current_stake: u64,
pub total_deliquent_stake: u64, pub total_delinquent_stake: u64,
pub current_validators: Vec<CliValidator>, pub current_validators: Vec<CliValidator>,
pub delinquent_validators: Vec<CliValidator>, pub delinquent_validators: Vec<CliValidator>,
pub stake_by_version: BTreeMap<String, CliValidatorsStakeByVersion>, pub stake_by_version: BTreeMap<String, CliValidatorsStakeByVersion>,
@ -360,7 +360,7 @@ impl fmt::Display for CliValidators {
"Active Stake:", "Active Stake:",
&build_balance_message(self.total_active_stake, self.use_lamports_unit, true), &build_balance_message(self.total_active_stake, self.use_lamports_unit, true),
)?; )?;
if self.total_deliquent_stake > 0 { if self.total_delinquent_stake > 0 {
writeln_name_value( writeln_name_value(
f, f,
"Current Stake:", "Current Stake:",
@ -376,11 +376,11 @@ impl fmt::Display for CliValidators {
&format!( &format!(
"{} ({:0.2}%)", "{} ({:0.2}%)",
&build_balance_message( &build_balance_message(
self.total_deliquent_stake, self.total_delinquent_stake,
self.use_lamports_unit, self.use_lamports_unit,
true true
), ),
100. * self.total_deliquent_stake as f64 / self.total_active_stake as f64 100. * self.total_delinquent_stake as f64 / self.total_active_stake as f64
), ),
)?; )?;
} }
@ -412,8 +412,8 @@ impl fmt::Display for CliValidators {
"{}", "{}",
style(format!( style(format!(
" {:<44} {:<38} {} {} {} {:>10} {:^8} {}", " {:<44} {:<38} {} {} {} {:>10} {:^8} {}",
"Identity Pubkey", "Identity",
"Vote Account Pubkey", "Vote Account",
"Commission", "Commission",
"Last Vote", "Last Vote",
"Root Block", "Root Block",
@ -520,7 +520,7 @@ impl fmt::Display for CliNonceAccount {
) )
)?; )?;
let nonce = self.nonce.as_deref().unwrap_or("uninitialized"); let nonce = self.nonce.as_deref().unwrap_or("uninitialized");
writeln!(f, "Nonce: {}", nonce)?; writeln!(f, "Nonce blockhash: {}", nonce)?;
if let Some(fees) = self.lamports_per_signature { if let Some(fees) = self.lamports_per_signature {
writeln!(f, "Fee: {} lamports per signature", fees)?; writeln!(f, "Fee: {} lamports per signature", fees)?;
} else { } else {
@ -661,13 +661,8 @@ impl fmt::Display for CliStakeState {
if lockup.unix_timestamp != UnixTimestamp::default() { if lockup.unix_timestamp != UnixTimestamp::default() {
writeln!( writeln!(
f, f,
"Lockup Timestamp: {} (UnixTimestamp: {})", "Lockup Timestamp: {}",
DateTime::<Utc>::from_utc( unix_timestamp_to_string(lockup.unix_timestamp)
NaiveDateTime::from_timestamp(lockup.unix_timestamp, 0),
Utc
)
.to_rfc3339_opts(SecondsFormat::Secs, true),
lockup.unix_timestamp
)?; )?;
} }
if lockup.epoch != Epoch::default() { if lockup.epoch != Epoch::default() {
@ -952,8 +947,8 @@ impl VerboseDisplay for CliValidatorInfo {}
impl fmt::Display for CliValidatorInfo { impl fmt::Display for CliValidatorInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Validator Identity Pubkey:", &self.identity_pubkey)?; writeln_name_value(f, "Validator Identity:", &self.identity_pubkey)?;
writeln_name_value(f, " Info Pubkey:", &self.info_pubkey)?; writeln_name_value(f, " Info Address:", &self.info_pubkey)?;
for (key, value) in self.info.iter() { for (key, value) in self.info.iter() {
writeln_name_value( writeln_name_value(
f, f,
@ -1008,7 +1003,12 @@ impl fmt::Display for CliVoteAccount {
None => "~".to_string(), None => "~".to_string(),
} }
)?; )?;
writeln!(f, "Recent Timestamp: {:?}", self.recent_timestamp)?; writeln!(
f,
"Recent Timestamp: {} from slot {}",
unix_timestamp_to_string(self.recent_timestamp.timestamp),
self.recent_timestamp.slot
)?;
if !self.votes.is_empty() { if !self.votes.is_empty() {
writeln!(f, "Recent Votes:")?; writeln!(f, "Recent Votes:")?;
for vote in &self.votes { for vote in &self.votes {
@ -1093,19 +1093,22 @@ pub struct CliBlockTime {
impl QuietDisplay for CliBlockTime {} impl QuietDisplay for CliBlockTime {}
impl VerboseDisplay for CliBlockTime {} impl VerboseDisplay for CliBlockTime {}
fn unix_timestamp_to_string(unix_timestamp: UnixTimestamp) -> String {
format!(
"{} (UnixTimestamp: {})",
match NaiveDateTime::from_timestamp_opt(unix_timestamp, 0) {
Some(ndt) =>
DateTime::<Utc>::from_utc(ndt, Utc).to_rfc3339_opts(SecondsFormat::Secs, true),
None => "unknown".to_string(),
},
unix_timestamp,
)
}
impl fmt::Display for CliBlockTime { impl fmt::Display for CliBlockTime {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln_name_value(f, "Block:", &self.slot.to_string())?; writeln_name_value(f, "Block:", &self.slot.to_string())?;
writeln_name_value( writeln_name_value(f, "Date:", &unix_timestamp_to_string(self.timestamp))
f,
"Date:",
&format!(
"{} (UnixTimestamp: {})",
DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(self.timestamp, 0), Utc)
.to_rfc3339_opts(SecondsFormat::Secs, true),
self.timestamp
),
)
} }
} }
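The change above replaces the inline chrono formatting with a shared unix_timestamp_to_string helper that also tolerates out-of-range values. A self-contained copy with an illustrative call, assuming chrono 0.4 and treating UnixTimestamp as the i64 alias it is in solana-sdk:

use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};

type UnixTimestamp = i64;

fn unix_timestamp_to_string(unix_timestamp: UnixTimestamp) -> String {
    format!(
        "{} (UnixTimestamp: {})",
        match NaiveDateTime::from_timestamp_opt(unix_timestamp, 0) {
            Some(ndt) => DateTime::<Utc>::from_utc(ndt, Utc)
                .to_rfc3339_opts(SecondsFormat::Secs, true),
            None => "unknown".to_string(), // timestamp outside chrono's representable range
        },
        unix_timestamp,
    )
}

// unix_timestamp_to_string(1_600_000_000)
//   => "2020-09-13T12:26:40Z (UnixTimestamp: 1600000000)"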


@ -197,6 +197,15 @@ pub fn write_transaction<W: io::Write>(
)?; )?;
} }
} }
if let Some(log_messages) = &transaction_status.log_messages {
if !log_messages.is_empty() {
writeln!(w, "{}Log Messages:", prefix,)?;
for log_message in log_messages {
writeln!(w, "{} {}", prefix, log_message,)?;
}
}
}
} else { } else {
writeln!(w, "{}Status: Unavailable", prefix)?; writeln!(w, "{}Status: Unavailable", prefix)?;
} }
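For context, a rough sketch of the rendering the new block produces, assuming log_messages is the Option<Vec<String>> carried by the transaction status (hypothetical helper name, not the function above):

use std::io::{self, Write};

fn write_log_messages<W: Write>(
    w: &mut W,
    prefix: &str,
    log_messages: &Option<Vec<String>>,
) -> io::Result<()> {
    if let Some(log_messages) = log_messages {
        if !log_messages.is_empty() {
            writeln!(w, "{}Log Messages:", prefix)?;
            for log_message in log_messages {
                // Indent each program log line under the section header.
                writeln!(w, "{}  {}", prefix, log_message)?;
            }
        }
    }
    Ok(())
}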


@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018" edition = "2018"
name = "solana-cli" name = "solana-cli"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "1.4.0" version = "1.4.7"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -16,39 +16,41 @@ clap = "2.33.1"
criterion-stats = "0.3.0" criterion-stats = "0.3.0"
ctrlc = { version = "3.1.5", features = ["termination"] } ctrlc = { version = "3.1.5", features = ["termination"] }
console = "0.11.3" console = "0.11.3"
dirs = "2.0.2" dirs-next = "2.0.0"
log = "0.4.8" log = "0.4.8"
Inflector = "0.11.4" Inflector = "0.11.4"
indicatif = "0.15.0" indicatif = "0.15.0"
humantime = "2.0.1" humantime = "2.0.1"
num-traits = "0.2" num-traits = "0.2"
pretty-hex = "0.1.1" pretty-hex = "0.1.1"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] } reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde = "1.0.112" serde = "1.0.112"
serde_derive = "1.0.103" serde_derive = "1.0.103"
serde_json = "1.0.56" serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.4.0" } solana-account-decoder = { path = "../account-decoder", version = "1.4.7" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" } solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.4.7" }
solana-cli-config = { path = "../cli-config", version = "1.4.0" } solana-clap-utils = { path = "../clap-utils", version = "1.4.7" }
solana-cli-output = { path = "../cli-output", version = "1.4.0" } solana-cli-config = { path = "../cli-config", version = "1.4.7" }
solana-client = { path = "../client", version = "1.4.0" } solana-cli-output = { path = "../cli-output", version = "1.4.7" }
solana-config-program = { path = "../programs/config", version = "1.4.0" } solana-client = { path = "../client", version = "1.4.7" }
solana-faucet = { path = "../faucet", version = "1.4.0" } solana-config-program = { path = "../programs/config", version = "1.4.7" }
solana-logger = { path = "../logger", version = "1.4.0" } solana-faucet = { path = "../faucet", version = "1.4.7" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" } solana-logger = { path = "../logger", version = "1.4.7" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.0" } solana-net-utils = { path = "../net-utils", version = "1.4.7" }
solana-runtime = { path = "../runtime", version = "1.4.0" } solana_rbpf = "=0.1.32"
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-remote-wallet = { path = "../remote-wallet", version = "1.4.7" }
solana-stake-program = { path = "../programs/stake", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.0" } solana-stake-program = { path = "../programs/stake", version = "1.4.7" }
solana-version = { path = "../version", version = "1.4.0" } solana-transaction-status = { path = "../transaction-status", version = "1.4.7" }
solana-vote-program = { path = "../programs/vote", version = "1.4.0" } solana-version = { path = "../version", version = "1.4.7" }
solana-vote-signer = { path = "../vote-signer", version = "1.4.0" } solana-vote-program = { path = "../programs/vote", version = "1.4.7" }
solana-vote-signer = { path = "../vote-signer", version = "1.4.7" }
thiserror = "1.0.20" thiserror = "1.0.20"
tiny-bip39 = "0.7.0"
url = "2.1.1" url = "2.1.1"
[dev-dependencies] [dev-dependencies]
solana-core = { path = "../core", version = "1.4.0" } solana-core = { path = "../core", version = "1.4.7" }
tempfile = "3.1.0" tempfile = "3.1.0"
[[bin]] [[bin]]


@ -54,12 +54,42 @@ pub fn check_account_for_multiple_fees_with_commitment(
fee_calculator: &FeeCalculator, fee_calculator: &FeeCalculator,
messages: &[&Message], messages: &[&Message],
commitment: CommitmentConfig, commitment: CommitmentConfig,
) -> Result<(), CliError> {
check_account_for_spend_multiple_fees_with_commitment(
rpc_client,
account_pubkey,
0,
fee_calculator,
messages,
commitment,
)
}
pub fn check_account_for_spend_multiple_fees_with_commitment(
rpc_client: &RpcClient,
account_pubkey: &Pubkey,
balance: u64,
fee_calculator: &FeeCalculator,
messages: &[&Message],
commitment: CommitmentConfig,
) -> Result<(), CliError> { ) -> Result<(), CliError> {
let fee = calculate_fee(fee_calculator, messages); let fee = calculate_fee(fee_calculator, messages);
if !check_account_for_balance_with_commitment(rpc_client, account_pubkey, fee, commitment) if !check_account_for_balance_with_commitment(
.map_err(Into::<ClientError>::into)? rpc_client,
account_pubkey,
balance + fee,
commitment,
)
.map_err(Into::<ClientError>::into)?
{ {
return Err(CliError::InsufficientFundsForFee(lamports_to_sol(fee))); if balance > 0 {
return Err(CliError::InsufficientFundsForSpendAndFee(
lamports_to_sol(balance),
lamports_to_sol(fee),
));
} else {
return Err(CliError::InsufficientFundsForFee(lamports_to_sol(fee)));
}
} }
Ok(()) Ok(())
} }
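The new entry point folds an intended spend amount into the existing fee check, so callers like deploy can verify balance >= spend + fee and report which part is missing. A condensed sketch of that decision with the RPC balance lookup and fee calculation replaced by plain integers (hypothetical helper, not the CLI function):

fn check_spend_and_fee(current_balance: u64, spend: u64, fee: u64) -> Result<(), String> {
    // Enough lamports for both the intended spend and the transaction fees?
    if current_balance >= spend + fee {
        return Ok(());
    }
    if spend > 0 {
        Err(format!(
            "insufficient funds: need {} lamports to spend plus {} for fees",
            spend, fee
        ))
    } else {
        Err(format!("insufficient funds: need {} lamports for fees", fee))
    }
}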
@ -131,7 +161,7 @@ mod tests {
context: RpcResponseContext { slot: 1 }, context: RpcResponseContext { slot: 1 },
value: json!(account_balance), value: json!(account_balance),
}); });
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let fee_calculator = FeeCalculator::new(1); let fee_calculator = FeeCalculator::new(1);
let pubkey0 = Pubkey::new(&[0; 32]); let pubkey0 = Pubkey::new(&[0; 32]);
@ -191,7 +221,7 @@ mod tests {
context: RpcResponseContext { slot: 1 }, context: RpcResponseContext { slot: 1 },
value: json!(account_balance), value: json!(account_balance),
}); });
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let mut mocks = HashMap::new(); let mut mocks = HashMap::new();
mocks.insert(RpcRequest::GetBalance, account_balance_response); mocks.insert(RpcRequest::GetBalance, account_balance_response);
@ -237,9 +267,9 @@ mod tests {
#[test] #[test]
fn test_check_unique_pubkeys() { fn test_check_unique_pubkeys() {
let pubkey0 = Pubkey::new_rand(); let pubkey0 = solana_sdk::pubkey::new_rand();
let pubkey_clone = pubkey0; let pubkey_clone = pubkey0;
let pubkey1 = Pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand();
check_unique_pubkeys((&pubkey0, "foo".to_string()), (&pubkey1, "bar".to_string())) check_unique_pubkeys((&pubkey0, "foo".to_string()), (&pubkey1, "bar".to_string()))
.expect("unexpected result"); .expect("unexpected result");


@ -1,12 +1,15 @@
use crate::{ use crate::{
checks::*, cluster_query::*, feature::*, inflation::*, nonce::*, spend_utils::*, stake::*, checks::*, cluster_query::*, feature::*, inflation::*, nonce::*, send_tpu::*, spend_utils::*,
validator_info::*, vote::*, stake::*, validator_info::*, vote::*,
}; };
use bincode::serialize;
use bip39::{Language, Mnemonic, MnemonicType, Seed};
use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand}; use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
use log::*; use log::*;
use num_traits::FromPrimitive; use num_traits::FromPrimitive;
use serde_json::{self, json, Value}; use serde_json::{self, json, Value};
use solana_account_decoder::{UiAccount, UiAccountEncoding}; use solana_account_decoder::{UiAccount, UiAccountEncoding};
use solana_bpf_loader_program::bpf_verifier;
use solana_clap_utils::{ use solana_clap_utils::{
self, self,
commitment::commitment_arg_with_default, commitment::commitment_arg_with_default,
@ -30,24 +33,26 @@ use solana_client::{
rpc_client::RpcClient, rpc_client::RpcClient,
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig}, rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
rpc_response::RpcKeyedAccount, rpc_response::{RpcKeyedAccount, RpcLeaderSchedule},
}; };
#[cfg(not(test))] #[cfg(not(test))]
use solana_faucet::faucet::request_airdrop_transaction; use solana_faucet::faucet::request_airdrop_transaction;
#[cfg(test)] #[cfg(test)]
use solana_faucet::faucet_mock::request_airdrop_transaction; use solana_faucet::faucet_mock::request_airdrop_transaction;
use solana_rbpf::vm::EbpfVm;
use solana_remote_wallet::remote_wallet::RemoteWalletManager; use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{ use solana_sdk::{
bpf_loader, bpf_loader_deprecated, bpf_loader, bpf_loader_deprecated,
clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND}, clock::{Epoch, Slot},
commitment_config::CommitmentConfig, commitment_config::CommitmentConfig,
decode_error::DecodeError, decode_error::DecodeError,
hash::Hash, hash::Hash,
instruction::{Instruction, InstructionError}, instruction::{Instruction, InstructionError},
loader_instruction, loader_instruction,
message::Message, message::Message,
native_token::Sol,
pubkey::{Pubkey, MAX_SEED_LEN}, pubkey::{Pubkey, MAX_SEED_LEN},
signature::{Keypair, Signature, Signer, SignerError}, signature::{keypair_from_seed, Keypair, Signature, Signer, SignerError},
signers::Signers, signers::Signers,
system_instruction::{self, SystemError}, system_instruction::{self, SystemError},
system_program, system_program,
@ -60,12 +65,13 @@ use solana_stake_program::{
use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding}; use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding};
use solana_vote_program::vote_state::VoteAuthorize; use solana_vote_program::vote_state::VoteAuthorize;
use std::{ use std::{
cmp::min,
collections::HashMap, collections::HashMap,
error, error,
fmt::Write as FmtWrite, fmt::Write as FmtWrite,
fs::File, fs::File,
io::{Read, Write}, io::{Read, Write},
net::{IpAddr, SocketAddr}, net::{IpAddr, SocketAddr, UdpSocket},
str::FromStr, str::FromStr,
sync::Arc, sync::Arc,
thread::sleep, thread::sleep,
@ -98,7 +104,7 @@ pub enum CliCommand {
Fees, Fees,
FirstAvailableBlock, FirstAvailableBlock,
GetBlock { GetBlock {
slot: Slot, slot: Option<Slot>,
}, },
GetBlockTime { GetBlockTime {
slot: Option<Slot>, slot: Option<Slot>,
@ -175,6 +181,7 @@ pub enum CliCommand {
program_location: String, program_location: String,
address: Option<SignerIndex>, address: Option<SignerIndex>,
use_deprecated_loader: bool, use_deprecated_loader: bool,
allow_excessive_balance: bool,
}, },
// Stake Commands // Stake Commands
CreateStakeAccount { CreateStakeAccount {
@ -606,13 +613,13 @@ pub fn parse_command(
signers.push(signer); signers.push(signer);
1 1
}); });
let use_deprecated_loader = matches.is_present("use_deprecated_loader");
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::Deploy { command: CliCommand::Deploy {
program_location: matches.value_of("program_location").unwrap().to_string(), program_location: matches.value_of("program_location").unwrap().to_string(),
address, address,
use_deprecated_loader, use_deprecated_loader: matches.is_present("use_deprecated_loader"),
allow_excessive_balance: matches.is_present("allow_excessive_balance"),
}, },
signers, signers,
}) })
@ -1026,33 +1033,50 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
) -> Result<(), Box<dyn error::Error>> { ) -> Result<(), Box<dyn error::Error>> {
let progress_bar = new_spinner_progress_bar(); let progress_bar = new_spinner_progress_bar();
let mut send_retries = 5; let mut send_retries = 5;
let mut leader_schedule: Option<RpcLeaderSchedule> = None;
let mut leader_schedule_epoch = 0;
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let cluster_nodes = rpc_client.get_cluster_nodes().ok();
loop { loop {
let mut status_retries = 15; let mut status_retries = 15;
progress_bar.set_message("Finding leader node...");
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment)?;
if epoch_info.epoch > leader_schedule_epoch || leader_schedule.is_none() {
leader_schedule = rpc_client
.get_leader_schedule_with_commitment(Some(epoch_info.absolute_slot), commitment)?;
leader_schedule_epoch = epoch_info.epoch;
}
let tpu_address = get_leader_tpu(
min(epoch_info.slot_index + 1, epoch_info.slots_in_epoch),
leader_schedule.as_ref(),
cluster_nodes.as_ref(),
);
// Send all transactions // Send all transactions
let mut pending_transactions = HashMap::new(); let mut pending_transactions = HashMap::new();
let num_transactions = transactions.len(); let num_transactions = transactions.len();
for transaction in transactions { for transaction in transactions {
if cfg!(not(test)) { if let Some(tpu_address) = tpu_address {
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors let wire_transaction =
// when all the write transactions modify the same program account (eg, deploying a serialize(&transaction).expect("serialization should succeed");
// new program) send_transaction_tpu(&send_socket, &tpu_address, &wire_transaction);
sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND)); } else {
let _result = rpc_client
.send_transaction_with_config(
&transaction,
RpcSendTransactionConfig {
preflight_commitment: Some(commitment.commitment),
..RpcSendTransactionConfig::default()
},
)
.ok();
} }
let _result = rpc_client
.send_transaction_with_config(
&transaction,
RpcSendTransactionConfig {
preflight_commitment: Some(commitment.commitment),
..RpcSendTransactionConfig::default()
},
)
.ok();
pending_transactions.insert(transaction.signatures[0], transaction); pending_transactions.insert(transaction.signatures[0], transaction);
progress_bar.set_message(&format!( progress_bar.set_message(&format!(
"[{}/{}] Transactions sent", "[{}/{}] Total Transactions sent",
pending_transactions.len(), pending_transactions.len(),
num_transactions num_transactions
)); ));
@ -1088,6 +1112,11 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
let _ = pending_transactions.remove(&signature); let _ = pending_transactions.remove(&signature);
} }
} }
progress_bar.set_message(&format!(
"[{}/{}] Transactions confirmed",
num_transactions - pending_transactions.len(),
num_transactions
));
} }
if pending_transactions.is_empty() { if pending_transactions.is_empty() {
@ -1129,8 +1158,52 @@ fn process_deploy(
program_location: &str, program_location: &str,
address: Option<SignerIndex>, address: Option<SignerIndex>,
use_deprecated_loader: bool, use_deprecated_loader: bool,
allow_excessive_balance: bool,
) -> ProcessResult {
const WORDS: usize = 12;
// Create ephemeral keypair to use for program address, if not provided
let mnemonic = Mnemonic::new(MnemonicType::for_word_count(WORDS)?, Language::English);
let seed = Seed::new(&mnemonic, "");
let new_keypair = keypair_from_seed(seed.as_bytes())?;
let result = do_process_deploy(
rpc_client,
config,
program_location,
address,
use_deprecated_loader,
allow_excessive_balance,
new_keypair,
);
if result.is_err() && address.is_none() {
let phrase: &str = mnemonic.phrase();
let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap();
eprintln!(
"{}\nTo reuse this address, recover the ephemeral keypair file with",
divider
);
eprintln!(
"`solana-keygen recover` and the following {}-word seed phrase,",
WORDS
);
eprintln!(
"then pass it as the [PROGRAM_ADDRESS_SIGNER] argument to `solana deploy ...`\n{}\n{}\n{}",
divider, phrase, divider
);
}
result
}
fn do_process_deploy(
rpc_client: &RpcClient,
config: &CliConfig,
program_location: &str,
address: Option<SignerIndex>,
use_deprecated_loader: bool,
allow_excessive_balance: bool,
new_keypair: Keypair,
) -> ProcessResult { ) -> ProcessResult {
let new_keypair = Keypair::new(); // Create ephemeral keypair to use for program address, if not provided
let program_id = if let Some(i) = address { let program_id = if let Some(i) = address {
config.signers[i] config.signers[i]
} else { } else {
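process_deploy now derives the ephemeral program keypair from a fresh 12-word mnemonic so that a failed deploy can be resumed with `solana-keygen recover`. A minimal sketch of that derivation, assuming tiny-bip39 0.7 (imported as bip39) and solana-sdk's keypair_from_seed as used above:

use bip39::{Language, Mnemonic, MnemonicType, Seed};
use solana_sdk::signature::{keypair_from_seed, Keypair};

fn ephemeral_program_keypair() -> Result<(Keypair, String), Box<dyn std::error::Error>> {
    // 12 words gives a throwaway program address with a recovery phrase
    // short enough to print if the deploy fails partway.
    let mnemonic = Mnemonic::new(MnemonicType::for_word_count(12)?, Language::English);
    let seed = Seed::new(&mnemonic, "");
    let keypair = keypair_from_seed(seed.as_bytes())?;
    Ok((keypair, mnemonic.phrase().to_string()))
}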
@ -1144,6 +1217,9 @@ fn process_deploy(
CliError::DynamicProgramError(format!("Unable to read program file: {}", err)) CliError::DynamicProgramError(format!("Unable to read program file: {}", err))
})?; })?;
EbpfVm::create_executable_from_elf(&program_data, Some(|x| bpf_verifier::check(x, true)))
.map_err(|err| CliError::DynamicProgramError(format!("ELF error: {}", err)))?;
let loader_id = if use_deprecated_loader { let loader_id = if use_deprecated_loader {
bpf_loader_deprecated::id() bpf_loader_deprecated::id()
} else { } else {
@ -1154,11 +1230,12 @@ fn process_deploy(
let signers = [config.signers[0], program_id]; let signers = [config.signers[0], program_id];
// Check program account to see if partial initialization has occurred // Check program account to see if partial initialization has occurred
let initial_instructions = if let Some(account) = rpc_client let (initial_instructions, balance_needed) = if let Some(account) = rpc_client
.get_account_with_commitment(&program_id.pubkey(), config.commitment)? .get_account_with_commitment(&program_id.pubkey(), config.commitment)?
.value .value
{ {
let mut instructions: Vec<Instruction> = vec![]; let mut instructions: Vec<Instruction> = vec![];
let mut balance_needed = 0;
if account.executable { if account.executable {
return Err(CliError::DynamicProgramError( return Err(CliError::DynamicProgramError(
"Program account is already executable".to_string(), "Program account is already executable".to_string(),
@ -1182,21 +1259,35 @@ fn process_deploy(
} }
} }
if account.lamports < minimum_balance { if account.lamports < minimum_balance {
let balance = minimum_balance - account.lamports;
instructions.push(system_instruction::transfer( instructions.push(system_instruction::transfer(
&config.signers[0].pubkey(), &config.signers[0].pubkey(),
&program_id.pubkey(), &program_id.pubkey(),
minimum_balance - account.lamports, balance,
)); ));
balance_needed = balance;
} else if account.lamports > minimum_balance
&& system_program::check_id(&account.owner)
&& !allow_excessive_balance
{
return Err(CliError::DynamicProgramError(format!(
"Program account has a balance: {:?}; it may already be in use",
Sol(account.lamports)
))
.into());
} }
instructions (instructions, balance_needed)
} else { } else {
vec![system_instruction::create_account( (
&config.signers[0].pubkey(), vec![system_instruction::create_account(
&program_id.pubkey(), &config.signers[0].pubkey(),
&program_id.pubkey(),
minimum_balance,
program_data.len() as u64,
&loader_id,
)],
minimum_balance, minimum_balance,
program_data.len() as u64, )
&loader_id,
)]
}; };
let initial_message = if !initial_instructions.is_empty() { let initial_message = if !initial_instructions.is_empty() {
Some(Message::new( Some(Message::new(
@ -1239,9 +1330,10 @@ fn process_deploy(
.get_recent_blockhash_with_commitment(config.commitment)? .get_recent_blockhash_with_commitment(config.commitment)?
.value; .value;
check_account_for_multiple_fees_with_commitment( check_account_for_spend_multiple_fees_with_commitment(
rpc_client, rpc_client,
&config.signers[0].pubkey(), &config.signers[0].pubkey(),
balance_needed,
&fee_calculator, &fee_calculator,
&messages, &messages,
config.commitment, config.commitment,
@ -1266,8 +1358,8 @@ fn process_deploy(
config.commitment, config.commitment,
config.send_transaction_config, config.send_transaction_config,
); );
log_instruction_custom_error::<SystemError>(result, &config).map_err(|_| { log_instruction_custom_error::<SystemError>(result, &config).map_err(|err| {
CliError::DynamicProgramError("Program account allocation failed".to_string()) CliError::DynamicProgramError(format!("Program account allocation failed: {}", err))
})?; })?;
} }
@ -1290,8 +1382,8 @@ fn process_deploy(
config.commitment, config.commitment,
last_valid_slot, last_valid_slot,
) )
.map_err(|_| { .map_err(|err| {
CliError::DynamicProgramError("Data writes to program account failed".to_string()) CliError::DynamicProgramError(format!("Data writes to program account failed: {}", err))
})?; })?;
let (blockhash, _, _) = rpc_client let (blockhash, _, _) = rpc_client
@ -1565,12 +1657,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
program_location, program_location,
address, address,
use_deprecated_loader, use_deprecated_loader,
allow_excessive_balance,
} => process_deploy( } => process_deploy(
&rpc_client, &rpc_client,
config, config,
program_location, program_location,
*address, *address,
*use_deprecated_loader, *use_deprecated_loader,
*allow_excessive_balance,
), ),
// Stake Commands // Stake Commands
@ -2187,7 +2281,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.arg( .arg(
Arg::with_name("address_signer") Arg::with_name("address_signer")
.index(2) .index(2)
.value_name("SIGNER_KEYPAIR") .value_name("PROGRAM_ADDRESS_SIGNER")
.takes_value(true) .takes_value(true)
.validator(is_valid_signer) .validator(is_valid_signer)
.help("The signer for the desired address of the program [default: new random address]") .help("The signer for the desired address of the program [default: new random address]")
@ -2199,6 +2293,12 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.hidden(true) // Don't document this argument to discourage its use .hidden(true) // Don't document this argument to discourage its use
.help("Use the deprecated BPF loader") .help("Use the deprecated BPF loader")
) )
.arg(
Arg::with_name("allow_excessive_balance")
.long("allow-excessive-deploy-account-balance")
.takes_value(false)
.help("Use the designated program id, even if the account already holds a large balance of SOL")
)
.arg(commitment_arg_with_default("max")), .arg(commitment_arg_with_default("max")),
) )
.subcommand( .subcommand(
@ -2355,7 +2455,10 @@ mod tests {
.unwrap(); .unwrap();
assert_eq!(signer_info.signers.len(), 1); assert_eq!(signer_info.signers.len(), 1);
assert_eq!(signer_info.index_of(None), Some(0)); assert_eq!(signer_info.index_of(None), Some(0));
assert_eq!(signer_info.index_of(Some(Pubkey::new_rand())), None); assert_eq!(
signer_info.index_of(Some(solana_sdk::pubkey::new_rand())),
None
);
let keypair0 = keypair_from_seed(&[1u8; 32]).unwrap(); let keypair0 = keypair_from_seed(&[1u8; 32]).unwrap();
let keypair0_pubkey = keypair0.pubkey(); let keypair0_pubkey = keypair0.pubkey();
@ -2411,7 +2514,7 @@ mod tests {
fn test_cli_parse_command() { fn test_cli_parse_command() {
let test_commands = app("test", "desc", "version"); let test_commands = app("test", "desc", "version");
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let pubkey_string = format!("{}", pubkey); let pubkey_string = format!("{}", pubkey);
let default_keypair = Keypair::new(); let default_keypair = Keypair::new();
@ -2507,7 +2610,7 @@ mod tests {
assert!(parse_command(&test_bad_signature, &default_signer, &mut None).is_err()); assert!(parse_command(&test_bad_signature, &default_signer, &mut None).is_err());
// Test CreateAddressWithSeed // Test CreateAddressWithSeed
let from_pubkey = Some(Pubkey::new_rand()); let from_pubkey = Some(solana_sdk::pubkey::new_rand());
let from_str = from_pubkey.unwrap().to_string(); let from_str = from_pubkey.unwrap().to_string();
for (name, program_id) in &[ for (name, program_id) in &[
("STAKE", solana_stake_program::id()), ("STAKE", solana_stake_program::id()),
@ -2564,6 +2667,7 @@ mod tests {
program_location: "/Users/test/program.o".to_string(), program_location: "/Users/test/program.o".to_string(),
address: None, address: None,
use_deprecated_loader: false, use_deprecated_loader: false,
allow_excessive_balance: false,
}, },
signers: vec![read_keypair_file(&keypair_file).unwrap().into()], signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
} }
@ -2585,6 +2689,7 @@ mod tests {
program_location: "/Users/test/program.o".to_string(), program_location: "/Users/test/program.o".to_string(),
address: Some(1), address: Some(1),
use_deprecated_loader: false, use_deprecated_loader: false,
allow_excessive_balance: false,
}, },
signers: vec![ signers: vec![
read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&keypair_file).unwrap().into(),
@ -2664,7 +2769,7 @@ mod tests {
let result = process_command(&config); let result = process_command(&config);
assert!(result.is_ok()); assert!(result.is_ok());
let new_authorized_pubkey = Pubkey::new_rand(); let new_authorized_pubkey = solana_sdk::pubkey::new_rand();
config.signers = vec![&bob_keypair]; config.signers = vec![&bob_keypair];
config.command = CliCommand::VoteAuthorize { config.command = CliCommand::VoteAuthorize {
vote_account_pubkey: bob_pubkey, vote_account_pubkey: bob_pubkey,
@ -2686,7 +2791,7 @@ mod tests {
let bob_keypair = Keypair::new(); let bob_keypair = Keypair::new();
let bob_pubkey = bob_keypair.pubkey(); let bob_pubkey = bob_keypair.pubkey();
let custodian = Pubkey::new_rand(); let custodian = solana_sdk::pubkey::new_rand();
config.command = CliCommand::CreateStakeAccount { config.command = CliCommand::CreateStakeAccount {
stake_account: 1, stake_account: 1,
seed: None, seed: None,
@ -2709,8 +2814,8 @@ mod tests {
let result = process_command(&config); let result = process_command(&config);
assert!(result.is_ok()); assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand(); let stake_account_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = Pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand();
config.command = CliCommand::WithdrawStake { config.command = CliCommand::WithdrawStake {
stake_account_pubkey, stake_account_pubkey,
destination_account_pubkey: to_pubkey, destination_account_pubkey: to_pubkey,
@ -2727,7 +2832,7 @@ mod tests {
let result = process_command(&config); let result = process_command(&config);
assert!(result.is_ok()); assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand(); let stake_account_pubkey = solana_sdk::pubkey::new_rand();
config.command = CliCommand::DeactivateStake { config.command = CliCommand::DeactivateStake {
stake_account_pubkey, stake_account_pubkey,
stake_authority: 0, stake_authority: 0,
@ -2740,7 +2845,7 @@ mod tests {
let result = process_command(&config); let result = process_command(&config);
assert!(result.is_ok()); assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand(); let stake_account_pubkey = solana_sdk::pubkey::new_rand();
let split_stake_account = Keypair::new(); let split_stake_account = Keypair::new();
config.command = CliCommand::SplitStake { config.command = CliCommand::SplitStake {
stake_account_pubkey, stake_account_pubkey,
@ -2758,8 +2863,8 @@ mod tests {
let result = process_command(&config); let result = process_command(&config);
assert!(result.is_ok()); assert!(result.is_ok());
let stake_account_pubkey = Pubkey::new_rand(); let stake_account_pubkey = solana_sdk::pubkey::new_rand();
let source_stake_account_pubkey = Pubkey::new_rand(); let source_stake_account_pubkey = solana_sdk::pubkey::new_rand();
let merge_stake_account = Keypair::new(); let merge_stake_account = Keypair::new();
config.command = CliCommand::MergeStake { config.command = CliCommand::MergeStake {
stake_account_pubkey, stake_account_pubkey,
@ -2782,7 +2887,7 @@ mod tests {
assert_eq!(process_command(&config).unwrap(), "1234"); assert_eq!(process_command(&config).unwrap(), "1234");
// CreateAddressWithSeed // CreateAddressWithSeed
let from_pubkey = Pubkey::new_rand(); let from_pubkey = solana_sdk::pubkey::new_rand();
config.signers = vec![]; config.signers = vec![];
config.command = CliCommand::CreateAddressWithSeed { config.command = CliCommand::CreateAddressWithSeed {
from_pubkey: Some(from_pubkey), from_pubkey: Some(from_pubkey),
@ -2795,7 +2900,7 @@ mod tests {
assert_eq!(address.unwrap(), expected_address.to_string()); assert_eq!(address.unwrap(), expected_address.to_string());
// Need airdrop cases // Need airdrop cases
let to = Pubkey::new_rand(); let to = solana_sdk::pubkey::new_rand();
config.signers = vec![&keypair]; config.signers = vec![&keypair];
config.command = CliCommand::Airdrop { config.command = CliCommand::Airdrop {
faucet_host: None, faucet_host: None,
@ -2898,6 +3003,7 @@ mod tests {
program_location: pathbuf.to_str().unwrap().to_string(), program_location: pathbuf.to_str().unwrap().to_string(),
address: None, address: None,
use_deprecated_loader: false, use_deprecated_loader: false,
allow_excessive_balance: false,
}; };
let result = process_command(&config); let result = process_command(&config);
let json: Value = serde_json::from_str(&result.unwrap()).unwrap(); let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
@ -2916,6 +3022,7 @@ mod tests {
program_location: "bad/file/location.so".to_string(), program_location: "bad/file/location.so".to_string(),
address: None, address: None,
use_deprecated_loader: false, use_deprecated_loader: false,
allow_excessive_balance: false,
}; };
assert!(process_command(&config).is_err()); assert!(process_command(&config).is_err());
} }


@ -1,6 +1,7 @@
use crate::{ use crate::{
cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult}, cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount}, spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
stake::is_stake_program_v2_enabled,
}; };
use chrono::{Local, TimeZone}; use chrono::{Local, TimeZone};
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand}; use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
@ -27,6 +28,7 @@ use solana_client::{
}; };
use solana_remote_wallet::remote_wallet::RemoteWalletManager; use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{ use solana_sdk::{
account::from_account,
account_utils::StateMut, account_utils::StateMut,
clock::{self, Clock, Slot}, clock::{self, Clock, Slot},
commitment_config::CommitmentConfig, commitment_config::CommitmentConfig,
@ -38,8 +40,7 @@ use solana_sdk::{
system_instruction, system_program, system_instruction, system_program,
sysvar::{ sysvar::{
self, self,
stake_history::{self, StakeHistory}, stake_history::{self},
Sysvar,
}, },
transaction::Transaction, transaction::Transaction,
}; };
@ -73,8 +74,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.validator(is_slot) .validator(is_slot)
.value_name("SLOT") .value_name("SLOT")
.takes_value(true) .takes_value(true)
.index(1) .index(1),
.required(true),
), ),
) )
.subcommand( .subcommand(
@ -363,7 +363,7 @@ pub fn parse_cluster_ping(
} }
pub fn parse_get_block(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> { pub fn parse_get_block(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let slot = value_t_or_exit!(matches, "slot", Slot); let slot = value_of(matches, "slot");
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::GetBlock { slot }, command: CliCommand::GetBlock { slot },
signers: vec![], signers: vec![],
@ -625,7 +625,7 @@ pub fn process_cluster_date(rpc_client: &RpcClient, config: &CliConfig) -> Proce
let result = rpc_client let result = rpc_client
.get_account_with_commitment(&sysvar::clock::id(), CommitmentConfig::default())?; .get_account_with_commitment(&sysvar::clock::id(), CommitmentConfig::default())?;
if let Some(clock_account) = result.value { if let Some(clock_account) = result.value {
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| { let clock: Clock = from_account(&clock_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string()) CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?; })?;
let block_time = CliBlockTime { let block_time = CliBlockTime {
@ -700,7 +700,17 @@ pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
Ok("".to_string()) Ok("".to_string())
} }
pub fn process_get_block(rpc_client: &RpcClient, _config: &CliConfig, slot: Slot) -> ProcessResult { pub fn process_get_block(
rpc_client: &RpcClient,
_config: &CliConfig,
slot: Option<Slot>,
) -> ProcessResult {
let slot = if let Some(slot) = slot {
slot
} else {
rpc_client.get_slot()?
};
let mut block = let mut block =
rpc_client.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Base64)?; rpc_client.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Base64)?;
@ -716,18 +726,23 @@ pub fn process_get_block(rpc_client: &RpcClient, _config: &CliConfig, slot: Slot
let mut total_rewards = 0; let mut total_rewards = 0;
println!("Rewards:",); println!("Rewards:",);
println!( println!(
" {:<44} {:<15} {:<13} {:>14}", " {:<44} {:^15} {:<15} {:<20} {:>14}",
"Address", "Amount", "New Balance", "Percent Change" "Address", "Type", "Amount", "New Balance", "Percent Change"
); );
for reward in block.rewards { for reward in block.rewards {
let sign = if reward.lamports < 0 { "-" } else { "" }; let sign = if reward.lamports < 0 { "-" } else { "" };
total_rewards += reward.lamports; total_rewards += reward.lamports;
println!( println!(
" {:<44} {:>15} {}", " {:<44} {:^15} {:>15} {}",
reward.pubkey, reward.pubkey,
if let Some(reward_type) = reward.reward_type {
format!("{}", reward_type)
} else {
"-".to_string()
},
format!( format!(
"{}{:<14.4}", "{}{:<14.9}",
sign, sign,
lamports_to_sol(reward.lamports.abs() as u64) lamports_to_sol(reward.lamports.abs() as u64)
), ),
@ -735,7 +750,7 @@ pub fn process_get_block(rpc_client: &RpcClient, _config: &CliConfig, slot: Slot
" - -".to_string() " - -".to_string()
} else { } else {
format!( format!(
"{:<12.4} {:>13.4}%", "{:<19.9} {:>13.9}%",
lamports_to_sol(reward.post_balance), lamports_to_sol(reward.post_balance),
reward.lamports.abs() as f64 reward.lamports.abs() as f64
/ (reward.post_balance as f64 - reward.lamports as f64) / (reward.post_balance as f64 - reward.lamports as f64)
@ -746,7 +761,7 @@ pub fn process_get_block(rpc_client: &RpcClient, _config: &CliConfig, slot: Slot
let sign = if total_rewards < 0 { "-" } else { "" }; let sign = if total_rewards < 0 { "-" } else { "" };
println!( println!(
"Total Rewards: {}{:12.9}", "Total Rewards: {}{:<12.9}",
sign, sign,
lamports_to_sol(total_rewards.abs() as u64) lamports_to_sol(total_rewards.abs() as u64)
); );
@ -1253,14 +1268,16 @@ pub fn process_show_gossip(rpc_client: &RpcClient, config: &CliConfig) -> Proces
.into_iter() .into_iter()
.map(|node| { .map(|node| {
format!( format!(
"{:15} | {:44} | {:6} | {:5} | {:5} | {}", "{:15} | {:44} | {:6} | {:5} | {:21} | {}",
node.gossip node.gossip
.map(|addr| addr.ip().to_string()) .map(|addr| addr.ip().to_string())
.unwrap_or_else(|| "none".to_string()), .unwrap_or_else(|| "none".to_string()),
format_labeled_address(&node.pubkey, &config.address_labels), format_labeled_address(&node.pubkey, &config.address_labels),
format_port(node.gossip), format_port(node.gossip),
format_port(node.tpu), format_port(node.tpu),
format_port(node.rpc), node.rpc
.map(|addr| addr.to_string())
.unwrap_or_else(|| "none".to_string()),
node.version.unwrap_or_else(|| "unknown".to_string()), node.version.unwrap_or_else(|| "unknown".to_string()),
) )
}) })
@ -1268,9 +1285,9 @@ pub fn process_show_gossip(rpc_client: &RpcClient, config: &CliConfig) -> Proces
Ok(format!( Ok(format!(
"IP Address | Node identifier \ "IP Address | Node identifier \
| Gossip | TPU | RPC | Version\n\ | Gossip | TPU | RPC Address | Version\n\
----------------+----------------------------------------------+\ ----------------+----------------------------------------------+\
--------+-------+-------+----------------\n\ --------+-------+-----------------------+----------------\n\
{}\n\ {}\n\
Nodes: {}", Nodes: {}",
s.join("\n"), s.join("\n"),
@ -1325,14 +1342,16 @@ pub fn process_show_stakes(
.get_program_accounts_with_config(&solana_stake_program::id(), program_accounts_config)?; .get_program_accounts_with_config(&solana_stake_program::id(), program_accounts_config)?;
let stake_history_account = rpc_client.get_account(&stake_history::id())?; let stake_history_account = rpc_client.get_account(&stake_history::id())?;
let clock_account = rpc_client.get_account(&sysvar::clock::id())?; let clock_account = rpc_client.get_account(&sysvar::clock::id())?;
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| { let clock: Clock = from_account(&clock_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string()) CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?; })?;
progress_bar.finish_and_clear(); progress_bar.finish_and_clear();
let stake_history = StakeHistory::from_account(&stake_history_account).ok_or_else(|| { let stake_history = from_account(&stake_history_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize stake history".to_string()) CliError::RpcRequestError("Failed to deserialize stake history".to_string())
})?; })?;
// At v1.6, this check can be removed and simply passed as `true`
let stake_program_v2_enabled = is_stake_program_v2_enabled(rpc_client);
let mut stake_accounts: Vec<CliKeyedStakeState> = vec![]; let mut stake_accounts: Vec<CliKeyedStakeState> = vec![];
for (stake_pubkey, stake_account) in all_stake_accounts { for (stake_pubkey, stake_account) in all_stake_accounts {
@ -1348,6 +1367,7 @@ pub fn process_show_stakes(
use_lamports_unit, use_lamports_unit,
&stake_history, &stake_history,
&clock, &clock,
stake_program_v2_enabled,
), ),
}); });
} }
@ -1366,6 +1386,7 @@ pub fn process_show_stakes(
use_lamports_unit, use_lamports_unit,
&stake_history, &stake_history,
&clock, &clock,
stake_program_v2_enabled,
), ),
}); });
} }
@ -1405,12 +1426,12 @@ pub fn process_show_validators(
.map(|vote_account| vote_account.activated_stake) .map(|vote_account| vote_account.activated_stake)
.sum(); .sum();
let total_deliquent_stake = vote_accounts let total_delinquent_stake = vote_accounts
.delinquent .delinquent
.iter() .iter()
.map(|vote_account| vote_account.activated_stake) .map(|vote_account| vote_account.activated_stake)
.sum(); .sum();
let total_current_stake = total_active_stake - total_deliquent_stake; let total_current_stake = total_active_stake - total_delinquent_stake;
let mut current = vote_accounts.current; let mut current = vote_accounts.current;
current.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake)); current.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
@ -1464,7 +1485,7 @@ pub fn process_show_validators(
let cli_validators = CliValidators { let cli_validators = CliValidators {
total_active_stake, total_active_stake,
total_current_stake, total_current_stake,
total_deliquent_stake, total_delinquent_stake,
current_validators, current_validators,
delinquent_validators, delinquent_validators,
stake_by_version, stake_by_version,


@ -9,12 +9,13 @@ use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::*};
use solana_cli_output::{QuietDisplay, VerboseDisplay}; use solana_cli_output::{QuietDisplay, VerboseDisplay};
use solana_client::{client_error::ClientError, rpc_client::RpcClient}; use solana_client::{client_error::ClientError, rpc_client::RpcClient};
use solana_remote_wallet::remote_wallet::RemoteWalletManager; use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_runtime::{ use solana_sdk::{
clock::Slot,
feature::{self, Feature}, feature::{self, Feature},
feature_set::FEATURE_NAMES, feature_set::FEATURE_NAMES,
}; message::Message,
use solana_sdk::{ pubkey::Pubkey,
clock::Slot, message::Message, pubkey::Pubkey, system_instruction, transaction::Transaction, transaction::Transaction,
}; };
use std::{collections::HashMap, fmt, sync::Arc}; use std::{collections::HashMap, fmt, sync::Arc};
@ -230,7 +231,7 @@ fn active_stake_by_feature_set(rpc_client: &RpcClient) -> Result<HashMap<u32, u6
} }
// Feature activation is only allowed when 95% of the active stake is on the current feature set // Feature activation is only allowed when 95% of the active stake is on the current feature set
fn feature_activation_allowed(rpc_client: &RpcClient) -> Result<bool, ClientError> { fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<bool, ClientError> {
let my_feature_set = solana_version::Version::default().feature_set; let my_feature_set = solana_version::Version::default().feature_set;
let active_stake_by_feature_set = active_stake_by_feature_set(rpc_client)?; let active_stake_by_feature_set = active_stake_by_feature_set(rpc_client)?;
@ -240,8 +241,23 @@ fn feature_activation_allowed(rpc_client: &RpcClient) -> Result<bool, ClientErro
.map(|percentage| *percentage >= 95) .map(|percentage| *percentage >= 95)
.unwrap_or(false); .unwrap_or(false);
if !feature_activation_allowed { if !feature_activation_allowed && !quiet {
println!("\n{}", style("Stake By Feature Set:").bold()); if active_stake_by_feature_set.get(&my_feature_set).is_none() {
println!(
"{}",
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
.bold());
} else {
println!(
"{}",
style("To activate features the stake must be >= 95%").bold()
);
}
println!(
"{}",
style(format!("Tool Feature Set: {}", my_feature_set)).bold()
);
println!("{}", style("Cluster Feature Sets and Stakes:").bold());
for (feature_set, percentage) in active_stake_by_feature_set.iter() { for (feature_set, percentage) in active_stake_by_feature_set.iter() {
if *feature_set == 0 { if *feature_set == 0 {
println!("unknown - {}%", percentage); println!("unknown - {}%", percentage);
@ -258,6 +274,7 @@ fn feature_activation_allowed(rpc_client: &RpcClient) -> Result<bool, ClientErro
); );
} }
} }
println!();
} }
Ok(feature_activation_allowed) Ok(feature_activation_allowed)
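The gate itself is unchanged: activation is allowed only when at least 95% of active stake reports the tool's feature set. A small sketch of that check, assuming stake has already been aggregated into whole-number percentages per feature set, as active_stake_by_feature_set does above:

use std::collections::HashMap;

fn activation_allowed(my_feature_set: u32, stake_percent_by_feature_set: &HashMap<u32, u64>) -> bool {
    // A missing entry means no visible stake is running this feature set.
    stake_percent_by_feature_set
        .get(&my_feature_set)
        .map(|percentage| *percentage >= 95)
        .unwrap_or(false)
}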
@ -278,7 +295,7 @@ fn process_status(
let feature_id = &feature_ids[i]; let feature_id = &feature_ids[i];
let feature_name = FEATURE_NAMES.get(feature_id).unwrap(); let feature_name = FEATURE_NAMES.get(feature_id).unwrap();
if let Some(account) = account { if let Some(account) = account {
if let Some(feature) = Feature::from_account(&account) { if let Some(feature) = feature::from_account(&account) {
let feature_status = match feature.activated_at { let feature_status = match feature.activated_at {
None => CliFeatureStatus::Pending, None => CliFeatureStatus::Pending,
Some(activation_slot) => CliFeatureStatus::Active(activation_slot), Some(activation_slot) => CliFeatureStatus::Active(activation_slot),
@ -299,9 +316,10 @@ fn process_status(
}); });
} }
let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?;
let feature_set = CliFeatures { let feature_set = CliFeatures {
features, features,
feature_activation_allowed: feature_activation_allowed(rpc_client)?, feature_activation_allowed,
inactive, inactive,
}; };
Ok(config.output_format.formatted_string(&feature_set)) Ok(config.output_format.formatted_string(&feature_set))
@ -318,12 +336,12 @@ fn process_activate(
.next() .next()
.unwrap(); .unwrap();
if let Some(account) = account { if let Some(account) = account {
if Feature::from_account(&account).is_some() { if feature::from_account(&account).is_some() {
return Err(format!("{} has already been activated", feature_id).into()); return Err(format!("{} has already been activated", feature_id).into());
} }
} }
if !feature_activation_allowed(rpc_client)? { if !feature_activation_allowed(rpc_client, false)? {
return Err("Feature activation is not allowed at this time".into()); return Err("Feature activation is not allowed at this time".into());
} }
@ -338,15 +356,11 @@ fn process_activate(
&config.signers[0].pubkey(), &config.signers[0].pubkey(),
|lamports| { |lamports| {
Message::new( Message::new(
&[ &feature::activate_with_lamports(
system_instruction::transfer( &feature_id,
&config.signers[0].pubkey(), &config.signers[0].pubkey(),
&feature_id, lamports,
lamports, ),
),
system_instruction::allocate(&feature_id, Feature::size_of() as u64),
system_instruction::assign(&feature_id, &feature::id()),
],
Some(&config.signers[0].pubkey()), Some(&config.signers[0].pubkey()),
) )
}, },


@ -26,6 +26,7 @@ pub mod cluster_query;
pub mod feature; pub mod feature;
pub mod inflation; pub mod inflation;
pub mod nonce; pub mod nonce;
pub mod send_tpu;
pub mod spend_utils; pub mod spend_utils;
pub mod stake; pub mod stake;
pub mod test_utils; pub mod test_utils;


@ -580,6 +580,7 @@ mod tests {
fee_calculator::FeeCalculator, fee_calculator::FeeCalculator,
hash::hash, hash::hash,
nonce::{self, state::Versions, State}, nonce::{self, state::Versions, State},
nonce_account,
signature::{read_keypair_file, write_keypair, Keypair, Signer}, signature::{read_keypair_file, write_keypair, Keypair, Signer},
system_program, system_program,
}; };
@ -833,7 +834,7 @@ mod tests {
#[test] #[test]
fn test_check_nonce_account() { fn test_check_nonce_account() {
let blockhash = Hash::default(); let blockhash = Hash::default();
let nonce_pubkey = Pubkey::new_rand(); let nonce_pubkey = solana_sdk::pubkey::new_rand();
let data = Versions::new_current(State::Initialized(nonce::state::Data { let data = Versions::new_current(State::Initialized(nonce::state::Data {
authority: nonce_pubkey, authority: nonce_pubkey,
blockhash, blockhash,
@ -869,7 +870,7 @@ mod tests {
} }
let data = Versions::new_current(State::Initialized(nonce::state::Data { let data = Versions::new_current(State::Initialized(nonce::state::Data {
authority: Pubkey::new_rand(), authority: solana_sdk::pubkey::new_rand(),
blockhash, blockhash,
fee_calculator: FeeCalculator::default(), fee_calculator: FeeCalculator::default(),
})); }));
@ -891,7 +892,7 @@ mod tests {
#[test] #[test]
fn test_account_identity_ok() { fn test_account_identity_ok() {
let nonce_account = nonce::create_account(1).into_inner(); let nonce_account = nonce_account::create_account(1).into_inner();
assert_eq!(account_identity_ok(&nonce_account), Ok(())); assert_eq!(account_identity_ok(&nonce_account), Ok(()));
let system_account = Account::new(1, 0, &system_program::id()); let system_account = Account::new(1, 0, &system_program::id());
@ -910,7 +911,7 @@ mod tests {
#[test] #[test]
fn test_state_from_account() { fn test_state_from_account() {
let mut nonce_account = nonce::create_account(1).into_inner(); let mut nonce_account = nonce_account::create_account(1).into_inner();
assert_eq!(state_from_account(&nonce_account), Ok(State::Uninitialized)); assert_eq!(state_from_account(&nonce_account), Ok(State::Uninitialized));
let data = nonce::state::Data { let data = nonce::state::Data {
@ -935,7 +936,7 @@ mod tests {
#[test] #[test]
fn test_data_from_helpers() { fn test_data_from_helpers() {
let mut nonce_account = nonce::create_account(1).into_inner(); let mut nonce_account = nonce_account::create_account(1).into_inner();
let state = state_from_account(&nonce_account).unwrap(); let state = state_from_account(&nonce_account).unwrap();
assert_eq!( assert_eq!(
data_from_state(&state), data_from_state(&state),

cli/src/send_tpu.rs (new file, 29 lines)

@ -0,0 +1,29 @@
use log::*;
use solana_client::rpc_response::{RpcContactInfo, RpcLeaderSchedule};
use std::net::{SocketAddr, UdpSocket};
pub fn get_leader_tpu(
slot_index: u64,
leader_schedule: Option<&RpcLeaderSchedule>,
cluster_nodes: Option<&Vec<RpcContactInfo>>,
) -> Option<SocketAddr> {
leader_schedule?
.iter()
.find(|(_pubkey, slots)| slots.iter().any(|slot| *slot as u64 == slot_index))
.and_then(|(pubkey, _)| {
cluster_nodes?
.iter()
.find(|contact_info| contact_info.pubkey == *pubkey)
.and_then(|contact_info| contact_info.tpu)
})
}
pub fn send_transaction_tpu(
send_socket: &UdpSocket,
tpu_address: &SocketAddr,
wire_transaction: &[u8],
) {
if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
}
}
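The two helpers above only resolve and hit a TPU address; they do not fetch the leader schedule or node list themselves. As a rough, hedged sketch (not part of this diff), the wiring could look like the following, assuming the caller imports this module and uses the existing RpcClient calls get_epoch_info, get_leader_schedule, and get_cluster_nodes:

use solana_client::rpc_client::RpcClient;
use std::net::UdpSocket;

// Illustrative wiring only: resolve the current leader's TPU address from the
// RPC leader schedule and cluster node list, then send a pre-serialized
// transaction to it with the helpers above.
fn send_to_current_leader(rpc_client: &RpcClient, wire_transaction: &[u8]) -> Option<()> {
    let epoch_info = rpc_client.get_epoch_info().ok()?;
    let leader_schedule = rpc_client.get_leader_schedule(None).ok()??;
    let cluster_nodes = rpc_client.get_cluster_nodes().ok()?;
    let tpu_address = get_leader_tpu(
        epoch_info.slot_index,
        Some(&leader_schedule),
        Some(&cluster_nodes),
    )?;
    let send_socket = UdpSocket::bind("0.0.0.0:0").ok()?;
    send_transaction_tpu(&send_socket, &tpu_address, wire_transaction);
    Some(())
}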


@ -23,20 +23,25 @@ use solana_cli_output::{
CliStakeType, CliStakeType,
}; };
use solana_client::{ use solana_client::{
blockhash_query::BlockhashQuery, nonce_utils, rpc_client::RpcClient, blockhash_query::BlockhashQuery,
rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE, client_error::{ClientError, ClientErrorKind},
nonce_utils,
rpc_client::RpcClient,
rpc_custom_error,
rpc_request::{self, DELINQUENT_VALIDATOR_SLOT_DISTANCE},
}; };
use solana_remote_wallet::remote_wallet::RemoteWalletManager; use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{ use solana_sdk::{
account::from_account,
account_utils::StateMut, account_utils::StateMut,
clock::{Clock, Epoch, Slot, UnixTimestamp}, clock::{Clock, Epoch, Slot, UnixTimestamp, SECONDS_PER_DAY},
feature, feature_set,
message::Message, message::Message,
pubkey::Pubkey, pubkey::Pubkey,
system_instruction::SystemError, system_instruction::SystemError,
sysvar::{ sysvar::{
clock, clock,
stake_history::{self, StakeHistory}, stake_history::{self, StakeHistory},
Sysvar,
}, },
transaction::Transaction, transaction::Transaction,
}; };
@ -1497,6 +1502,7 @@ pub fn build_stake_state(
use_lamports_unit: bool, use_lamports_unit: bool,
stake_history: &StakeHistory, stake_history: &StakeHistory,
clock: &Clock, clock: &Clock,
stake_program_v2_enabled: bool,
) -> CliStakeState { ) -> CliStakeState {
match stake_state { match stake_state {
StakeState::Stake( StakeState::Stake(
@ -1508,9 +1514,12 @@ pub fn build_stake_state(
stake, stake,
) => { ) => {
let current_epoch = clock.epoch; let current_epoch = clock.epoch;
let (active_stake, activating_stake, deactivating_stake) = stake let (active_stake, activating_stake, deactivating_stake) =
.delegation stake.delegation.stake_activating_and_deactivating(
.stake_activating_and_deactivating(current_epoch, Some(stake_history)); current_epoch,
Some(stake_history),
stake_program_v2_enabled,
);
let lockup = if lockup.is_in_force(clock, None) { let lockup = if lockup.is_in_force(clock, None) {
Some(lockup.into()) Some(lockup.into())
} else { } else {
@ -1605,10 +1614,26 @@ pub(crate) fn fetch_epoch_rewards(
.get(0) .get(0)
.ok_or_else(|| format!("Unable to fetch first confirmed block for epoch {}", epoch))?; .ok_or_else(|| format!("Unable to fetch first confirmed block for epoch {}", epoch))?;
let first_confirmed_block = rpc_client.get_confirmed_block_with_encoding( let first_confirmed_block = match rpc_client.get_confirmed_block_with_encoding(
first_confirmed_block_in_epoch, first_confirmed_block_in_epoch,
solana_transaction_status::UiTransactionEncoding::Base64, solana_transaction_status::UiTransactionEncoding::Base64,
)?; ) {
Ok(first_confirmed_block) => first_confirmed_block,
Err(ClientError {
kind:
ClientErrorKind::RpcError(rpc_request::RpcError::RpcResponseError {
code: rpc_custom_error::JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE,
..
}),
..
}) => {
// RPC node doesn't have this block
break;
}
Err(err) => {
return Err(err.into());
}
};
let epoch_start_time = if let Some(block_time) = first_confirmed_block.block_time { let epoch_start_time = if let Some(block_time) = first_confirmed_block.block_time {
block_time block_time
@ -1620,13 +1645,13 @@ pub(crate) fn fetch_epoch_rewards(
let previous_epoch_rewards = first_confirmed_block.rewards; let previous_epoch_rewards = first_confirmed_block.rewards;
if let Some((effective_slot, epoch_end_time, epoch_rewards)) = epoch_info { if let Some((effective_slot, epoch_end_time, epoch_rewards)) = epoch_info {
let wall_clock_epoch_duration = let wallclock_epoch_duration =
{ Local.timestamp(epoch_end_time, 0) - Local.timestamp(epoch_start_time, 0) } { Local.timestamp(epoch_end_time, 0) - Local.timestamp(epoch_start_time, 0) }
.to_std()? .to_std()?
.as_secs_f64(); .as_secs_f64();
const SECONDS_PER_YEAR: f64 = (24 * 60 * 60 * 356) as f64; let wallclock_epochs_per_year =
let percent_of_year = SECONDS_PER_YEAR / wall_clock_epoch_duration; (SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration;
if let Some(reward) = epoch_rewards if let Some(reward) = epoch_rewards
.into_iter() .into_iter()
@ -1642,7 +1667,7 @@ pub(crate) fn fetch_epoch_rewards(
amount: reward.lamports.abs() as u64, amount: reward.lamports.abs() as u64,
post_balance: reward.post_balance, post_balance: reward.post_balance,
percent_change: balance_increase_percent, percent_change: balance_increase_percent,
apr: balance_increase_percent * percent_of_year, apr: balance_increase_percent * wallclock_epochs_per_year,
}); });
} }
} }
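The APR change above only swaps the hard-coded seconds-per-year constant for one derived from SECONDS_PER_DAY; the arithmetic itself is unchanged. A minimal restatement, keeping the 356-day year exactly as it appears on both sides of the diff (not a correction):

// Restates the computation above: scale the single-epoch balance increase by
// how many such epochs fit in a (356-day) year of wall-clock time.
fn epoch_apr(balance_increase_percent: f64, wallclock_epoch_duration_secs: f64) -> f64 {
    const SECONDS_PER_DAY: u64 = 24 * 60 * 60;
    let wallclock_epochs_per_year =
        (SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration_secs;
    balance_increase_percent * wallclock_epochs_per_year
}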
@ -1676,12 +1701,11 @@ pub fn process_show_stake_account(
match stake_account.state() { match stake_account.state() {
Ok(stake_state) => { Ok(stake_state) => {
let stake_history_account = rpc_client.get_account(&stake_history::id())?; let stake_history_account = rpc_client.get_account(&stake_history::id())?;
let stake_history = let stake_history = from_account(&stake_history_account).ok_or_else(|| {
StakeHistory::from_account(&stake_history_account).ok_or_else(|| { CliError::RpcRequestError("Failed to deserialize stake history".to_string())
CliError::RpcRequestError("Failed to deserialize stake history".to_string()) })?;
})?;
let clock_account = rpc_client.get_account(&clock::id())?; let clock_account = rpc_client.get_account(&clock::id())?;
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| { let clock: Clock = from_account(&clock_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string()) CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
})?; })?;
@ -1691,6 +1715,7 @@ pub fn process_show_stake_account(
use_lamports_unit, use_lamports_unit,
&stake_history, &stake_history,
&clock, &clock,
is_stake_program_v2_enabled(rpc_client), // At v1.6, this check can be removed and simply passed as `true`
); );
if state.stake_type == CliStakeType::Stake { if state.stake_type == CliStakeType::Stake {
@ -1718,7 +1743,7 @@ pub fn process_show_stake_history(
use_lamports_unit: bool, use_lamports_unit: bool,
) -> ProcessResult { ) -> ProcessResult {
let stake_history_account = rpc_client.get_account(&stake_history::id())?; let stake_history_account = rpc_client.get_account(&stake_history::id())?;
let stake_history = StakeHistory::from_account(&stake_history_account).ok_or_else(|| { let stake_history = from_account::<StakeHistory>(&stake_history_account).ok_or_else(|| {
CliError::RpcRequestError("Failed to deserialize stake history".to_string()) CliError::RpcRequestError("Failed to deserialize stake history".to_string())
})?; })?;
@ -1861,6 +1886,15 @@ pub fn process_delegate_stake(
} }
} }
pub fn is_stake_program_v2_enabled(rpc_client: &RpcClient) -> bool {
rpc_client
.get_account(&feature_set::stake_program_v2::id())
.ok()
.and_then(|account| feature::from_account(&account))
.and_then(|feature| feature.activated_at)
.is_some()
}
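is_stake_program_v2_enabled is a specialization of a general pattern: fetch the feature account and treat a populated activated_at as "enabled". A hedged generalization for any feature id, using only the feature::from_account API this diff already relies on (the helper name is illustrative, not from the source):

use solana_client::rpc_client::RpcClient;
use solana_sdk::{feature, pubkey::Pubkey};

// Probe whether an arbitrary feature account reports an activation slot on the
// cluster the RpcClient points at.
fn is_feature_enabled(rpc_client: &RpcClient, feature_id: &Pubkey) -> bool {
    rpc_client
        .get_account(feature_id)
        .ok()
        .and_then(|account| feature::from_account(&account))
        .and_then(|feature| feature.activated_at)
        .is_some()
}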
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -2406,9 +2440,9 @@ mod tests {
); );
// Test CreateStakeAccount SubCommand // Test CreateStakeAccount SubCommand
let custodian = Pubkey::new_rand(); let custodian = solana_sdk::pubkey::new_rand();
let custodian_string = format!("{}", custodian); let custodian_string = format!("{}", custodian);
let authorized = Pubkey::new_rand(); let authorized = solana_sdk::pubkey::new_rand();
let authorized_string = format!("{}", authorized); let authorized_string = format!("{}", authorized);
let test_create_stake_account = test_commands.clone().get_matches_from(vec![ let test_create_stake_account = test_commands.clone().get_matches_from(vec![
"test", "test",
@ -2546,7 +2580,7 @@ mod tests {
); );
// Test DelegateStake Subcommand // Test DelegateStake Subcommand
let vote_account_pubkey = Pubkey::new_rand(); let vote_account_pubkey = solana_sdk::pubkey::new_rand();
let vote_account_string = vote_account_pubkey.to_string(); let vote_account_string = vote_account_pubkey.to_string();
let test_delegate_stake = test_commands.clone().get_matches_from(vec![ let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test", "test",
@ -2573,7 +2607,7 @@ mod tests {
); );
// Test DelegateStake Subcommand w/ authority // Test DelegateStake Subcommand w/ authority
let vote_account_pubkey = Pubkey::new_rand(); let vote_account_pubkey = solana_sdk::pubkey::new_rand();
let vote_account_string = vote_account_pubkey.to_string(); let vote_account_string = vote_account_pubkey.to_string();
let test_delegate_stake = test_commands.clone().get_matches_from(vec![ let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test", "test",
@ -2692,7 +2726,7 @@ mod tests {
); );
// Test Delegate Subcommand w/ absent fee payer // Test Delegate Subcommand w/ absent fee payer
let key1 = Pubkey::new_rand(); let key1 = solana_sdk::pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]); let sig1 = Keypair::new().sign_message(&[0u8]);
let signer1 = format!("{}={}", key1, sig1); let signer1 = format!("{}={}", key1, sig1);
let test_delegate_stake = test_commands.clone().get_matches_from(vec![ let test_delegate_stake = test_commands.clone().get_matches_from(vec![
@ -2732,7 +2766,7 @@ mod tests {
); );
// Test Delegate Subcommand w/ absent fee payer and absent nonce authority // Test Delegate Subcommand w/ absent fee payer and absent nonce authority
let key2 = Pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand();
let sig2 = Keypair::new().sign_message(&[0u8]); let sig2 = Keypair::new().sign_message(&[0u8]);
let signer2 = format!("{}={}", key2, sig2); let signer2 = format!("{}={}", key2, sig2);
let test_delegate_stake = test_commands.clone().get_matches_from(vec![ let test_delegate_stake = test_commands.clone().get_matches_from(vec![
@ -3060,7 +3094,7 @@ mod tests {
); );
// Test Deactivate Subcommand w/ absent fee payer // Test Deactivate Subcommand w/ absent fee payer
let key1 = Pubkey::new_rand(); let key1 = solana_sdk::pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]); let sig1 = Keypair::new().sign_message(&[0u8]);
let signer1 = format!("{}={}", key1, sig1); let signer1 = format!("{}={}", key1, sig1);
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![ let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
@ -3097,7 +3131,7 @@ mod tests {
); );
// Test Deactivate Subcommand w/ absent fee payer and nonce authority // Test Deactivate Subcommand w/ absent fee payer and nonce authority
let key2 = Pubkey::new_rand(); let key2 = solana_sdk::pubkey::new_rand();
let sig2 = Keypair::new().sign_message(&[0u8]); let sig2 = Keypair::new().sign_message(&[0u8]);
let signer2 = format!("{}={}", key2, sig2); let signer2 = format!("{}={}", key2, sig2);
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![ let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
@ -3276,7 +3310,7 @@ mod tests {
let stake_account_keypair = Keypair::new(); let stake_account_keypair = Keypair::new();
write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap(); write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap();
let source_stake_account_pubkey = Pubkey::new_rand(); let source_stake_account_pubkey = solana_sdk::pubkey::new_rand();
let test_merge_stake_account = test_commands.clone().get_matches_from(vec![ let test_merge_stake_account = test_commands.clone().get_matches_from(vec![
"test", "test",
"merge-stake", "merge-stake",


@ -486,7 +486,7 @@ mod tests {
#[test] #[test]
fn test_parse_validator_info() { fn test_parse_validator_info() {
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let keys = vec![(validator_info::id(), false), (pubkey, true)]; let keys = vec![(validator_info::id(), false), (pubkey, true)];
let config = ConfigKeys { keys }; let config = ConfigKeys { keys };


@ -915,7 +915,7 @@ mod tests {
); );
// test init with an authed voter // test init with an authed voter
let authed = Pubkey::new_rand(); let authed = solana_sdk::pubkey::new_rand();
let (keypair_file, mut tmp_file) = make_tmp_file(); let (keypair_file, mut tmp_file) = make_tmp_file();
let keypair = Keypair::new(); let keypair = Keypair::new();
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap(); write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();


@ -55,7 +55,7 @@ fn test_cli_deploy_program() {
faucet_host: None, faucet_host: None,
faucet_port: faucet_addr.port(), faucet_port: faucet_addr.port(),
pubkey: None, pubkey: None,
lamports: 3 * minimum_balance_for_rent_exemption, // min balance for rent exemption for two programs + leftover for tx processing lamports: 4 * minimum_balance_for_rent_exemption, // min balance for rent exemption for three programs + leftover for tx processing
}; };
config.signers = vec![&keypair]; config.signers = vec![&keypair];
process_command(&config).unwrap(); process_command(&config).unwrap();
@ -64,6 +64,7 @@ fn test_cli_deploy_program() {
program_location: pathbuf.to_str().unwrap().to_string(), program_location: pathbuf.to_str().unwrap().to_string(),
address: None, address: None,
use_deprecated_loader: false, use_deprecated_loader: false,
allow_excessive_balance: false,
}; };
let response = process_command(&config); let response = process_command(&config);
@ -98,6 +99,7 @@ fn test_cli_deploy_program() {
program_location: pathbuf.to_str().unwrap().to_string(), program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1), address: Some(1),
use_deprecated_loader: false, use_deprecated_loader: false,
allow_excessive_balance: false,
}; };
process_command(&config).unwrap(); process_command(&config).unwrap();
let account1 = rpc_client let account1 = rpc_client
@ -113,6 +115,44 @@ fn test_cli_deploy_program() {
// Attempt to redeploy to the same address // Attempt to redeploy to the same address
process_command(&config).unwrap_err(); process_command(&config).unwrap_err();
// Attempt to deploy to account with excess balance
let custom_address_keypair = Keypair::new();
config.command = CliCommand::Airdrop {
faucet_host: None,
faucet_port: faucet_addr.port(),
pubkey: None,
lamports: 2 * minimum_balance_for_rent_exemption, // Anything over minimum_balance_for_rent_exemption should trigger err
};
config.signers = vec![&custom_address_keypair];
process_command(&config).unwrap();
config.signers = vec![&keypair, &custom_address_keypair];
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: false,
};
process_command(&config).unwrap_err();
// Use forcing parameter to deploy to account with excess balance
config.command = CliCommand::Deploy {
program_location: pathbuf.to_str().unwrap().to_string(),
address: Some(1),
use_deprecated_loader: false,
allow_excessive_balance: true,
};
process_command(&config).unwrap();
let account2 = rpc_client
.get_account_with_commitment(&custom_address_keypair.pubkey(), CommitmentConfig::recent())
.unwrap()
.value
.unwrap();
assert_eq!(account2.lamports, 2 * minimum_balance_for_rent_exemption);
assert_eq!(account2.owner, bpf_loader::id());
assert_eq!(account2.executable, true);
assert_eq!(account0.data, account2.data);
server.close().unwrap(); server.close().unwrap();
remove_dir_all(ledger_path).unwrap(); remove_dir_all(ledger_path).unwrap();
} }


@ -172,7 +172,7 @@ fn full_battery_tests(
assert_ne!(first_nonce, third_nonce); assert_ne!(first_nonce, third_nonce);
// Withdraw from nonce account // Withdraw from nonce account
let payee_pubkey = Pubkey::new_rand(); let payee_pubkey = solana_sdk::pubkey::new_rand();
config_payer.signers = authorized_signers; config_payer.signers = authorized_signers;
config_payer.command = CliCommand::WithdrawFromNonceAccount { config_payer.command = CliCommand::WithdrawFromNonceAccount {
nonce_account, nonce_account,


@ -12,7 +12,6 @@ use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{ use solana_sdk::{
account_utils::StateMut, account_utils::StateMut,
commitment_config::CommitmentConfig, commitment_config::CommitmentConfig,
pubkey::Pubkey,
signature::{Keypair, Signer}, signature::{Keypair, Signer},
}; };
use solana_vote_program::vote_state::{VoteAuthorize, VoteState, VoteStateVersions}; use solana_vote_program::vote_state::{VoteAuthorize, VoteState, VoteStateVersions};
@ -110,7 +109,7 @@ fn test_vote_authorize_and_withdraw() {
assert_eq!(authorized_withdrawer, withdraw_authority.pubkey()); assert_eq!(authorized_withdrawer, withdraw_authority.pubkey());
// Withdraw from vote account // Withdraw from vote account
let destination_account = Pubkey::new_rand(); // Send withdrawal to new account to make balance check easy let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy
config.signers = vec![&default_signer, &withdraw_authority]; config.signers = vec![&default_signer, &withdraw_authority];
config.command = CliCommand::WithdrawFromVoteAccount { config.command = CliCommand::WithdrawFromVoteAccount {
vote_account_pubkey, vote_account_pubkey,


@ -1,6 +1,6 @@
[package] [package]
name = "solana-client" name = "solana-client"
version = "1.4.0" version = "1.4.7"
description = "Solana Client" description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"] authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -17,16 +17,18 @@ indicatif = "0.15.0"
jsonrpc-core = "15.0.0" jsonrpc-core = "15.0.0"
log = "0.4.8" log = "0.4.8"
rayon = "1.4.0" rayon = "1.4.0"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] } reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "0.11.0"
serde = "1.0.112" serde = "1.0.112"
serde_derive = "1.0.103" serde_derive = "1.0.103"
serde_json = "1.0.56" serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.4.0" } solana-account-decoder = { path = "../account-decoder", version = "1.4.7" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" } solana-clap-utils = { path = "../clap-utils", version = "1.4.7" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" } solana-net-utils = { path = "../net-utils", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.0" } solana-transaction-status = { path = "../transaction-status", version = "1.4.7" }
solana-vote-program = { path = "../programs/vote", version = "1.4.0" } solana-version = { path = "../version", version = "1.4.7" }
solana-vote-program = { path = "../programs/vote", version = "1.4.7" }
thiserror = "1.0" thiserror = "1.0"
tungstenite = "0.10.1" tungstenite = "0.10.1"
url = "2.1.1" url = "2.1.1"
@ -35,7 +37,7 @@ url = "2.1.1"
assert_matches = "1.3.0" assert_matches = "1.3.0"
jsonrpc-core = "15.0.0" jsonrpc-core = "15.0.0"
jsonrpc-http-server = "15.0.0" jsonrpc-http-server = "15.0.0"
solana-logger = { path = "../logger", version = "1.4.0" } solana-logger = { path = "../logger", version = "1.4.7" }
[package.metadata.docs.rs] [package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"] targets = ["x86_64-unknown-linux-gnu"]


@ -50,10 +50,10 @@ impl Into<TransportError> for ClientErrorKind {
#[derive(Error, Debug)] #[derive(Error, Debug)]
#[error("{kind}")] #[error("{kind}")]
pub struct ClientError { pub struct ClientError {
request: Option<rpc_request::RpcRequest>, pub request: Option<rpc_request::RpcRequest>,
#[source] #[source]
kind: ClientErrorKind, pub kind: ClientErrorKind,
} }
impl ClientError { impl ClientError {


@ -1,6 +1,8 @@
use crate::{ use crate::{
client_error::Result, client_error::Result,
rpc_request::{RpcError, RpcRequest}, rpc_custom_error,
rpc_request::{RpcError, RpcRequest, RpcResponseErrorData},
rpc_response::RpcSimulateTransactionResult,
rpc_sender::RpcSender, rpc_sender::RpcSender,
}; };
use log::*; use log::*;
@ -27,6 +29,13 @@ impl HttpSender {
} }
} }
#[derive(Deserialize, Debug)]
struct RpcErrorObject {
code: i64,
message: String,
data: serde_json::Value,
}
impl RpcSender for HttpSender { impl RpcSender for HttpSender {
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> { fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
// Concurrent requests are not supported so reuse the same request id for all requests // Concurrent requests are not supported so reuse the same request id for all requests
@ -63,11 +72,36 @@ impl RpcSender for HttpSender {
let json: serde_json::Value = serde_json::from_str(&response.text()?)?; let json: serde_json::Value = serde_json::from_str(&response.text()?)?;
if json["error"].is_object() { if json["error"].is_object() {
return Err(RpcError::RpcRequestError(format!( return match serde_json::from_value::<RpcErrorObject>(json["error"].clone())
"RPC Error response: {}", {
serde_json::to_string(&json["error"]).unwrap() Ok(rpc_error_object) => {
)) let data = match rpc_error_object.code {
.into()); rpc_custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE => {
match serde_json::from_value::<RpcSimulateTransactionResult>(json["error"]["data"].clone()) {
Ok(data) => RpcResponseErrorData::SendTransactionPreflightFailure(data),
Err(err) => {
debug!("Failed to deserialize RpcSimulateTransactionResult: {:?}", err);
RpcResponseErrorData::Empty
}
}
},
_ => RpcResponseErrorData::Empty
};
Err(RpcError::RpcResponseError {
code: rpc_error_object.code,
message: rpc_error_object.message,
data,
}
.into())
}
Err(err) => Err(RpcError::RpcRequestError(format!(
"Failed to deserialize RPC error response: {} [{}]",
serde_json::to_string(&json["error"]).unwrap(),
err
))
.into()),
};
} }
return Ok(json["result"].clone()); return Ok(json["result"].clone());
} }
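The new parsing path expects a standard JSON-RPC 2.0 error object, with the preflight simulation result carried in its data field. A rough test-style sketch of the shape involved; the example payload values are assumptions mirroring RpcSimulateTransactionResult, not copied from this diff:

#[test]
fn deserializes_rpc_error_object() {
    // -32002 is JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE.
    let error = serde_json::json!({
        "code": -32002,
        "message": "Transaction simulation failed",
        "data": { "err": null, "logs": ["Program log: insufficient funds"] }
    });
    let parsed: RpcErrorObject = serde_json::from_value(error).unwrap();
    assert_eq!(parsed.code, -32002);
    assert_eq!(parsed.message, "Transaction simulation failed");
}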


@ -10,6 +10,7 @@ pub mod perf_utils;
pub mod pubsub_client; pub mod pubsub_client;
pub mod rpc_client; pub mod rpc_client;
pub mod rpc_config; pub mod rpc_config;
pub mod rpc_custom_error;
pub mod rpc_filter; pub mod rpc_filter;
pub mod rpc_request; pub mod rpc_request;
pub mod rpc_response; pub mod rpc_response;


@ -1,17 +1,19 @@
use crate::{ use crate::{
client_error::Result, client_error::Result,
rpc_request::RpcRequest, rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext}, rpc_response::{Response, RpcResponseContext, RpcVersionInfo},
rpc_sender::RpcSender, rpc_sender::RpcSender,
}; };
use serde_json::{Number, Value}; use serde_json::{json, Number, Value};
use solana_sdk::{ use solana_sdk::{
epoch_info::EpochInfo,
fee_calculator::{FeeCalculator, FeeRateGovernor}, fee_calculator::{FeeCalculator, FeeRateGovernor},
instruction::InstructionError, instruction::InstructionError,
signature::Signature, signature::Signature,
transaction::{self, Transaction, TransactionError}, transaction::{self, Transaction, TransactionError},
}; };
use solana_transaction_status::TransactionStatus; use solana_transaction_status::TransactionStatus;
use solana_version::Version;
use std::{collections::HashMap, sync::RwLock}; use std::{collections::HashMap, sync::RwLock};
pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8"; pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
@ -57,6 +59,13 @@ impl RpcSender for MockSender {
serde_json::to_value(FeeCalculator::default()).unwrap(), serde_json::to_value(FeeCalculator::default()).unwrap(),
), ),
})?, })?,
RpcRequest::GetEpochInfo => serde_json::to_value(EpochInfo {
epoch: 1,
slot_index: 2,
slots_in_epoch: 32,
absolute_slot: 34,
block_height: 34,
})?,
RpcRequest::GetFeeCalculatorForBlockhash => { RpcRequest::GetFeeCalculatorForBlockhash => {
let value = if self.url == "blockhash_expired" { let value = if self.url == "blockhash_expired" {
Value::Null Value::Null
@ -112,13 +121,20 @@ impl RpcSender for MockSender {
Signature::new(&[8; 64]).to_string() Signature::new(&[8; 64]).to_string()
} else { } else {
let tx_str = params.as_array().unwrap()[0].as_str().unwrap().to_string(); let tx_str = params.as_array().unwrap()[0].as_str().unwrap().to_string();
let data = bs58::decode(tx_str).into_vec().unwrap(); let data = base64::decode(tx_str).unwrap();
let tx: Transaction = bincode::deserialize(&data).unwrap(); let tx: Transaction = bincode::deserialize(&data).unwrap();
tx.signatures[0].to_string() tx.signatures[0].to_string()
}; };
Value::String(signature) Value::String(signature)
} }
RpcRequest::GetMinimumBalanceForRentExemption => Value::Number(Number::from(20)), RpcRequest::GetMinimumBalanceForRentExemption => Value::Number(Number::from(20)),
RpcRequest::GetVersion => {
let version = Version::default();
json!(RpcVersionInfo {
solana_core: version.to_string(),
feature_set: Some(version.feature_set),
})
}
_ => Value::Null, _ => Value::Null,
}; };
Ok(val) Ok(val)


@ -8,7 +8,7 @@ use crate::{
RpcProgramAccountsConfig, RpcSendTransactionConfig, RpcSimulateTransactionConfig, RpcProgramAccountsConfig, RpcSendTransactionConfig, RpcSimulateTransactionConfig,
RpcTokenAccountsFilter, RpcTokenAccountsFilter,
}, },
rpc_request::{RpcError, RpcRequest, TokenAccountsFilter}, rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter},
rpc_response::*, rpc_response::*,
rpc_sender::RpcSender, rpc_sender::RpcSender,
}; };
@ -41,12 +41,14 @@ use solana_transaction_status::{
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY; use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{ use std::{
net::SocketAddr, net::SocketAddr,
sync::RwLock,
thread::sleep, thread::sleep,
time::{Duration, Instant}, time::{Duration, Instant},
}; };
pub struct RpcClient { pub struct RpcClient {
sender: Box<dyn RpcSender + Send + Sync + 'static>, sender: Box<dyn RpcSender + Send + Sync + 'static>,
default_cluster_transaction_encoding: RwLock<Option<UiTransactionEncoding>>,
} }
fn serialize_encode_transaction( fn serialize_encode_transaction(
@ -73,6 +75,7 @@ impl RpcClient {
pub fn new_sender<T: RpcSender + Send + Sync + 'static>(sender: T) -> Self { pub fn new_sender<T: RpcSender + Send + Sync + 'static>(sender: T) -> Self {
Self { Self {
sender: Box::new(sender), sender: Box::new(sender),
default_cluster_transaction_encoding: RwLock::new(None),
} }
} }
@ -128,17 +131,73 @@ impl RpcClient {
self.send_transaction_with_config(transaction, RpcSendTransactionConfig::default()) self.send_transaction_with_config(transaction, RpcSendTransactionConfig::default())
} }
fn default_cluster_transaction_encoding(&self) -> Result<UiTransactionEncoding, RpcError> {
let default_cluster_transaction_encoding =
self.default_cluster_transaction_encoding.read().unwrap();
if let Some(encoding) = *default_cluster_transaction_encoding {
Ok(encoding)
} else {
drop(default_cluster_transaction_encoding);
let cluster_version = self.get_version().map_err(|e| {
RpcError::RpcRequestError(format!("cluster version query failed: {}", e))
})?;
let cluster_version =
semver::Version::parse(&cluster_version.solana_core).map_err(|e| {
RpcError::RpcRequestError(format!("failed to parse cluster version: {}", e))
})?;
// Prefer base64 since 1.3.16
let encoding = if cluster_version < semver::Version::new(1, 3, 16) {
UiTransactionEncoding::Base58
} else {
UiTransactionEncoding::Base64
};
*self.default_cluster_transaction_encoding.write().unwrap() = Some(encoding);
Ok(encoding)
}
}
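Stripped of the caching in the RwLock, the version cutoff above boils down to a single semver comparison. A compressed restatement under an assumed helper name (not in this diff): clusters reporting a solana_core version below 1.3.16 keep base58-encoded transactions, anything newer gets base64.

use solana_transaction_status::UiTransactionEncoding;

// Unparseable versions fall through to base64, matching the "prefer base64"
// default above.
fn preferred_encoding(cluster_core_version: &str) -> UiTransactionEncoding {
    match semver::Version::parse(cluster_core_version) {
        Ok(version) if version < semver::Version::new(1, 3, 16) => UiTransactionEncoding::Base58,
        _ => UiTransactionEncoding::Base64,
    }
}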
pub fn send_transaction_with_config( pub fn send_transaction_with_config(
&self, &self,
transaction: &Transaction, transaction: &Transaction,
config: RpcSendTransactionConfig, config: RpcSendTransactionConfig,
) -> ClientResult<Signature> { ) -> ClientResult<Signature> {
let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58); let encoding = if let Some(encoding) = config.encoding {
encoding
} else {
self.default_cluster_transaction_encoding()?
};
let config = RpcSendTransactionConfig {
encoding: Some(encoding),
..config
};
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?; let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
let signature_base58_str: String = self.send( let signature_base58_str: String = match self.send(
RpcRequest::SendTransaction, RpcRequest::SendTransaction,
json!([serialized_encoded, config]), json!([serialized_encoded, config]),
)?; ) {
Ok(signature_base58_str) => signature_base58_str,
Err(err) => {
if let ClientErrorKind::RpcError(RpcError::RpcResponseError {
code,
message,
data,
}) = &err.kind
{
debug!("{} {}", code, message);
if let RpcResponseErrorData::SendTransactionPreflightFailure(
RpcSimulateTransactionResult {
logs: Some(logs), ..
},
) = data
{
for (i, log) in logs.iter().enumerate() {
debug!("{:>3}: {}", i + 1, log);
}
}
}
return Err(err);
}
};
let signature = signature_base58_str let signature = signature_base58_str
.parse::<Signature>() .parse::<Signature>()
@ -161,26 +220,28 @@ impl RpcClient {
pub fn simulate_transaction( pub fn simulate_transaction(
&self, &self,
transaction: &Transaction, transaction: &Transaction,
sig_verify: bool,
) -> RpcResult<RpcSimulateTransactionResult> { ) -> RpcResult<RpcSimulateTransactionResult> {
self.simulate_transaction_with_config( self.simulate_transaction_with_config(transaction, RpcSimulateTransactionConfig::default())
transaction,
sig_verify,
RpcSimulateTransactionConfig::default(),
)
} }
pub fn simulate_transaction_with_config( pub fn simulate_transaction_with_config(
&self, &self,
transaction: &Transaction, transaction: &Transaction,
sig_verify: bool,
config: RpcSimulateTransactionConfig, config: RpcSimulateTransactionConfig,
) -> RpcResult<RpcSimulateTransactionResult> { ) -> RpcResult<RpcSimulateTransactionResult> {
let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58); let encoding = if let Some(encoding) = config.encoding {
encoding
} else {
self.default_cluster_transaction_encoding()?
};
let config = RpcSimulateTransactionConfig {
encoding: Some(encoding),
..config
};
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?; let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
self.send( self.send(
RpcRequest::SimulateTransaction, RpcRequest::SimulateTransaction,
json!([serialized_encoded, { "sigVerify": sig_verify }]), json!([serialized_encoded, config]),
) )
} }
@ -1418,7 +1479,7 @@ mod tests {
let rpc_client = RpcClient::new_mock("succeeds".to_string()); let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new(); let key = Keypair::new();
let to = Pubkey::new_rand(); let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::default(); let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash); let tx = system_transaction::transfer(&key, &to, 50, blockhash);
@ -1471,7 +1532,7 @@ mod tests {
let rpc_client = RpcClient::new_mock("succeeds".to_string()); let rpc_client = RpcClient::new_mock("succeeds".to_string());
let key = Keypair::new(); let key = Keypair::new();
let to = Pubkey::new_rand(); let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::default(); let blockhash = Hash::default();
let tx = system_transaction::transfer(&key, &to, 50, blockhash); let tx = system_transaction::transfer(&key, &to, 50, blockhash);
let result = rpc_client.send_and_confirm_transaction(&tx); let result = rpc_client.send_and_confirm_transaction(&tx);


@ -1,13 +1,15 @@
//! Implementation defined RPC server errors
use crate::rpc_response::RpcSimulateTransactionResult;
use jsonrpc_core::{Error, ErrorCode}; use jsonrpc_core::{Error, ErrorCode};
use solana_client::rpc_response::RpcSimulateTransactionResult;
use solana_sdk::clock::Slot; use solana_sdk::clock::Slot;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001; pub const JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002; pub const JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE: i64 = -32002;
const JSON_RPC_SERVER_ERROR_3: i64 = -32003; pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE: i64 = -32003;
const JSON_RPC_SERVER_ERROR_4: i64 = -32004; pub const JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE: i64 = -32004;
const JSON_RPC_SERVER_ERROR_5: i64 = -32005; pub const JSON_RPC_SERVER_ERROR_NODE_UNHEALTHLY: i64 = -32005;
const JSON_RPC_SERVER_ERROR_6: i64 = -32006; pub const JSON_RPC_SERVER_ERROR_TRANSACTION_PRECOMPILE_VERIFICATION_FAILURE: i64 = -32006;
pub enum RpcCustomError { pub enum RpcCustomError {
BlockCleanedUp { BlockCleanedUp {
@ -33,7 +35,7 @@ impl From<RpcCustomError> for Error {
slot, slot,
first_available_block, first_available_block,
} => Self { } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_1), code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP),
message: format!( message: format!(
"Block {} cleaned up, does not exist on node. First available block: {}", "Block {} cleaned up, does not exist on node. First available block: {}",
slot, first_available_block, slot, first_available_block,
@ -41,27 +43,33 @@ impl From<RpcCustomError> for Error {
data: None, data: None,
}, },
RpcCustomError::SendTransactionPreflightFailure { message, result } => Self { RpcCustomError::SendTransactionPreflightFailure { message, result } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_2), code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE,
),
message, message,
data: Some(serde_json::json!(result)), data: Some(serde_json::json!(result)),
}, },
RpcCustomError::TransactionSignatureVerificationFailure => Self { RpcCustomError::TransactionSignatureVerificationFailure => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_3), code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE,
),
message: "Transaction signature verification failure".to_string(), message: "Transaction signature verification failure".to_string(),
data: None, data: None,
}, },
RpcCustomError::BlockNotAvailable { slot } => Self { RpcCustomError::BlockNotAvailable { slot } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_4), code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE),
message: format!("Block not available for slot {}", slot), message: format!("Block not available for slot {}", slot),
data: None, data: None,
}, },
RpcCustomError::RpcNodeUnhealthy => Self { RpcCustomError::RpcNodeUnhealthy => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_5), code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_NODE_UNHEALTHLY),
message: "RPC node is unhealthy".to_string(), message: "RPC node is unhealthy".to_string(),
data: None, data: None,
}, },
RpcCustomError::TransactionPrecompileVerificationFailure(e) => Self { RpcCustomError::TransactionPrecompileVerificationFailure(e) => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_6), code: ErrorCode::ServerError(
JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE,
),
message: format!("Transaction precompile verification failure {:?}", e), message: format!("Transaction precompile verification failure {:?}", e),
data: None, data: None,
}, },


@ -1,3 +1,4 @@
use crate::rpc_response::RpcSimulateTransactionResult;
use serde_json::{json, Value}; use serde_json::{json, Value};
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use std::fmt; use std::fmt;
@ -138,10 +139,42 @@ impl RpcRequest {
} }
} }
#[derive(Debug)]
pub enum RpcResponseErrorData {
Empty,
SendTransactionPreflightFailure(RpcSimulateTransactionResult),
}
impl fmt::Display for RpcResponseErrorData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
RpcResponseErrorData::SendTransactionPreflightFailure(
RpcSimulateTransactionResult {
logs: Some(logs), ..
},
) => {
if logs.is_empty() {
Ok(())
} else {
// Give the user a hint that there is more useful logging information available...
write!(f, "[{} log messages]", logs.len())
}
}
_ => Ok(()),
}
}
}
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum RpcError { pub enum RpcError {
#[error("rpc request error: {0}")] #[error("RPC request error: {0}")]
RpcRequestError(String), RpcRequestError(String),
#[error("RPC response error {code}: {message} {data}")]
RpcResponseError {
code: i64,
message: String,
data: RpcResponseErrorData,
},
#[error("parse error: expected {0}")] #[error("parse error: expected {0}")]
ParseError(String), /* "expected" */ ParseError(String), /* "expected" */
// Anything in a `ForUser` needs to die. The caller should be // Anything in a `ForUser` needs to die. The caller should be
@ -226,7 +259,7 @@ mod tests {
// Test request with CommitmentConfig and params // Test request with CommitmentConfig and params
let test_request = RpcRequest::GetTokenAccountsByOwner; let test_request = RpcRequest::GetTokenAccountsByOwner;
let mint = Pubkey::new_rand(); let mint = solana_sdk::pubkey::new_rand();
let token_account_filter = RpcTokenAccountsFilter::Mint(mint.to_string()); let token_account_filter = RpcTokenAccountsFilter::Mint(mint.to_string());
let request = test_request let request = test_request
.build_request_json(1, json!([addr, token_account_filter, commitment_config])); .build_request_json(1, json!([addr, token_account_filter, commitment_config]));


@ -1,7 +1,7 @@
[package] [package]
name = "solana-core" name = "solana-core"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "1.4.0" version = "1.4.7"
documentation = "https://docs.rs/solana" documentation = "https://docs.rs/solana"
homepage = "https://solana.com/" homepage = "https://solana.com/"
readme = "../README.md" readme = "../README.md"
@ -34,53 +34,54 @@ jsonrpc-http-server = "15.0.0"
jsonrpc-pubsub = "15.0.0" jsonrpc-pubsub = "15.0.0"
jsonrpc-ws-server = "15.0.0" jsonrpc-ws-server = "15.0.0"
log = "0.4.8" log = "0.4.8"
lru = "0.6.0"
num_cpus = "1.13.0" num_cpus = "1.13.0"
num-traits = "0.2" num-traits = "0.2"
rand = "0.7.0" rand = "0.7.0"
rand_chacha = "0.2.2" rand_chacha = "0.2.2"
raptorq = "1.4.2" raptorq = "1.4.2"
rayon = "1.4.0" rayon = "1.4.1"
regex = "1.3.9" regex = "1.3.9"
serde = "1.0.112" serde = "1.0.112"
serde_derive = "1.0.103" serde_derive = "1.0.103"
serde_json = "1.0.56" serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.4.0" } solana-account-decoder = { path = "../account-decoder", version = "1.4.7" }
solana-banks-server = { path = "../banks-server", version = "1.4.0" } solana-banks-server = { path = "../banks-server", version = "1.4.7" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.4.0" } solana-clap-utils = { path = "../clap-utils", version = "1.4.7" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.0" } solana-client = { path = "../client", version = "1.4.7" }
solana-client = { path = "../client", version = "1.4.0" } solana-faucet = { path = "../faucet", version = "1.4.7" }
solana-faucet = { path = "../faucet", version = "1.4.0" } solana-frozen-abi = { path = "../frozen-abi", version = "1.4.7" }
solana-ledger = { path = "../ledger", version = "1.4.0" } solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "1.4.7" }
solana-logger = { path = "../logger", version = "1.4.0" } solana-ledger = { path = "../ledger", version = "1.4.7" }
solana-merkle-tree = { path = "../merkle-tree", version = "1.4.0" } solana-logger = { path = "../logger", version = "1.4.7" }
solana-metrics = { path = "../metrics", version = "1.4.0" } solana-merkle-tree = { path = "../merkle-tree", version = "1.4.7" }
solana-measure = { path = "../measure", version = "1.4.0" } solana-metrics = { path = "../metrics", version = "1.4.7" }
solana-net-utils = { path = "../net-utils", version = "1.4.0" } solana-measure = { path = "../measure", version = "1.4.7" }
solana-perf = { path = "../perf", version = "1.4.0" } solana-net-utils = { path = "../net-utils", version = "1.4.7" }
solana-runtime = { path = "../runtime", version = "1.4.0" } solana-perf = { path = "../perf", version = "1.4.7" }
solana-sdk = { path = "../sdk", version = "1.4.0" } solana-runtime = { path = "../runtime", version = "1.4.7" }
solana-sdk-macro-frozen-abi = { path = "../sdk/macro-frozen-abi", version = "1.4.0" } solana-sdk = { path = "../sdk", version = "1.4.7" }
solana-stake-program = { path = "../programs/stake", version = "1.4.0" } solana-stake-program = { path = "../programs/stake", version = "1.4.7" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.4.0" } solana-storage-bigtable = { path = "../storage-bigtable", version = "1.4.7" }
solana-streamer = { path = "../streamer", version = "1.4.0" } solana-streamer = { path = "../streamer", version = "1.4.7" }
solana-sys-tuner = { path = "../sys-tuner", version = "1.4.0" } solana-sys-tuner = { path = "../sys-tuner", version = "1.4.7" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.0" } solana-transaction-status = { path = "../transaction-status", version = "1.4.7" }
solana-version = { path = "../version", version = "1.4.0" } solana-version = { path = "../version", version = "1.4.7" }
solana-vote-program = { path = "../programs/vote", version = "1.4.0" } solana-vote-program = { path = "../programs/vote", version = "1.4.7" }
solana-vote-signer = { path = "../vote-signer", version = "1.4.0" } solana-vote-signer = { path = "../vote-signer", version = "1.4.7" }
spl-token-v2-0 = { package = "spl-token", version = "=2.0.6", features = ["skip-no-mangle"] } spl-token-v2-0 = { package = "spl-token", version = "=3.0.0", features = ["no-entrypoint"] }
tempfile = "3.1.0" tempfile = "3.1.0"
thiserror = "1.0" thiserror = "1.0"
tokio = { version = "0.2.22", features = ["full"] } tokio = { version = "0.2", features = ["full"] }
tokio_01 = { version = "0.1", package = "tokio" } tokio_01 = { version = "0.1", package = "tokio" }
tokio_fs_01 = { version = "0.1", package = "tokio-fs" } tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
tokio_io_01 = { version = "0.1", package = "tokio-io" } tokio_io_01 = { version = "0.1", package = "tokio-io" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.4.0" } solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.4.7" }
trees = "0.2.1" trees = "0.2.1"
[dev-dependencies] [dev-dependencies]
matches = "0.1.6" matches = "0.1.6"
reqwest = { version = "0.10.6", default-features = false, features = ["blocking", "rustls-tls", "json"] } reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serial_test = "0.4.0" serial_test = "0.4.0"
serial_test_derive = "0.4.0" serial_test_derive = "0.4.0"
systemstat = "0.1.5" systemstat = "0.1.5"
@ -94,6 +95,9 @@ name = "banking_stage"
[[bench]] [[bench]]
name = "blockstore" name = "blockstore"
[[bench]]
name = "crds"
[[bench]] [[bench]]
name = "crds_gossip_pull" name = "crds_gossip_pull"


@ -20,7 +20,7 @@ use solana_runtime::bank::Bank;
use solana_sdk::genesis_config::GenesisConfig; use solana_sdk::genesis_config::GenesisConfig;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
use solana_sdk::message::Message; use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey;
use solana_sdk::signature::Keypair; use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signature; use solana_sdk::signature::Signature;
use solana_sdk::signature::Signer; use solana_sdk::signature::Signer;
@ -56,7 +56,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let ledger_path = get_tmp_ledger_path!(); let ledger_path = get_tmp_ledger_path!();
let my_pubkey = Pubkey::new_rand(); let my_pubkey = pubkey::new_rand();
{ {
let blockstore = Arc::new( let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
@ -94,15 +94,15 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
} }
fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> { fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
let to_pubkey = Pubkey::new_rand(); let to_pubkey = pubkey::new_rand();
let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash); let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
(0..txes) (0..txes)
.into_par_iter() .into_par_iter()
.map(|_| { .map(|_| {
let mut new = dummy.clone(); let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect(); let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new_rand(); new.message.account_keys[0] = pubkey::new_rand();
new.message.account_keys[1] = Pubkey::new_rand(); new.message.account_keys[1] = pubkey::new_rand();
new.signatures = vec![Signature::new(&sig[0..64])]; new.signatures = vec![Signature::new(&sig[0..64])];
new new
}) })
@ -117,7 +117,7 @@ fn make_programs_txs(txes: usize, hash: Hash) -> Vec<Transaction> {
let mut instructions = vec![]; let mut instructions = vec![];
let from_key = Keypair::new(); let from_key = Keypair::new();
for _ in 1..progs { for _ in 1..progs {
let to_key = Pubkey::new_rand(); let to_key = pubkey::new_rand();
instructions.push(system_instruction::transfer(&from_key.pubkey(), &to_key, 1)); instructions.push(system_instruction::transfer(&from_key.pubkey(), &to_key, 1));
} }
let message = Message::new(&instructions, Some(&from_key.pubkey())); let message = Message::new(&instructions, Some(&from_key.pubkey()));


@ -8,7 +8,7 @@ use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
use solana_core::cluster_info::{ClusterInfo, Node}; use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo; use solana_core::contact_info::ContactInfo;
use solana_ledger::shred::Shred; use solana_ledger::shred::Shred;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey;
use solana_sdk::timing::timestamp; use solana_sdk::timing::timestamp;
use std::{ use std::{
collections::HashMap, collections::HashMap,
@ -20,7 +20,7 @@ use test::Bencher;
#[bench] #[bench]
fn broadcast_shreds_bench(bencher: &mut Bencher) { fn broadcast_shreds_bench(bencher: &mut Bencher) {
solana_logger::setup(); solana_logger::setup();
let leader_pubkey = Pubkey::new_rand(); let leader_pubkey = pubkey::new_rand();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey); let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info); let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info);
let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
@ -30,7 +30,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
let mut stakes = HashMap::new(); let mut stakes = HashMap::new();
const NUM_PEERS: usize = 200; const NUM_PEERS: usize = 200;
for _ in 0..NUM_PEERS { for _ in 0..NUM_PEERS {
let id = Pubkey::new_rand(); let id = pubkey::new_rand();
let contact_info = ContactInfo::new_localhost(&id, timestamp()); let contact_info = ContactInfo::new_localhost(&id, timestamp());
cluster_info.insert_info(contact_info); cluster_info.insert_info(contact_info);
stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64); stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);

core/benches/crds.rs (new file, 31 lines)

@ -0,0 +1,31 @@
#![feature(test)]
extern crate test;
use rand::{thread_rng, Rng};
use rayon::ThreadPoolBuilder;
use solana_core::crds::Crds;
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use solana_core::crds_value::CrdsValue;
use solana_sdk::pubkey::Pubkey;
use std::collections::HashMap;
use test::Bencher;
#[bench]
fn bench_find_old_labels(bencher: &mut Bencher) {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut rng = thread_rng();
let mut crds = Crds::default();
let now = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS + CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 1000;
std::iter::repeat_with(|| (CrdsValue::new_rand(&mut rng), rng.gen_range(0, now)))
.take(50_000)
.for_each(|(v, ts)| assert!(crds.insert(v, ts).is_ok()));
let mut timeouts = HashMap::new();
timeouts.insert(Pubkey::default(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS);
bencher.iter(|| {
let out = crds.find_old_labels(&thread_pool, now, &timeouts);
assert!(out.len() > 10);
assert!(out.len() < 250);
out
});
}


@ -8,13 +8,13 @@ use solana_core::cluster_info::MAX_BLOOM_SIZE;
use solana_core::crds::Crds; use solana_core::crds::Crds;
use solana_core::crds_gossip_pull::{CrdsFilter, CrdsGossipPull}; use solana_core::crds_gossip_pull::{CrdsFilter, CrdsGossipPull};
use solana_core::crds_value::CrdsValue; use solana_core::crds_value::CrdsValue;
use solana_sdk::hash::Hash; use solana_sdk::hash;
use test::Bencher; use test::Bencher;
#[bench] #[bench]
fn bench_hash_as_u64(bencher: &mut Bencher) { fn bench_hash_as_u64(bencher: &mut Bencher) {
let mut rng = thread_rng(); let mut rng = thread_rng();
let hashes: Vec<_> = std::iter::repeat_with(|| Hash::new_rand(&mut rng)) let hashes: Vec<_> = std::iter::repeat_with(|| hash::new_rand(&mut rng))
.take(1000) .take(1000)
.collect(); .collect();
bencher.iter(|| { bencher.iter(|| {
@ -34,7 +34,7 @@ fn bench_build_crds_filters(bencher: &mut Bencher) {
for _ in 0..50_000 { for _ in 0..50_000 {
crds_gossip_pull crds_gossip_pull
.purged_values .purged_values
.push_back((Hash::new_rand(&mut rng), rng.gen())); .push_back((solana_sdk::hash::new_rand(&mut rng), rng.gen()));
} }
let mut num_inserts = 0; let mut num_inserts = 0;
for _ in 0..90_000 { for _ in 0..90_000 {


@ -7,14 +7,14 @@ use solana_core::contact_info::ContactInfo;
use solana_core::crds::VersionedCrdsValue; use solana_core::crds::VersionedCrdsValue;
use solana_core::crds_shards::CrdsShards; use solana_core::crds_shards::CrdsShards;
use solana_core::crds_value::{CrdsData, CrdsValue}; use solana_core::crds_value::{CrdsData, CrdsValue};
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey;
use solana_sdk::timing::timestamp; use solana_sdk::timing::timestamp;
use test::Bencher; use test::Bencher;
const CRDS_SHARDS_BITS: u32 = 8; const CRDS_SHARDS_BITS: u32 = 8;
fn new_test_crds_value() -> VersionedCrdsValue { fn new_test_crds_value() -> VersionedCrdsValue {
let data = CrdsData::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp())); let data = CrdsData::ContactInfo(ContactInfo::new_localhost(&pubkey::new_rand(), timestamp()));
VersionedCrdsValue::new(timestamp(), CrdsValue::new_unsigned(data)) VersionedCrdsValue::new(timestamp(), CrdsValue::new_unsigned(data))
} }


@ -14,7 +14,7 @@ use solana_perf::packet::to_packets_chunked;
use solana_perf::test_tx::test_tx; use solana_perf::test_tx::test_tx;
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_runtime::bank_forks::BankForks; use solana_runtime::bank_forks::BankForks;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey;
use solana_sdk::timing::timestamp; use solana_sdk::timing::timestamp;
use std::net::UdpSocket; use std::net::UdpSocket;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
@ -34,7 +34,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
const NUM_PEERS: usize = 4; const NUM_PEERS: usize = 4;
let mut peer_sockets = Vec::new(); let mut peer_sockets = Vec::new();
for _ in 0..NUM_PEERS { for _ in 0..NUM_PEERS {
let id = Pubkey::new_rand(); let id = pubkey::new_rand();
let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut contact_info = ContactInfo::new_localhost(&id, timestamp()); let mut contact_info = ContactInfo::new_localhost(&id, timestamp());
contact_info.tvu = socket.local_addr().unwrap(); contact_info.tvu = socket.local_addr().unwrap();


@ -1 +1 @@
../sdk/build.rs ../frozen-abi/build.rs


@ -204,6 +204,7 @@ mod tests {
{ {
let message = make_accounts_hashes_message(&validator1, vec![(0, hash1)]).unwrap(); let message = make_accounts_hashes_message(&validator1, vec![(0, hash1)]).unwrap();
cluster_info.push_message(message); cluster_info.push_message(message);
cluster_info.flush_push_queue();
} }
slot_to_hash.insert(0, hash2); slot_to_hash.insert(0, hash2);
trusted_validators.insert(validator1.pubkey()); trusted_validators.insert(validator1.pubkey());
@ -254,6 +255,7 @@ mod tests {
100, 100,
); );
} }
cluster_info.flush_push_queue();
let cluster_hashes = cluster_info let cluster_hashes = cluster_info
.get_accounts_hash_for_node(&keypair.pubkey(), |c| c.clone()) .get_accounts_hash_for_node(&keypair.pubkey(), |c| c.clone())
.unwrap(); .unwrap();


@ -60,7 +60,7 @@ impl ForkChoice for BankWeightForkChoice {
trace!("frozen_banks {}", frozen_banks.len()); trace!("frozen_banks {}", frozen_banks.len());
let num_old_banks = frozen_banks let num_old_banks = frozen_banks
.iter() .iter()
.filter(|b| b.slot() < tower.root().unwrap_or(0)) .filter(|b| b.slot() < tower.root())
.count(); .count();
let last_voted_slot = tower.last_voted_slot(); let last_voted_slot = tower.last_voted_slot();


@ -534,13 +534,14 @@ impl BankingStage {
mut loaded_accounts, mut loaded_accounts,
results, results,
inner_instructions, inner_instructions,
transaction_logs,
mut retryable_txs, mut retryable_txs,
tx_count, tx_count,
signature_count, signature_count,
) = bank.load_and_execute_transactions( ) = bank.load_and_execute_transactions(
batch, batch,
MAX_PROCESSING_AGE, MAX_PROCESSING_AGE,
None, transaction_status_sender.is_some(),
transaction_status_sender.is_some(), transaction_status_sender.is_some(),
); );
load_execute_time.stop(); load_execute_time.stop();
@ -580,6 +581,7 @@ impl BankingStage {
tx_results.processing_results, tx_results.processing_results,
TransactionBalancesSet::new(pre_balances, post_balances), TransactionBalancesSet::new(pre_balances, post_balances),
inner_instructions, inner_instructions,
transaction_logs,
sender, sender,
); );
} }
@ -1244,16 +1246,16 @@ mod tests {
bank.process_transaction(&fund_tx).unwrap(); bank.process_transaction(&fund_tx).unwrap();
// good tx // good tx
let to = Pubkey::new_rand(); let to = solana_sdk::pubkey::new_rand();
let tx = system_transaction::transfer(&mint_keypair, &to, 1, start_hash); let tx = system_transaction::transfer(&mint_keypair, &to, 1, start_hash);
// good tx, but no verify // good tx, but no verify
let to2 = Pubkey::new_rand(); let to2 = solana_sdk::pubkey::new_rand();
let tx_no_ver = system_transaction::transfer(&keypair, &to2, 2, start_hash); let tx_no_ver = system_transaction::transfer(&keypair, &to2, 2, start_hash);
// bad tx, AccountNotFound // bad tx, AccountNotFound
let keypair = Keypair::new(); let keypair = Keypair::new();
let to3 = Pubkey::new_rand(); let to3 = solana_sdk::pubkey::new_rand();
let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash); let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash);
// send 'em over // send 'em over
@ -1446,9 +1448,9 @@ mod tests {
let poh_recorder = Arc::new(Mutex::new(poh_recorder)); let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank); poh_recorder.lock().unwrap().set_working_bank(working_bank);
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let keypair2 = Keypair::new(); let keypair2 = Keypair::new();
let pubkey2 = Pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand();
let transactions = vec![ let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()), system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
@ -1526,7 +1528,7 @@ mod tests {
mint_keypair, mint_keypair,
.. ..
} = create_genesis_config(10_000); } = create_genesis_config(10_000);
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let transactions = vec![ let transactions = vec![
None, None,
@ -1607,7 +1609,7 @@ mod tests {
mint_keypair, mint_keypair,
.. ..
} = create_genesis_config(10_000); } = create_genesis_config(10_000);
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let transactions = vec![ let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()), system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
@ -1678,8 +1680,8 @@ mod tests {
#[test] #[test]
fn test_should_process_or_forward_packets() { fn test_should_process_or_forward_packets() {
let my_pubkey = Pubkey::new_rand(); let my_pubkey = solana_sdk::pubkey::new_rand();
let my_pubkey1 = Pubkey::new_rand(); let my_pubkey1 = solana_sdk::pubkey::new_rand();
assert_eq!( assert_eq!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, true, false,), BankingStage::consume_or_forward_packets(&my_pubkey, None, true, false,),
@ -1725,7 +1727,7 @@ mod tests {
.. ..
} = create_genesis_config(10_000); } = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let transactions = vec![system_transaction::transfer( let transactions = vec![system_transaction::transfer(
&mint_keypair, &mint_keypair,
@ -1822,8 +1824,8 @@ mod tests {
.. ..
} = create_genesis_config(10_000); } = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let pubkey1 = Pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand();
let transactions = vec![ let transactions = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()), system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
@ -1918,7 +1920,7 @@ mod tests {
} = create_genesis_config(10_000); } = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let transactions = let transactions =
vec![ vec![
@ -1936,7 +1938,7 @@ mod tests {
bank.slot(), bank.slot(),
Some((4, 4)), Some((4, 4)),
bank.ticks_per_slot(), bank.ticks_per_slot(),
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
&Arc::new(blockstore), &Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()), &Arc::new(PohConfig::default()),
@ -1976,8 +1978,8 @@ mod tests {
.. ..
} = create_genesis_config(10_000); } = create_genesis_config(10_000);
let bank = Arc::new(Bank::new(&genesis_config)); let bank = Arc::new(Bank::new(&genesis_config));
let pubkey = Pubkey::new_rand(); let pubkey = solana_sdk::pubkey::new_rand();
let pubkey1 = Pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand();
let keypair1 = Keypair::new(); let keypair1 = Keypair::new();
let success_tx = let success_tx =


@ -140,13 +140,12 @@ impl BroadcastRun for BroadcastFakeShredsRun {
mod tests { mod tests {
use super::*; use super::*;
use crate::contact_info::ContactInfo; use crate::contact_info::ContactInfo;
use solana_sdk::pubkey::Pubkey;
use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::net::{IpAddr, Ipv4Addr, SocketAddr};
#[test] #[test]
fn test_tvu_peers_ordering() { fn test_tvu_peers_ordering() {
let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost( let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
)); ));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new( cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(


@ -92,7 +92,7 @@ mod tests {
let bank0 = Arc::new(Bank::new(&genesis_config)); let bank0 = Arc::new(Bank::new(&genesis_config));
let tx = system_transaction::transfer( let tx = system_transaction::transfer(
&mint_keypair, &mint_keypair,
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
1, 1,
genesis_config.hash(), genesis_config.hash(),
); );


@ -2,7 +2,7 @@ use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_ledger::blockstore::Blockstore; use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure; use solana_measure::measure::Measure;
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_sdk::timing::slot_duration_from_slots_per_year; use solana_sdk::{feature_set, timing::slot_duration_from_slots_per_year};
use std::{ use std::{
collections::HashMap, collections::HashMap,
sync::{ sync::{
@ -60,13 +60,24 @@ impl CacheBlockTimeService {
} }
fn cache_block_time(bank: Arc<Bank>, blockstore: &Arc<Blockstore>) { fn cache_block_time(bank: Arc<Bank>, blockstore: &Arc<Blockstore>) {
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year()); if bank
let epoch = bank.epoch_schedule().get_epoch(bank.slot()); .feature_set
let stakes = HashMap::new(); .is_active(&feature_set::timestamp_correction::id())
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes); {
if let Err(e) = blockstore.cache_block_time(bank.slot(), bank.clock().unix_timestamp) {
error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e);
}
} else {
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
let epoch = bank.epoch_schedule().get_epoch(bank.slot());
let stakes = HashMap::new();
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
if let Err(e) = blockstore.cache_block_time(bank.slot(), slot_duration, stakes) { if let Err(e) =
error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e); blockstore.cache_block_time_from_slot_entries(bank.slot(), slot_duration, stakes)
{
error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e);
}
} }
} }
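
The hunk above gates cache_block_time() on the timestamp_correction feature: when the feature is active the bank clock's unix_timestamp is cached directly, otherwise the previous stake-weighted path (now cache_block_time_from_slot_entries) is kept. A stripped-down sketch of that dispatch with stand-in types; the feature id string and the println! bodies are placeholders for illustration, not the real feature-set or blockstore APIs:

use std::collections::HashSet;

// Illustrative stand-in for a runtime feature set keyed by feature id.
struct FeatureSet(HashSet<&'static str>);

impl FeatureSet {
    fn is_active(&self, id: &str) -> bool {
        self.0.contains(id)
    }
}

fn cache_block_time(features: &FeatureSet, slot: u64, clock_unix_timestamp: i64) {
    if features.is_active("timestamp_correction") {
        // New path: trust the clock sysvar timestamp directly.
        println!("cache slot {} with clock timestamp {}", slot, clock_unix_timestamp);
    } else {
        // Old path: derive a timestamp from slot duration and stake-weighted vote timestamps.
        println!("cache slot {} via stake-weighted slot entries", slot);
    }
}

fn main() {
    let features = FeatureSet(["timestamp_correction"].iter().copied().collect());
    cache_block_time(&features, 100, 1_600_000_000);
}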

File diff suppressed because it is too large.


@ -27,7 +27,7 @@ use solana_runtime::{
vote_sender_types::{ReplayVoteReceiver, ReplayedVote}, vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
}; };
use solana_sdk::{ use solana_sdk::{
clock::{Epoch, Slot}, clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT},
epoch_schedule::EpochSchedule, epoch_schedule::EpochSchedule,
hash::Hash, hash::Hash,
pubkey::Pubkey, pubkey::Pubkey,
@ -98,7 +98,7 @@ impl VoteTracker {
epoch_schedule: *root_bank.epoch_schedule(), epoch_schedule: *root_bank.epoch_schedule(),
..VoteTracker::default() ..VoteTracker::default()
}; };
vote_tracker.process_new_root_bank(&root_bank); vote_tracker.progress_with_new_root_bank(&root_bank);
assert_eq!( assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(), *vote_tracker.leader_schedule_epoch.read().unwrap(),
root_bank.get_leader_schedule_epoch(root_bank.slot()) root_bank.get_leader_schedule_epoch(root_bank.slot())
@ -174,7 +174,7 @@ impl VoteTracker {
self.keys.get_or_insert(&pubkey); self.keys.get_or_insert(&pubkey);
} }
fn update_leader_schedule_epoch(&self, root_bank: &Bank) { fn progress_leader_schedule_epoch(&self, root_bank: &Bank) {
// Update with any newly calculated epoch state about future epochs // Update with any newly calculated epoch state about future epochs
let start_leader_schedule_epoch = *self.leader_schedule_epoch.read().unwrap(); let start_leader_schedule_epoch = *self.leader_schedule_epoch.read().unwrap();
let mut greatest_leader_schedule_epoch = start_leader_schedule_epoch; let mut greatest_leader_schedule_epoch = start_leader_schedule_epoch;
@ -205,7 +205,7 @@ impl VoteTracker {
} }
} }
fn update_new_root(&self, root_bank: &Bank) { fn purge_stale_state(&self, root_bank: &Bank) {
// Purge any outdated slot data // Purge any outdated slot data
let new_root = root_bank.slot(); let new_root = root_bank.slot();
let root_epoch = root_bank.epoch(); let root_epoch = root_bank.epoch();
@ -220,15 +220,15 @@ impl VoteTracker {
self.epoch_authorized_voters self.epoch_authorized_voters
.write() .write()
.unwrap() .unwrap()
.retain(|epoch, _| epoch >= &root_epoch); .retain(|epoch, _| *epoch >= root_epoch);
self.keys.purge(); self.keys.purge();
*self.current_epoch.write().unwrap() = root_epoch; *self.current_epoch.write().unwrap() = root_epoch;
} }
} }
fn process_new_root_bank(&self, root_bank: &Bank) { fn progress_with_new_root_bank(&self, root_bank: &Bank) {
self.update_leader_schedule_epoch(root_bank); self.progress_leader_schedule_epoch(root_bank);
self.update_new_root(root_bank); self.purge_stale_state(root_bank);
} }
} }
@ -425,7 +425,7 @@ impl ClusterInfoVoteListener {
blockstore: Arc<Blockstore>, blockstore: Arc<Blockstore>,
bank_notification_sender: Option<BankNotificationSender>, bank_notification_sender: Option<BankNotificationSender>,
) -> Result<()> { ) -> Result<()> {
let mut optimistic_confirmation_verifier = let mut confirmation_verifier =
OptimisticConfirmationVerifier::new(bank_forks.read().unwrap().root()); OptimisticConfirmationVerifier::new(bank_forks.read().unwrap().root());
let mut last_process_root = Instant::now(); let mut last_process_root = Instant::now();
loop { loop {
@ -434,21 +434,21 @@ impl ClusterInfoVoteListener {
} }
let root_bank = bank_forks.read().unwrap().root_bank().clone(); let root_bank = bank_forks.read().unwrap().root_bank().clone();
if last_process_root.elapsed().as_millis() > 400 { if last_process_root.elapsed().as_millis() > DEFAULT_MS_PER_SLOT as u128 {
let unrooted_optimistic_slots = optimistic_confirmation_verifier let unrooted_optimistic_slots = confirmation_verifier
.get_unrooted_optimistic_slots(&root_bank, &blockstore); .verify_for_unrooted_optimistic_slots(&root_bank, &blockstore);
// SlotVoteTracker's for all `slots` in `unrooted_optimistic_slots` // SlotVoteTracker's for all `slots` in `unrooted_optimistic_slots`
// should still be available because we haven't purged in // should still be available because we haven't purged in
// `process_new_root_bank()` yet, which is called below // `progress_with_new_root_bank()` yet, which is called below
OptimisticConfirmationVerifier::log_unrooted_optimistic_slots( OptimisticConfirmationVerifier::log_unrooted_optimistic_slots(
&root_bank, &root_bank,
&vote_tracker, &vote_tracker,
&unrooted_optimistic_slots, &unrooted_optimistic_slots,
); );
vote_tracker.process_new_root_bank(&root_bank); vote_tracker.progress_with_new_root_bank(&root_bank);
last_process_root = Instant::now(); last_process_root = Instant::now();
} }
let optimistic_confirmed_slots = Self::get_and_process_votes( let confirmed_slots = Self::listen_and_confirm_votes(
&gossip_vote_txs_receiver, &gossip_vote_txs_receiver,
&vote_tracker, &vote_tracker,
&root_bank, &root_bank,
@ -457,19 +457,17 @@ impl ClusterInfoVoteListener {
&replay_votes_receiver, &replay_votes_receiver,
&bank_notification_sender, &bank_notification_sender,
); );
match confirmed_slots {
if let Err(e) = optimistic_confirmed_slots { Ok(confirmed_slots) => {
match e { confirmation_verifier.add_new_optimistic_confirmed_slots(confirmed_slots);
}
Err(e) => match e {
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)
| Error::ReadyTimeoutError => (), | Error::ReadyTimeoutError => (),
_ => { _ => {
error!("thread {:?} error {:?}", thread::current().name(), e); error!("thread {:?} error {:?}", thread::current().name(), e);
} }
} },
} else {
let optimistic_confirmed_slots = optimistic_confirmed_slots.unwrap();
optimistic_confirmation_verifier
.add_new_optimistic_confirmed_slots(optimistic_confirmed_slots);
} }
} }
} }
@ -483,7 +481,7 @@ impl ClusterInfoVoteListener {
verified_vote_sender: &VerifiedVoteSender, verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVoteReceiver, replay_votes_receiver: &ReplayVoteReceiver,
) -> Result<Vec<(Slot, Hash)>> { ) -> Result<Vec<(Slot, Hash)>> {
Self::get_and_process_votes( Self::listen_and_confirm_votes(
gossip_vote_txs_receiver, gossip_vote_txs_receiver,
vote_tracker, vote_tracker,
root_bank, root_bank,
@ -494,7 +492,7 @@ impl ClusterInfoVoteListener {
) )
} }
fn get_and_process_votes( fn listen_and_confirm_votes(
gossip_vote_txs_receiver: &VerifiedVoteTransactionsReceiver, gossip_vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
vote_tracker: &VoteTracker, vote_tracker: &VoteTracker,
root_bank: &Bank, root_bank: &Bank,
@ -523,7 +521,7 @@ impl ClusterInfoVoteListener {
let gossip_vote_txs: Vec<_> = gossip_vote_txs_receiver.try_iter().flatten().collect(); let gossip_vote_txs: Vec<_> = gossip_vote_txs_receiver.try_iter().flatten().collect();
let replay_votes: Vec<_> = replay_votes_receiver.try_iter().collect(); let replay_votes: Vec<_> = replay_votes_receiver.try_iter().collect();
if !gossip_vote_txs.is_empty() || !replay_votes.is_empty() { if !gossip_vote_txs.is_empty() || !replay_votes.is_empty() {
return Ok(Self::process_votes( return Ok(Self::filter_and_confirm_with_new_votes(
vote_tracker, vote_tracker,
gossip_vote_txs, gossip_vote_txs,
replay_votes, replay_votes,
@ -541,7 +539,7 @@ impl ClusterInfoVoteListener {
} }
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
fn update_new_votes( fn track_new_votes_and_notify_confirmations(
vote: Vote, vote: Vote,
vote_pubkey: &Pubkey, vote_pubkey: &Pubkey,
vote_tracker: &VoteTracker, vote_tracker: &VoteTracker,
@ -557,56 +555,52 @@ impl ClusterInfoVoteListener {
return; return;
} }
let last_vote_slot = vote.slots.last().unwrap(); let last_vote_slot = *vote.slots.last().unwrap();
let last_vote_hash = vote.hash;
let root = root_bank.slot(); let root = root_bank.slot();
let last_vote_hash = vote.hash;
let mut is_new_vote = false; let mut is_new_vote = false;
for slot in vote.slots.iter().rev() { // If slot is before the root, ignore it
// If slot is before the root, or so far ahead we don't have for slot in vote.slots.iter().filter(|slot| **slot > root).rev() {
// stake information, then ignore it let slot = *slot;
let epoch = root_bank.epoch_schedule().get_epoch(*slot);
// if we don't have stake information, ignore it
let epoch = root_bank.epoch_schedule().get_epoch(slot);
let epoch_stakes = root_bank.epoch_stakes(epoch); let epoch_stakes = root_bank.epoch_stakes(epoch);
if *slot <= root || epoch_stakes.is_none() { if epoch_stakes.is_none() {
continue; continue;
} }
let epoch_stakes = epoch_stakes.unwrap(); let epoch_stakes = epoch_stakes.unwrap();
let epoch_vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
let total_epoch_stake = epoch_stakes.total_stake();
let unduplicated_pubkey = vote_tracker.keys.get_or_insert(&vote_pubkey); let unduplicated_pubkey = vote_tracker.keys.get_or_insert(&vote_pubkey);
// The last vote slot, which is the greatest slot in the stack // The last vote slot, which is the greatest slot in the stack
// of votes in a vote transaction, qualifies for optimistic confirmation. // of votes in a vote transaction, qualifies for optimistic confirmation.
let update_optimistic_confirmation_info = if slot == last_vote_slot { if slot == last_vote_slot {
let stake = epoch_vote_accounts let vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
let stake = vote_accounts
.get(&vote_pubkey) .get(&vote_pubkey)
.map(|(stake, _)| *stake) .map(|(stake, _)| *stake)
.unwrap_or(0); .unwrap_or_default();
Some((stake, last_vote_hash)) let total_stake = epoch_stakes.total_stake();
} else {
None
};
// If this vote for this slot qualifies for optimistic confirmation
if let Some((stake, hash)) = update_optimistic_confirmation_info {
// Fast track processing of the last slot in a vote transactions // Fast track processing of the last slot in a vote transactions
// so that notifications for optimistic confirmation can be sent // so that notifications for optimistic confirmation can be sent
// as soon as possible. // as soon as possible.
let (is_confirmed, is_new) = Self::add_optimistic_confirmation_vote( let (is_confirmed, is_new) = Self::track_optimistic_confirmation_vote(
vote_tracker, vote_tracker,
*slot, last_vote_slot,
hash, last_vote_hash,
unduplicated_pubkey.clone(), unduplicated_pubkey.clone(),
stake, stake,
total_epoch_stake, total_stake,
); );
if is_confirmed { if is_confirmed {
new_optimistic_confirmed_slots.push((*slot, last_vote_hash)); new_optimistic_confirmed_slots.push((last_vote_slot, last_vote_hash));
// Notify subscribers about new optimistic confirmation // Notify subscribers about new optimistic confirmation
if let Some(sender) = bank_notification_sender { if let Some(sender) = bank_notification_sender {
sender sender
.send(BankNotification::OptimisticallyConfirmed(*slot)) .send(BankNotification::OptimisticallyConfirmed(last_vote_slot))
.unwrap_or_else(|err| { .unwrap_or_else(|err| {
warn!("bank_notification_sender failed: {:?}", err) warn!("bank_notification_sender failed: {:?}", err)
}); });
@ -617,7 +611,7 @@ impl ClusterInfoVoteListener {
// By now: // By now:
// 1) The vote must have come from ReplayStage, // 1) The vote must have come from ReplayStage,
// 2) We've seen this vote from replay for this hash before // 2) We've seen this vote from replay for this hash before
// (`add_optimistic_confirmation_vote()` will not set `is_new == true` // (`track_optimistic_confirmation_vote()` will not set `is_new == true`
// for same slot different hash), so short circuit because this vote // for same slot different hash), so short circuit because this vote
// has no new information // has no new information
@ -629,7 +623,7 @@ impl ClusterInfoVoteListener {
is_new_vote = is_new; is_new_vote = is_new;
} }
diff.entry(*slot) diff.entry(slot)
.or_default() .or_default()
.entry(unduplicated_pubkey) .entry(unduplicated_pubkey)
.and_modify(|seen_in_gossip_previously| { .and_modify(|seen_in_gossip_previously| {
@ -644,7 +638,40 @@ impl ClusterInfoVoteListener {
} }
} }
fn process_votes( fn filter_gossip_votes(
vote_tracker: &VoteTracker,
vote_pubkey: &Pubkey,
vote: &Vote,
gossip_tx: &Transaction,
) -> bool {
if vote.slots.is_empty() {
return false;
}
let last_vote_slot = vote.slots.last().unwrap();
// Votes from gossip need to be verified as they have not been
// verified by the replay pipeline. Determine the authorized voter
// based on the last vote slot. This will drop votes from authorized
// voters trying to make votes for slots earlier than the epoch for
// which they are authorized
let actual_authorized_voter =
vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);
if actual_authorized_voter.is_none() {
return false;
}
// Voting without the correct authorized pubkey, dump the vote
if !VoteTracker::vote_contains_authorized_voter(
&gossip_tx,
&actual_authorized_voter.unwrap(),
) {
return false;
}
true
}
fn filter_and_confirm_with_new_votes(
vote_tracker: &VoteTracker, vote_tracker: &VoteTracker,
gossip_vote_txs: Vec<Transaction>, gossip_vote_txs: Vec<Transaction>,
replayed_votes: Vec<ReplayedVote>, replayed_votes: Vec<ReplayedVote>,
@ -662,37 +689,13 @@ impl ClusterInfoVoteListener {
.filter_map(|gossip_tx| { .filter_map(|gossip_tx| {
vote_transaction::parse_vote_transaction(gossip_tx) vote_transaction::parse_vote_transaction(gossip_tx)
.filter(|(vote_pubkey, vote, _)| { .filter(|(vote_pubkey, vote, _)| {
if vote.slots.is_empty() { Self::filter_gossip_votes(vote_tracker, vote_pubkey, vote, gossip_tx)
return false;
}
let last_vote_slot = vote.slots.last().unwrap();
// Votes from gossip need to be verified as they have not been
// verified by the replay pipeline. Determine the authorized voter
// based on the last vote slot. This will drop votes from authorized
// voters trying to make votes for slots earlier than the epoch for
// which they are authorized
let actual_authorized_voter =
vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);
if actual_authorized_voter.is_none() {
return false;
}
// Voting without the correct authorized pubkey, dump the vote
if !VoteTracker::vote_contains_authorized_voter(
&gossip_tx,
&actual_authorized_voter.unwrap(),
) {
return false;
}
true
}) })
.map(|v| (true, v)) .map(|v| (true, v))
}) })
.chain(replayed_votes.into_iter().map(|v| (false, v))) .chain(replayed_votes.into_iter().map(|v| (false, v)))
{ {
Self::update_new_votes( Self::track_new_votes_and_notify_confirmations(
vote, vote,
&vote_pubkey, &vote_pubkey,
&vote_tracker, &vote_tracker,
@ -757,7 +760,7 @@ impl ClusterInfoVoteListener {
// Returns if the slot was optimistically confirmed, and whether // Returns if the slot was optimistically confirmed, and whether
// the slot was new // the slot was new
fn add_optimistic_confirmation_vote( fn track_optimistic_confirmation_vote(
vote_tracker: &VoteTracker, vote_tracker: &VoteTracker,
slot: Slot, slot: Slot,
hash: Hash, hash: Hash,
@ -898,7 +901,7 @@ mod tests {
let (vote_tracker, bank, _, _) = setup(); let (vote_tracker, bank, _, _) = setup();
// Check outdated slots are purged with new root // Check outdated slots are purged with new root
let new_voter = Arc::new(Pubkey::new_rand()); let new_voter = Arc::new(solana_sdk::pubkey::new_rand());
// Make separate copy so the original doesn't count toward // Make separate copy so the original doesn't count toward
// the ref count, which would prevent cleanup // the ref count, which would prevent cleanup
let new_voter_ = Arc::new(*new_voter); let new_voter_ = Arc::new(*new_voter);
@ -909,7 +912,7 @@ mod tests {
.unwrap() .unwrap()
.contains_key(&bank.slot())); .contains_key(&bank.slot()));
let bank1 = Bank::new_from_parent(&bank, &Pubkey::default(), bank.slot() + 1); let bank1 = Bank::new_from_parent(&bank, &Pubkey::default(), bank.slot() + 1);
vote_tracker.process_new_root_bank(&bank1); vote_tracker.progress_with_new_root_bank(&bank1);
assert!(!vote_tracker assert!(!vote_tracker
.slot_vote_trackers .slot_vote_trackers
.read() .read()
@ -926,7 +929,7 @@ mod tests {
bank.epoch_schedule() bank.epoch_schedule()
.get_first_slot_in_epoch(current_epoch + 1), .get_first_slot_in_epoch(current_epoch + 1),
); );
vote_tracker.process_new_root_bank(&new_epoch_bank); vote_tracker.progress_with_new_root_bank(&new_epoch_bank);
assert!(!vote_tracker.keys.0.read().unwrap().contains(&new_voter)); assert!(!vote_tracker.keys.0.read().unwrap().contains(&new_voter));
assert_eq!( assert_eq!(
*vote_tracker.current_epoch.read().unwrap(), *vote_tracker.current_epoch.read().unwrap(),
@ -956,7 +959,7 @@ mod tests {
); );
let next_leader_schedule_bank = let next_leader_schedule_bank =
Bank::new_from_parent(&bank, &Pubkey::default(), next_leader_schedule_computed); Bank::new_from_parent(&bank, &Pubkey::default(), next_leader_schedule_computed);
vote_tracker.update_leader_schedule_epoch(&next_leader_schedule_bank); vote_tracker.progress_leader_schedule_epoch(&next_leader_schedule_bank);
assert_eq!( assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(), *vote_tracker.leader_schedule_epoch.read().unwrap(),
next_leader_schedule_epoch next_leader_schedule_epoch
@ -1007,7 +1010,7 @@ mod tests {
&votes_sender, &votes_sender,
&replay_votes_sender, &replay_votes_sender,
); );
ClusterInfoVoteListener::get_and_process_votes( ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_receiver, &votes_receiver,
&vote_tracker, &vote_tracker,
&bank3, &bank3,
@ -1036,7 +1039,7 @@ mod tests {
&votes_sender, &votes_sender,
&replay_votes_sender, &replay_votes_sender,
); );
ClusterInfoVoteListener::get_and_process_votes( ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_receiver, &votes_receiver,
&vote_tracker, &vote_tracker,
&bank3, &bank3,
@ -1114,7 +1117,7 @@ mod tests {
); );
// Check that all the votes were registered for each validator correctly // Check that all the votes were registered for each validator correctly
ClusterInfoVoteListener::get_and_process_votes( ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_txs_receiver, &votes_txs_receiver,
&vote_tracker, &vote_tracker,
&bank0, &bank0,
@ -1233,7 +1236,7 @@ mod tests {
} }
// Read and process votes from channel `votes_receiver` // Read and process votes from channel `votes_receiver`
ClusterInfoVoteListener::get_and_process_votes( ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_txs_receiver, &votes_txs_receiver,
&vote_tracker, &vote_tracker,
&bank0, &bank0,
@ -1328,7 +1331,7 @@ mod tests {
)) ))
.unwrap(); .unwrap();
} }
let _ = ClusterInfoVoteListener::get_and_process_votes( let _ = ClusterInfoVoteListener::listen_and_confirm_votes(
&votes_receiver, &votes_receiver,
&vote_tracker, &vote_tracker,
&bank, &bank,
@ -1474,7 +1477,7 @@ mod tests {
)]; )];
let (verified_vote_sender, _verified_vote_receiver) = unbounded(); let (verified_vote_sender, _verified_vote_receiver) = unbounded();
ClusterInfoVoteListener::process_votes( ClusterInfoVoteListener::filter_and_confirm_with_new_votes(
&vote_tracker, &vote_tracker,
vote_tx, vote_tx,
// Add gossip vote for same slot, should not affect outcome // Add gossip vote for same slot, should not affect outcome
@ -1545,7 +1548,7 @@ mod tests {
let new_root_bank = let new_root_bank =
Bank::new_from_parent(&bank, &Pubkey::default(), first_slot_in_new_epoch - 2); Bank::new_from_parent(&bank, &Pubkey::default(), first_slot_in_new_epoch - 2);
ClusterInfoVoteListener::process_votes( ClusterInfoVoteListener::filter_and_confirm_with_new_votes(
&vote_tracker, &vote_tracker,
vote_txs, vote_txs,
vec![( vec![(
@ -1681,7 +1684,7 @@ mod tests {
fn run_test_verify_votes_1_pass(hash: Option<Hash>) { fn run_test_verify_votes_1_pass(hash: Option<Hash>) {
let vote_tx = test_vote_tx(hash); let vote_tx = test_vote_tx(hash);
let votes = vec![vote_tx]; let votes = vec![vote_tx];
let labels = vec![CrdsValueLabel::Vote(0, Pubkey::new_rand())]; let labels = vec![CrdsValueLabel::Vote(0, solana_sdk::pubkey::new_rand())];
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels); let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels);
assert_eq!(vote_txs.len(), 1); assert_eq!(vote_txs.len(), 1);
verify_packets_len(&packets, 1); verify_packets_len(&packets, 1);
@ -1698,7 +1701,7 @@ mod tests {
let mut bad_vote = vote_tx.clone(); let mut bad_vote = vote_tx.clone();
bad_vote.signatures[0] = Signature::default(); bad_vote.signatures[0] = Signature::default();
let votes = vec![vote_tx.clone(), bad_vote, vote_tx]; let votes = vec![vote_tx.clone(), bad_vote, vote_tx];
let label = CrdsValueLabel::Vote(0, Pubkey::new_rand()); let label = CrdsValueLabel::Vote(0, solana_sdk::pubkey::new_rand());
let labels: Vec<_> = (0..votes.len()).map(|_| label.clone()).collect(); let labels: Vec<_> = (0..votes.len()).map(|_| label.clone()).collect();
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels); let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels);
assert_eq!(vote_txs.len(), 2); assert_eq!(vote_txs.len(), 2);
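
Most of the changes above are renames (get_and_process_votes becomes listen_and_confirm_votes, add_optimistic_confirmation_vote becomes track_optimistic_confirmation_vote) plus pulling the gossip-vote validity check out into filter_gossip_votes, while the stake accounting stays essentially the same: only the last slot of a vote contributes the voter's epoch stake toward optimistic confirmation of that (slot, hash) pair. A toy sketch of a supermajority tally in that spirit; the 2/3 threshold constant and the string-keyed types are illustrative, not the codebase's actual values:

use std::collections::{HashMap, HashSet};

const CONFIRMATION_THRESHOLD: f64 = 2.0 / 3.0; // illustrative value only

#[derive(Default)]
struct SlotTally {
    voted: HashSet<&'static str>, // validators already counted for this (slot, hash)
    stake: u64,
}

// Returns true once accumulated stake for (slot, hash) crosses the threshold.
fn track_vote(
    tallies: &mut HashMap<(u64, &'static str), SlotTally>,
    slot: u64,
    hash: &'static str,
    validator: &'static str,
    stake: u64,
    total_stake: u64,
) -> bool {
    let tally = tallies.entry((slot, hash)).or_default();
    if tally.voted.insert(validator) {
        tally.stake += stake; // count each validator's stake at most once
    }
    (tally.stake as f64 / total_stake as f64) > CONFIRMATION_THRESHOLD
}

fn main() {
    let mut tallies = HashMap::new();
    let total = 100;
    assert!(!track_vote(&mut tallies, 7, "hash", "a", 40, total));
    assert!(!track_vote(&mut tallies, 7, "hash", "a", 40, total)); // duplicate, not recounted
    assert!(track_vote(&mut tallies, 7, "hash", "b", 30, total)); // 70% exceeds 2/3
}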


@ -237,8 +237,8 @@ mod tests {
let mut c1 = ContactInfo::default(); let mut c1 = ContactInfo::default();
let mut c2 = ContactInfo::default(); let mut c2 = ContactInfo::default();
let mut map = HashMap::new(); let mut map = HashMap::new();
let k1 = Pubkey::new_rand(); let k1 = solana_sdk::pubkey::new_rand();
let k2 = Pubkey::new_rand(); let k2 = solana_sdk::pubkey::new_rand();
map.insert(Arc::new(k1), std::u64::MAX / 2); map.insert(Arc::new(k1), std::u64::MAX / 2);
map.insert(Arc::new(k2), 0); map.insert(Arc::new(k2), 0);
cs.cluster_slots cs.cluster_slots
@ -259,8 +259,8 @@ mod tests {
let mut c1 = ContactInfo::default(); let mut c1 = ContactInfo::default();
let mut c2 = ContactInfo::default(); let mut c2 = ContactInfo::default();
let mut map = HashMap::new(); let mut map = HashMap::new();
let k1 = Pubkey::new_rand(); let k1 = solana_sdk::pubkey::new_rand();
let k2 = Pubkey::new_rand(); let k2 = solana_sdk::pubkey::new_rand();
map.insert(Arc::new(k2), 0); map.insert(Arc::new(k2), 0);
cs.cluster_slots cs.cluster_slots
.write() .write()
@ -290,7 +290,7 @@ mod tests {
let cs = ClusterSlots::default(); let cs = ClusterSlots::default();
let mut contact_infos = vec![ContactInfo::default(); 2]; let mut contact_infos = vec![ContactInfo::default(); 2];
for ci in contact_infos.iter_mut() { for ci in contact_infos.iter_mut() {
ci.id = Pubkey::new_rand(); ci.id = solana_sdk::pubkey::new_rand();
} }
let slot = 9; let slot = 9;
@ -359,7 +359,7 @@ mod tests {
let mut epoch_slot = EpochSlots::default(); let mut epoch_slot = EpochSlots::default();
epoch_slot.fill(&[1], 0); epoch_slot.fill(&[1], 0);
cs.update_internal(0, (vec![epoch_slot], None)); cs.update_internal(0, (vec![epoch_slot], None));
let self_id = Pubkey::new_rand(); let self_id = solana_sdk::pubkey::new_rand();
assert_eq!( assert_eq!(
cs.generate_repairs_for_missing_slots(&self_id, 0), cs.generate_repairs_for_missing_slots(&self_id, 0),
vec![RepairType::HighestShred(1, 0)] vec![RepairType::HighestShred(1, 0)]


@ -181,6 +181,7 @@ mod test {
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default()); let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info); let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
ClusterSlotsService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info); ClusterSlotsService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
cluster_info.flush_push_queue();
let lowest = cluster_info let lowest = cluster_info
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| { .get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
lowest_slot.clone() lowest_slot.clone()


@ -375,19 +375,22 @@ mod tests {
let rooted_stake_amount = 40; let rooted_stake_amount = 40;
let sk1 = Pubkey::new_rand(); let sk1 = solana_sdk::pubkey::new_rand();
let pk1 = Pubkey::new_rand(); let pk1 = solana_sdk::pubkey::new_rand();
let mut vote_account1 = vote_state::create_account(&pk1, &Pubkey::new_rand(), 0, 100); let mut vote_account1 =
vote_state::create_account(&pk1, &solana_sdk::pubkey::new_rand(), 0, 100);
let stake_account1 = let stake_account1 =
stake_state::create_account(&sk1, &pk1, &vote_account1, &genesis_config.rent, 100); stake_state::create_account(&sk1, &pk1, &vote_account1, &genesis_config.rent, 100);
let sk2 = Pubkey::new_rand(); let sk2 = solana_sdk::pubkey::new_rand();
let pk2 = Pubkey::new_rand(); let pk2 = solana_sdk::pubkey::new_rand();
let mut vote_account2 = vote_state::create_account(&pk2, &Pubkey::new_rand(), 0, 50); let mut vote_account2 =
vote_state::create_account(&pk2, &solana_sdk::pubkey::new_rand(), 0, 50);
let stake_account2 = let stake_account2 =
stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50); stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
let sk3 = Pubkey::new_rand(); let sk3 = solana_sdk::pubkey::new_rand();
let pk3 = Pubkey::new_rand(); let pk3 = solana_sdk::pubkey::new_rand();
let mut vote_account3 = vote_state::create_account(&pk3, &Pubkey::new_rand(), 0, 1); let mut vote_account3 =
vote_state::create_account(&pk3, &solana_sdk::pubkey::new_rand(), 0, 1);
let stake_account3 = stake_state::create_account( let stake_account3 = stake_state::create_account(
&sk3, &sk3,
&pk3, &pk3,
@ -395,9 +398,10 @@ mod tests {
&genesis_config.rent, &genesis_config.rent,
rooted_stake_amount, rooted_stake_amount,
); );
let sk4 = Pubkey::new_rand(); let sk4 = solana_sdk::pubkey::new_rand();
let pk4 = Pubkey::new_rand(); let pk4 = solana_sdk::pubkey::new_rand();
let mut vote_account4 = vote_state::create_account(&pk4, &Pubkey::new_rand(), 0, 1); let mut vote_account4 =
vote_state::create_account(&pk4, &solana_sdk::pubkey::new_rand(), 0, 1);
let stake_account4 = stake_state::create_account( let stake_account4 = stake_state::create_account(
&sk4, &sk4,
&pk4, &pk4,


@ -1,6 +1,7 @@
use crate::rpc_subscriptions::RpcSubscriptions; use crate::rpc_subscriptions::RpcSubscriptions;
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender}; use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_ledger::blockstore::{Blockstore, CompletedDataSetInfo}; use solana_ledger::blockstore::{Blockstore, CompletedDataSetInfo};
use solana_ledger::entry::Entry;
use solana_sdk::signature::Signature; use solana_sdk::signature::Signature;
use std::{ use std::{
sync::{ sync::{
@ -61,10 +62,7 @@ impl CompletedDataSetsService {
} = completed_set_info; } = completed_set_info;
match blockstore.get_entries_in_data_block(slot, start_index, end_index, None) { match blockstore.get_entries_in_data_block(slot, start_index, end_index, None) {
Ok(entries) => { Ok(entries) => {
let transactions = entries let transactions = Self::get_transaction_signatures(entries);
.into_iter()
.flat_map(|e| e.transactions.into_iter().map(|t| t.signatures[0]))
.collect::<Vec<Signature>>();
if !transactions.is_empty() { if !transactions.is_empty() {
rpc_subscriptions.notify_signatures_received((slot, transactions)); rpc_subscriptions.notify_signatures_received((slot, transactions));
} }
@ -76,7 +74,51 @@ impl CompletedDataSetsService {
Ok(()) Ok(())
} }
fn get_transaction_signatures(entries: Vec<Entry>) -> Vec<Signature> {
entries
.into_iter()
.flat_map(|e| {
e.transactions
.into_iter()
.filter_map(|mut t| t.signatures.drain(..).next())
})
.collect::<Vec<Signature>>()
}
pub fn join(self) -> thread::Result<()> { pub fn join(self) -> thread::Result<()> {
self.thread_hdl.join() self.thread_hdl.join()
} }
} }
#[cfg(test)]
pub mod test {
use super::*;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::transaction::Transaction;
#[test]
fn test_zero_signatures() {
let tx = Transaction::new_with_payer(&[], None);
let entries = vec![Entry::new(&Hash::default(), 1, vec![tx])];
let signatures = CompletedDataSetsService::get_transaction_signatures(entries);
assert!(signatures.is_empty());
}
#[test]
fn test_multi_signatures() {
let kp = Keypair::new();
let tx =
Transaction::new_signed_with_payer(&[], Some(&kp.pubkey()), &[&kp], Hash::default());
let entries = vec![Entry::new(&Hash::default(), 1, vec![tx.clone()])];
let signatures = CompletedDataSetsService::get_transaction_signatures(entries);
assert_eq!(signatures.len(), 1);
let entries = vec![
Entry::new(&Hash::default(), 1, vec![tx.clone(), tx.clone()]),
Entry::new(&Hash::default(), 1, vec![tx]),
];
let signatures = CompletedDataSetsService::get_transaction_signatures(entries);
assert_eq!(signatures.len(), 3);
}
}
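
The new get_transaction_signatures() helper (and the tests above) replace direct signatures[0] indexing with signatures.drain(..).next(), so a transaction carrying no signature is skipped instead of panicking. The same idea with plain stand-in types, as a minimal sketch:

// Stand-in for a transaction; signatures are modeled as plain integers here.
struct Tx {
    signatures: Vec<u64>,
}

fn first_signatures(entries: Vec<Vec<Tx>>) -> Vec<u64> {
    entries
        .into_iter()
        .flat_map(|txs| {
            txs.into_iter()
                // Take the first signature if present; unsigned transactions yield None.
                .filter_map(|mut tx| tx.signatures.drain(..).next())
        })
        .collect()
}

fn main() {
    let entries = vec![
        vec![Tx { signatures: vec![] }, Tx { signatures: vec![1] }], // unsigned tx is skipped
        vec![Tx { signatures: vec![2, 3] }],                         // only the first is kept
    ];
    assert_eq!(first_signatures(entries), vec![1, 2]);
}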


@ -30,11 +30,11 @@ use std::{
}; };
use thiserror::Error; use thiserror::Error;
#[derive(PartialEq, Clone, Debug)] #[derive(PartialEq, Clone, Debug, AbiExample)]
pub enum SwitchForkDecision { pub enum SwitchForkDecision {
SwitchProof(Hash), SwitchProof(Hash),
NoSwitch, SameFork,
FailedSwitchThreshold, FailedSwitchThreshold(u64, u64),
} }
impl SwitchForkDecision { impl SwitchForkDecision {
@ -45,8 +45,11 @@ impl SwitchForkDecision {
authorized_voter_pubkey: &Pubkey, authorized_voter_pubkey: &Pubkey,
) -> Option<Instruction> { ) -> Option<Instruction> {
match self { match self {
SwitchForkDecision::FailedSwitchThreshold => None, SwitchForkDecision::FailedSwitchThreshold(_, total_stake) => {
SwitchForkDecision::NoSwitch => Some(vote_instruction::vote( assert_ne!(*total_stake, 0);
None
}
SwitchForkDecision::SameFork => Some(vote_instruction::vote(
vote_account_pubkey, vote_account_pubkey,
authorized_voter_pubkey, authorized_voter_pubkey,
vote, vote,
@ -61,6 +64,10 @@ impl SwitchForkDecision {
} }
} }
} }
pub fn can_vote(&self) -> bool {
!matches!(self, SwitchForkDecision::FailedSwitchThreshold(_, _))
}
} }
pub const VOTE_THRESHOLD_DEPTH: usize = 8; pub const VOTE_THRESHOLD_DEPTH: usize = 8;
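
The new can_vote() helper reduces the vote decision to a single matches! negation: every decision other than FailedSwitchThreshold permits voting. A self-contained sketch mirroring the enum shape from the hunk above, with a fixed-size byte array standing in for Hash:

#[derive(Debug, Clone, PartialEq)]
enum SwitchForkDecision {
    SwitchProof([u8; 32]),
    SameFork,
    FailedSwitchThreshold(u64, u64), // (locked_out_stake, total_stake)
}

impl SwitchForkDecision {
    // Voting is allowed for every decision except a failed switch threshold.
    fn can_vote(&self) -> bool {
        !matches!(self, SwitchForkDecision::FailedSwitchThreshold(_, _))
    }
}

fn main() {
    assert!(SwitchForkDecision::SameFork.can_vote());
    assert!(SwitchForkDecision::SwitchProof([0; 32]).can_vote());
    assert!(!SwitchForkDecision::FailedSwitchThreshold(10, 100).can_vote());
}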
@ -82,7 +89,7 @@ pub(crate) struct ComputedBankState {
pub pubkey_votes: Arc<PubkeyVotes>, pub pubkey_votes: Arc<PubkeyVotes>,
} }
#[frozen_abi(digest = "2ZUeCLMVQxmHYbeqMH7M97ifVSKoVErGvRHzyxcQRjgU")] #[frozen_abi(digest = "Eay84NBbJqiMBfE7HHH2o6e51wcvoU79g8zCi5sw6uj3")]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)]
pub struct Tower { pub struct Tower {
node_pubkey: Pubkey, node_pubkey: Pubkey,
@ -100,7 +107,11 @@ pub struct Tower {
// (This is a special field for slashing-free validator restart with edge cases). // (This is a special field for slashing-free validator restart with edge cases).
// This could be emptied after some time; but left intact indefinitely for easier // This could be emptied after some time; but left intact indefinitely for easier
// implementation // implementation
// Further, stray slot can be stale or not. `Stale` here means whether given
// bank_forks (=~ ledger) lacks the slot or not.
stray_restored_slot: Option<Slot>, stray_restored_slot: Option<Slot>,
#[serde(skip)]
pub last_switch_threshold_check: Option<(Slot, SwitchForkDecision)>,
} }
impl Default for Tower { impl Default for Tower {
@ -115,6 +126,7 @@ impl Default for Tower {
path: PathBuf::default(), path: PathBuf::default(),
tmp_path: PathBuf::default(), tmp_path: PathBuf::default(),
stray_restored_slot: Option::default(), stray_restored_slot: Option::default(),
last_switch_threshold_check: Option::default(),
}; };
// VoteState::root_slot is ensured to be Some in Tower // VoteState::root_slot is ensured to be Some in Tower
tower.lockouts.root_slot = Some(Slot::default()); tower.lockouts.root_slot = Some(Slot::default());
@ -377,17 +389,14 @@ impl Tower {
pub fn record_bank_vote(&mut self, vote: Vote) -> Option<Slot> { pub fn record_bank_vote(&mut self, vote: Vote) -> Option<Slot> {
let slot = vote.last_voted_slot().unwrap_or(0); let slot = vote.last_voted_slot().unwrap_or(0);
trace!("{} record_vote for {}", self.node_pubkey, slot); trace!("{} record_vote for {}", self.node_pubkey, slot);
let root_slot = self.lockouts.root_slot; let old_root = self.root();
self.lockouts.process_vote_unchecked(&vote); self.lockouts.process_vote_unchecked(&vote);
self.last_vote = vote; self.last_vote = vote;
let new_root = self.root();
datapoint_info!( datapoint_info!("tower-vote", ("latest", slot, i64), ("root", new_root, i64));
"tower-vote", if old_root != new_root {
("latest", slot, i64), Some(new_root)
("root", self.lockouts.root_slot.unwrap_or(0), i64)
);
if root_slot != self.lockouts.root_slot {
Some(self.lockouts.root_slot.unwrap())
} else { } else {
None None
} }
@ -431,13 +440,13 @@ impl Tower {
// root may be forcibly set by arbitrary replay root slot, for example from a root // root may be forcibly set by arbitrary replay root slot, for example from a root
// after replaying a snapshot. // after replaying a snapshot.
// Also, tower.root() couldn't be None; do_initialize_lockouts() ensures that. // Also, tower.root() couldn't be None; initialize_lockouts() ensures that.
// Conceptually, every tower must have been constructed from a concrete starting point, // Conceptually, every tower must have been constructed from a concrete starting point,
// which establishes the origin of trust (i.e. root) whether booting from genesis (slot 0) or // which establishes the origin of trust (i.e. root) whether booting from genesis (slot 0) or
// snapshot (slot N). In other words, there should be no possibility a Tower doesn't have // snapshot (slot N). In other words, there should be no possibility a Tower doesn't have
// root, unlike young vote accounts. // root, unlike young vote accounts.
pub fn root(&self) -> Option<Slot> { pub fn root(&self) -> Slot {
self.lockouts.root_slot self.lockouts.root_slot.unwrap()
} }
// a slot is recent if it's newer than the last vote we have // a slot is recent if it's newer than the last vote we have
@ -493,7 +502,7 @@ impl Tower {
false false
} }
pub(crate) fn check_switch_threshold( fn make_check_switch_threshold_decision(
&self, &self,
switch_slot: u64, switch_slot: u64,
ancestors: &HashMap<Slot, HashSet<u64>>, ancestors: &HashMap<Slot, HashSet<u64>>,
@ -504,13 +513,66 @@ impl Tower {
) -> SwitchForkDecision { ) -> SwitchForkDecision {
self.last_voted_slot() self.last_voted_slot()
.map(|last_voted_slot| { .map(|last_voted_slot| {
let root = self.lockouts.root_slot.unwrap_or(0); let root = self.root();
let empty_ancestors = HashSet::default(); let empty_ancestors = HashSet::default();
let empty_ancestors_due_to_minor_unsynced_ledger = || {
// This condition (stale stray last vote) shouldn't occur under normal validator
// operation, indicating something unusual happened.
// This condition could be introduced by manual ledger mishandling,
// validator SEGV, OS/HW crash, or plain No Free Space FS error.
// However, returning empty ancestors as a fallback here shouldn't result in
// slashing by itself (Note that we couldn't fully preclude any kind of slashing if
// the failure was OS or HW level).
// Firstly, lockout is ensured elsewhere.
// Also, there is no risk of optimistic conf. violation. Although empty ancestors
// could result in incorrect (= more than actual) locked_out_stake and
// false-positive SwitchProof later in this function, there should be no such a
// heavier fork candidate, first of all, if the last vote (or any of its
// unavailable ancestors) were already optimistically confirmed.
// The only exception is that other validator is already violating it...
if self.is_first_switch_check() && switch_slot < last_voted_slot {
// `switch < last` is needed not to warn! this message just because of using
// newer snapshots on validator restart
let message = format!(
"bank_forks doesn't have corresponding data for the stray restored \
last vote({}), meaning some inconsistency between saved tower and ledger.",
last_voted_slot
);
warn!("{}", message);
datapoint_warn!("tower_warn", ("warn", message, String));
}
&empty_ancestors
};
let suspended_decision_due_to_major_unsynced_ledger = || {
// This peculiar corner handling is needed mainly for a tower which is newer than
// blockstore. (Yeah, we tolerate it for ease of maintaining validator by operators)
// This condition could be introduced by manual ledger mishandling,
// validator SEGV, OS/HW crash, or plain No Free Space FS error.
// When we're in this clause, it basically means validator is badly running
// with a future tower while replaying past slots, especially problematic is
// last_voted_slot.
// So, don't re-vote on it by returning pseudo FailedSwitchThreshold, otherwise
// there would be slashing because of double vote on one of last_vote_ancestors.
// (Well, needless to say, re-creating the duplicate block must be handled properly
// at the banking stage: https://github.com/solana-labs/solana/issues/8232)
//
// To be specific, the replay stage is tricked into a false perception where
// last_vote_ancestors is AVAILABLE for descendant-of-`switch_slot`, stale, and
// stray slots (which should always be empty_ancestors).
//
// This is covered by test_future_tower_* in local_cluster
SwitchForkDecision::FailedSwitchThreshold(0, total_stake)
};
let last_vote_ancestors = let last_vote_ancestors =
ancestors.get(&last_voted_slot).unwrap_or_else(|| { ancestors.get(&last_voted_slot).unwrap_or_else(|| {
if !self.is_stray_last_vote() { if !self.is_stray_last_vote() {
// Unless last vote is stray, ancestors.get(last_voted_slot) must // Unless last vote is stray and stale, ancestors.get(last_voted_slot) must
// return Some(_), justifying to panic! here. // return Some(_), justifying to panic! here.
// Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None, // Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None,
// if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be // if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be
@ -520,10 +582,7 @@ impl Tower {
// all of them. // all of them.
panic!("no ancestors found with slot: {}", last_voted_slot); panic!("no ancestors found with slot: {}", last_voted_slot);
} else { } else {
// bank_forks doesn't have corresponding data for the stray restored last vote, empty_ancestors_due_to_minor_unsynced_ledger()
// meaning some inconsistency between saved tower and ledger.
// (newer snapshot, or only a saved tower is moved over to new setup?)
&empty_ancestors
} }
}); });
@ -532,12 +591,21 @@ impl Tower {
if switch_slot == last_voted_slot || switch_slot_ancestors.contains(&last_voted_slot) { if switch_slot == last_voted_slot || switch_slot_ancestors.contains(&last_voted_slot) {
// If the `switch_slot is a descendant of the last vote, // If the `switch_slot is a descendant of the last vote,
// no switching proof is necessary // no switching proof is necessary
return SwitchForkDecision::NoSwitch; return SwitchForkDecision::SameFork;
} }
// Should never consider switching to an ancestor if last_vote_ancestors.contains(&switch_slot) {
// of your last vote if !self.is_stray_last_vote() {
assert!(!last_vote_ancestors.contains(&switch_slot)); panic!(
"Should never consider switching to slot ({}), which is ancestors({:?}) of last vote: {}",
switch_slot,
last_vote_ancestors,
last_voted_slot
);
} else {
return suspended_decision_due_to_major_unsynced_ledger();
}
}
// By this point, we know the `switch_slot` is on a different fork // By this point, we know the `switch_slot` is on a different fork
// (is neither an ancestor nor descendant of `last_vote`), so a // (is neither an ancestor nor descendant of `last_vote`), so a
@ -598,7 +666,7 @@ impl Tower {
} }
// Only count lockouts on slots that are: // Only count lockouts on slots that are:
// 1) Not ancestors of `last_vote` // 1) Not ancestors of `last_vote`, meaning being on different fork
// 2) Not from before the current root as we can't determine if // 2) Not from before the current root as we can't determine if
// anything before the root was an ancestor of `last_vote` or not // anything before the root was an ancestor of `last_vote` or not
if !last_vote_ancestors.contains(lockout_interval_start) if !last_vote_ancestors.contains(lockout_interval_start)
@ -622,10 +690,43 @@ impl Tower {
if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD { if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
SwitchForkDecision::SwitchProof(switch_proof) SwitchForkDecision::SwitchProof(switch_proof)
} else { } else {
SwitchForkDecision::FailedSwitchThreshold SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
} }
}) })
.unwrap_or(SwitchForkDecision::NoSwitch) .unwrap_or(SwitchForkDecision::SameFork)
}
pub(crate) fn check_switch_threshold(
&mut self,
switch_slot: u64,
ancestors: &HashMap<Slot, HashSet<u64>>,
descendants: &HashMap<Slot, HashSet<u64>>,
progress: &ProgressMap,
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
) -> SwitchForkDecision {
let decision = self.make_check_switch_threshold_decision(
switch_slot,
ancestors,
descendants,
progress,
total_stake,
epoch_vote_accounts,
);
let new_check = Some((switch_slot, decision.clone()));
if new_check != self.last_switch_threshold_check {
trace!(
"new switch threshold check: slot {}: {:?}",
switch_slot,
decision,
);
self.last_switch_threshold_check = new_check;
}
decision
}
fn is_first_switch_check(&self) -> bool {
self.last_switch_threshold_check.is_none()
} }
pub fn check_vote_stake_threshold( pub fn check_vote_stake_threshold(
@ -760,28 +861,21 @@ impl Tower {
// The tower root can be older/newer if the validator booted from a newer/older snapshot, so // The tower root can be older/newer if the validator booted from a newer/older snapshot, so
// tower lockouts may need adjustment // tower lockouts may need adjustment
pub fn adjust_lockouts_after_replay( pub fn adjust_lockouts_after_replay(
self, mut self,
replayed_root: Slot, replayed_root: Slot,
slot_history: &SlotHistory, slot_history: &SlotHistory,
) -> Result<Self> { ) -> Result<Self> {
info!(
"adjusting lockouts (after replay up to {}): {:?}",
replayed_root,
self.voted_slots()
);
// sanity assertions for roots // sanity assertions for roots
assert_eq!(slot_history.check(replayed_root), Check::Found); let tower_root = self.root();
assert!(self.root().is_some()); info!(
let tower_root = self.root().unwrap(); "adjusting lockouts (after replay up to {}): {:?} tower root: {} replayed root: {}",
// reconcile_blockstore_roots_with_tower() should already have aligned these. replayed_root,
assert!( self.voted_slots(),
tower_root <= replayed_root, tower_root,
format!( replayed_root,
"tower root: {:?} >= replayed root slot: {}",
tower_root, replayed_root
)
); );
assert_eq!(slot_history.check(replayed_root), Check::Found);
assert!( assert!(
self.last_vote == Vote::default() && self.lockouts.votes.is_empty() self.last_vote == Vote::default() && self.lockouts.votes.is_empty()
|| self.last_vote != Vote::default() && !self.lockouts.votes.is_empty(), || self.last_vote != Vote::default() && !self.lockouts.votes.is_empty(),
@ -791,30 +885,67 @@ impl Tower {
) )
); );
// return immediately if votes are empty... if let Some(last_voted_slot) = self.last_voted_slot() {
if self.lockouts.votes.is_empty() { if tower_root <= replayed_root {
return Ok(self); // Normally, we goes into this clause with possible help of
// reconcile_blockstore_roots_with_tower()
if slot_history.check(last_voted_slot) == Check::TooOld {
// We could try hard to anchor with other older votes, but opt to simplify the
// following logic
return Err(TowerError::TooOldTower(
last_voted_slot,
slot_history.oldest(),
));
}
self.adjust_lockouts_with_slot_history(slot_history)?;
self.initialize_root(replayed_root);
} else {
// This should never occur under normal operation.
// While this validator's voting is suspended this way,
// suspended_decision_due_to_major_unsynced_ledger() will be also touched.
let message = format!(
"For some reason, we're REPROCESSING slots which has already been \
voted and ROOTED by us; \
VOTING will be SUSPENDED UNTIL {}!",
last_voted_slot,
);
error!("{}", message);
datapoint_error!("tower_error", ("error", message, String));
// Let's pass-through adjust_lockouts_with_slot_history just for sanitization,
// using a synthesized SlotHistory.
let mut warped_slot_history = (*slot_history).clone();
// Blockstore doesn't have the tower_root slot because of
// (replayed_root < tower_root) in this else clause, meaning the tower is from
// the future from the view of blockstore.
// Pretend the blockstore has the future tower_root to anchor exactly with that
// slot by adding tower_root to a slot history. The added slot will be newer
// than all slots in the slot history (remember tower_root > replayed_root),
// satisfying the slot history invariant.
// Thus, the whole process will be safe as well because tower_root exists
// within both tower and slot history, guaranteeing the success of adjustment
// and retaining all of future votes correctly while sanitizing.
warped_slot_history.add(tower_root);
self.adjust_lockouts_with_slot_history(&warped_slot_history)?;
// don't update root; future tower's root should be kept across validator
// restarts to continue to show the scary messages at restarts until the next
// voting.
}
} else {
// This else clause is for newly created tower.
// initialize_lockouts_from_bank() should ensure the following invariant,
// otherwise we're screwing something up.
assert_eq!(tower_root, replayed_root);
} }
let last_voted_slot = self.last_voted_slot().unwrap(); Ok(self)
if slot_history.check(last_voted_slot) == Check::TooOld {
// We could try hard to anchor with other older votes, but opt to simplify the
// following logic
return Err(TowerError::TooOldTower(
last_voted_slot,
slot_history.oldest(),
));
}
self.do_adjust_lockouts_after_replay(tower_root, replayed_root, slot_history)
} }
fn do_adjust_lockouts_after_replay( fn adjust_lockouts_with_slot_history(&mut self, slot_history: &SlotHistory) -> Result<()> {
mut self, let tower_root = self.root();
tower_root: Slot,
replayed_root: Slot,
slot_history: &SlotHistory,
) -> Result<Self> {
// retained slots will be consisted only from divergent slots // retained slots will be consisted only from divergent slots
let mut retain_flags_for_each_vote_in_reverse: Vec<_> = let mut retain_flags_for_each_vote_in_reverse: Vec<_> =
Vec::with_capacity(self.lockouts.votes.len()); Vec::with_capacity(self.lockouts.votes.len());
@ -855,14 +986,20 @@ impl Tower {
} }
if let Some(checked_slot) = checked_slot { if let Some(checked_slot) = checked_slot {
// This is really special, only if tower is initialized (root = slot 0) for genesis and contains // This is really special, only if tower is initialized and contains
// a vote (= slot 0) for the genesis, the slot 0 can repeat only once // a vote for the root, the root slot can repeat only once
let voting_from_genesis = *slot_in_tower == checked_slot && *slot_in_tower == 0; let voting_for_root =
*slot_in_tower == checked_slot && *slot_in_tower == tower_root;
if !voting_from_genesis { if !voting_for_root {
// Unless we're voting since genesis, slots_in_tower must always be older than last checked_slot // Unless we're voting since genesis, slots_in_tower must always be older than last checked_slot
// including all vote slot and the root slot. // including all vote slot and the root slot.
assert!(*slot_in_tower < checked_slot) assert!(
*slot_in_tower < checked_slot,
"slot_in_tower({}) < checked_slot({})",
*slot_in_tower,
checked_slot
);
} }
} }
@ -890,15 +1027,10 @@ impl Tower {
retain_flags_for_each_vote_in_reverse.into_iter().rev(); retain_flags_for_each_vote_in_reverse.into_iter().rev();
let original_votes_len = self.lockouts.votes.len(); let original_votes_len = self.lockouts.votes.len();
self.do_initialize_lockouts(replayed_root, move |_| { self.initialize_lockouts(move |_| retain_flags_for_each_vote.next().unwrap());
retain_flags_for_each_vote.next().unwrap()
});
if self.lockouts.votes.is_empty() { if self.lockouts.votes.is_empty() {
info!( info!("All restored votes were behind; resetting root_slot and last_vote in tower!");
"All restored votes were behind replayed_root({}); resetting root_slot and last_vote in tower!",
replayed_root
);
// we might not have banks for those votes so just reset. // we might not have banks for those votes so just reset.
// That's because the votes may well be past replayed_root // That's because the votes may well be past replayed_root
self.last_vote = Vote::default(); self.last_vote = Vote::default();
@ -917,7 +1049,7 @@ impl Tower {
self.stray_restored_slot = Some(self.last_vote.last_voted_slot().unwrap()); self.stray_restored_slot = Some(self.last_vote.last_voted_slot().unwrap());
} }
Ok(self) Ok(())
} }
fn initialize_lockouts_from_bank( fn initialize_lockouts_from_bank(
@ -930,18 +1062,19 @@ impl Tower {
let vote_state = VoteState::deserialize(&vote_account.data) let vote_state = VoteState::deserialize(&vote_account.data)
.expect("vote_account isn't a VoteState?"); .expect("vote_account isn't a VoteState?");
self.lockouts = vote_state; self.lockouts = vote_state;
self.do_initialize_lockouts(root, |v| v.slot > root); self.initialize_root(root);
self.initialize_lockouts(|v| v.slot > root);
trace!( trace!(
"{} lockouts initialized to {:?}", "Lockouts in tower for {} is initialized using bank {}",
self.node_pubkey, self.node_pubkey,
self.lockouts bank.slot(),
); );
assert_eq!( assert_eq!(
self.lockouts.node_pubkey, self.node_pubkey, self.lockouts.node_pubkey, self.node_pubkey,
"vote account's node_pubkey doesn't match", "vote account's node_pubkey doesn't match",
); );
} else { } else {
self.do_initialize_lockouts(root, |_| true); self.initialize_root(root);
info!( info!(
"vote account({}) not found in bank (slot={})", "vote account({}) not found in bank (slot={})",
vote_account_pubkey, vote_account_pubkey,
@ -950,13 +1083,16 @@ impl Tower {
} }
} }
fn do_initialize_lockouts<F: FnMut(&Lockout) -> bool>(&mut self, root: Slot, should_retain: F) { fn initialize_lockouts<F: FnMut(&Lockout) -> bool>(&mut self, should_retain: F) {
// Updating root is needed to correctly restore from newly-saved tower for the next
// boot
self.lockouts.root_slot = Some(root);
self.lockouts.votes.retain(should_retain); self.lockouts.votes.retain(should_retain);
} }
// Updating root is needed to correctly restore from newly-saved tower for the next
// boot
fn initialize_root(&mut self, root: Slot) {
self.lockouts.root_slot = Some(root);
}
pub fn get_filename(path: &Path, node_pubkey: &Pubkey) -> PathBuf { pub fn get_filename(path: &Path, node_pubkey: &Pubkey) -> PathBuf {
path.join(format!("tower-{}", node_pubkey)) path.join(format!("tower-{}", node_pubkey))
.with_extension("bin") .with_extension("bin")
@ -986,6 +1122,7 @@ impl Tower {
bincode::serialize_into(&mut file, &saved_tower)?; bincode::serialize_into(&mut file, &saved_tower)?;
// file.sync_all() hurts performance; pipeline sync-ing and submitting votes to the cluster! // file.sync_all() hurts performance; pipeline sync-ing and submitting votes to the cluster!
} }
trace!("persisted votes: {:?}", self.voted_slots());
fs::rename(&new_filename, &filename)?; fs::rename(&new_filename, &filename)?;
// self.path.parent().sync_all() hurts performance same as the above sync // self.path.parent().sync_all() hurts performance same as the above sync
@ -1045,6 +1182,19 @@ pub enum TowerError {
#[error("The tower is fatally inconsistent with blockstore: {0}")] #[error("The tower is fatally inconsistent with blockstore: {0}")]
FatallyInconsistent(&'static str), FatallyInconsistent(&'static str),
#[error("The tower is useless because of new hard fork: {0}")]
HardFork(Slot),
}
impl TowerError {
pub fn is_file_missing(&self) -> bool {
if let TowerError::IOError(io_err) = &self {
io_err.kind() == std::io::ErrorKind::NotFound
} else {
false
}
}
} }
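Illustrative aside, not part of the diff: the new is_file_missing() helper classifies only io::ErrorKind::NotFound as a missing tower file. Below is a minimal stand-in (ToyError is hypothetical, not TowerError) showing the pattern a caller could use to treat a missing file as "start fresh" while still failing on every other restore error.

use std::io;

#[derive(Debug)]
enum ToyError {
    IoError(io::Error),
    Other(&'static str),
}

impl ToyError {
    // Mirrors TowerError::is_file_missing(): only NotFound counts as missing.
    fn is_file_missing(&self) -> bool {
        matches!(self, ToyError::IoError(e) if e.kind() == io::ErrorKind::NotFound)
    }
}

fn main() {
    let missing = ToyError::IoError(io::Error::new(io::ErrorKind::NotFound, "no tower file"));
    let other = ToyError::Other("corrupt tower");
    assert!(missing.is_file_missing());
    assert!(!other.is_file_missing());
    // A caller can treat a missing file as "build a fresh tower" while still
    // failing hard on any other restore error.
}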
#[frozen_abi(digest = "Gaxfwvx5MArn52mKZQgzHmDCyn5YfCuTHvp5Et3rFfpp")] #[frozen_abi(digest = "Gaxfwvx5MArn52mKZQgzHmDCyn5YfCuTHvp5Et3rFfpp")]
@ -1070,33 +1220,46 @@ impl SavedTower {
} }
} }
// Given an untimely crash, tower may have roots that are not reflected in blockstore because // Given an untimely crash, tower may have roots that are not reflected in blockstore,
// `ReplayState::handle_votable_bank()` saves tower before setting blockstore roots // or the reverse of this.
// That's because we don't impose any ordering guarantee or any kind of write barriers
// between tower (plain old POSIX fs calls) and blockstore (through RocksDB), when
// `ReplayState::handle_votable_bank()` saves tower before setting blockstore roots.
pub fn reconcile_blockstore_roots_with_tower( pub fn reconcile_blockstore_roots_with_tower(
tower: &Tower, tower: &Tower,
blockstore: &Blockstore, blockstore: &Blockstore,
) -> blockstore_db::Result<()> { ) -> blockstore_db::Result<()> {
if let Some(tower_root) = tower.root() { let tower_root = tower.root();
let last_blockstore_root = blockstore.last_root(); let last_blockstore_root = blockstore.last_root();
if last_blockstore_root < tower_root { if last_blockstore_root < tower_root {
// Ensure that tower_root itself exists and is marked as rooted in the blockstore // Ensure that tower_root itself exists and is marked as rooted in the blockstore
// in addition to its ancestors. // in addition to its ancestors.
let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, &blockstore) let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, &blockstore)
.take_while(|current| match current.cmp(&last_blockstore_root) { .take_while(|current| match current.cmp(&last_blockstore_root) {
Ordering::Greater => true, Ordering::Greater => true,
Ordering::Equal => false, Ordering::Equal => false,
Ordering::Less => panic!( Ordering::Less => panic!(
"couldn't find a last_blockstore_root upwards from: {}!?", "couldn't find a last_blockstore_root upwards from: {}!?",
tower_root tower_root
), ),
}) })
.collect(); .collect();
assert!( if !new_roots.is_empty() {
!new_roots.is_empty(), info!(
"at least 1 parent slot must be found" "Reconciling slots as root based on tower root: {:?} ({}..{}) ",
new_roots, tower_root, last_blockstore_root
);
blockstore.set_roots(&new_roots)?;
} else {
// This indicates we're in bad state; but still don't panic here.
// That's because we might have a chance of recovering properly with
// newer snapshot.
warn!(
"Couldn't find any ancestor slots from tower root ({}) \
towards blockstore root ({}); blockstore pruned or only \
tower moved into new ledger?",
tower_root, last_blockstore_root,
); );
blockstore.set_roots(&new_roots)?
} }
} }
Ok(()) Ok(())
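Illustrative aside, not part of the diff: a toy version of the ancestor walk above (std::iter::successors stands in for AncestorIterator::new_inclusive over the blockstore), showing which slots get promoted to roots and that an empty result is now only warned about rather than asserted.

use std::cmp::Ordering;
use std::collections::HashMap;

fn ancestors_to_root(
    parents: &HashMap<u64, u64>, // slot -> parent slot
    tower_root: u64,
    last_blockstore_root: u64,
) -> Vec<u64> {
    // Inclusive ancestor chain starting at tower_root.
    let chain = std::iter::successors(Some(tower_root), |slot| parents.get(slot).copied());
    chain
        .take_while(|current| match current.cmp(&last_blockstore_root) {
            Ordering::Greater => true,
            Ordering::Equal => false,
            Ordering::Less => false, // the real code panics here; a toy just stops
        })
        .collect()
}

fn main() {
    let parents: HashMap<u64, u64> = vec![(4, 3), (3, 1), (1, 0)].into_iter().collect();
    // Slots 4 and 3 sit between the tower root and the blockstore root: both become roots.
    assert_eq!(ancestors_to_root(&parents, 4, 1), vec![4, 3]);
    // Nothing between the two roots -> empty, which the new code only logs a warning for.
    assert_eq!(ancestors_to_root(&parents, 1, 1), Vec::<u64>::new());
}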
@ -1267,7 +1430,7 @@ pub mod test {
&ancestors, &ancestors,
&descendants, &descendants,
&self.progress, &self.progress,
&tower, tower,
); );
// Make sure this slot isn't locked out or failing threshold // Make sure this slot isn't locked out or failing threshold
@ -1456,7 +1619,7 @@ pub mod test {
&mut account.data, &mut account.data,
) )
.expect("serialize state"); .expect("serialize state");
stakes.push((Pubkey::new_rand(), (*lamports, account))); stakes.push((solana_sdk::pubkey::new_rand(), (*lamports, account)));
} }
stakes stakes
} }
@ -1464,11 +1627,11 @@ pub mod test {
#[test] #[test]
fn test_to_vote_instruction() { fn test_to_vote_instruction() {
let vote = Vote::default(); let vote = Vote::default();
let mut decision = SwitchForkDecision::FailedSwitchThreshold; let mut decision = SwitchForkDecision::FailedSwitchThreshold(0, 1);
assert!(decision assert!(decision
.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()) .to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default())
.is_none()); .is_none());
decision = SwitchForkDecision::NoSwitch; decision = SwitchForkDecision::SameFork;
assert_eq!( assert_eq!(
decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()), decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
Some(vote_instruction::vote( Some(vote_instruction::vote(
@ -1571,7 +1734,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::NoSwitch SwitchForkDecision::SameFork
); );
// Trying to switch to another fork at 110 should fail // Trying to switch to another fork at 110 should fail
@ -1584,7 +1747,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::FailedSwitchThreshold SwitchForkDecision::FailedSwitchThreshold(0, 20000)
); );
// Adding another validator lockout on a descendant of last vote should // Adding another validator lockout on a descendant of last vote should
@ -1599,7 +1762,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::FailedSwitchThreshold SwitchForkDecision::FailedSwitchThreshold(0, 20000)
); );
// Adding another validator lockout on an ancestor of last vote should // Adding another validator lockout on an ancestor of last vote should
@ -1614,7 +1777,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::FailedSwitchThreshold SwitchForkDecision::FailedSwitchThreshold(0, 20000)
); );
// Adding another validator lockout on a different fork, but the lockout // Adding another validator lockout on a different fork, but the lockout
@ -1629,7 +1792,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::FailedSwitchThreshold SwitchForkDecision::FailedSwitchThreshold(0, 20000)
); );
// Adding another validator lockout on a different fork, and the lockout // Adding another validator lockout on a different fork, and the lockout
@ -1646,7 +1809,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::FailedSwitchThreshold SwitchForkDecision::FailedSwitchThreshold(0, 20000)
); );
// Adding another validator lockout on a different fork, and the lockout // Adding another validator lockout on a different fork, and the lockout
@ -1697,7 +1860,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::FailedSwitchThreshold SwitchForkDecision::FailedSwitchThreshold(0, 20000)
); );
} }
@ -2365,7 +2528,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::NoSwitch SwitchForkDecision::SameFork
); );
// Trying to switch to another fork at 110 should fail // Trying to switch to another fork at 110 should fail
@ -2378,7 +2541,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::FailedSwitchThreshold SwitchForkDecision::FailedSwitchThreshold(0, 20000)
); );
vote_simulator.simulate_lockout_interval(111, (10, 49), &other_vote_account); vote_simulator.simulate_lockout_interval(111, (10, 49), &other_vote_account);
@ -2456,7 +2619,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::FailedSwitchThreshold SwitchForkDecision::FailedSwitchThreshold(0, 20000)
); );
// Add lockout_interval which should be excluded // Add lockout_interval which should be excluded
@ -2470,7 +2633,7 @@ pub mod test {
total_stake, total_stake,
bank0.epoch_vote_accounts(0).unwrap(), bank0.epoch_vote_accounts(0).unwrap(),
), ),
SwitchForkDecision::FailedSwitchThreshold SwitchForkDecision::FailedSwitchThreshold(0, 20000)
); );
// Add lockout_interval which should not be excluded // Add lockout_interval which should not be excluded
@ -2619,8 +2782,7 @@ pub mod test {
} }
#[test] #[test]
#[should_panic(expected = "at least 1 parent slot must be found")] fn test_reconcile_blockstore_roots_with_tower_nop_no_parent() {
fn test_reconcile_blockstore_roots_with_tower_panic_no_parent() {
solana_logger::setup(); solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!(); let blockstore_path = get_tmp_ledger_path!();
{ {
@ -2636,7 +2798,9 @@ pub mod test {
let mut tower = Tower::new_with_key(&Pubkey::default()); let mut tower = Tower::new_with_key(&Pubkey::default());
tower.lockouts.root_slot = Some(4); tower.lockouts.root_slot = Some(4);
assert_eq!(blockstore.last_root(), 0);
reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap(); reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap();
assert_eq!(blockstore.last_root(), 0);
} }
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
} }
@ -2660,13 +2824,13 @@ pub mod test {
.unwrap(); .unwrap();
assert_eq!(tower.voted_slots(), vec![2, 3]); assert_eq!(tower.voted_slots(), vec![2, 3]);
assert_eq!(tower.root(), Some(replayed_root_slot)); assert_eq!(tower.root(), replayed_root_slot);
tower = tower tower = tower
.adjust_lockouts_after_replay(replayed_root_slot, &slot_history) .adjust_lockouts_after_replay(replayed_root_slot, &slot_history)
.unwrap(); .unwrap();
assert_eq!(tower.voted_slots(), vec![2, 3]); assert_eq!(tower.voted_slots(), vec![2, 3]);
assert_eq!(tower.root(), Some(replayed_root_slot)); assert_eq!(tower.root(), replayed_root_slot);
} }
#[test] #[test]
@ -2688,7 +2852,7 @@ pub mod test {
.unwrap(); .unwrap();
assert_eq!(tower.voted_slots(), vec![2, 3]); assert_eq!(tower.voted_slots(), vec![2, 3]);
assert_eq!(tower.root(), Some(replayed_root_slot)); assert_eq!(tower.root(), replayed_root_slot);
} }
#[test] #[test]
@ -2712,12 +2876,12 @@ pub mod test {
.unwrap(); .unwrap();
assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>); assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
assert_eq!(tower.root(), Some(replayed_root_slot)); assert_eq!(tower.root(), replayed_root_slot);
assert_eq!(tower.stray_restored_slot, None); assert_eq!(tower.stray_restored_slot, None);
} }
#[test] #[test]
fn test_adjust_lockouts_after_relay_all_rooted_with_too_old() { fn test_adjust_lockouts_after_replay_all_rooted_with_too_old() {
use solana_sdk::slot_history::MAX_ENTRIES; use solana_sdk::slot_history::MAX_ENTRIES;
let mut tower = Tower::new_for_tests(10, 0.9); let mut tower = Tower::new_for_tests(10, 0.9);
@ -2735,7 +2899,7 @@ pub mod test {
.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history) .adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history)
.unwrap(); .unwrap();
assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>); assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
assert_eq!(tower.root(), Some(MAX_ENTRIES)); assert_eq!(tower.root(), MAX_ENTRIES);
} }
#[test] #[test]
@ -2758,7 +2922,7 @@ pub mod test {
.unwrap(); .unwrap();
assert_eq!(tower.voted_slots(), vec![3, 4]); assert_eq!(tower.voted_slots(), vec![3, 4]);
assert_eq!(tower.root(), Some(replayed_root_slot)); assert_eq!(tower.root(), replayed_root_slot);
} }
#[test] #[test]
@ -2779,7 +2943,7 @@ pub mod test {
.unwrap(); .unwrap();
assert_eq!(tower.voted_slots(), vec![5, 6]); assert_eq!(tower.voted_slots(), vec![5, 6]);
assert_eq!(tower.root(), Some(replayed_root_slot)); assert_eq!(tower.root(), replayed_root_slot);
} }
#[test] #[test]
@ -2823,7 +2987,7 @@ pub mod test {
.unwrap(); .unwrap();
assert_eq!(tower.voted_slots(), vec![3, 4, 5]); assert_eq!(tower.voted_slots(), vec![3, 4, 5]);
assert_eq!(tower.root(), Some(replayed_root_slot)); assert_eq!(tower.root(), replayed_root_slot);
} }
#[test] #[test]
@ -2839,7 +3003,7 @@ pub mod test {
.unwrap(); .unwrap();
assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>); assert_eq!(tower.voted_slots(), vec![] as Vec<Slot>);
assert_eq!(tower.root(), Some(replayed_root_slot)); assert_eq!(tower.root(), replayed_root_slot);
} }
#[test] #[test]
@ -2920,4 +3084,92 @@ pub mod test {
"The tower is fatally inconsistent with blockstore: not too old once after got too old?" "The tower is fatally inconsistent with blockstore: not too old once after got too old?"
); );
} }
#[test]
#[should_panic(expected = "slot_in_tower(2) < checked_slot(1)")]
fn test_adjust_lockouts_after_replay_reversed_votes() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.lockouts.votes.push_back(Lockout::new(2));
tower.lockouts.votes.push_back(Lockout::new(1));
let vote = Vote::new(vec![1], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(2);
tower
.adjust_lockouts_after_replay(2, &slot_history)
.unwrap();
}
#[test]
#[should_panic(expected = "slot_in_tower(3) < checked_slot(3)")]
fn test_adjust_lockouts_after_replay_repeated_non_root_votes() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.lockouts.votes.push_back(Lockout::new(2));
tower.lockouts.votes.push_back(Lockout::new(3));
tower.lockouts.votes.push_back(Lockout::new(3));
let vote = Vote::new(vec![3], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(2);
tower
.adjust_lockouts_after_replay(2, &slot_history)
.unwrap();
}
#[test]
fn test_adjust_lockouts_after_replay_vote_on_root() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.lockouts.root_slot = Some(42);
tower.lockouts.votes.push_back(Lockout::new(42));
tower.lockouts.votes.push_back(Lockout::new(43));
tower.lockouts.votes.push_back(Lockout::new(44));
let vote = Vote::new(vec![44], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(42);
let tower = tower.adjust_lockouts_after_replay(42, &slot_history);
assert_eq!(tower.unwrap().voted_slots(), [43, 44]);
}
#[test]
fn test_adjust_lockouts_after_replay_vote_on_genesis() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.lockouts.votes.push_back(Lockout::new(0));
let vote = Vote::new(vec![0], Hash::default());
tower.last_vote = vote;
let mut slot_history = SlotHistory::default();
slot_history.add(0);
assert!(tower.adjust_lockouts_after_replay(0, &slot_history).is_ok());
}
#[test]
fn test_adjust_lockouts_after_replay_future_tower() {
let mut tower = Tower::new_for_tests(10, 0.9);
tower.lockouts.votes.push_back(Lockout::new(13));
tower.lockouts.votes.push_back(Lockout::new(14));
let vote = Vote::new(vec![14], Hash::default());
tower.last_vote = vote;
tower.initialize_root(12);
let mut slot_history = SlotHistory::default();
slot_history.add(0);
slot_history.add(2);
let tower = tower
.adjust_lockouts_after_replay(2, &slot_history)
.unwrap();
assert_eq!(tower.root(), 12);
assert_eq!(tower.voted_slots(), vec![13, 14]);
assert_eq!(tower.stray_restored_slot, Some(14));
}
} }
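Illustrative aside, not part of the diff: the test changes above reflect the renamed SwitchForkDecision variants (NoSwitch becomes SameFork, and FailedSwitchThreshold now carries the observed (switch_stake, total_stake)). A toy model of that shape; the threshold math below and the 0.38 constant are a simplified assumption, not the real check_switch_threshold() logic.

const SWITCH_FORK_THRESHOLD: f64 = 0.38; // assumed value for illustration

#[derive(Debug, PartialEq)]
enum SwitchForkDecision {
    SameFork,                        // previously `NoSwitch`
    FailedSwitchThreshold(u64, u64), // now carries (switch_stake, total_stake)
    SwitchProof([u8; 32]),           // toy stand-in for the real Hash payload
}

fn decide(on_same_fork: bool, switch_stake: u64, total_stake: u64) -> SwitchForkDecision {
    if on_same_fork {
        SwitchForkDecision::SameFork
    } else if (switch_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
        SwitchForkDecision::SwitchProof([0u8; 32])
    } else {
        // Carrying the observed stake makes the failure reportable in metrics/logs,
        // which is what the FailedSwitchThreshold(0, 20000) assertions above check.
        SwitchForkDecision::FailedSwitchThreshold(switch_stake, total_stake)
    }
}

fn main() {
    assert_eq!(decide(true, 0, 20_000), SwitchForkDecision::SameFork);
    assert_eq!(
        decide(false, 0, 20_000),
        SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
    );
}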


@ -130,7 +130,7 @@ impl ContactInfo {
let addr = socketaddr!("224.0.1.255:1000"); let addr = socketaddr!("224.0.1.255:1000");
assert!(addr.ip().is_multicast()); assert!(addr.ip().is_multicast());
Self { Self {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
gossip: addr, gossip: addr,
tvu: addr, tvu: addr,
tvu_forwards: addr, tvu_forwards: addr,


@ -24,10 +24,12 @@
//! A value is updated to a new version if the labels match, and the value //! A value is updated to a new version if the labels match, and the value
//! wallclock is later, or the value hash is greater. //! wallclock is later, or the value hash is greater.
use crate::contact_info::ContactInfo;
use crate::crds_shards::CrdsShards; use crate::crds_shards::CrdsShards;
use crate::crds_value::{CrdsValue, CrdsValueLabel}; use crate::crds_value::{CrdsValue, CrdsValueLabel};
use bincode::serialize; use bincode::serialize;
use indexmap::map::{Entry, IndexMap}; use indexmap::map::{Entry, IndexMap};
use rayon::{prelude::*, ThreadPool};
use solana_sdk::hash::{hash, Hash}; use solana_sdk::hash::{hash, Hash};
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use std::cmp; use std::cmp;
@ -159,6 +161,11 @@ impl Crds {
self.table.get(label) self.table.get(label)
} }
pub fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo> {
let label = CrdsValueLabel::ContactInfo(*pubkey);
self.table.get(&label)?.value.contact_info()
}
fn update_label_timestamp(&mut self, id: &CrdsValueLabel, now: u64) { fn update_label_timestamp(&mut self, id: &CrdsValueLabel, now: u64) {
if let Some(e) = self.table.get_mut(id) { if let Some(e) = self.table.get_mut(id) {
e.local_timestamp = cmp::max(e.local_timestamp, now); e.local_timestamp = cmp::max(e.local_timestamp, now);
@ -176,37 +183,40 @@ impl Crds {
/// * timeouts - Pubkey specific timeouts with Pubkey::default() as the default timeout. /// * timeouts - Pubkey specific timeouts with Pubkey::default() as the default timeout.
pub fn find_old_labels( pub fn find_old_labels(
&self, &self,
thread_pool: &ThreadPool,
now: u64, now: u64,
timeouts: &HashMap<Pubkey, u64>, timeouts: &HashMap<Pubkey, u64>,
) -> Vec<CrdsValueLabel> { ) -> Vec<CrdsValueLabel> {
let default_timeout = *timeouts let default_timeout = *timeouts
.get(&Pubkey::default()) .get(&Pubkey::default())
.expect("must have default timeout"); .expect("must have default timeout");
self.table thread_pool.install(|| {
.iter() self.table
.filter_map(|(k, v)| { .par_iter()
let timeout = timeouts.get(&k.pubkey()).unwrap_or(&default_timeout); .with_min_len(1024)
if v.local_timestamp.saturating_add(*timeout) <= now { .filter_map(|(k, v)| {
Some(k) let timeout = timeouts.get(&k.pubkey()).unwrap_or(&default_timeout);
} else { if v.local_timestamp.saturating_add(*timeout) <= now {
None Some(k.clone())
} } else {
}) None
.cloned() }
.collect() })
.collect()
})
} }
pub fn remove(&mut self, key: &CrdsValueLabel) { pub fn remove(&mut self, key: &CrdsValueLabel) -> Option<VersionedCrdsValue> {
if let Some((index, _, value)) = self.table.swap_remove_full(key) { let (index, _, value) = self.table.swap_remove_full(key)?;
assert!(self.shards.remove(index, &value)); assert!(self.shards.remove(index, &value));
// The previously last element in the table is now moved to the // The previously last element in the table is now moved to the
// 'index' position. Shards need to be updated accordingly. // 'index' position. Shards need to be updated accordingly.
if index < self.table.len() { if index < self.table.len() {
let value = self.table.index(index); let value = self.table.index(index);
assert!(self.shards.remove(self.table.len(), value)); assert!(self.shards.remove(self.table.len(), value));
assert!(self.shards.insert(index, value)); assert!(self.shards.insert(index, value));
}
} }
Some(value)
} }
} }
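Illustrative aside, not part of the diff: remove() above uses swap_remove_full, so the element that used to be last lands in the vacated index and the shards index must be re-keyed. Vec::swap_remove below has the same movement semantics and makes the fixup easy to see; the "shards" Vec is only a toy mirror, not CrdsShards.

fn main() {
    let mut table = vec!["a", "b", "c", "d"];
    // Pretend `shards` maps positions -> values and must stay in sync with `table`.
    let mut shards: Vec<(usize, &str)> = table.iter().cloned().enumerate().collect();

    let removed_index = 1; // remove "b"
    let removed = table.swap_remove(removed_index);
    shards.retain(|(_, v)| *v != removed); // drop the removed entry from the index

    if removed_index < table.len() {
        // "d" moved from the old last slot into index 1; re-key its index entry.
        let moved = table[removed_index];
        for entry in shards.iter_mut() {
            if entry.1 == moved {
                entry.0 = removed_index;
            }
        }
    }
    assert_eq!(table, vec!["a", "d", "c"]);
    assert!(shards.iter().any(|&(i, v)| i == 1 && v == "d"));
}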
@ -216,6 +226,7 @@ mod test {
use crate::contact_info::ContactInfo; use crate::contact_info::ContactInfo;
use crate::crds_value::CrdsData; use crate::crds_value::CrdsData;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use rayon::ThreadPoolBuilder;
#[test] #[test]
fn test_insert() { fn test_insert() {
@ -288,48 +299,67 @@ mod test {
} }
#[test] #[test]
fn test_find_old_records_default() { fn test_find_old_records_default() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default(); let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(crds.insert(val.clone(), 1), Ok(None)); assert_eq!(crds.insert(val.clone(), 1), Ok(None));
let mut set = HashMap::new(); let mut set = HashMap::new();
set.insert(Pubkey::default(), 0); set.insert(Pubkey::default(), 0);
assert!(crds.find_old_labels(0, &set).is_empty()); assert!(crds.find_old_labels(&thread_pool, 0, &set).is_empty());
set.insert(Pubkey::default(), 1); set.insert(Pubkey::default(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]); assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
set.insert(Pubkey::default(), 2); set.insert(Pubkey::default(), 2);
assert_eq!(crds.find_old_labels(4, &set), vec![val.label()]); assert_eq!(
crds.find_old_labels(&thread_pool, 4, &set),
vec![val.label()]
);
} }
#[test] #[test]
fn test_find_old_records_with_override() { fn test_find_old_records_with_override() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut rng = thread_rng(); let mut rng = thread_rng();
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut timeouts = HashMap::new(); let mut timeouts = HashMap::new();
let val = CrdsValue::new_rand(&mut rng); let val = CrdsValue::new_rand(&mut rng);
timeouts.insert(Pubkey::default(), 3); timeouts.insert(Pubkey::default(), 3);
assert_eq!(crds.insert(val.clone(), 0), Ok(None)); assert_eq!(crds.insert(val.clone(), 0), Ok(None));
assert!(crds.find_old_labels(2, &timeouts).is_empty()); assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
timeouts.insert(val.pubkey(), 1); timeouts.insert(val.pubkey(), 1);
assert_eq!(crds.find_old_labels(2, &timeouts), vec![val.label()]); assert_eq!(
crds.find_old_labels(&thread_pool, 2, &timeouts),
vec![val.label()]
);
timeouts.insert(val.pubkey(), u64::MAX); timeouts.insert(val.pubkey(), u64::MAX);
assert!(crds.find_old_labels(2, &timeouts).is_empty()); assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
timeouts.insert(Pubkey::default(), 1); timeouts.insert(Pubkey::default(), 1);
assert!(crds.find_old_labels(2, &timeouts).is_empty()); assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
timeouts.remove(&val.pubkey()); timeouts.remove(&val.pubkey());
assert_eq!(crds.find_old_labels(2, &timeouts), vec![val.label()]); assert_eq!(
crds.find_old_labels(&thread_pool, 2, &timeouts),
vec![val.label()]
);
} }
#[test] #[test]
fn test_remove_default() { fn test_remove_default() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default(); let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_matches!(crds.insert(val.clone(), 1), Ok(_)); assert_matches!(crds.insert(val.clone(), 1), Ok(_));
let mut set = HashMap::new(); let mut set = HashMap::new();
set.insert(Pubkey::default(), 1); set.insert(Pubkey::default(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]); assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
crds.remove(&val.label()); crds.remove(&val.label());
assert!(crds.find_old_labels(2, &set).is_empty()); assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
} }
#[test] #[test]
fn test_find_old_records_staked() { fn test_find_old_records_staked() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default(); let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(crds.insert(val.clone(), 1), Ok(None)); assert_eq!(crds.insert(val.clone(), 1), Ok(None));
@ -337,20 +367,26 @@ mod test {
//now < timestamp //now < timestamp
set.insert(Pubkey::default(), 0); set.insert(Pubkey::default(), 0);
set.insert(val.pubkey(), 0); set.insert(val.pubkey(), 0);
assert!(crds.find_old_labels(0, &set).is_empty()); assert!(crds.find_old_labels(&thread_pool, 0, &set).is_empty());
//pubkey shouldn't expire since its timeout is MAX //pubkey shouldn't expire since its timeout is MAX
set.insert(val.pubkey(), std::u64::MAX); set.insert(val.pubkey(), std::u64::MAX);
assert!(crds.find_old_labels(2, &set).is_empty()); assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
//default has max timeout, but pubkey should still expire //default has max timeout, but pubkey should still expire
set.insert(Pubkey::default(), std::u64::MAX); set.insert(Pubkey::default(), std::u64::MAX);
set.insert(val.pubkey(), 1); set.insert(val.pubkey(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]); assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
set.insert(val.pubkey(), 2); set.insert(val.pubkey(), 2);
assert!(crds.find_old_labels(2, &set).is_empty()); assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
assert_eq!(crds.find_old_labels(3, &set), vec![val.label()]); assert_eq!(
crds.find_old_labels(&thread_pool, 3, &set),
vec![val.label()]
);
} }
#[test] #[test]
@ -361,7 +397,9 @@ mod test {
} }
let mut crds = Crds::default(); let mut crds = Crds::default();
let pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand).take(256).collect(); let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
.take(256)
.collect();
let mut rng = thread_rng(); let mut rng = thread_rng();
let mut num_inserts = 0; let mut num_inserts = 0;
for _ in 0..4096 { for _ in 0..4096 {
@ -394,6 +432,7 @@ mod test {
#[test] #[test]
fn test_remove_staked() { fn test_remove_staked() {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default(); let mut crds = Crds::default();
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_matches!(crds.insert(val.clone(), 1), Ok(_)); assert_matches!(crds.insert(val.clone(), 1), Ok(_));
@ -402,9 +441,12 @@ mod test {
//default has max timeout, but pubkey should still expire //default has max timeout, but pubkey should still expire
set.insert(Pubkey::default(), std::u64::MAX); set.insert(Pubkey::default(), std::u64::MAX);
set.insert(val.pubkey(), 1); set.insert(val.pubkey(), 1);
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]); assert_eq!(
crds.find_old_labels(&thread_pool, 2, &set),
vec![val.label()]
);
crds.remove(&val.label()); crds.remove(&val.label());
assert!(crds.find_old_labels(2, &set).is_empty()); assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
} }
#[test] #[test]
@ -484,14 +526,14 @@ mod test {
let v1 = VersionedCrdsValue::new( let v1 = VersionedCrdsValue::new(
1, 1,
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))), ))),
); );
let v2 = VersionedCrdsValue::new( let v2 = VersionedCrdsValue::new(
1, 1,
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))), ))),
); );
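Illustrative aside, not part of the diff: find_old_labels() now runs its expiry scan inside thread_pool.install() with a rayon parallel iterator. A self-contained sketch of that pattern over plain u64 keys (rayon = "1" assumed); the timeout lookup with a default entry mirrors the logic above.

use rayon::prelude::*;
use rayon::ThreadPoolBuilder;
use std::collections::HashMap;

fn find_old_keys(
    pool: &rayon::ThreadPool,
    table: &HashMap<u64, u64>,    // key -> last update timestamp
    timeouts: &HashMap<u64, u64>, // key -> timeout; key 0 acts as the default entry
    now: u64,
) -> Vec<u64> {
    let default_timeout = timeouts[&0];
    pool.install(|| {
        table
            .par_iter()
            .filter_map(|(k, updated)| {
                let timeout = timeouts.get(k).unwrap_or(&default_timeout);
                (updated.saturating_add(*timeout) <= now).then(|| *k)
            })
            .collect()
    })
}

fn main() {
    let pool = ThreadPoolBuilder::new().build().unwrap();
    let table: HashMap<u64, u64> = vec![(1, 0), (2, 5)].into_iter().collect();
    let timeouts: HashMap<u64, u64> = vec![(0, 3)].into_iter().collect();
    let mut old = find_old_keys(&pool, &table, &timeouts, 4);
    old.sort_unstable();
    assert_eq!(old, vec![1]); // key 1 expired (0 + 3 <= 4), key 2 did not
}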


@ -17,7 +17,6 @@ use std::collections::{HashMap, HashSet};
///The min size for bloom filters ///The min size for bloom filters
pub const CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS: usize = 500; pub const CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS: usize = 500;
#[derive(Clone)]
pub struct CrdsGossip { pub struct CrdsGossip {
pub crds: Crds, pub crds: Crds,
pub id: Pubkey, pub id: Pubkey,
@ -88,14 +87,27 @@ impl CrdsGossip {
prune_map prune_map
} }
pub fn new_push_messages(&mut self, now: u64) -> (Pubkey, HashMap<Pubkey, Vec<CrdsValue>>) { pub fn process_push_messages(&mut self, pending_push_messages: Vec<(CrdsValue, u64)>) {
for (push_message, timestamp) in pending_push_messages {
let _ =
self.push
.process_push_message(&mut self.crds, &self.id, push_message, timestamp);
}
}
pub fn new_push_messages(
&mut self,
pending_push_messages: Vec<(CrdsValue, u64)>,
now: u64,
) -> (Pubkey, HashMap<Pubkey, Vec<CrdsValue>>) {
self.process_push_messages(pending_push_messages);
let push_messages = self.push.new_push_messages(&self.crds, now); let push_messages = self.push.new_push_messages(&self.crds, now);
(self.id, push_messages) (self.id, push_messages)
} }
/// add the `from` to the peer's filter of nodes /// add the `from` to the peer's filter of nodes
pub fn process_prune_msg( pub fn process_prune_msg(
&mut self, &self,
peer: &Pubkey, peer: &Pubkey,
destination: &Pubkey, destination: &Pubkey,
origin: &[Pubkey], origin: &[Pubkey],
@ -161,9 +173,12 @@ impl CrdsGossip {
self.pull.mark_pull_request_creation_time(from, now) self.pull.mark_pull_request_creation_time(from, now)
} }
/// process a pull request and create a response /// process a pull request and create a response
pub fn process_pull_requests(&mut self, filters: Vec<(CrdsValue, CrdsFilter)>, now: u64) { pub fn process_pull_requests<I>(&mut self, callers: I, now: u64)
where
I: IntoIterator<Item = CrdsValue>,
{
self.pull self.pull
.process_pull_requests(&mut self.crds, filters, now); .process_pull_requests(&mut self.crds, callers, now);
} }
pub fn generate_pull_responses( pub fn generate_pull_responses(
@ -219,7 +234,12 @@ impl CrdsGossip {
self.pull.make_timeouts(&self.id, stakes, epoch_ms) self.pull.make_timeouts(&self.id, stakes, epoch_ms)
} }
pub fn purge(&mut self, now: u64, timeouts: &HashMap<Pubkey, u64>) -> usize { pub fn purge(
&mut self,
thread_pool: &ThreadPool,
now: u64,
timeouts: &HashMap<Pubkey, u64>,
) -> usize {
let mut rv = 0; let mut rv = 0;
if now > self.push.msg_timeout { if now > self.push.msg_timeout {
let min = now - self.push.msg_timeout; let min = now - self.push.msg_timeout;
@ -234,7 +254,9 @@ impl CrdsGossip {
let min = self.pull.crds_timeout; let min = self.pull.crds_timeout;
assert_eq!(timeouts[&self.id], std::u64::MAX); assert_eq!(timeouts[&self.id], std::u64::MAX);
assert_eq!(timeouts[&Pubkey::default()], min); assert_eq!(timeouts[&Pubkey::default()], min);
rv = self.pull.purge_active(&mut self.crds, now, &timeouts); rv = self
.pull
.purge_active(thread_pool, &mut self.crds, now, &timeouts);
} }
if now > 5 * self.pull.crds_timeout { if now > 5 * self.pull.crds_timeout {
let min = now - 5 * self.pull.crds_timeout; let min = now - 5 * self.pull.crds_timeout;
@ -243,6 +265,16 @@ impl CrdsGossip {
self.pull.purge_failed_inserts(now); self.pull.purge_failed_inserts(now);
rv rv
} }
// Only for tests and simulations.
pub(crate) fn mock_clone(&self) -> Self {
Self {
crds: self.crds.clone(),
push: self.push.mock_clone(),
pull: self.pull.clone(),
..*self
}
}
} }
/// Computes a normalized(log of actual stake) stake /// Computes a normalized(log of actual stake) stake

View File

@ -273,20 +273,18 @@ impl CrdsGossipPull {
} }
/// process a pull request /// process a pull request
pub fn process_pull_requests( pub fn process_pull_requests<I>(&mut self, crds: &mut Crds, callers: I, now: u64)
&mut self, where
crds: &mut Crds, I: IntoIterator<Item = CrdsValue>,
requests: Vec<(CrdsValue, CrdsFilter)>, {
now: u64, for caller in callers {
) {
requests.into_iter().for_each(|(caller, _)| {
let key = caller.label().pubkey(); let key = caller.label().pubkey();
if let Ok(Some(val)) = crds.insert(caller, now) { if let Ok(Some(val)) = crds.insert(caller, now) {
self.purged_values self.purged_values
.push_back((val.value_hash, val.local_timestamp)); .push_back((val.value_hash, val.local_timestamp));
} }
crds.update_record_timestamp(&key, now); crds.update_record_timestamp(&key, now);
}); }
} }
/// Create gossip responses to pull requests /// Create gossip responses to pull requests
@ -537,24 +535,21 @@ impl CrdsGossipPull {
/// The value_hash of an active item is put into self.purged_values queue /// The value_hash of an active item is put into self.purged_values queue
pub fn purge_active( pub fn purge_active(
&mut self, &mut self,
thread_pool: &ThreadPool,
crds: &mut Crds, crds: &mut Crds,
now: u64, now: u64,
timeouts: &HashMap<Pubkey, u64>, timeouts: &HashMap<Pubkey, u64>,
) -> usize { ) -> usize {
let old = crds.find_old_labels(now, timeouts); let num_purged_values = self.purged_values.len();
let mut purged: VecDeque<_> = old self.purged_values.extend(
.iter() crds.find_old_labels(thread_pool, now, timeouts)
.filter_map(|label| { .into_iter()
let rv = crds .filter_map(|label| {
.lookup_versioned(label) let val = crds.remove(&label)?;
.map(|val| (val.value_hash, val.local_timestamp)); Some((val.value_hash, val.local_timestamp))
crds.remove(label); }),
rv );
}) self.purged_values.len() - num_purged_values
.collect();
let ret = purged.len();
self.purged_values.append(&mut purged);
ret
} }
/// Purge values from the `self.purged_values` queue that are older than purge_timeout /// Purge values from the `self.purged_values` queue that are older than purge_timeout
pub fn purge_purged(&mut self, min_ts: u64) { pub fn purge_purged(&mut self, min_ts: u64) {
@ -626,7 +621,7 @@ mod test {
} }
let mut rng = thread_rng(); let mut rng = thread_rng();
for _ in 0..100 { for _ in 0..100 {
let hash = Hash::new_rand(&mut rng); let hash = solana_sdk::hash::new_rand(&mut rng);
assert_eq!(CrdsFilter::hash_as_u64(&hash), hash_as_u64_bitops(&hash)); assert_eq!(CrdsFilter::hash_as_u64(&hash), hash_as_u64_bitops(&hash));
} }
} }
@ -638,7 +633,7 @@ mod test {
assert_eq!(filter.mask, mask); assert_eq!(filter.mask, mask);
let mut rng = thread_rng(); let mut rng = thread_rng();
for _ in 0..10 { for _ in 0..10 {
let hash = Hash::new_rand(&mut rng); let hash = solana_sdk::hash::new_rand(&mut rng);
assert!(filter.test_mask(&hash)); assert!(filter.test_mask(&hash));
} }
} }
@ -649,13 +644,13 @@ mod test {
let mut stakes = HashMap::new(); let mut stakes = HashMap::new();
let node = CrdsGossipPull::default(); let node = CrdsGossipPull::default();
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
crds.insert(me.clone(), 0).unwrap(); crds.insert(me.clone(), 0).unwrap();
for i in 1..=30 { for i in 1..=30 {
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
let id = entry.label().pubkey(); let id = entry.label().pubkey();
@ -682,25 +677,25 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234"); let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
shred_version: 123, shred_version: 123,
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
})); }));
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
shred_version: 0, shred_version: 0,
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
})); }));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
shred_version: 123, shred_version: 123,
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
})); }));
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
shred_version: 456, shred_version: 456,
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
@ -741,12 +736,12 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234"); let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
})); }));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
})); }));
@ -767,7 +762,7 @@ mod test {
assert!(options.is_empty()); assert!(options.is_empty());
// Unknown pubkey in gossip_validators -- will pull from nobody // Unknown pubkey in gossip_validators -- will pull from nobody
gossip_validators.insert(Pubkey::new_rand()); gossip_validators.insert(solana_sdk::pubkey::new_rand());
let options = node.pull_options( let options = node.pull_options(
&crds, &crds,
&me.label().pubkey(), &me.label().pubkey(),
@ -797,7 +792,7 @@ mod test {
let mut rng = thread_rng(); let mut rng = thread_rng();
let crds_filter_set = let crds_filter_set =
CrdsFilterSet::new(/*num_items=*/ 9672788, /*max_bytes=*/ 8196); CrdsFilterSet::new(/*num_items=*/ 9672788, /*max_bytes=*/ 8196);
let hash_values: Vec<_> = std::iter::repeat_with(|| Hash::new_rand(&mut rng)) let hash_values: Vec<_> = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
.take(1024) .take(1024)
.collect(); .collect();
for hash_value in &hash_values { for hash_value in &hash_values {
@ -849,7 +844,7 @@ mod test {
for _ in 0..10_000 { for _ in 0..10_000 {
crds_gossip_pull crds_gossip_pull
.purged_values .purged_values
.push_back((Hash::new_rand(&mut rng), rng.gen())); .push_back((solana_sdk::hash::new_rand(&mut rng), rng.gen()));
} }
let mut num_inserts = 0; let mut num_inserts = 0;
for _ in 0..20_000 { for _ in 0..20_000 {
@ -898,7 +893,7 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default(); let mut crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
let id = entry.label().pubkey(); let id = entry.label().pubkey();
@ -933,7 +928,7 @@ mod test {
); );
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
crds.insert(new.clone(), 0).unwrap(); crds.insert(new.clone(), 0).unwrap();
@ -957,19 +952,19 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut crds = Crds::default(); let mut crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
let node_pubkey = entry.label().pubkey(); let node_pubkey = entry.label().pubkey();
let mut node = CrdsGossipPull::default(); let mut node = CrdsGossipPull::default();
crds.insert(entry.clone(), 0).unwrap(); crds.insert(entry.clone(), 0).unwrap();
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
crds.insert(old.clone(), 0).unwrap(); crds.insert(old.clone(), 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
crds.insert(new.clone(), 0).unwrap(); crds.insert(new.clone(), 0).unwrap();
@ -1000,14 +995,14 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default(); let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
let node_pubkey = entry.label().pubkey(); let node_pubkey = entry.label().pubkey();
let node = CrdsGossipPull::default(); let node = CrdsGossipPull::default();
node_crds.insert(entry, 0).unwrap(); node_crds.insert(entry, 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
node_crds.insert(new, 0).unwrap(); node_crds.insert(new, 0).unwrap();
@ -1031,7 +1026,7 @@ mod test {
assert_eq!(rsp[0].len(), 0); assert_eq!(rsp[0].len(), 0);
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
))); )));
dest_crds dest_crds
@ -1047,7 +1042,7 @@ mod test {
filters.push(filters[0].clone()); filters.push(filters[0].clone());
//should return new value since caller is new //should return new value since caller is new
filters[1].0 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( filters[1].0 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1,
))); )));
@ -1063,14 +1058,14 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default(); let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
let node_pubkey = entry.label().pubkey(); let node_pubkey = entry.label().pubkey();
let node = CrdsGossipPull::default(); let node = CrdsGossipPull::default();
node_crds.insert(entry, 0).unwrap(); node_crds.insert(entry, 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
node_crds.insert(new, 0).unwrap(); node_crds.insert(new, 0).unwrap();
@ -1090,7 +1085,11 @@ mod test {
let (_, filters, caller) = req.unwrap(); let (_, filters, caller) = req.unwrap();
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect(); let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0); let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
dest.process_pull_requests(&mut dest_crds, filters, 1); dest.process_pull_requests(
&mut dest_crds,
filters.into_iter().map(|(caller, _)| caller),
1,
);
assert!(rsp.iter().all(|rsp| rsp.is_empty())); assert!(rsp.iter().all(|rsp| rsp.is_empty()));
assert!(dest_crds.lookup(&caller.label()).is_some()); assert!(dest_crds.lookup(&caller.label()).is_some());
assert_eq!( assert_eq!(
@ -1113,7 +1112,7 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default(); let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
1, 1,
))); )));
let node_pubkey = entry.label().pubkey(); let node_pubkey = entry.label().pubkey();
@ -1121,14 +1120,14 @@ mod test {
node_crds.insert(entry, 0).unwrap(); node_crds.insert(entry, 0).unwrap();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
1, 1,
))); )));
node_crds.insert(new, 0).unwrap(); node_crds.insert(new, 0).unwrap();
let mut dest = CrdsGossipPull::default(); let mut dest = CrdsGossipPull::default();
let mut dest_crds = Crds::default(); let mut dest_crds = Crds::default();
let new_id = Pubkey::new_rand(); let new_id = solana_sdk::pubkey::new_rand();
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&new_id, 1, &new_id, 1,
))); )));
@ -1164,7 +1163,11 @@ mod test {
let (_, filters, caller) = req.unwrap(); let (_, filters, caller) = req.unwrap();
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect(); let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters, 0); let mut rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
dest.process_pull_requests(&mut dest_crds, filters, 0); dest.process_pull_requests(
&mut dest_crds,
filters.into_iter().map(|(caller, _)| caller),
0,
);
// if there is a false positive this is empty // if there is a false positive this is empty
// prob should be around 0.1 per iteration // prob should be around 0.1 per iteration
if rsp.is_empty() { if rsp.is_empty() {
@ -1210,7 +1213,7 @@ mod test {
let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let thread_pool = ThreadPoolBuilder::new().build().unwrap();
let mut node_crds = Crds::default(); let mut node_crds = Crds::default();
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
let node_label = entry.label(); let node_label = entry.label();
@ -1218,7 +1221,7 @@ mod test {
let mut node = CrdsGossipPull::default(); let mut node = CrdsGossipPull::default();
node_crds.insert(entry, 0).unwrap(); node_crds.insert(entry, 0).unwrap();
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
node_crds.insert(old.clone(), 0).unwrap(); node_crds.insert(old.clone(), 0).unwrap();
@ -1229,7 +1232,7 @@ mod test {
// purge // purge
let timeouts = node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1); let timeouts = node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1);
node.purge_active(&mut node_crds, 2, &timeouts); node.purge_active(&thread_pool, &mut node_crds, 2, &timeouts);
//verify self is still valid after purge //verify self is still valid after purge
assert_eq!(node_crds.lookup(&node_label).unwrap().label(), node_label); assert_eq!(node_crds.lookup(&node_label).unwrap().label(), node_label);
@ -1330,7 +1333,7 @@ mod test {
let mut node_crds = Crds::default(); let mut node_crds = Crds::default();
let mut node = CrdsGossipPull::default(); let mut node = CrdsGossipPull::default();
let peer_pubkey = Pubkey::new_rand(); let peer_pubkey = solana_sdk::pubkey::new_rand();
let peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo( let peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&peer_pubkey, 0), ContactInfo::new_localhost(&peer_pubkey, 0),
)); ));
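Illustrative aside, not part of the diff: purge_active() now appends expired entries directly onto the long-lived purged_values queue and returns how much the queue grew, instead of building an intermediate VecDeque. A simplified, self-contained version of that flow (plain tuples stand in for CrdsValueLabel / VersionedCrdsValue):

use std::collections::VecDeque;

fn purge_expired(
    purged_values: &mut VecDeque<(u64, u64)>, // (value_hash, local_timestamp) stand-in
    table: &mut Vec<(u64, u64)>,              // (key, last_update) stand-in for Crds
    now: u64,
    timeout: u64,
) -> usize {
    let before = purged_values.len();
    let mut kept = Vec::new();
    for (key, updated) in table.drain(..) {
        if updated + timeout <= now {
            purged_values.push_back((key, updated)); // remember what was purged
        } else {
            kept.push((key, updated));
        }
    }
    *table = kept;
    purged_values.len() - before // number of newly purged values
}

fn main() {
    let mut purged = VecDeque::new();
    let mut table = vec![(1, 0), (2, 9)];
    assert_eq!(purge_expired(&mut purged, &mut table, 10, 5), 1);
    assert_eq!(table, vec![(2, 9)]);
}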


@ -20,7 +20,7 @@ use bincode::serialized_size;
use indexmap::map::IndexMap; use indexmap::map::IndexMap;
use itertools::Itertools; use itertools::Itertools;
use rand::{seq::SliceRandom, Rng}; use rand::{seq::SliceRandom, Rng};
use solana_runtime::bloom::Bloom; use solana_runtime::bloom::{AtomicBloom, Bloom};
use solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::timestamp}; use solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::timestamp};
use std::{ use std::{
cmp, cmp,
@ -35,19 +35,18 @@ pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000; pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500; pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15; pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2; pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 3;
// Do not push to peers which have not been updated for this long. // Do not push to peers which have not been updated for this long.
const PUSH_ACTIVE_TIMEOUT_MS: u64 = 60_000; const PUSH_ACTIVE_TIMEOUT_MS: u64 = 60_000;
// 10 minutes // 10 minutes
const MAX_PUSHED_TO_TIMEOUT_MS: u64 = 10 * 60 * 1000; const MAX_PUSHED_TO_TIMEOUT_MS: u64 = 10 * 60 * 1000;
#[derive(Clone)]
pub struct CrdsGossipPush { pub struct CrdsGossipPush {
/// max bytes per message /// max bytes per message
pub max_bytes: usize, pub max_bytes: usize,
/// active set of validators for push /// active set of validators for push
active_set: IndexMap<Pubkey, Bloom<Pubkey>>, active_set: IndexMap<Pubkey, AtomicBloom<Pubkey>>,
/// push message queue /// push message queue
push_messages: HashMap<CrdsValueLabel, Hash>, push_messages: HashMap<CrdsValueLabel, Hash>,
/// Cache that tracks which validators a message was received from /// Cache that tracks which validators a message was received from
@ -136,8 +135,12 @@ impl CrdsGossipPush {
let mut keep = HashSet::new(); let mut keep = HashSet::new();
let mut peer_stake_sum = 0; let mut peer_stake_sum = 0;
keep.insert(*origin);
for next in shuffle { for next in shuffle {
let (next_peer, next_stake) = staked_peers[next]; let (next_peer, next_stake) = staked_peers[next];
if next_peer == *origin {
continue;
}
keep.insert(next_peer); keep.insert(next_peer);
peer_stake_sum += next_stake; peer_stake_sum += next_stake;
if peer_stake_sum >= prune_stake_threshold if peer_stake_sum >= prune_stake_threshold
@ -283,13 +286,12 @@ impl CrdsGossipPush {
} }
/// add the `from` to the peer's filter of nodes /// add the `from` to the peer's filter of nodes
pub fn process_prune_msg(&mut self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) { pub fn process_prune_msg(&self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
for origin in origins { if let Some(filter) = self.active_set.get(peer) {
if origin == self_pubkey { for origin in origins {
continue; if origin != self_pubkey {
} filter.add(origin);
if let Some(p) = self.active_set.get_mut(peer) { }
p.add(origin)
} }
} }
} }
@ -345,7 +347,7 @@ impl CrdsGossipPush {
continue; continue;
} }
let size = cmp::max(CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS, network_size); let size = cmp::max(CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS, network_size);
let mut bloom = Bloom::random(size, 0.1, 1024 * 8 * 4); let bloom: AtomicBloom<_> = Bloom::random(size, 0.1, 1024 * 8 * 4).into();
bloom.add(&item.id); bloom.add(&item.id);
new_items.insert(item.id, bloom); new_items.insert(item.id, bloom);
} }
@ -424,6 +426,21 @@ impl CrdsGossipPush {
!v.is_empty() !v.is_empty()
}); });
} }
// Only for tests and simulations.
pub(crate) fn mock_clone(&self) -> Self {
let mut active_set = IndexMap::<Pubkey, AtomicBloom<Pubkey>>::new();
for (k, v) in &self.active_set {
active_set.insert(*k, v.mock_clone());
}
Self {
active_set,
push_messages: self.push_messages.clone(),
received_cache: self.received_cache.clone(),
last_pushed_to: self.last_pushed_to.clone(),
..*self
}
}
} }
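Illustrative aside, not part of the diff: swapping the active_set filters from Bloom to AtomicBloom is what lets process_prune_msg() take &self above -- adding an origin only sets bits through atomics, so no mutable borrow is needed. ToyAtomicBloom below is a stand-in, not solana_runtime::bloom::AtomicBloom.

use std::sync::atomic::{AtomicU64, Ordering};

struct ToyAtomicBloom {
    bits: Vec<AtomicU64>,
}

impl ToyAtomicBloom {
    fn new(num_bits: usize) -> Self {
        Self { bits: (0..(num_bits + 63) / 64).map(|_| AtomicU64::new(0)).collect() }
    }
    // &self is enough: fetch_or() mutates through the atomic word.
    fn add(&self, key: u64) {
        let bit = (key as usize) % (self.bits.len() * 64);
        self.bits[bit / 64].fetch_or(1 << (bit % 64), Ordering::Relaxed);
    }
    fn contains(&self, key: u64) -> bool {
        let bit = (key as usize) % (self.bits.len() * 64);
        (self.bits[bit / 64].load(Ordering::Relaxed) & (1 << (bit % 64))) != 0
    }
}

fn main() {
    let filter = ToyAtomicBloom::new(1024);
    let origin = 42u64;
    filter.add(origin); // analogous to process_prune_msg() recording a pruned origin
    assert!(filter.contains(origin));
}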
#[cfg(test)] #[cfg(test)]
@ -438,15 +455,15 @@ mod test {
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let mut stakes = HashMap::new(); let mut stakes = HashMap::new();
let self_id = Pubkey::new_rand(); let self_id = solana_sdk::pubkey::new_rand();
let origin = Pubkey::new_rand(); let origin = solana_sdk::pubkey::new_rand();
stakes.insert(self_id, 100); stakes.insert(self_id, 100);
stakes.insert(origin, 100); stakes.insert(origin, 100);
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&origin, 0, &origin, 0,
))); )));
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand()); let low_staked_peers = (0..10).map(|_| solana_sdk::pubkey::new_rand());
let mut low_staked_set = HashSet::new(); let mut low_staked_set = HashSet::new();
low_staked_peers.for_each(|p| { low_staked_peers.for_each(|p| {
let _ = push.process_push_message(&mut crds, &p, value.clone(), 0); let _ = push.process_push_message(&mut crds, &p, value.clone(), 0);
@ -460,7 +477,7 @@ mod test {
"should not prune if min threshold has not been reached" "should not prune if min threshold has not been reached"
); );
let high_staked_peer = Pubkey::new_rand(); let high_staked_peer = solana_sdk::pubkey::new_rand();
let high_stake = CrdsGossipPush::prune_stake_threshold(100, 100) + 10; let high_stake = CrdsGossipPush::prune_stake_threshold(100, 100) + 10;
stakes.insert(high_staked_peer, high_stake); stakes.insert(high_staked_peer, high_stake);
let _ = push.process_push_message(&mut crds, &high_staked_peer, value, 0); let _ = push.process_push_message(&mut crds, &high_staked_peer, value, 0);
@ -483,7 +500,7 @@ mod test {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
let label = value.label(); let label = value.label();
@ -504,7 +521,7 @@ mod test {
fn test_process_push_old_version() { fn test_process_push_old_version() {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0); let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 1; ci.wallclock = 1;
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())); let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
@ -527,7 +544,7 @@ mod test {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let timeout = push.msg_timeout; let timeout = push.msg_timeout;
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0); let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
// push a version to far in the future // push a version to far in the future
ci.wallclock = timeout + 1; ci.wallclock = timeout + 1;
@ -549,7 +566,7 @@ mod test {
fn test_process_push_update() { fn test_process_push_update() {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0); let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 0; ci.wallclock = 0;
let value_old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())); let value_old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
@ -584,7 +601,7 @@ mod test {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let value1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let value1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
@ -593,7 +610,7 @@ mod test {
assert!(push.active_set.get(&value1.label().pubkey()).is_some()); assert!(push.active_set.get(&value1.label().pubkey()).is_some());
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
assert!(push.active_set.get(&value2.label().pubkey()).is_none()); assert!(push.active_set.get(&value2.label().pubkey()).is_none());
@ -608,7 +625,7 @@ mod test {
for _ in 0..push.num_active { for _ in 0..push.num_active {
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo( let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&Pubkey::new_rand(), 0), ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0),
)); ));
assert_eq!(crds.insert(value2.clone(), now), Ok(None)); assert_eq!(crds.insert(value2.clone(), now), Ok(None));
} }
@ -624,7 +641,7 @@ mod test {
let mut stakes = HashMap::new(); let mut stakes = HashMap::new();
for i in 1..=100 { for i in 1..=100 {
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
time, time,
))); )));
let id = peer.label().pubkey(); let id = peer.label().pubkey();
@ -652,25 +669,25 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234"); let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
shred_version: 123, shred_version: 123,
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
})); }));
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
shred_version: 0, shred_version: 0,
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
})); }));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
shred_version: 123, shred_version: 123,
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
})); }));
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
shred_version: 456, shred_version: 456,
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
@ -709,12 +726,12 @@ mod test {
let gossip = socketaddr!("127.0.0.1:1234"); let gossip = socketaddr!("127.0.0.1:1234");
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
})); }));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo { let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(), id: solana_sdk::pubkey::new_rand(),
gossip, gossip,
..ContactInfo::default() ..ContactInfo::default()
})); }));
@ -735,7 +752,7 @@ mod test {
assert!(options.is_empty()); assert!(options.is_empty());
// Unknown pubkey in gossip_validators -- will push to nobody // Unknown pubkey in gossip_validators -- will push to nobody
gossip_validators.insert(Pubkey::new_rand()); gossip_validators.insert(solana_sdk::pubkey::new_rand());
let options = node.push_options( let options = node.push_options(
&crds, &crds,
&me.label().pubkey(), &me.label().pubkey(),
@ -765,14 +782,14 @@ mod test {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
assert_eq!(crds.insert(peer.clone(), now), Ok(None)); assert_eq!(crds.insert(peer.clone(), now), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
let mut expected = HashMap::new(); let mut expected = HashMap::new();
@ -790,17 +807,17 @@ mod test {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let peer_1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let peer_1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
assert_eq!(crds.insert(peer_1.clone(), now), Ok(None)); assert_eq!(crds.insert(peer_1.clone(), now), Ok(None));
let peer_2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let peer_2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
assert_eq!(crds.insert(peer_2.clone(), now), Ok(None)); assert_eq!(crds.insert(peer_2.clone(), now), Ok(None));
let peer_3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let peer_3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
now, now,
))); )));
assert_eq!( assert_eq!(
@ -823,17 +840,17 @@ mod test {
#[test] #[test]
fn test_process_prune() { fn test_process_prune() {
let mut crds = Crds::default(); let mut crds = Crds::default();
let self_id = Pubkey::new_rand(); let self_id = solana_sdk::pubkey::new_rand();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None)); assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
let expected = HashMap::new(); let expected = HashMap::new();
@ -853,13 +870,13 @@ mod test {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(), &solana_sdk::pubkey::new_rand(),
0, 0,
))); )));
assert_eq!(crds.insert(peer, 0), Ok(None)); assert_eq!(crds.insert(peer, 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0); let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 1; ci.wallclock = 1;
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci)); let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci));
let expected = HashMap::new(); let expected = HashMap::new();
@ -875,7 +892,7 @@ mod test {
fn test_purge_old_received_cache() { fn test_purge_old_received_cache() {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0); let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 0; ci.wallclock = 0;
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci)); let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci));
let label = value.label(); let label = value.label();


@@ -135,14 +135,15 @@ mod test {
     use crate::contact_info::ContactInfo;
     use crate::crds_value::{CrdsData, CrdsValue};
     use rand::{thread_rng, Rng};
-    use solana_sdk::pubkey::Pubkey;
     use solana_sdk::timing::timestamp;
     use std::collections::HashSet;
     use std::ops::Index;
     fn new_test_crds_value() -> VersionedCrdsValue {
-        let data =
-            CrdsData::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp()));
+        let data = CrdsData::ContactInfo(ContactInfo::new_localhost(
+            &solana_sdk::pubkey::new_rand(),
+            timestamp(),
+        ));
         VersionedCrdsValue::new(timestamp(), CrdsValue::new_unsigned(data))
     }


@@ -318,7 +318,7 @@ impl CrdsValue {
         R: rand::Rng,
     {
         let now = rng.gen();
-        let contact_info = ContactInfo::new_localhost(&Pubkey::new_rand(), now);
+        let contact_info = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now);
         Self::new_signed(CrdsData::ContactInfo(contact_info), &Keypair::new())
     }

core/src/data_budget.rs (new file, 112 lines)

@ -0,0 +1,112 @@
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
#[derive(Default)]
pub struct DataBudget {
// Amount of bytes we have in the budget to send.
bytes: AtomicUsize,
// Last time that we upped the bytes count, used
// to detect when to up the bytes budget again
last_timestamp_ms: AtomicU64,
}
impl DataBudget {
// If there are enough bytes in the budget, consumes from
// the budget and returns true. Otherwise returns false.
#[must_use]
pub fn take(&self, size: usize) -> bool {
let mut budget = self.bytes.load(Ordering::Acquire);
loop {
if budget < size {
return false;
}
match self.bytes.compare_exchange_weak(
budget,
budget - size,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return true,
Err(bytes) => budget = bytes,
}
}
}
// Updates timestamp and returns true, if at least given milliseconds
// has passed since last update. Otherwise returns false.
fn can_update(&self, duration_millis: u64) -> bool {
let now = solana_sdk::timing::timestamp();
let mut last_timestamp = self.last_timestamp_ms.load(Ordering::Acquire);
loop {
if now < last_timestamp + duration_millis {
return false;
}
match self.last_timestamp_ms.compare_exchange_weak(
last_timestamp,
now,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => return true,
Err(ts) => last_timestamp = ts,
}
}
}
// Updates the budget if at least given milliseconds has passed since last
// update. Updater function maps current value of bytes to the new one.
pub fn update<F>(&self, duration_millis: u64, updater: F)
where
F: Fn(usize) -> usize,
{
if !self.can_update(duration_millis) {
return;
}
let mut bytes = self.bytes.load(Ordering::Acquire);
loop {
match self.bytes.compare_exchange_weak(
bytes,
updater(bytes),
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => break,
Err(b) => bytes = b,
}
}
}
// Non-atomic clone only for tests and simulations.
pub fn clone_non_atomic(&self) -> Self {
Self {
bytes: AtomicUsize::new(self.bytes.load(Ordering::Acquire)),
last_timestamp_ms: AtomicU64::new(self.last_timestamp_ms.load(Ordering::Acquire)),
}
}
}
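Both take() and update() spin on compare_exchange_weak; on recent Rust the same retry loop can also be written with the standard AtomicUsize::fetch_update helper, e.g. this sketch of take():

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Equivalent to DataBudget::take, expressed with fetch_update:
    // checked_sub returns None when the budget is too small, which maps to Err.
    fn take_bytes(bytes: &AtomicUsize, size: usize) -> bool {
        bytes
            .fetch_update(Ordering::AcqRel, Ordering::Acquire, |budget| {
                budget.checked_sub(size)
            })
            .is_ok()
    }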
#[cfg(test)]
mod tests {
use super::*;
use std::time::Duration;
#[test]
fn test_data_budget() {
let budget = DataBudget::default();
assert!(!budget.take(1)); // budget = 0.
budget.update(1000, |bytes| bytes + 5); // budget updates to 5.
assert!(budget.take(1));
assert!(budget.take(2));
assert!(!budget.take(3)); // budget = 2, out of budget.
budget.update(30, |_| 10); // no update, budget = 2.
assert!(!budget.take(3)); // budget = 2, out of budget.
std::thread::sleep(Duration::from_millis(50));
budget.update(30, |bytes| bytes * 2); // budget updates to 4.
assert!(budget.take(3));
assert!(budget.take(1));
assert!(!budget.take(1)); // budget = 0.
}
}
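A sketch of how a caller might gate outbound traffic with this type; the refill interval and byte counts below are made-up illustration values, not taken from the validator:

    fn try_send(budget: &DataBudget, packet: &[u8]) -> bool {
        const INTERVAL_MS: u64 = 100;
        const BYTES_PER_INTERVAL: usize = 12_000;
        // Refill at most once per interval, capping how much unused budget carries over.
        budget.update(INTERVAL_MS, |bytes| {
            std::cmp::min(bytes + BYTES_PER_INTERVAL, 4 * BYTES_PER_INTERVAL)
        });
        // Consume from the budget; the caller drops the packet when this returns false.
        budget.take(packet.len())
    }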


@@ -306,8 +306,8 @@ mod tests {
     #[test]
     fn test_gossip_services_spy() {
         let keypair = Keypair::new();
-        let peer0 = Pubkey::new_rand();
-        let peer1 = Pubkey::new_rand();
+        let peer0 = solana_sdk::pubkey::new_rand();
+        let peer1 = solana_sdk::pubkey::new_rand();
         let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
         let peer0_info = ContactInfo::new_localhost(&peer0, 0);
         let peer1_info = ContactInfo::new_localhost(&peer1, 0);
@@ -335,7 +335,7 @@ mod tests {
             spy_ref.clone(),
             None,
             Some(0),
-            Some(Pubkey::new_rand()),
+            Some(solana_sdk::pubkey::new_rand()),
             None,
         );
         assert_eq!(met_criteria, false);
@@ -349,7 +349,7 @@ mod tests {
             spy_ref.clone(),
             Some(1),
             Some(0),
-            Some(Pubkey::new_rand()),
+            Some(solana_sdk::pubkey::new_rand()),
             None,
         );
         assert_eq!(met_criteria, false);


@@ -187,6 +187,7 @@ impl HeaviestSubtreeForkChoice {
             .expect("new root must exist in fork_infos map")
             .parent = None;
         self.root = new_root;
+        self.last_root_time = Instant::now();
     }
     pub fn add_root_parent(&mut self, root_parent: Slot) {
@@ -498,7 +499,7 @@ impl HeaviestSubtreeForkChoice {
         let heaviest_slot_on_same_voted_fork = self.best_slot(last_voted_slot);
         if heaviest_slot_on_same_voted_fork.is_none() {
             if !tower.is_stray_last_vote() {
-                // Unless last vote is stray, self.bast_slot(last_voted_slot) must return
+                // Unless last vote is stray and stale, self.bast_slot(last_voted_slot) must return
                 // Some(_), justifying to panic! here.
                 // Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None,
                 // if all saved votes are ancestors of replayed_root_slot. So this code shouldn't be
@@ -507,12 +508,12 @@ impl HeaviestSubtreeForkChoice {
                 // validator has been running, so we must be able to fetch best_slots for all of
                 // them.
                 panic!(
-                    "a bank at last_voted_slot({}) is a frozen bank so must have been\
+                    "a bank at last_voted_slot({}) is a frozen bank so must have been \
                      added to heaviest_subtree_fork_choice at time of freezing",
                     last_voted_slot,
                 )
             } else {
-                // fork_infos doesn't have corresponding data for the stray restored last vote,
+                // fork_infos doesn't have corresponding data for the stale stray last vote,
                 // meaning some inconsistency between saved tower and ledger.
                 // (newer snapshot, or only a saved tower is moved over to new setup?)
                 return None;
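The added space before the backslash in the panic! message is not cosmetic: a trailing backslash in a Rust string literal swallows the newline and the next line's leading whitespace, so without it the two halves concatenate with no separator. For example:

    fn main() {
        let joined = "must have been\
            added to heaviest_subtree_fork_choice";
        let spaced = "must have been \
            added to heaviest_subtree_fork_choice";
        assert_eq!(joined, "must have beenadded to heaviest_subtree_fork_choice");
        assert_eq!(spaced, "must have been added to heaviest_subtree_fork_choice");
    }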


@@ -6,14 +6,10 @@
 //! command-line tools to spin up validators and a Rust library
 //!
-#[macro_use]
-extern crate solana_bpf_loader_program;
 pub mod accounts_hash_verifier;
 pub mod banking_stage;
 pub mod bigtable_upload_service;
 pub mod broadcast_stage;
-mod builtins;
 pub mod cache_block_time_service;
 pub mod cluster_info_vote_listener;
 pub mod commitment_service;
@@ -35,6 +31,7 @@ pub mod crds_gossip_pull;
 pub mod crds_gossip_push;
 pub mod crds_shards;
 pub mod crds_value;
+pub mod data_budget;
 pub mod epoch_slots;
 pub mod fetch_stage;
 pub mod fork_choice;
@@ -46,6 +43,7 @@ pub mod local_vote_signer_service;
 pub mod non_circulating_supply;
 pub mod optimistic_confirmation_verifier;
 pub mod optimistically_confirmed_bank_tracker;
+pub mod ping_pong;
 pub mod poh_recorder;
 pub mod poh_service;
 pub mod progress_map;
@@ -59,7 +57,6 @@ mod result;
 pub mod retransmit_stage;
 pub mod rewards_recorder_service;
 pub mod rpc;
-pub mod rpc_error;
 pub mod rpc_health;
 pub mod rpc_pubsub;
 pub mod rpc_pubsub_service;
@@ -97,7 +94,7 @@ extern crate serde_json;
 extern crate solana_metrics;
 #[macro_use]
-extern crate solana_sdk_macro_frozen_abi;
+extern crate solana_frozen_abi_macro;
 #[cfg(test)]
 #[macro_use]
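With data_budget and ping_pong now declared as public modules, downstream code in the workspace can reach them from the crate root; assuming the crate is still imported as solana_core, usage looks like this (the ttl and capacity values are illustrative):

    use solana_core::{data_budget::DataBudget, ping_pong::PingCache};
    use std::time::Duration;

    let budget = DataBudget::default();
    let ping_cache = PingCache::new(Duration::from_secs(20 * 60), /*cap:*/ 4096);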


@@ -79,6 +79,26 @@ solana_sdk::pubkeys!(
     "GumSE5HsMV5HCwBTv2D2D81yy9x17aDkvobkqAfTRgmo",
     "AzVV9ZZDxTgW4wWfJmsG6ytaHpQGSe1yz76Nyy84VbQF",
     "8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
+    "CQDYc4ET2mbFhVpgj41gXahL6Exn5ZoPcGAzSHuYxwmE",
+    "5PLJZLJiRR9vf7d1JCCg7UuWjtyN9nkab9uok6TqSyuP",
+    "7xJ9CLtEAcEShw9kW2gSoZkRWL566Dg12cvgzANJwbTr",
+    "BuCEvc9ze8UoAQwwsQLy8d447C8sA4zeVtVpc6m5wQeS",
+    "8ndGYFjav6NDXvzYcxs449Aub3AxYv4vYpk89zRDwgj7",
+    "8W58E8JVJjH1jCy5CeHJQgvwFXTyAVyesuXRZGbcSUGG",
+    "GNiz4Mq886bTNDT3pijGsu2gbw6it7sqrwncro45USeB",
+    "GhsotwFMH6XUrRLJCxcx62h7748N2Uq8mf87hUGkmPhg",
+    "Fgyh8EeYGZtbW8sS33YmNQnzx54WXPrJ5KWNPkCfWPot",
+    "8UVjvYyoqP6sqcctTso3xpCdCfgTMiv3VRh7vraC2eJk",
+    "BhvLngiqqKeZ8rpxch2uGjeCiC88zzewoWPRuoxpp1aS",
+    "63DtkW7zuARcd185EmHAkfF44bDcC2SiTSEj2spLP3iA",
+    "GvpCiTgq9dmEeojCDBivoLoZqc4AkbUDACpqPMwYLWKh",
+    "7Y8smnoUrYKGGuDq2uaFKVxJYhojgg7DVixHyAtGTYEV",
+    "DUS1KxwUhUyDKB4A81E8vdnTe3hSahd92Abtn9CXsEcj",
+    "F9MWFw8cnYVwsRq8Am1PGfFL3cQUZV37mbGoxZftzLjN",
+    "8vqrX3H2BYLaXVintse3gorPEM4TgTwTFZNN1Fm9TdYs",
+    "CUageMFi49kzoDqtdU8NvQ4Bq3sbtJygjKDAXJ45nmAi",
+    "5smrYwb1Hr2T8XMnvsqccTgXxuqQs14iuE8RbHFYf2Cf",
+    "xQadXQiUTCCFhfHjvQx1hyJK6KVWr1w2fD6DT3cdwj7",
     ]
 );
@@ -115,7 +135,7 @@ mod tests {
         let num_genesis_accounts = 10;
         for _ in 0..num_genesis_accounts {
             accounts.insert(
-                Pubkey::new_rand(),
+                solana_sdk::pubkey::new_rand(),
                 Account::new(balance, 0, &Pubkey::default()),
             );
         }
@@ -127,7 +147,7 @@ mod tests {
         let num_stake_accounts = 3;
         for _ in 0..num_stake_accounts {
-            let pubkey = Pubkey::new_rand();
+            let pubkey = solana_sdk::pubkey::new_rand();
             let meta = Meta {
                 authorized: Authorized::auto(&pubkey),
                 lockup: Lockup {


@@ -20,7 +20,7 @@ impl OptimisticConfirmationVerifier {
     }
     // Returns any optimistic slots that were not rooted
-    pub fn get_unrooted_optimistic_slots(
+    pub fn verify_for_unrooted_optimistic_slots(
         &mut self,
         root_bank: &Bank,
         blockstore: &Blockstore,
@@ -34,8 +34,8 @@ impl OptimisticConfirmationVerifier {
         std::mem::swap(&mut slots_before_root, &mut self.unchecked_slots);
         slots_before_root
             .into_iter()
-            .filter(|(optimistic_slot, hash)| {
-                (*optimistic_slot == root && *hash != root_bank.hash())
+            .filter(|(optimistic_slot, optimistic_hash)| {
+                (*optimistic_slot == root && *optimistic_hash != root_bank.hash())
                     || (!root_ancestors.contains_key(&optimistic_slot) &&
                     // In this second part of the `and`, we account for the possibility that
                     // there was some other root `rootX` set in BankForks where:
@@ -76,6 +76,10 @@ impl OptimisticConfirmationVerifier {
         self.last_optimistic_slot_ts = Instant::now();
     }
+    pub fn format_optimistic_confirmd_slot_violation_log(slot: Slot) -> String {
+        format!("Optimistically confirmed slot {} was not rooted", slot)
+    }
+
     pub fn log_unrooted_optimistic_slots(
         root_bank: &Bank,
         vote_tracker: &VoteTracker,
@@ -96,7 +100,7 @@ impl OptimisticConfirmationVerifier {
                 .unwrap_or(0);
             error!(
-                "Optimistic slot {} was not rooted,
+                "{},
                 hash: {},
                 epoch: {},
                 voted keys: {:?},
@@ -105,7 +109,7 @@ impl OptimisticConfirmationVerifier {
                 voted stake: {},
                 total epoch stake: {},
                 pct: {}",
-                optimistic_slot,
+                Self::format_optimistic_confirmd_slot_violation_log(*optimistic_slot),
                 hash,
                 epoch,
                 r_slot_tracker
@@ -181,7 +185,8 @@ mod test {
             .cloned()
             .unwrap();
         assert_eq!(
-            optimistic_confirmation_verifier.get_unrooted_optimistic_slots(&bank1, &blockstore),
+            optimistic_confirmation_verifier
+                .verify_for_unrooted_optimistic_slots(&bank1, &blockstore),
             vec![(1, bad_bank_hash)]
         );
         assert_eq!(optimistic_confirmation_verifier.unchecked_slots.len(), 1);
@@ -228,7 +233,7 @@ mod test {
             .cloned()
             .unwrap();
         assert!(optimistic_confirmation_verifier
-            .get_unrooted_optimistic_slots(&bank5, &blockstore)
+            .verify_for_unrooted_optimistic_slots(&bank5, &blockstore)
             .is_empty());
         // 5 is >= than all the unchecked slots, so should clear everything
         assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
@@ -244,7 +249,7 @@ mod test {
             .cloned()
             .unwrap();
         assert!(optimistic_confirmation_verifier
-            .get_unrooted_optimistic_slots(&bank3, &blockstore)
+            .verify_for_unrooted_optimistic_slots(&bank3, &blockstore)
             .is_empty());
         // 3 is bigger than only slot 1, so slot 5 should be left over
         assert_eq!(optimistic_confirmation_verifier.unchecked_slots.len(), 1);
@@ -264,7 +269,8 @@ mod test {
             .cloned()
             .unwrap();
         assert_eq!(
-            optimistic_confirmation_verifier.get_unrooted_optimistic_slots(&bank4, &blockstore),
+            optimistic_confirmation_verifier
+                .verify_for_unrooted_optimistic_slots(&bank4, &blockstore),
             vec![optimistic_slots[1]]
         );
         // 4 is bigger than only slots 1 and 3, so slot 5 should be left over
@@ -303,7 +309,8 @@ mod test {
         optimistic_confirmation_verifier
             .add_new_optimistic_confirmed_slots(optimistic_slots.clone());
         assert_eq!(
-            optimistic_confirmation_verifier.get_unrooted_optimistic_slots(&bank7, &blockstore),
+            optimistic_confirmation_verifier
+                .verify_for_unrooted_optimistic_slots(&bank7, &blockstore),
             optimistic_slots[0..=1].to_vec()
         );
         assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
@@ -312,7 +319,7 @@ mod test {
         blockstore.set_roots(&[1, 3]).unwrap();
         optimistic_confirmation_verifier.add_new_optimistic_confirmed_slots(optimistic_slots);
         assert!(optimistic_confirmation_verifier
-            .get_unrooted_optimistic_slots(&bank7, &blockstore)
+            .verify_for_unrooted_optimistic_slots(&bank7, &blockstore)
             .is_empty());
         assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
     }
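The rename makes call sites read as a check that is expected to come back empty. A sketch of the intended call pattern, assuming a verifier, root_bank, and blockstore are already in scope (only the function names come from this diff):

    let violations = optimistic_confirmation_verifier
        .verify_for_unrooted_optimistic_slots(&root_bank, &blockstore);
    for (slot, _hash) in &violations {
        // Reuses the helper added above so alerts can match on a single log format.
        error!(
            "{}",
            OptimisticConfirmationVerifier::format_optimistic_confirmd_slot_violation_log(*slot)
        );
    }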

core/src/ping_pong.rs (new file, 400 lines)

@ -0,0 +1,400 @@
use bincode::{serialize, Error};
use lru::LruCache;
use rand::{AsByteSliceMut, CryptoRng, Rng};
use serde::Serialize;
use solana_sdk::hash::{self, Hash};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::sanitize::{Sanitize, SanitizeError};
use solana_sdk::signature::{Keypair, Signable, Signature, Signer};
use std::borrow::Cow;
use std::net::SocketAddr;
use std::time::{Duration, Instant};
#[derive(AbiExample, Debug, Deserialize, Serialize)]
pub struct Ping<T> {
from: Pubkey,
token: T,
signature: Signature,
}
#[derive(AbiExample, Debug, Deserialize, Serialize)]
pub struct Pong {
from: Pubkey,
hash: Hash, // Hash of received ping token.
signature: Signature,
}
/// Maintains records of remote nodes which have returned a valid response to a
/// ping message, and on-the-fly ping messages pending a pong response from the
/// remote node.
pub struct PingCache {
// Time-to-live of received pong messages.
ttl: Duration,
// Timestamp of last ping message sent to a remote node.
// Used to rate limit pings to remote nodes.
pings: LruCache<(Pubkey, SocketAddr), Instant>,
// Verified pong responses from remote nodes.
pongs: LruCache<(Pubkey, SocketAddr), Instant>,
// Hash of ping tokens sent out to remote nodes,
// pending a pong response back.
pending_cache: LruCache<Hash, (Pubkey, SocketAddr)>,
}
impl<T: Serialize> Ping<T> {
pub fn new(token: T, keypair: &Keypair) -> Result<Self, Error> {
let signature = keypair.sign_message(&serialize(&token)?);
let ping = Ping {
from: keypair.pubkey(),
token,
signature,
};
Ok(ping)
}
}
impl<T> Ping<T>
where
T: Serialize + AsByteSliceMut + Default,
{
pub fn new_rand<R>(rng: &mut R, keypair: &Keypair) -> Result<Self, Error>
where
R: Rng + CryptoRng,
{
let mut token = T::default();
rng.fill(&mut token);
Ping::new(token, keypair)
}
}
impl<T> Sanitize for Ping<T> {
fn sanitize(&self) -> Result<(), SanitizeError> {
self.from.sanitize()?;
// TODO Add self.token.sanitize()?; when rust's
// specialization feature becomes stable.
self.signature.sanitize()
}
}
impl<T: Serialize> Signable for Ping<T> {
fn pubkey(&self) -> Pubkey {
self.from
}
fn signable_data(&self) -> Cow<[u8]> {
Cow::Owned(serialize(&self.token).unwrap())
}
fn get_signature(&self) -> Signature {
self.signature
}
fn set_signature(&mut self, signature: Signature) {
self.signature = signature;
}
}
impl Pong {
pub fn new<T: Serialize>(ping: &Ping<T>, keypair: &Keypair) -> Result<Self, Error> {
let hash = hash::hash(&serialize(&ping.token)?);
let pong = Pong {
from: keypair.pubkey(),
hash,
signature: keypair.sign_message(hash.as_ref()),
};
Ok(pong)
}
}
impl Sanitize for Pong {
fn sanitize(&self) -> Result<(), SanitizeError> {
self.from.sanitize()?;
self.hash.sanitize()?;
self.signature.sanitize()
}
}
impl Signable for Pong {
fn pubkey(&self) -> Pubkey {
self.from
}
fn signable_data(&self) -> Cow<[u8]> {
Cow::Owned(self.hash.as_ref().into())
}
fn get_signature(&self) -> Signature {
self.signature
}
fn set_signature(&mut self, signature: Signature) {
self.signature = signature;
}
}
impl PingCache {
pub fn new(ttl: Duration, cap: usize) -> Self {
Self {
ttl,
pings: LruCache::new(cap),
pongs: LruCache::new(cap),
pending_cache: LruCache::new(cap),
}
}
/// Checks if the pong hash, pubkey and socket match a ping message sent
/// out previously. If so records current timestamp for the remote node and
/// returns true.
/// Note: Does not verify the signature.
pub fn add(&mut self, pong: &Pong, socket: SocketAddr, now: Instant) -> bool {
let node = (pong.pubkey(), socket);
match self.pending_cache.peek(&pong.hash) {
Some(value) if *value == node => {
self.pings.pop(&node);
self.pongs.put(node, now);
self.pending_cache.pop(&pong.hash);
true
}
_ => false,
}
}
/// Checks if the remote node has been pinged recently. If not, calls the
/// given function to generates a new ping message, records current
/// timestamp and hash of ping token, and returns the ping message.
fn maybe_ping<T, F>(
&mut self,
now: Instant,
node: (Pubkey, SocketAddr),
mut pingf: F,
) -> Option<Ping<T>>
where
T: Serialize,
F: FnMut() -> Option<Ping<T>>,
{
// Rate limit consecutive pings sent to a remote node.
let delay = self.ttl / 64;
match self.pings.peek(&node) {
Some(t) if now.saturating_duration_since(*t) < delay => None,
_ => {
let ping = pingf()?;
let hash = hash::hash(&serialize(&ping.token).ok()?);
self.pings.put(node, now);
self.pending_cache.put(hash, node);
Some(ping)
}
}
}
/// Returns true if the remote node has responded to a ping message.
/// Removes expired pong messages. In order to extend verifications before
/// expiration, if the pong message is not too recent, and the node has not
/// been pinged recently, calls the given function to generates a new ping
/// message, records current timestamp and hash of ping token, and returns
/// the ping message.
/// Caller should verify if the socket address is valid. (e.g. by using
/// ContactInfo::is_valid_address).
pub fn check<T, F>(
&mut self,
now: Instant,
node: (Pubkey, SocketAddr),
pingf: F,
) -> (bool, Option<Ping<T>>)
where
T: Serialize,
F: FnMut() -> Option<Ping<T>>,
{
let (check, should_ping) = match self.pongs.get(&node) {
None => (false, true),
Some(t) => {
let age = now.saturating_duration_since(*t);
// Pop if the pong message has expired.
if age > self.ttl {
self.pongs.pop(&node);
}
// If the pong message is not too recent, generate a new ping
// message to extend remote node verification.
(true, age > self.ttl / 8)
}
};
let ping = if should_ping {
self.maybe_ping(now, node, pingf)
} else {
None
};
(check, ping)
}
// Only for tests and simulations.
pub(crate) fn mock_clone(&self) -> Self {
let mut clone = Self {
ttl: self.ttl,
pings: LruCache::new(self.pings.cap()),
pongs: LruCache::new(self.pongs.cap()),
pending_cache: LruCache::new(self.pending_cache.cap()),
};
for (k, v) in self.pongs.iter().rev() {
clone.pings.put(*k, *v);
}
for (k, v) in self.pongs.iter().rev() {
clone.pongs.put(*k, *v);
}
for (k, v) in self.pending_cache.iter().rev() {
clone.pending_cache.put(*k, *v);
}
clone
}
}
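A sketch of how a gossip loop might drive PingCache: check() reports whether the remote node is currently verified and hands back a Ping to (re)send when needed, while add() records a matching Pong. The send/receive plumbing is out of scope, and the imports at the top of this file are assumed:

    type Token = [u8; 32];

    fn verify_or_ping(
        cache: &mut PingCache,
        keypair: &Keypair,
        node: (Pubkey, SocketAddr),
        now: Instant,
    ) -> (bool, Option<Ping<Token>>) {
        let mut rng = rand::thread_rng();
        let pingf = || Ping::<Token>::new_rand(&mut rng, keypair).ok();
        // The bool is true while a valid pong is cached; the Ping, when Some,
        // is already signed by `keypair` and should be sent to node.1.
        cache.check(now, node, pingf)
    }

    fn on_pong(cache: &mut PingCache, pong: &Pong, from: SocketAddr) {
        // Assumes the pong signature was already verified; add() only matches
        // the hash against an outstanding ping token.
        let _verified_now = cache.add(pong, from, Instant::now());
    }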
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashSet;
use std::iter::repeat_with;
use std::net::{Ipv4Addr, SocketAddrV4};
type Token = [u8; 32];
#[test]
fn test_ping_pong() {
let mut rng = rand::thread_rng();
let keypair = Keypair::new();
let ping = Ping::<Token>::new_rand(&mut rng, &keypair).unwrap();
assert!(ping.verify());
assert!(ping.sanitize().is_ok());
let pong = Pong::new(&ping, &keypair).unwrap();
assert!(pong.verify());
assert!(pong.sanitize().is_ok());
assert_eq!(hash::hash(&ping.token), pong.hash);
}
#[test]
fn test_ping_cache() {
let now = Instant::now();
let mut rng = rand::thread_rng();
let ttl = Duration::from_millis(256);
let mut cache = PingCache::new(ttl, /*cap=*/ 1000);
let this_node = Keypair::new();
let keypairs: Vec<_> = repeat_with(Keypair::new).take(8).collect();
let sockets: Vec<_> = repeat_with(|| {
SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(rng.gen(), rng.gen(), rng.gen(), rng.gen()),
rng.gen(),
))
})
.take(8)
.collect();
let remote_nodes: Vec<(&Keypair, SocketAddr)> = repeat_with(|| {
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
let socket = sockets[rng.gen_range(0, sockets.len())];
(keypair, socket)
})
.take(128)
.collect();
// Initially all checks should fail. The first observation of each node
// should create a ping packet.
let mut seen_nodes = HashSet::<(Pubkey, SocketAddr)>::new();
let pings: Vec<Option<Ping<Token>>> = remote_nodes
.iter()
.map(|(keypair, socket)| {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
assert!(!check);
assert_eq!(seen_nodes.insert(node), ping.is_some());
ping
})
.collect();
let now = now + Duration::from_millis(1);
let panic_ping = || -> Option<Ping<Token>> { panic!("this should not happen!") };
for ((keypair, socket), ping) in remote_nodes.iter().zip(&pings) {
match ping {
None => {
// Already have a recent ping packets for nodes, so no new
// ping packet will be generated.
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(check);
assert!(ping.is_none());
}
Some(ping) => {
let pong = Pong::new(ping, keypair).unwrap();
assert!(cache.add(&pong, *socket, now));
}
}
}
let now = now + Duration::from_millis(1);
// All nodes now have a recent pong packet.
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(check);
assert!(ping.is_none());
}
let now = now + ttl / 8;
// All nodes still have a valid pong packet, but the cache will create
// a new ping packet to extend verification.
seen_nodes.clear();
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
assert!(check);
assert_eq!(seen_nodes.insert(node), ping.is_some());
}
let now = now + Duration::from_millis(1);
// All nodes still have a valid pong packet, and a very recent ping
// packet pending response. So no new ping packet will be created.
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(check);
assert!(ping.is_none());
}
let now = now + ttl;
// Pong packets are still valid but expired. The first observation of
// each node will remove the pong packet from cache and create a new
// ping packet.
seen_nodes.clear();
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
if seen_nodes.insert(node) {
assert!(check);
assert!(ping.is_some());
} else {
assert!(!check);
assert!(ping.is_none());
}
}
let now = now + Duration::from_millis(1);
// No valid pong packet in the cache. A recent ping packet already
// created, so no new one will be created.
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let (check, ping) = cache.check(now, node, panic_ping);
assert!(!check);
assert!(ping.is_none());
}
let now = now + ttl / 64;
// No valid pong packet in the cache. Another ping packet will be
// created for the first observation of each node.
seen_nodes.clear();
for (keypair, socket) in &remote_nodes {
let node = (keypair.pubkey(), *socket);
let pingf = || Ping::<Token>::new_rand(&mut rng, &this_node).ok();
let (check, ping) = cache.check(now, node, pingf);
assert!(!check);
assert_eq!(seen_nodes.insert(node), ping.is_some());
}
}
}


@@ -401,7 +401,7 @@ mod test {
     fn test_add_vote_pubkey() {
         let mut stats = PropagatedStats::default();
         let mut all_pubkeys = PubkeyReferences::default();
-        let mut vote_pubkey = Pubkey::new_rand();
+        let mut vote_pubkey = solana_sdk::pubkey::new_rand();
         all_pubkeys.get_or_insert(&vote_pubkey);
         // Add a vote pubkey, the number of references in all_pubkeys
@@ -420,7 +420,7 @@ mod test {
         assert_eq!(stats.propagated_validators_stake, 1);
         // Adding another pubkey should succeed
-        vote_pubkey = Pubkey::new_rand();
+        vote_pubkey = solana_sdk::pubkey::new_rand();
         stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
         assert!(stats.propagated_validators.contains(&vote_pubkey));
         assert_eq!(stats.propagated_validators_stake, 3);
@@ -434,7 +434,7 @@ mod test {
     fn test_add_node_pubkey_internal() {
         let num_vote_accounts = 10;
         let staked_vote_accounts = 5;
-        let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand)
+        let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
             .take(num_vote_accounts)
             .collect();
         let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
@@ -445,7 +445,7 @@ mod test {
         let mut stats = PropagatedStats::default();
         let mut all_pubkeys = PubkeyReferences::default();
-        let mut node_pubkey = Pubkey::new_rand();
+        let mut node_pubkey = solana_sdk::pubkey::new_rand();
         all_pubkeys.get_or_insert(&node_pubkey);
         // Add a vote pubkey, the number of references in all_pubkeys
@@ -481,7 +481,7 @@ mod test {
         // Adding another pubkey with same vote accounts should succeed, but stake
         // shouldn't increase
-        node_pubkey = Pubkey::new_rand();
+        node_pubkey = solana_sdk::pubkey::new_rand();
         stats.add_node_pubkey_internal(
             &node_pubkey,
             &mut all_pubkeys,
@@ -500,8 +500,8 @@ mod test {
         // Adding another pubkey with different vote accounts should succeed
         // and increase stake
-        node_pubkey = Pubkey::new_rand();
-        let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand)
+        node_pubkey = solana_sdk::pubkey::new_rand();
+        let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
             .take(num_vote_accounts)
             .collect();
         let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys

Some files were not shown because too many files have changed in this diff.