Compare commits
451 Commits
Author | SHA1 | Date
---|---|---
*(451 commits, 870a7e8458 through 4bcf976ecd; only the SHA1 column of the commit table was captured in this export, so the author and date details per commit are not shown.)*
@@ -38,6 +38,7 @@ jobs:
- readlink -f .
script:
- source ci/env.sh
- rustup set profile default
- ci/publish-tarball.sh
deploy:
- provider: s3
@@ -73,7 +74,7 @@ jobs:

language: node_js
node_js:
- "lts/*"
- "node"

cache:
directories:

Cargo.lock (1270 changes, generated): file diff suppressed because it is too large.
@@ -21,7 +21,6 @@ members = [
"perf",
"validator",
"genesis",
"genesis-utils",
"gossip",
"install",
"keygen",
@@ -32,6 +31,7 @@ members = [
"log-analyzer",
"merkle-root-bench",
"merkle-tree",
"stake-o-matic",
"storage-bigtable",
"storage-proto",
"streamer",
@@ -39,20 +39,21 @@ members = [
"metrics",
"net-shaper",
"notifier",
"poh",
"poh-bench",
"program-test",
"programs/secp256k1",
"programs/bpf_loader",
"programs/budget",
"programs/config",
"programs/exchange",
"programs/failure",
"programs/noop",
"programs/ownable",
"programs/stake",
"programs/vest",
"programs/vote",
"remote-wallet",
"rpc",
"ramp-tps",
"runtime",
"runtime/store-tool",
"sdk",
@@ -60,7 +61,6 @@ members = [
"sdk/cargo-test-bpf",
"scripts",
"stake-accounts",
"stake-monitor",
"sys-tuner",
"tokens",
"transaction-status",
README.md (10 changes)

@@ -1,6 +1,6 @@
<p align="center">
<a href="https://solana.com">
<img alt="Solana" src="https://i.imgur.com/uBVzyX3.png" width="250" />
<img alt="Solana" src="https://i.imgur.com/OMnvVEz.png" width="250" />
</a>
</p>

@@ -19,7 +19,7 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt
```

Please sure you are always using the latest stable rust version by running:
Please make sure you are always using the latest stable rust version by running:

```bash
$ rustup update
@@ -32,12 +32,6 @@ $ sudo apt-get update
$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
```

On Mac M1s, make sure you set up your terminal & homebrew [to use](https://5balloons.info/correct-way-to-install-and-use-homebrew-on-m1-macs/) Rosetta. You can install it with:

```bash
$ softwareupdate --install-rosetta
```

## **2. Download the source code.**

```bash
SECURITY.md (20 changes)

@@ -51,27 +51,13 @@ The following components are out of scope for the bounty program
* Attacks that require social engineering

Eligibility:
* The participant submitting the bug report shall follow the process outlined within this document
* The participant submitting the bug bounty shall follow the process outlined within this document
* Valid exploits can be eligible even if they are not successfully executed on the cluster
* Multiple submissions for the same class of exploit are still eligible for compensation, though may be compensated at a lower rate, however these will be assessed on a case-by-case basis
* Participants must complete KYC and sign the participation agreement here when the registrations are open https://solana.com/validator-registration. Security exploits will still be assessed and open for submission at all times. This needs only be done prior to distribution of tokens.

Payment of Bug Bounties:
* Payments for eligible bug reports are distributed monthly.
* Bounties for all bug reports submitted in a given month are paid out in the middle of the
following month.
* The SOL/USD conversion rate used for payments is the market price at the end of
the last day of the month for the month in which the bug was submitted.
* The reference for this price is the Closing Price given by Coingecko.com on
that date given here:
https://www.coingecko.com/en/coins/solana/historical_data/usd#panel
* For example, for all bugs submitted in March 2021, the SOL/USD price for bug
payouts is the Close price on 2021-03-31 of $19.49. This applies to all bugs
submitted in March 2021, to be paid in mid-April 2021.
* Bug bounties are paid out in
[stake accounts](https://solana.com/staking) with a
[lockup](https://docs.solana.com/staking/stake-accounts#lockups)
expiring 12 months from the last day of the month in which the bug was submitted.
Notes:
* All locked tokens can be staked during the lockup period

<a name="process"></a>
## Incident Response Process
@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.7.2"
version = "1.6.18"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,10 @@ lazy_static = "1.4.0"
serde = "1.0.122"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "=1.7.2" }
solana-sdk = { path = "../sdk", version = "=1.7.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.7.2" }
solana-config-program = { path = "../programs/config", version = "=1.6.18" }
solana-sdk = { path = "../sdk", version = "=1.6.18" }
solana-stake-program = { path = "../programs/stake", version = "=1.6.18" }
solana-vote-program = { path = "../programs/vote", version = "=1.6.18" }
spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.5.1"

@@ -9,14 +9,14 @@ use crate::{
};
use inflector::Inflector;
use serde_json::Value;
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar};
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
use std::collections::HashMap;
use thiserror::Error;

lazy_static! {
static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id();
static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0();

@@ -6,10 +6,10 @@ use bincode::deserialize;
use serde_json::Value;
use solana_config_program::{get_config_data, ConfigKeys};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::stake::config::{self as stake_config, Config as StakeConfig};
use solana_stake_program::config::Config as StakeConfig;

pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
let parsed_account = if pubkey == &stake_config::id() {
let parsed_account = if pubkey == &solana_stake_program::config::id() {
get_config_data(data)
.ok()
.and_then(|data| deserialize::<StakeConfig>(data).ok())
@@ -101,7 +101,11 @@ mod test {
};
let stake_config_account = create_config_account(vec![], &stake_config, 10);
assert_eq!(
parse_config(&stake_config_account.data(), &stake_config::id()).unwrap(),
parse_config(
&stake_config_account.data(),
&solana_stake_program::config::id()
)
.unwrap(),
ConfigAccountType::StakeConfig(UiStakeConfig {
warmup_cooldown_rate: 0.25,
slash_penalty: 50,

@@ -4,7 +4,7 @@ use crate::{
};
use bincode::deserialize;
use solana_sdk::clock::{Epoch, UnixTimestamp};
use solana_sdk::stake::state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};
use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};

pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
let stake_state: StakeState = deserialize(data)
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.7.2"
version = "1.6.18"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
[dependencies]
log = "0.4.11"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.7.2" }
solana-runtime = { path = "../runtime", version = "=1.7.2" }
solana-measure = { path = "../measure", version = "=1.7.2" }
solana-sdk = { path = "../sdk", version = "=1.7.2" }
solana-version = { path = "../version", version = "=1.7.2" }
solana-logger = { path = "../logger", version = "=1.6.18" }
solana-runtime = { path = "../runtime", version = "=1.6.18" }
solana-measure = { path = "../measure", version = "=1.6.18" }
solana-sdk = { path = "../sdk", version = "=1.6.18" }
solana-version = { path = "../version", version = "=1.6.18" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"

@@ -6,9 +6,7 @@ use rayon::prelude::*;
use solana_measure::measure::Measure;
use solana_runtime::{
accounts::{create_test_accounts, update_accounts_bench, Accounts},
accounts_db::AccountShrinkThreshold,
accounts_index::AccountSecondaryIndexes,
ancestors::Ancestors,
accounts_index::{AccountSecondaryIndexes, Ancestors},
};
use solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey};
use std::{env, fs, path::PathBuf};
@@ -65,7 +63,6 @@ fn main() {
&ClusterType::Testnet,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
);
println!("Creating {} accounts", num_accounts);
let mut create_time = Measure::start("create accounts");
@@ -90,19 +87,17 @@ fn main() {
num_slots,
create_time
);
let mut ancestors = Vec::with_capacity(num_slots);
ancestors.push(0);
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..num_slots {
ancestors.push(i as u64);
ancestors.insert(i as u64, i - 1);
accounts.add_root(i as u64);
}
let ancestors = Ancestors::from(ancestors);
let mut elapsed = vec![0; iterations];
let mut elapsed_store = vec![0; iterations];
for x in 0..iterations {
if clean {
let mut time = Measure::start("clean");
accounts.accounts_db.clean_accounts(None, false);
accounts.accounts_db.clean_accounts(None);
time.stop();
println!("{}", time);
for slot in 0..num_slots {
@@ -121,7 +116,6 @@ fn main() {
solana_sdk::clock::Slot::default(),
&ancestors,
None,
false,
);
time_store.stop();
if results != results_store {
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-cluster-bench"
version = "1.7.2"
version = "1.6.18"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,23 +13,22 @@ clap = "2.33.1"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.4.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.7.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.7.2" }
solana-client = { path = "../client", version = "=1.7.2" }
solana-core = { path = "../core", version = "=1.7.2" }
solana-faucet = { path = "../faucet", version = "=1.7.2" }
solana-gossip = { path = "../gossip", version = "=1.7.2" }
solana-logger = { path = "../logger", version = "=1.7.2" }
solana-measure = { path = "../measure", version = "=1.7.2" }
solana-net-utils = { path = "../net-utils", version = "=1.7.2" }
solana-runtime = { path = "../runtime", version = "=1.7.2" }
solana-sdk = { path = "../sdk", version = "=1.7.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.7.2" }
solana-version = { path = "../version", version = "=1.7.2" }
solana-account-decoder = { path = "../account-decoder", version = "=1.6.18" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.18" }
solana-client = { path = "../client", version = "=1.6.18" }
solana-core = { path = "../core", version = "=1.6.18" }
solana-measure = { path = "../measure", version = "=1.6.18" }
solana-logger = { path = "../logger", version = "=1.6.18" }
solana-net-utils = { path = "../net-utils", version = "=1.6.18" }
solana-faucet = { path = "../faucet", version = "=1.6.18" }
solana-runtime = { path = "../runtime", version = "=1.6.18" }
solana-sdk = { path = "../sdk", version = "=1.6.18" }
solana-transaction-status = { path = "../transaction-status", version = "=1.6.18" }
solana-version = { path = "../version", version = "=1.6.18" }
spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.7.2" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.18" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -6,8 +6,8 @@ use rayon::prelude::*;
use solana_account_decoder::parse_token::spl_token_v2_0_pubkey;
use solana_clap_utils::input_parsers::pubkey_of;
use solana_client::rpc_client::RpcClient;
use solana_core::gossip_service::discover;
use solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT};
use solana_gossip::gossip_service::discover;
use solana_measure::measure::Measure;
use solana_runtime::inline_spl_token_v2_0;
use solana_sdk::{
@@ -363,7 +363,7 @@ fn run_accounts_bench(
iterations: usize,
maybe_space: Option<u64>,
batch_size: usize,
close_nth_batch: u64,
close_nth: u64,
maybe_lamports: Option<u64>,
num_instructions: usize,
mint: Option<Pubkey>,
@@ -441,7 +441,6 @@ fn run_accounts_bench(
}
}

// Create accounts
let sigs_len = executor.num_outstanding();
if sigs_len < batch_size {
let num_to_create = batch_size - sigs_len;
@@ -476,14 +475,10 @@ fn run_accounts_bench(
}
}

if close_nth_batch > 0 {
let num_batches_to_close =
total_accounts_created as u64 / (close_nth_batch * batch_size as u64);
let expected_closed = num_batches_to_close * batch_size as u64;
let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
// Close every account we've created with seed between max_closed_seed..expected_closed
if max_closed_seed < expected_closed {
let txs: Vec<_> = (0..expected_closed - max_closed_seed)
if close_nth > 0 {
let expected_closed = total_accounts_created as u64 / close_nth;
if expected_closed > total_accounts_closed {
let txs: Vec<_> = (0..expected_closed - total_accounts_closed)
.into_par_iter()
.map(|_| {
let message = make_close_message(
@@ -577,14 +572,14 @@ fn main() {
.help("Number of transactions to send per batch"),
)
.arg(
Arg::with_name("close_nth_batch")
Arg::with_name("close_nth")
.long("close-frequency")
.takes_value(true)
.value_name("BYTES")
.help(
"Every `n` batches, create a batch of close transactions for
the earliest remaining batch of accounts created.
Note: Should be > 1 to avoid situations where the close \
"Send close transactions after this many accounts created. \
Note: a `close-frequency` value near or below `batch-size` \
may result in transaction-simulation errors, as the close \
transactions will be submitted before the corresponding \
create transactions have been confirmed",
),
@@ -637,7 +632,7 @@ fn main() {
let space = value_t!(matches, "space", u64).ok();
let lamports = value_t!(matches, "lamports", u64).ok();
let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4);
let close_nth_batch = value_t!(matches, "close_nth_batch", u64).unwrap_or(0);
let close_nth = value_t!(matches, "close_nth", u64).unwrap_or(0);
let iterations = value_t!(matches, "iterations", usize).unwrap_or(10);
let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1);
if num_instructions == 0 || num_instructions > 500 {
@@ -690,7 +685,7 @@ fn main() {
iterations,
space,
batch_size,
close_nth_batch,
close_nth,
lamports,
num_instructions,
mint,
@@ -725,7 +720,7 @@ pub mod test {
let iterations = 10;
let maybe_space = None;
let batch_size = 100;
let close_nth_batch = 100;
let close_nth = 100;
let maybe_lamports = None;
let num_instructions = 2;
let mut start = Measure::start("total accounts run");
@@ -736,7 +731,7 @@ pub mod test {
iterations,
maybe_space,
batch_size,
close_nth_batch,
close_nth,
maybe_lamports,
num_instructions,
None,
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.7.2"
version = "1.6.18"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,18 +14,16 @@ crossbeam-channel = "0.4"
log = "0.4.11"
rand = "0.7.0"
rayon = "1.5.0"
solana-core = { path = "../core", version = "=1.7.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.7.2" }
solana-gossip = { path = "../gossip", version = "=1.7.2" }
solana-ledger = { path = "../ledger", version = "=1.7.2" }
solana-logger = { path = "../logger", version = "=1.7.2" }
solana-measure = { path = "../measure", version = "=1.7.2" }
solana-perf = { path = "../perf", version = "=1.7.2" }
solana-poh = { path = "../poh", version = "=1.7.2" }
solana-runtime = { path = "../runtime", version = "=1.7.2" }
solana-streamer = { path = "../streamer", version = "=1.7.2" }
solana-sdk = { path = "../sdk", version = "=1.7.2" }
solana-version = { path = "../version", version = "=1.7.2" }
solana-core = { path = "../core", version = "=1.6.18" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.18" }
solana-streamer = { path = "../streamer", version = "=1.6.18" }
solana-perf = { path = "../perf", version = "=1.6.18" }
solana-ledger = { path = "../ledger", version = "=1.6.18" }
solana-logger = { path = "../logger", version = "=1.6.18" }
solana-runtime = { path = "../runtime", version = "=1.6.18" }
solana-measure = { path = "../measure", version = "=1.6.18" }
solana-sdk = { path = "../sdk", version = "=1.6.18" }
solana-version = { path = "../version", version = "=1.6.18" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -4,8 +4,13 @@ use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_core::banking_stage::BankingStage;
use solana_gossip::{cluster_info::ClusterInfo, cluster_info::Node};
use solana_core::{
banking_stage::{create_test_recorder, BankingStage},
cluster_info::ClusterInfo,
cluster_info::Node,
poh_recorder::PohRecorder,
poh_recorder::WorkingBankEntry,
};
use solana_ledger::{
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
@@ -13,7 +18,6 @@ use solana_ledger::{
};
use solana_measure::measure::Measure;
use solana_perf::packet::to_packets_chunked;
use solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry};
use solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
};
@@ -74,7 +78,7 @@ fn make_accounts_txs(
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
if !same_payer {
new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
}
@@ -185,7 +189,7 @@ fn main() {
genesis_config.hash(),
);
// Ignore any pesky duplicate signature errors in the case we are using single-payer
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
fund.signatures = vec![Signature::new(&sig[0..64])];
let x = bank.process_transaction(&fund);
x.unwrap();
@@ -351,7 +355,7 @@ fn main() {
if bank.slot() > 0 && bank.slot() % 16 == 0 {
for tx in transactions.iter_mut() {
tx.message.recent_blockhash = bank.last_blockhash();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.7.2"
version = "1.6.18"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,20 +11,20 @@ edition = "2018"

[dependencies]
bincode = "1.3.1"
borsh = "0.8.1"
borsh-derive = "0.8.1"
borsh = "0.9.0"
borsh-derive = "0.9.0"
futures = "0.3"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.7.2" }
solana-program = { path = "../sdk/program", version = "=1.7.2" }
solana-sdk = { path = "../sdk", version = "=1.7.2" }
solana-banks-interface = { path = "../banks-interface", version = "=1.6.18" }
solana-program = { path = "../sdk/program", version = "=1.6.18" }
solana-sdk = { path = "../sdk", version = "=1.6.18" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }

[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.7.2" }
solana-banks-server = { path = "../banks-server", version = "=1.7.2" }
solana-runtime = { path = "../runtime", version = "=1.6.18" }
solana-banks-server = { path = "../banks-server", version = "=1.6.18" }

[lib]
crate-type = ["lib"]

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.7.2"
version = "1.6.18"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ edition = "2018"
[dependencies]
mio = "0.7.6"
serde = { version = "1.0.122", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.7.2" }
solana-sdk = { path = "../sdk", version = "=1.6.18" }
tarpc = { version = "0.24.1", features = ["full"] }

[dev-dependencies]

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.7.2"
version = "1.6.18"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,10 +14,10 @@ bincode = "1.3.1"
futures = "0.3"
log = "0.4.11"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "=1.7.2" }
solana-runtime = { path = "../runtime", version = "=1.7.2" }
solana-sdk = { path = "../sdk", version = "=1.7.2" }
solana-metrics = { path = "../metrics", version = "=1.7.2" }
solana-banks-interface = { path = "../banks-interface", version = "=1.6.18" }
solana-runtime = { path = "../runtime", version = "=1.6.18" }
solana-sdk = { path = "../sdk", version = "=1.6.18" }
solana-metrics = { path = "../metrics", version = "=1.6.18" }
tarpc = { version = "0.24.1", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }

@@ -131,10 +131,13 @@ impl BanksServer {
}
}

fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
fn verify_transaction(
transaction: &Transaction,
libsecp256k1_0_5_upgrade_enabled: bool,
) -> transaction::Result<()> {
if let Err(err) = transaction.verify() {
Err(err)
} else if let Err(err) = transaction.verify_precompiles() {
} else if let Err(err) = transaction.verify_precompiles(libsecp256k1_0_5_upgrade_enabled) {
Err(err)
} else {
Ok(())
@@ -215,7 +218,10 @@ impl Banks for BanksServer {
transaction: Transaction,
commitment: CommitmentLevel,
) -> Option<transaction::Result<()>> {
if let Err(err) = verify_transaction(&transaction) {
if let Err(err) = verify_transaction(
&transaction,
self.bank(commitment).libsecp256k1_0_5_upgrade_enabled(),
) {
return Some(Err(err));
}
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.7.2"
version = "1.6.18"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,22 +18,21 @@ rand = "0.7.0"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.7.2" }
solana-core = { path = "../core", version = "=1.7.2" }
solana-genesis = { path = "../genesis", version = "=1.7.2" }
solana-client = { path = "../client", version = "=1.7.2" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.7.2" }
solana-faucet = { path = "../faucet", version = "=1.7.2" }
solana-gossip = { path = "../gossip", version = "=1.7.2" }
solana-logger = { path = "../logger", version = "=1.7.2" }
solana-metrics = { path = "../metrics", version = "=1.7.2" }
solana-net-utils = { path = "../net-utils", version = "=1.7.2" }
solana-runtime = { path = "../runtime", version = "=1.7.2" }
solana-sdk = { path = "../sdk", version = "=1.7.2" }
solana-version = { path = "../version", version = "=1.7.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.18" }
solana-core = { path = "../core", version = "=1.6.18" }
solana-genesis = { path = "../genesis", version = "=1.6.18" }
solana-client = { path = "../client", version = "=1.6.18" }
solana-faucet = { path = "../faucet", version = "=1.6.18" }
solana-exchange-program = { path = "../programs/exchange", version = "=1.6.18" }
solana-logger = { path = "../logger", version = "=1.6.18" }
solana-metrics = { path = "../metrics", version = "=1.6.18" }
solana-net-utils = { path = "../net-utils", version = "=1.6.18" }
solana-runtime = { path = "../runtime", version = "=1.6.18" }
solana-sdk = { path = "../sdk", version = "=1.6.18" }
solana-version = { path = "../version", version = "=1.6.18" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.7.2" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.18" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -5,7 +5,7 @@ pub mod order_book;

use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
use log::*;
use solana_gossip::gossip_service::{discover_cluster, get_multi_client};
use solana_core::gossip_service::{discover_cluster, get_multi_client};
use solana_sdk::signature::Signer;

fn main() {

@@ -1,11 +1,13 @@
use log::*;
use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
use solana_core::validator::ValidatorConfig;
use solana_core::{
gossip_service::{discover_cluster, get_multi_client},
validator::ValidatorConfig,
};
use solana_exchange_program::{
exchange_processor::process_instruction, id, solana_exchange_program,
};
use solana_faucet::faucet::run_local_faucet_with_port;
use solana_gossip::gossip_service::{discover_cluster, get_multi_client};
use solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.7.2"
version = "1.6.18"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false

[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "=1.7.2" }
solana-streamer = { path = "../streamer", version = "=1.7.2" }
solana-logger = { path = "../logger", version = "=1.7.2" }
solana-net-utils = { path = "../net-utils", version = "=1.7.2" }
solana-version = { path = "../version", version = "=1.7.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.18" }
solana-streamer = { path = "../streamer", version = "=1.6.18" }
solana-logger = { path = "../logger", version = "=1.6.18" }
solana-net-utils = { path = "../net-utils", version = "=1.6.18" }
solana-version = { path = "../version", version = "=1.6.18" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -75,7 +75,7 @@ fn main() -> Result<()> {

let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
let recycler = PacketsRecycler::default();
let recycler = PacketsRecycler::new_without_limit("bench-streamer-recycler-shrink-stats");
for _ in 0..num_sockets {
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
@@ -92,7 +92,6 @@ fn main() -> Result<()> {
recycler.clone(),
"bench-streamer-test",
1,
true,
));
}
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.7.2"
version = "1.6.18"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,23 +15,22 @@ log = "0.4.11"
rayon = "1.5.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "=1.7.2" }
solana-core = { path = "../core", version = "=1.7.2" }
solana-genesis = { path = "../genesis", version = "=1.7.2" }
solana-client = { path = "../client", version = "=1.7.2" }
solana-faucet = { path = "../faucet", version = "=1.7.2" }
solana-gossip = { path = "../gossip", version = "=1.7.2" }
solana-logger = { path = "../logger", version = "=1.7.2" }
solana-metrics = { path = "../metrics", version = "=1.7.2" }
solana-measure = { path = "../measure", version = "=1.7.2" }
solana-net-utils = { path = "../net-utils", version = "=1.7.2" }
solana-runtime = { path = "../runtime", version = "=1.7.2" }
solana-sdk = { path = "../sdk", version = "=1.7.2" }
solana-version = { path = "../version", version = "=1.7.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.18" }
solana-core = { path = "../core", version = "=1.6.18" }
solana-genesis = { path = "../genesis", version = "=1.6.18" }
solana-client = { path = "../client", version = "=1.6.18" }
solana-faucet = { path = "../faucet", version = "=1.6.18" }
solana-logger = { path = "../logger", version = "=1.6.18" }
solana-metrics = { path = "../metrics", version = "=1.6.18" }
solana-measure = { path = "../measure", version = "=1.6.18" }
solana-net-utils = { path = "../net-utils", version = "=1.6.18" }
solana-runtime = { path = "../runtime", version = "=1.6.18" }
solana-sdk = { path = "../sdk", version = "=1.6.18" }
solana-version = { path = "../version", version = "=1.6.18" }

[dev-dependencies]
serial_test = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "=1.7.2" }
solana-local-cluster = { path = "../local-cluster", version = "=1.6.18" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -2,8 +2,8 @@
use log::*;
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
use solana_bench_tps::cli;
use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
use solana_genesis::Base64Account;
use solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client};
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_program;

@@ -5,9 +5,8 @@ use solana_bench_tps::{
cli::Config,
};
use solana_client::thin_client::create_client;
use solana_core::validator::ValidatorConfig;
use solana_core::{cluster_info::VALIDATOR_PORT_RANGE, validator::ValidatorConfig};
use solana_faucet::faucet::run_local_faucet_with_port;
use solana_gossip::cluster_info::VALIDATOR_PORT_RANGE;
use solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
@@ -148,6 +148,33 @@ all_test_steps() {
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
wait_step

# BPF test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-bpf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-bpf.sh"
name: "stable-bpf"
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=default"
EOF
else
annotate --style info \
"Stable-BPF skipped as no relevant files were modified"
fi

# Perf test suite
if affects \
.rs$ \
@@ -165,7 +192,7 @@ all_test_steps() {
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 40
timeout_in_minutes: 20
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"

@@ -3,13 +3,24 @@
# Pull requests to not run these steps.
steps:
- command: "ci/publish-tarball.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 60
name: "publish tarball"
- command: "ci/publish-bpf-sdk.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 5
name: "publish bpf sdk"
- wait
- command: "sdk/docker-solana/build.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 60
name: "publish docker"
- command: "ci/publish-crate.sh"
agents:
- "queue=release-build"
timeout_in_minutes: 240
name: "publish crate"
branches: "!master"
@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.52.1
FROM solanalabs/rust:1.51.0
ARG date

RUN set -x \

@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.52.1
FROM rust:1.51.0

# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
@@ -74,10 +74,13 @@ else
export CI_BUILD_ID=
export CI_COMMIT=
export CI_JOB_ID=
export CI_OS_NAME=
export CI_PULL_REQUEST=
export CI_REPO_SLUG=
export CI_TAG=
# Don't override ci/run-local.sh
if [[ -z $CI_LOCAL_RUN ]]; then
export CI_OS_NAME=
fi
fi

cat <<EOF

@@ -127,7 +127,7 @@ startNode() {
waitForNodeToInit() {
declare initCompleteFile=$1
while [[ ! -r $initCompleteFile ]]; do
if [[ $SECONDS -ge 240 ]]; then
if [[ $SECONDS -ge 300 ]]; then
echo "^^^ +++"
echo "Error: $initCompleteFile not found in $SECONDS seconds"
exit 1

@@ -12,10 +12,14 @@ import json
import subprocess
import sys;

real_file = os.path.realpath(__file__)
ci_path = os.path.dirname(real_file)
src_root = os.path.dirname(ci_path)

def load_metadata():
cmd = f'{src_root}/cargo metadata --no-deps --format-version=1'
return json.loads(subprocess.Popen(
'cargo metadata --no-deps --format-version=1',
shell=True, stdout=subprocess.PIPE).communicate()[0])
cmd, shell=True, stdout=subprocess.PIPE).communicate()[0])

def get_packages():
metadata = load_metadata()
ci/publish-bpf-sdk.sh (new executable file, 27 lines)

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."
eval "$(ci/channel-info.sh)"

if [[ -n "$CI_TAG" ]]; then
CHANNEL_OR_TAG=$CI_TAG
else
CHANNEL_OR_TAG=$CHANNEL
fi

(
set -x
sdk/bpf/scripts/package.sh
[[ -f bpf-sdk.tar.bz2 ]]
)

source ci/upload-ci-artifact.sh
echo --- AWS S3 Store
if [[ -z $CHANNEL_OR_TAG ]]; then
echo Skipped
else
upload-s3-artifact "/solana/bpf-sdk.tar.bz2" "s3://solana-sdk/$CHANNEL_OR_TAG/bpf-sdk.tar.bz2"
fi

exit 0
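The new ci/publish-bpf-sdk.sh derives its S3 destination from either the CI tag or the channel reported by ci/channel-info.sh. A minimal sketch of that selection, using assumed values for CI_TAG and CHANNEL purely for illustration:

```bash
#!/usr/bin/env bash
# Illustrative values; in CI these come from the environment and ci/channel-info.sh.
CI_TAG=v1.6.18
CHANNEL=beta

# Same selection logic as the script above: an explicit tag wins over the channel.
if [[ -n "$CI_TAG" ]]; then
  CHANNEL_OR_TAG=$CI_TAG
else
  CHANNEL_OR_TAG=$CHANNEL
fi

echo "s3://solana-sdk/$CHANNEL_OR_TAG/bpf-sdk.tar.bz2"   # prints s3://solana-sdk/v1.6.18/bpf-sdk.tar.bz2
```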
ci/run-local.sh (new executable file, 55 lines)

@@ -0,0 +1,55 @@
#!/usr/bin/env bash

cd "$(dirname "$0")/.."

export CI_LOCAL_RUN=true

set -e

case $(uname -o) in
*/Linux)
export CI_OS_NAME=linux
;;
*)
echo "local CI runs are only supported on Linux" 1>&2
exit 1
;;
esac

steps=()
steps+=(test-sanity)
steps+=(shellcheck)
steps+=(test-checks)
steps+=(test-coverage)
steps+=(test-stable)
steps+=(test-stable-bpf)
steps+=(test-stable-perf)
steps+=(test-downstream-builds)
steps+=(test-bench)
steps+=(test-local-cluster)

step_index=0
if [[ -n "$1" ]]; then
start_step="$1"
while [[ $step_index -lt ${#steps[@]} ]]; do
step="${steps[$step_index]}"
if [[ "$step" = "$start_step" ]]; then
break
fi
step_index=$((step_index + 1))
done
if [[ $step_index -eq ${#steps[@]} ]]; then
echo "unexpected start step: \"$start_step\"" 1>&2
exit 1
else
echo "** starting at step: \"$start_step\" **"
echo
fi
fi

while [[ $step_index -lt ${#steps[@]} ]]; do
step="${steps[$step_index]}"
cmd="ci/${step}.sh"
$cmd
step_index=$((step_index + 1))
done
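ci/run-local.sh runs the ci/<step>.sh scripts in the order listed in its steps array and accepts an optional starting step as its first argument. A usage sketch; the invocations are illustrative rather than taken from the diff:

```bash
# Run the whole local CI sequence from the first step.
ci/run-local.sh

# Resume from a later step (must match an entry in the steps array), skipping everything before it.
ci/run-local.sh test-stable-bpf
```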
@@ -7,7 +7,7 @@ source multinode-demo/common.sh

rm -rf config/run/init-completed config/ledger config/snapshot-ledger

SOLANA_RUN_SH_VALIDATOR_ARGS="--snapshot-interval-slots 200" timeout 120 ./run.sh &
timeout 120 ./run.sh &
pid=$!

attempts=20
@@ -16,8 +16,6 @@ while [[ ! -f config/run/init-completed ]]; do
if ((--attempts == 0)); then
echo "Error: validator failed to boot"
exit 1
else
echo "Checking init"
fi
done

@@ -26,7 +24,6 @@ snapshot_slot=1
# wait a bit longer than snapshot_slot
while [[ $($solana_cli --url http://localhost:8899 slot --commitment processed) -le $((snapshot_slot + 1)) ]]; do
sleep 1
echo "Checking slot"
done

$solana_validator --ledger config/ledger exit --force || true

@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.52.1
stable_version=1.51.0
fi

if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2021-05-18
nightly_version=2021-04-18
fi

@@ -27,7 +27,7 @@ BENCH_ARTIFACT=current_bench_results.log
_ "$cargo" build --manifest-path=keygen/Cargo.toml
export PATH="$PWD/target/debug":$PATH

# Clear the C dependency files, if dependency moves these files are not regenerated
# Clear the C dependency files, if dependeny moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete

@@ -45,14 +45,6 @@ _ "$cargo" nightly bench --manifest-path sdk/Cargo.toml ${V:+--verbose} \
_ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"

# Run gossip benches
_ "$cargo" nightly bench --manifest-path gossip/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"

# Run poh benches
_ "$cargo" nightly bench --manifest-path poh/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"

# Run core benches
_ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"

@@ -14,7 +14,7 @@ scripts/increment-cargo-version.sh check

# Disallow uncommitted Cargo.lock changes
(
_ scripts/cargo-for-all-lock-files.sh tree
_ scripts/cargo-for-all-lock-files.sh tree >/dev/null
set +e
if ! _ git diff --exit-code; then
echo -e "\nError: Uncommitted Cargo.lock changes" 1>&2
@@ -35,8 +35,10 @@ echo --- build environment
"$cargo" stable clippy --version --verbose
"$cargo" nightly clippy --version --verbose

# audit is done only with stable
# audit is done only with "$cargo stable"
"$cargo" stable audit --version

grcov --version
)

export RUST_BACKTRACE=1
@@ -65,7 +67,8 @@ _ ci/order-crates-for-publishing.py

# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- \
--deny=warnings --deny=clippy::integer_arithmetic --allow=clippy::inconsistent_struct_constructor

_ "$cargo" stable fmt --all -- --check
ci/test-downstream-builds.sh (new executable file, 9 lines)

@@ -0,0 +1,9 @@
#!/usr/bin/env bash

cd "$(dirname "$0")/.."

export CI_LOCAL_RUN=true

set -ex

scripts/build-downstream-projects.sh

ci/test-stable-bpf.sh (new symbolic link, 1 line)

@@ -0,0 +1 @@
test-stable.sh
@@ -21,10 +21,6 @@ export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
source scripts/ulimit-n.sh

# Clear the C dependency files, if dependency moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete

# Limit compiler jobs to reduce memory usage
# on machines with 2gb/thread of memory
NPROC=$(nproc)
@@ -35,17 +31,25 @@ case $testName in
test-stable)
_ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
;;
test-stable-perf)
test-stable-bpf)
# Clear the C dependency files, if dependency moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete

# rustfilt required for dumping BPF assembly listings
"$cargo" install rustfilt

# solana-keygen required when building C programs
_ "$cargo" build --manifest-path=keygen/Cargo.toml
export PATH="$PWD/target/debug":$PATH
cargo_build_bpf="$(realpath ./cargo-build-bpf)"

# BPF solana-sdk legacy compile test
./cargo-build-bpf --manifest-path sdk/Cargo.toml
"$cargo_build_bpf" --manifest-path sdk/Cargo.toml

# BPF Program unit tests
"$cargo" test --manifest-path programs/bpf/Cargo.toml
cargo-build-bpf --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf
"$cargo" stable test --manifest-path programs/bpf/Cargo.toml
"$cargo_build_bpf" --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf

# BPF program system tests
_ make -C programs/bpf/c tests
@@ -53,11 +57,31 @@ test-stable-perf)
--manifest-path programs/bpf/Cargo.toml \
--no-default-features --features=bpf_c,bpf_rust -- --nocapture

# Dump BPF program assembly listings
for bpf_test in programs/bpf/rust/*; do
if pushd "$bpf_test"; then
"$cargo_build_bpf" --dump
popd
fi
done

# BPF program instruction count assertion
bpf_target_path=programs/bpf/target
_ "$cargo" stable test \
--manifest-path programs/bpf/Cargo.toml \
--no-default-features --features=bpf_c,bpf_rust assert_instruction_count \
-- --nocapture &> "${bpf_target_path}"/deploy/instuction_counts.txt

bpf_dump_archive="bpf-dumps.tar.bz2"
rm -f "$bpf_dump_archive"
tar cjvf "$bpf_dump_archive" "${bpf_target_path}"/{deploy/*.txt,bpfel-unknown-unknown/release/*.so}
;;
test-stable-perf)
if [[ $(uname) = Linux ]]; then
# Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
# lengthy and unexpected delay the first time CUDA is involved when the driver
# is not yet loaded.
sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh
sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh || true

rm -rf target/perf-libs
./fetch-perf-libs.sh
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-clap-utils"
|
||||
version = "1.7.2"
|
||||
version = "1.6.18"
|
||||
description = "Solana utilities for the clap"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -12,8 +12,8 @@ edition = "2018"
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
rpassword = "4.0"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.2" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.18" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.6.18" }
|
||||
thiserror = "1.0.21"
|
||||
tiny-bip39 = "0.8.0"
|
||||
uriparse = "0.6.3"
|
||||
|
@@ -17,9 +17,6 @@ use {
|
||||
std::{str::FromStr, sync::Arc},
|
||||
};
|
||||
|
||||
// Sentinel value used to indicate to write to screen instead of file
|
||||
pub const STDOUT_OUTFILE_TOKEN: &str = "-";
|
||||
|
||||
// Return parsed values from matches at `name`
|
||||
pub fn values_of<T>(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<T>>
|
||||
where
|
||||
|
@@ -1,6 +1,6 @@
|
||||
use {
|
||||
crate::{
|
||||
input_parsers::{pubkeys_sigs_of, STDOUT_OUTFILE_TOKEN},
|
||||
input_parsers::pubkeys_sigs_of,
|
||||
offline::{SIGNER_ARG, SIGN_ONLY_ARG},
|
||||
ArgConstant,
|
||||
},
|
||||
@@ -297,7 +297,7 @@ pub(crate) fn parse_signer_source<S: AsRef<str>>(
|
||||
}
|
||||
} else {
|
||||
match source.as_str() {
|
||||
STDOUT_OUTFILE_TOKEN => Ok(SignerSource::new(SignerSourceKind::Stdin)),
|
||||
"-" => Ok(SignerSource::new(SignerSourceKind::Stdin)),
|
||||
ASK_KEYWORD => Ok(SignerSource::new_legacy(SignerSourceKind::Prompt)),
|
||||
_ => match Pubkey::from_str(source.as_str()) {
|
||||
Ok(pubkey) => Ok(SignerSource::new(SignerSourceKind::Pubkey(pubkey))),
|
||||
@@ -692,7 +692,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_parse_signer_source() {
|
||||
assert!(matches!(
|
||||
parse_signer_source(STDOUT_OUTFILE_TOKEN).unwrap(),
|
||||
parse_signer_source("-").unwrap(),
|
||||
SignerSource {
|
||||
kind: SignerSourceKind::Stdin,
|
||||
derivation_path: None,
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-config"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.7.2"
|
||||
version = "1.6.18"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-output"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.7.2"
|
||||
version = "1.6.18"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -12,19 +12,20 @@ documentation = "https://docs.rs/solana-cli-output"
|
||||
[dependencies]
|
||||
base64 = "0.13.0"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
console = "0.14.1"
|
||||
console = "0.11.3"
|
||||
humantime = "2.0.1"
|
||||
Inflector = "0.11.4"
|
||||
indicatif = "0.15.0"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.2" }
|
||||
solana-client = { path = "../client", version = "=1.7.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.2" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.6.18" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.6.18" }
|
||||
solana-client = { path = "../client", version = "=1.6.18" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.6.18" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.6.18" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.6.18" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.6.18" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
|
@@ -25,10 +25,10 @@ use {
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
stake::state::{Authorized, Lockup},
|
||||
stake_history::StakeHistoryEntry,
|
||||
transaction::{Transaction, TransactionError},
|
||||
},
|
||||
solana_stake_program::stake_state::{Authorized, Lockup},
|
||||
solana_transaction_status::{
|
||||
EncodedConfirmedBlock, EncodedTransaction, TransactionConfirmationStatus,
|
||||
UiTransactionStatusMeta,
|
||||
@@ -233,6 +233,10 @@ pub struct CliEpochInfo {
|
||||
pub epoch_info: EpochInfo,
|
||||
#[serde(skip)]
|
||||
pub average_slot_time_ms: u64,
|
||||
#[serde(skip)]
|
||||
pub start_block_time: Option<UnixTimestamp>,
|
||||
#[serde(skip)]
|
||||
pub current_block_time: Option<UnixTimestamp>,
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliEpochInfo {}
|
||||
@@ -277,21 +281,41 @@ impl fmt::Display for CliEpochInfo {
|
||||
remaining_slots_in_epoch
|
||||
),
|
||||
)?;
|
||||
let (time_elapsed, annotation) = if let (Some(start_block_time), Some(current_block_time)) =
|
||||
(self.start_block_time, self.current_block_time)
|
||||
{
|
||||
(
|
||||
Duration::from_secs((current_block_time - start_block_time) as u64),
|
||||
None,
|
||||
)
|
||||
} else {
|
||||
(
|
||||
slot_to_duration(self.epoch_info.slot_index, self.average_slot_time_ms),
|
||||
Some("* estimated based on current slot durations"),
|
||||
)
|
||||
};
|
||||
let time_remaining = slot_to_duration(remaining_slots_in_epoch, self.average_slot_time_ms);
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Epoch Completed Time:",
|
||||
&format!(
|
||||
"{}/{} ({} remaining)",
|
||||
slot_to_human_time(self.epoch_info.slot_index, self.average_slot_time_ms),
|
||||
slot_to_human_time(self.epoch_info.slots_in_epoch, self.average_slot_time_ms),
|
||||
slot_to_human_time(remaining_slots_in_epoch, self.average_slot_time_ms)
|
||||
"{}{}/{} ({} remaining)",
|
||||
humantime::format_duration(time_elapsed).to_string(),
|
||||
if annotation.is_some() { "*" } else { "" },
|
||||
humantime::format_duration(time_elapsed + time_remaining).to_string(),
|
||||
humantime::format_duration(time_remaining).to_string(),
|
||||
),
|
||||
)
|
||||
)?;
|
||||
if let Some(annotation) = annotation {
|
||||
writeln!(f)?;
|
||||
writeln!(f, "{}", annotation)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn slot_to_human_time(slot: Slot, slot_time_ms: u64) -> String {
|
||||
humantime::format_duration(Duration::from_secs((slot * slot_time_ms) / 1000)).to_string()
|
||||
fn slot_to_duration(slot: Slot, slot_time_ms: u64) -> Duration {
|
||||
Duration::from_secs((slot * slot_time_ms) / 1000)
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
@@ -323,6 +347,8 @@ pub struct CliValidators {
|
||||
pub total_current_stake: u64,
|
||||
pub total_delinquent_stake: u64,
|
||||
pub validators: Vec<CliValidator>,
|
||||
pub average_skip_rate: f64,
|
||||
pub average_stake_weighted_skip_rate: f64,
|
||||
#[serde(skip_serializing)]
|
||||
pub validators_sort_order: CliValidatorsSortOrder,
|
||||
#[serde(skip_serializing)]
|
||||
@@ -486,6 +512,18 @@ impl fmt::Display for CliValidators {
|
||||
writeln!(f, "{}", header)?;
|
||||
}
|
||||
|
||||
writeln!(f)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Average Stake-Weighted Skip Rate:",
|
||||
&format!("{:.2}%", self.average_stake_weighted_skip_rate,),
|
||||
)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Average Unweighted Skip Rate: ",
|
||||
&format!("{:.2}%", self.average_skip_rate),
|
||||
)?;
|
||||
|
||||
writeln!(f)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
@@ -733,6 +771,7 @@ pub struct CliEpochReward {
|
||||
pub post_balance: u64, // lamports
|
||||
pub percent_change: f64,
|
||||
pub apr: Option<f64>,
|
||||
pub commission: Option<u8>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
@@ -777,23 +816,27 @@ impl fmt::Display for CliKeyedEpochRewards {
|
||||
writeln!(f, "Epoch Rewards:")?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:<18} {:<18} {:>14} {:>14}",
|
||||
"Address", "Amount", "New Balance", "Percent Change", "APR"
|
||||
" {:<44} {:<18} {:<18} {:>14} {:>14} {:>10}",
|
||||
"Address", "Amount", "New Balance", "Percent Change", "APR", "Commission"
|
||||
)?;
|
||||
for keyed_reward in &self.rewards {
|
||||
match &keyed_reward.reward {
|
||||
Some(reward) => {
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} ◎{:<17.9} ◎{:<17.9} {:>13.2}% {}",
|
||||
" {:<44} ◎{:<17.9} ◎{:<17.9} {:>13.9}% {:>14} {:>10}",
|
||||
keyed_reward.address,
|
||||
lamports_to_sol(reward.amount),
|
||||
lamports_to_sol(reward.post_balance),
|
||||
reward.percent_change,
|
||||
reward
|
||||
.apr
|
||||
.map(|apr| format!("{:>13.2}%", apr))
|
||||
.map(|apr| format!("{:.2}%", apr))
|
||||
.unwrap_or_default(),
|
||||
reward
|
||||
.commission
|
||||
.map(|commission| format!("{}%", commission))
|
||||
.unwrap_or_else(|| "-".to_string())
|
||||
)?;
|
||||
}
|
||||
None => {
|
||||
@@ -910,13 +953,13 @@ fn show_epoch_rewards(
|
||||
writeln!(f, "Epoch Rewards:")?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<6} {:<11} {:<18} {:<18} {:>14} {:>14}",
|
||||
"Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR"
|
||||
" {:<6} {:<11} {:<18} {:<18} {:>14} {:>14} {:>10}",
|
||||
"Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR", "Commission"
|
||||
)?;
|
||||
for reward in epoch_rewards {
|
||||
writeln!(
|
||||
f,
|
||||
" {:<6} {:<11} ◎{:<17.9} ◎{:<17.9} {:>13.2}% {}",
|
||||
" {:<6} {:<11} ◎{:<17.9} ◎{:<17.9} {:>13.9}% {:>14} {:>10}",
|
||||
reward.epoch,
|
||||
reward.effective_slot,
|
||||
lamports_to_sol(reward.amount),
|
||||
@@ -924,8 +967,12 @@ fn show_epoch_rewards(
|
||||
reward.percent_change,
|
||||
reward
|
||||
.apr
|
||||
.map(|apr| format!("{:>13.2}%", apr))
|
||||
.map(|apr| format!("{:.2}%", apr))
|
||||
.unwrap_or_default(),
|
||||
reward
|
||||
.commission
|
||||
.map(|commission| format!("{}%", commission))
|
||||
.unwrap_or_else(|| "-".to_string())
|
||||
)?;
|
||||
}
|
||||
}
|
||||
@@ -1669,6 +1716,7 @@ pub struct CliFeesInner {
|
||||
pub blockhash: String,
|
||||
pub lamports_per_signature: u64,
|
||||
pub last_valid_slot: Option<Slot>,
|
||||
pub last_valid_block_height: Option<Slot>,
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliFeesInner {}
|
||||
@@ -1682,11 +1730,11 @@ impl fmt::Display for CliFeesInner {
|
||||
"Lamports per signature:",
|
||||
&self.lamports_per_signature.to_string(),
|
||||
)?;
|
||||
let last_valid_slot = self
|
||||
.last_valid_slot
|
||||
let last_valid_block_height = self
|
||||
.last_valid_block_height
|
||||
.map(|s| s.to_string())
|
||||
.unwrap_or_default();
|
||||
writeln_name_value(f, "Last valid slot:", &last_valid_slot)
|
||||
writeln_name_value(f, "Last valid block height:", &last_valid_block_height)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1715,6 +1763,7 @@ impl CliFees {
|
||||
blockhash: Hash,
|
||||
lamports_per_signature: u64,
|
||||
last_valid_slot: Option<Slot>,
|
||||
last_valid_block_height: Option<Slot>,
|
||||
) -> Self {
|
||||
Self {
|
||||
inner: Some(CliFeesInner {
|
||||
@@ -1722,6 +1771,7 @@ impl CliFees {
|
||||
blockhash: blockhash.to_string(),
|
||||
lamports_per_signature,
|
||||
last_valid_slot,
|
||||
last_valid_block_height,
|
||||
}),
|
||||
}
|
||||
}
|
||||
@@ -2136,8 +2186,8 @@ impl fmt::Display for CliBlock {
|
||||
writeln!(f, "Rewards:")?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:^15} {:<15} {:<20} {:>14}",
|
||||
"Address", "Type", "Amount", "New Balance", "Percent Change"
|
||||
" {:<44} {:^15} {:<15} {:<20} {:>14} {:>10}",
|
||||
"Address", "Type", "Amount", "New Balance", "Percent Change", "Commission"
|
||||
)?;
|
||||
for reward in rewards {
|
||||
let sign = if reward.lamports < 0 { "-" } else { "" };
|
||||
@@ -2145,7 +2195,7 @@ impl fmt::Display for CliBlock {
|
||||
total_rewards += reward.lamports;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:^15} {:>15} {}",
|
||||
" {:<44} {:^15} {:>15} {} {}",
|
||||
reward.pubkey,
|
||||
if let Some(reward_type) = reward.reward_type {
|
||||
format!("{}", reward_type)
|
||||
@@ -2167,7 +2217,11 @@ impl fmt::Display for CliBlock {
|
||||
/ (reward.post_balance as f64 - reward.lamports as f64))
|
||||
* 100.0
|
||||
)
|
||||
}
|
||||
},
|
||||
reward
|
||||
.commission
|
||||
.map(|commission| format!("{:>9}%", commission))
|
||||
.unwrap_or_else(|| " -".to_string())
|
||||
)?;
|
||||
}
|
||||
|
||||
@@ -2408,6 +2462,10 @@ mod tests {
|
||||
fn try_sign_message(&self, _message: &[u8]) -> Result<Signature, SignerError> {
|
||||
Ok(Signature::new(&[1u8; 64]))
|
||||
}
|
||||
|
||||
fn is_interactive(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
let present: Box<dyn Signer> = Box::new(keypair_from_seed(&[2u8; 32]).unwrap());
|
||||
|
@@ -5,7 +5,7 @@ use {
|
||||
indicatif::{ProgressBar, ProgressStyle},
|
||||
solana_sdk::{
|
||||
clock::UnixTimestamp, hash::Hash, message::Message, native_token::lamports_to_sol,
|
||||
program_utils::limited_deserialize, pubkey::Pubkey, stake, transaction::Transaction,
|
||||
program_utils::limited_deserialize, pubkey::Pubkey, transaction::Transaction,
|
||||
},
|
||||
solana_transaction_status::UiTransactionStatusMeta,
|
||||
spl_memo::id as spl_memo_id,
|
||||
@@ -244,9 +244,10 @@ pub fn write_transaction<W: io::Write>(
|
||||
writeln!(w, "{} {:?}", prefix, vote_instruction)?;
|
||||
raw = false;
|
||||
}
|
||||
} else if program_pubkey == stake::program::id() {
|
||||
if let Ok(stake_instruction) =
|
||||
limited_deserialize::<stake::instruction::StakeInstruction>(&instruction.data)
|
||||
} else if program_pubkey == solana_stake_program::id() {
|
||||
if let Ok(stake_instruction) = limited_deserialize::<
|
||||
solana_stake_program::stake_instruction::StakeInstruction,
|
||||
>(&instruction.data)
|
||||
{
|
||||
writeln!(w, "{} {:?}", prefix, stake_instruction)?;
|
||||
raw = false;
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.7.2"
|
||||
version = "1.6.18"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -16,7 +16,8 @@ chrono = { version = "0.4.11", features = ["serde"] }
|
||||
clap = "2.33.1"
|
||||
criterion-stats = "0.3.0"
|
||||
ctrlc = { version = "3.1.5", features = ["termination"] }
|
||||
console = "0.14.1"
|
||||
console = "0.11.3"
|
||||
const_format = "0.2.14"
|
||||
dirs-next = "2.0.0"
|
||||
log = "0.4.11"
|
||||
Inflector = "0.11.4"
|
||||
@@ -28,29 +29,30 @@ reqwest = { version = "0.11.2", default-features = false, features = ["blocking"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.2" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.2" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.7.2" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.7.2" }
|
||||
solana-client = { path = "../client", version = "=1.7.2" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.7.2" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.2" }
|
||||
solana_rbpf = "=0.2.11"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.2" }
|
||||
solana-version = { path = "../version", version = "=1.7.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.2" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.6.18" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.6.18" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.6.18" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.6.18" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.6.18" }
|
||||
solana-client = { path = "../client", version = "=1.6.18" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.6.18" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.6.18" }
|
||||
solana-logger = { path = "../logger", version = "=1.6.18" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.6.18" }
|
||||
solana_rbpf = "=0.2.9"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.18" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.6.18" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.6.18" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.6.18" }
|
||||
solana-version = { path = "../version", version = "=1.6.18" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.6.18" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
thiserror = "1.0.21"
|
||||
tiny-bip39 = "0.7.0"
|
||||
url = "2.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "=1.7.2" }
|
||||
solana-core = { path = "../core", version = "=1.6.18" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -231,9 +231,18 @@ mod tests {
|
||||
mocks.insert(RpcRequest::GetBalance, account_balance_response);
|
||||
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
|
||||
|
||||
assert!(check_account_for_balance(&rpc_client, &pubkey, 1).unwrap());
|
||||
assert!(check_account_for_balance(&rpc_client, &pubkey, account_balance).unwrap());
|
||||
assert!(!check_account_for_balance(&rpc_client, &pubkey, account_balance + 1).unwrap());
|
||||
assert_eq!(
|
||||
check_account_for_balance(&rpc_client, &pubkey, 1).unwrap(),
|
||||
true
|
||||
);
|
||||
assert_eq!(
|
||||
check_account_for_balance(&rpc_client, &pubkey, account_balance).unwrap(),
|
||||
true
|
||||
);
|
||||
assert_eq!(
|
||||
check_account_for_balance(&rpc_client, &pubkey, account_balance + 1).unwrap(),
|
||||
false
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
122
cli/src/cli.rs
122
cli/src/cli.rs
@@ -25,15 +25,14 @@ use solana_cli_output::{
|
||||
};
|
||||
use solana_client::{
|
||||
blockhash_query::BlockhashQuery,
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
client_error::{ClientError, Result as ClientResult},
|
||||
nonce_utils,
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{
|
||||
RpcLargestAccountsFilter, RpcSendTransactionConfig, RpcTransactionConfig,
|
||||
RpcConfirmedTransactionConfig, RpcLargestAccountsFilter, RpcSendTransactionConfig,
|
||||
RpcTransactionLogsFilter,
|
||||
},
|
||||
rpc_request::{RpcError, RpcResponseErrorData},
|
||||
rpc_response::{RpcKeyedAccount, RpcSimulateTransactionResult},
|
||||
rpc_response::RpcKeyedAccount,
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
@@ -45,15 +44,14 @@ use solana_sdk::{
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Signature, Signer, SignerError},
|
||||
stake::{
|
||||
self,
|
||||
instruction::LockupArgs,
|
||||
state::{Lockup, StakeAuthorize},
|
||||
},
|
||||
system_instruction::{self, SystemError},
|
||||
system_program,
|
||||
transaction::{Transaction, TransactionError},
|
||||
};
|
||||
use solana_stake_program::{
|
||||
stake_instruction::LockupArgs,
|
||||
stake_state::{Lockup, StakeAuthorize},
|
||||
};
|
||||
use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding};
|
||||
use solana_vote_program::vote_state::VoteAuthorize;
|
||||
use std::{
|
||||
@@ -63,6 +61,7 @@ use std::{
|
||||
use thiserror::Error;
|
||||
|
||||
pub const DEFAULT_RPC_TIMEOUT_SECONDS: &str = "30";
|
||||
pub const DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS: &str = "5";
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
@@ -136,6 +135,8 @@ pub enum CliCommand {
|
||||
sort_order: CliValidatorsSortOrder,
|
||||
reverse_sort: bool,
|
||||
number_validators: bool,
|
||||
keep_unstaked_delinquents: bool,
|
||||
delinquent_slot_distance: Option<Slot>,
|
||||
},
|
||||
Supply {
|
||||
print_accounts: bool,
|
||||
@@ -278,7 +279,6 @@ pub enum CliCommand {
|
||||
memo: Option<String>,
|
||||
fee_payer: SignerIndex,
|
||||
custodian: Option<SignerIndex>,
|
||||
no_wait: bool,
|
||||
},
|
||||
StakeSetLockup {
|
||||
stake_account_pubkey: Pubkey,
|
||||
@@ -453,6 +453,7 @@ pub struct CliConfig<'a> {
|
||||
pub output_format: OutputFormat,
|
||||
pub commitment: CommitmentConfig,
|
||||
pub send_transaction_config: RpcSendTransactionConfig,
|
||||
pub confirm_transaction_initial_timeout: Duration,
|
||||
pub address_labels: HashMap<String, String>,
|
||||
}
|
||||
|
||||
@@ -597,6 +598,9 @@ impl Default for CliConfig<'_> {
|
||||
output_format: OutputFormat::Display,
|
||||
commitment: CommitmentConfig::confirmed(),
|
||||
send_transaction_config: RpcSendTransactionConfig::default(),
|
||||
confirm_transaction_initial_timeout: Duration::from_secs(
|
||||
u64::from_str(DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS).unwrap(),
|
||||
),
|
||||
address_labels: HashMap::new(),
|
||||
}
|
||||
}
|
||||
@@ -933,7 +937,7 @@ pub type ProcessResult = Result<String, Box<dyn std::error::Error>>;
|
||||
fn resolve_derived_address_program_id(matches: &ArgMatches<'_>, arg_name: &str) -> Option<Pubkey> {
|
||||
matches.value_of(arg_name).and_then(|v| match v {
|
||||
"NONCE" => Some(system_program::id()),
|
||||
"STAKE" => Some(stake::program::id()),
|
||||
"STAKE" => Some(solana_stake_program::id()),
|
||||
"VOTE" => Some(solana_vote_program::id()),
|
||||
_ => pubkey_of(matches, arg_name),
|
||||
})
|
||||
@@ -1043,9 +1047,9 @@ fn process_confirm(
|
||||
let mut transaction = None;
|
||||
let mut get_transaction_error = None;
|
||||
if config.verbose {
|
||||
match rpc_client.get_transaction_with_config(
|
||||
match rpc_client.get_confirmed_transaction_with_config(
|
||||
signature,
|
||||
RpcTransactionConfig {
|
||||
RpcConfirmedTransactionConfig {
|
||||
encoding: Some(UiTransactionEncoding::Base64),
|
||||
commitment: Some(CommitmentConfig::confirmed()),
|
||||
},
|
||||
@@ -1288,10 +1292,11 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
}
|
||||
|
||||
let rpc_client = if config.rpc_client.is_none() {
|
||||
Arc::new(RpcClient::new_with_timeout_and_commitment(
|
||||
Arc::new(RpcClient::new_with_timeouts_and_commitment(
|
||||
config.json_rpc_url.to_string(),
|
||||
config.rpc_timeout,
|
||||
config.commitment,
|
||||
config.confirm_transaction_initial_timeout,
|
||||
))
|
||||
} else {
|
||||
// Primarily for testing
|
||||
@@ -1391,6 +1396,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
sort_order,
|
||||
reverse_sort,
|
||||
number_validators,
|
||||
keep_unstaked_delinquents,
|
||||
delinquent_slot_distance,
|
||||
} => process_show_validators(
|
||||
&rpc_client,
|
||||
config,
|
||||
@@ -1398,6 +1405,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*sort_order,
|
||||
*reverse_sort,
|
||||
*number_validators,
|
||||
*keep_unstaked_delinquents,
|
||||
*delinquent_slot_distance,
|
||||
),
|
||||
CliCommand::Supply { print_accounts } => {
|
||||
process_supply(&rpc_client, config, *print_accounts)
|
||||
@@ -1682,7 +1691,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
memo,
|
||||
fee_payer,
|
||||
custodian,
|
||||
no_wait,
|
||||
} => process_stake_authorize(
|
||||
&rpc_client,
|
||||
config,
|
||||
@@ -1696,7 +1704,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*nonce_authority,
|
||||
memo.as_ref(),
|
||||
*fee_payer,
|
||||
*no_wait,
|
||||
),
|
||||
CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
@@ -1949,43 +1956,41 @@ pub fn request_and_confirm_airdrop(
|
||||
Ok(signature)
|
||||
}
|
||||
|
||||
fn common_error_adapter<E>(ix_error: &InstructionError) -> Option<E>
|
||||
where
|
||||
E: 'static + std::error::Error + DecodeError<E> + FromPrimitive,
|
||||
{
|
||||
if let InstructionError::Custom(code) = ix_error {
|
||||
E::decode_custom_error_to_enum(*code)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn log_instruction_custom_error<E>(
|
||||
result: ClientResult<Signature>,
|
||||
config: &CliConfig,
|
||||
) -> ProcessResult
|
||||
where
|
||||
E: 'static + std::error::Error + DecodeError<E> + FromPrimitive,
|
||||
{
|
||||
log_instruction_custom_error_ex::<E, _>(result, config, common_error_adapter)
|
||||
}
|
||||
|
||||
pub fn log_instruction_custom_error_ex<E, F>(
|
||||
result: ClientResult<Signature>,
|
||||
config: &CliConfig,
|
||||
error_adapter: F,
|
||||
) -> ProcessResult
|
||||
where
|
||||
E: 'static + std::error::Error + DecodeError<E> + FromPrimitive,
|
||||
F: Fn(&InstructionError) -> Option<E>,
|
||||
{
|
||||
match result {
|
||||
Err(err) => {
|
||||
// If transaction simulation returns a known Custom InstructionError, decode it
|
||||
if let ClientErrorKind::RpcError(RpcError::RpcResponseError {
|
||||
data:
|
||||
RpcResponseErrorData::SendTransactionPreflightFailure(
|
||||
RpcSimulateTransactionResult {
|
||||
err:
|
||||
Some(TransactionError::InstructionError(
|
||||
_,
|
||||
InstructionError::Custom(code),
|
||||
)),
|
||||
..
|
||||
},
|
||||
),
|
||||
..
|
||||
}) = err.kind()
|
||||
{
|
||||
if let Some(specific_error) = E::decode_custom_error_to_enum(*code) {
|
||||
return Err(specific_error.into());
|
||||
}
|
||||
}
|
||||
// If the transaction was instead submitted and returned a known Custom
|
||||
// InstructionError, decode it
|
||||
if let ClientErrorKind::TransactionError(TransactionError::InstructionError(
|
||||
_,
|
||||
InstructionError::Custom(code),
|
||||
)) = err.kind()
|
||||
{
|
||||
if let Some(specific_error) = E::decode_custom_error_to_enum(*code) {
|
||||
let maybe_tx_err = err.get_transaction_error();
|
||||
if let Some(TransactionError::InstructionError(_, ix_error)) = maybe_tx_err {
|
||||
if let Some(specific_error) = error_adapter(&ix_error) {
|
||||
return Err(specific_error.into());
|
||||
}
|
||||
}
|
||||
@@ -2023,22 +2028,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
.stake_subcommands()
|
||||
.subcommand(
|
||||
SubCommand::with_name("airdrop")
|
||||
.about("Request lamports")
|
||||
.arg(
|
||||
Arg::with_name("faucet_host")
|
||||
.long("faucet-host")
|
||||
.value_name("URL")
|
||||
.takes_value(true)
|
||||
.help("Faucet host to use [default: the --url host]"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("faucet_port")
|
||||
.long("faucet-port")
|
||||
.value_name("PORT_NUMBER")
|
||||
.takes_value(true)
|
||||
.default_value(solana_faucet::faucet::FAUCET_PORT_STR)
|
||||
.help("Faucet port to use"),
|
||||
)
|
||||
.about("Request SOL from a faucet")
|
||||
.arg(
|
||||
Arg::with_name("amount")
|
||||
.index(1)
|
||||
@@ -2250,7 +2240,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
)
|
||||
.offline_args()
|
||||
.nonce_args(false)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
.arg(fee_payer_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
@@ -2488,7 +2478,7 @@ mod tests {
|
||||
let from_pubkey = Some(solana_sdk::pubkey::new_rand());
|
||||
let from_str = from_pubkey.unwrap().to_string();
|
||||
for (name, program_id) in &[
|
||||
("STAKE", stake::program::id()),
|
||||
("STAKE", solana_stake_program::id()),
|
||||
("VOTE", solana_vote_program::id()),
|
||||
("NONCE", system_program::id()),
|
||||
] {
|
||||
@@ -2524,7 +2514,7 @@ mod tests {
|
||||
command: CliCommand::CreateAddressWithSeed {
|
||||
from_pubkey: None,
|
||||
seed: "seed".to_string(),
|
||||
program_id: stake::program::id(),
|
||||
program_id: solana_stake_program::id(),
|
||||
},
|
||||
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
|
||||
}
|
||||
@@ -2787,11 +2777,11 @@ mod tests {
|
||||
config.command = CliCommand::CreateAddressWithSeed {
|
||||
from_pubkey: Some(from_pubkey),
|
||||
seed: "seed".to_string(),
|
||||
program_id: stake::program::id(),
|
||||
program_id: solana_stake_program::id(),
|
||||
};
|
||||
let address = process_command(&config);
|
||||
let expected_address =
|
||||
Pubkey::create_with_seed(&from_pubkey, "seed", &stake::program::id()).unwrap();
|
||||
Pubkey::create_with_seed(&from_pubkey, "seed", &solana_stake_program::id()).unwrap();
|
||||
assert_eq!(address.unwrap(), expected_address.to_string());
|
||||
|
||||
// Need airdrop cases
|
||||
@@ -3178,7 +3168,7 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
derived_address_seed: Some(derived_address_seed),
|
||||
derived_address_program_id: Some(stake::program::id()),
|
||||
derived_address_program_id: Some(solana_stake_program::id()),
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),],
|
||||
}
|
||||
|
@@ -24,11 +24,12 @@ use solana_client::{
|
||||
pubsub_client::PubsubClient,
|
||||
rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient},
|
||||
rpc_config::{
|
||||
RpcAccountInfoConfig, RpcBlockConfig, RpcLargestAccountsConfig, RpcLargestAccountsFilter,
|
||||
RpcProgramAccountsConfig, RpcTransactionConfig, RpcTransactionLogsConfig,
|
||||
RpcTransactionLogsFilter,
|
||||
RpcAccountInfoConfig, RpcConfirmedBlockConfig, RpcConfirmedTransactionConfig,
|
||||
RpcGetVoteAccountsConfig, RpcLargestAccountsConfig, RpcLargestAccountsFilter,
|
||||
RpcProgramAccountsConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter,
|
||||
},
|
||||
rpc_filter,
|
||||
rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE,
|
||||
rpc_response::SlotInfo,
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
@@ -46,9 +47,7 @@ use solana_sdk::{
|
||||
rent::Rent,
|
||||
rpc_port::DEFAULT_RPC_PORT_STR,
|
||||
signature::Signature,
|
||||
slot_history,
|
||||
stake::{self, state::StakeState},
|
||||
system_instruction, system_program,
|
||||
slot_history, system_instruction, system_program,
|
||||
sysvar::{
|
||||
self,
|
||||
slot_history::SlotHistory,
|
||||
@@ -57,6 +56,7 @@ use solana_sdk::{
|
||||
timing,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_stake_program::stake_state::StakeState;
|
||||
use solana_transaction_status::UiTransactionEncoding;
|
||||
use solana_vote_program::vote_state::VoteState;
|
||||
use std::{
|
||||
@@ -176,7 +176,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.takes_value(true)
|
||||
.value_name("EPOCH")
|
||||
.validator(is_epoch)
|
||||
.help("Epoch to show leader schedule for. (default: current)")
|
||||
.help("Epoch to show leader schedule for. [default: current]")
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
@@ -382,6 +382,25 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
])
|
||||
.default_value("stake")
|
||||
.help("Sort order (does not affect JSON output)"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("keep_unstaked_delinquents")
|
||||
.long("keep-unstaked-delinquents")
|
||||
.takes_value(false)
|
||||
.help("Don't discard unstaked, delinquent validators")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("delinquent_slot_distance")
|
||||
.long("delinquent-slot-distance")
|
||||
.takes_value(true)
|
||||
.value_name("SLOT_DISTANCE")
|
||||
.validator(is_slot)
|
||||
.help(
|
||||
concatcp!(
|
||||
"Minimum slot distance from the tip to consider a validator delinquent. [default: ",
|
||||
DELINQUENT_VALIDATOR_SLOT_DISTANCE,
|
||||
"]",
|
||||
))
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
@@ -617,6 +636,8 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
let number_validators = matches.is_present("number");
|
||||
let reverse_sort = matches.is_present("reverse");
|
||||
let keep_unstaked_delinquents = matches.is_present("keep_unstaked_delinquents");
|
||||
let delinquent_slot_distance = value_of(matches, "delinquent_slot_distance");
|
||||
|
||||
let sort_order = match value_t_or_exit!(matches, "sort", String).as_str() {
|
||||
"delinquent" => CliValidatorsSortOrder::Delinquent,
|
||||
@@ -637,6 +658,8 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
|
||||
sort_order,
|
||||
reverse_sort,
|
||||
number_validators,
|
||||
keep_unstaked_delinquents,
|
||||
delinquent_slot_distance,
|
||||
},
|
||||
signers: vec![],
|
||||
})
|
||||
@@ -938,18 +961,19 @@ pub fn process_fees(
|
||||
*recent_blockhash,
|
||||
fee_calculator.lamports_per_signature,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
} else {
|
||||
CliFees::none()
|
||||
}
|
||||
} else {
|
||||
let result = rpc_client.get_recent_blockhash_with_commitment(config.commitment)?;
|
||||
let (recent_blockhash, fee_calculator, last_valid_slot) = result.value;
|
||||
let result = rpc_client.get_fees_with_commitment(config.commitment)?;
|
||||
CliFees::some(
|
||||
result.context.slot,
|
||||
recent_blockhash,
|
||||
fee_calculator.lamports_per_signature,
|
||||
Some(last_valid_slot),
|
||||
result.value.blockhash,
|
||||
result.value.fee_calculator.lamports_per_signature,
|
||||
None,
|
||||
Some(result.value.last_valid_block_height),
|
||||
)
|
||||
};
|
||||
Ok(config.output_format.formatted_string(&fees))
|
||||
@@ -1028,12 +1052,12 @@ pub fn process_get_block(
|
||||
};
|
||||
|
||||
let encoded_confirmed_block = rpc_client
|
||||
.get_block_with_config(
|
||||
.get_confirmed_block_with_config(
|
||||
slot,
|
||||
RpcBlockConfig {
|
||||
RpcConfirmedBlockConfig {
|
||||
encoding: Some(UiTransactionEncoding::Base64),
|
||||
commitment: Some(CommitmentConfig::confirmed()),
|
||||
..RpcBlockConfig::default()
|
||||
..RpcConfirmedBlockConfig::default()
|
||||
},
|
||||
)?
|
||||
.into();
|
||||
@@ -1076,9 +1100,15 @@ pub fn process_get_epoch_info(rpc_client: &RpcClient, config: &CliConfig) -> Pro
|
||||
(secs as u64).saturating_mul(1000).checked_div(slots)
|
||||
})
|
||||
.unwrap_or(clock::DEFAULT_MS_PER_SLOT);
|
||||
let start_block_time = rpc_client
|
||||
.get_block_time(epoch_info.absolute_slot - epoch_info.slot_index)
|
||||
.ok();
|
||||
let current_block_time = rpc_client.get_block_time(epoch_info.absolute_slot).ok();
|
||||
let epoch_info = CliEpochInfo {
|
||||
epoch_info,
|
||||
average_slot_time_ms,
|
||||
start_block_time,
|
||||
current_block_time,
|
||||
};
|
||||
Ok(config.output_format.formatted_string(&epoch_info))
|
||||
}
|
||||
@@ -1185,7 +1215,7 @@ pub fn process_show_block_production(
|
||||
start_slot = minimum_ledger_slot;
|
||||
}
|
||||
|
||||
let confirmed_blocks = rpc_client.get_blocks(start_slot, Some(end_slot))?;
|
||||
let confirmed_blocks = rpc_client.get_confirmed_blocks(start_slot, Some(end_slot))?;
|
||||
(confirmed_blocks, start_slot)
|
||||
};
|
||||
|
||||
@@ -1708,7 +1738,7 @@ pub fn process_show_stakes(
|
||||
}
|
||||
}
|
||||
let all_stake_accounts = rpc_client
|
||||
.get_program_accounts_with_config(&stake::program::id(), program_accounts_config)?;
|
||||
.get_program_accounts_with_config(&solana_stake_program::id(), program_accounts_config)?;
|
||||
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
|
||||
let clock_account = rpc_client.get_account(&sysvar::clock::id())?;
|
||||
let clock: Clock = from_account(&clock_account).ok_or_else(|| {
|
||||
@@ -1786,11 +1816,17 @@ pub fn process_show_validators(
|
||||
validators_sort_order: CliValidatorsSortOrder,
|
||||
validators_reverse_sort: bool,
|
||||
number_validators: bool,
|
||||
keep_unstaked_delinquents: bool,
|
||||
delinquent_slot_distance: Option<Slot>,
|
||||
) -> ProcessResult {
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message("Fetching vote accounts...");
|
||||
let epoch_info = rpc_client.get_epoch_info()?;
|
||||
let vote_accounts = rpc_client.get_vote_accounts()?;
|
||||
let vote_accounts = rpc_client.get_vote_accounts_with_config(RpcGetVoteAccountsConfig {
|
||||
keep_unstaked_delinquents: Some(keep_unstaked_delinquents),
|
||||
delinquent_slot_distance,
|
||||
..RpcGetVoteAccountsConfig::default()
|
||||
})?;
|
||||
|
||||
progress_bar.set_message("Fetching block production...");
|
||||
let skip_rate: HashMap<_, _> = rpc_client
|
||||
@@ -1889,14 +1925,40 @@ pub fn process_show_validators(
|
||||
entry.delinquent_active_stake += validator.activated_stake;
|
||||
}
|
||||
|
||||
let validators: Vec<_> = current_validators
|
||||
.into_iter()
|
||||
.chain(delinquent_validators.into_iter())
|
||||
.collect();
|
||||
|
||||
let (average_skip_rate, average_stake_weighted_skip_rate) = {
|
||||
let mut skip_rate_len = 0;
|
||||
let mut skip_rate_sum = 0.;
|
||||
let mut skip_rate_weighted_sum = 0.;
|
||||
for validator in validators.iter() {
|
||||
if let Some(skip_rate) = validator.skip_rate {
|
||||
skip_rate_sum += skip_rate;
|
||||
skip_rate_len += 1;
|
||||
skip_rate_weighted_sum += skip_rate * validator.activated_stake as f64;
|
||||
}
|
||||
}
|
||||
|
||||
if skip_rate_len > 0 && total_active_stake > 0 {
|
||||
(
|
||||
skip_rate_sum / skip_rate_len as f64,
|
||||
skip_rate_weighted_sum / total_active_stake as f64,
|
||||
)
|
||||
} else {
|
||||
(100., 100.) // Impossible?
|
||||
}
|
||||
};
|
||||
|
||||
let cli_validators = CliValidators {
|
||||
total_active_stake,
|
||||
total_current_stake,
|
||||
total_delinquent_stake,
|
||||
validators: current_validators
|
||||
.into_iter()
|
||||
.chain(delinquent_validators.into_iter())
|
||||
.collect(),
|
||||
validators,
|
||||
average_skip_rate,
|
||||
average_stake_weighted_skip_rate,
|
||||
validators_sort_order,
|
||||
validators_reverse_sort,
|
||||
number_validators,
|
||||
@@ -1915,7 +1977,7 @@ pub fn process_transaction_history(
|
||||
limit: usize,
|
||||
show_transactions: bool,
|
||||
) -> ProcessResult {
|
||||
let results = rpc_client.get_signatures_for_address_with_config(
|
||||
let results = rpc_client.get_confirmed_signatures_for_address2_with_config(
|
||||
address,
|
||||
GetConfirmedSignaturesForAddress2Config {
|
||||
before,
|
||||
@@ -1954,9 +2016,9 @@ pub fn process_transaction_history(
|
||||
|
||||
if show_transactions {
|
||||
if let Ok(signature) = result.signature.parse::<Signature>() {
|
||||
match rpc_client.get_transaction_with_config(
|
||||
match rpc_client.get_confirmed_transaction_with_config(
|
||||
&signature,
|
||||
RpcTransactionConfig {
|
||||
RpcConfirmedTransactionConfig {
|
||||
encoding: Some(UiTransactionEncoding::Base64),
|
||||
commitment: Some(CommitmentConfig::confirmed()),
|
||||
},
|
||||
|
@@ -10,6 +10,7 @@ use solana_cli_output::{QuietDisplay, VerboseDisplay};
|
||||
use solana_client::{client_error::ClientError, rpc_client::RpcClient};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::Slot,
|
||||
feature::{self, Feature},
|
||||
feature_set::FEATURE_NAMES,
|
||||
@@ -312,6 +313,31 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
|
||||
Ok(feature_activation_allowed)
|
||||
}
|
||||
|
||||
fn status_from_account(account: Account) -> Option<CliFeatureStatus> {
|
||||
feature::from_account(&account).map(|feature| match feature.activated_at {
|
||||
None => CliFeatureStatus::Pending,
|
||||
Some(activation_slot) => CliFeatureStatus::Active(activation_slot),
|
||||
})
|
||||
}
|
||||
|
||||
fn get_feature_status(
|
||||
rpc_client: &RpcClient,
|
||||
feature_id: &Pubkey,
|
||||
) -> Result<Option<CliFeatureStatus>, Box<dyn std::error::Error>> {
|
||||
rpc_client
|
||||
.get_account(feature_id)
|
||||
.map(status_from_account)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
pub fn get_feature_is_active(
|
||||
rpc_client: &RpcClient,
|
||||
feature_id: &Pubkey,
|
||||
) -> Result<bool, Box<dyn std::error::Error>> {
|
||||
get_feature_status(rpc_client, feature_id)
|
||||
.map(|status| matches!(status, Some(CliFeatureStatus::Active(_))))
|
||||
}
|
||||
|
||||
fn process_status(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
@@ -327,11 +353,7 @@ fn process_status(
|
||||
let feature_id = &feature_ids[i];
|
||||
let feature_name = FEATURE_NAMES.get(feature_id).unwrap();
|
||||
if let Some(account) = account {
|
||||
if let Some(feature) = feature::from_account(&account) {
|
||||
let feature_status = match feature.activated_at {
|
||||
None => CliFeatureStatus::Pending,
|
||||
Some(activation_slot) => CliFeatureStatus::Active(activation_slot),
|
||||
};
|
||||
if let Some(feature_status) = status_from_account(account) {
|
||||
features.push(CliFeature {
|
||||
id: feature_id.to_string(),
|
||||
description: feature_name.to_string(),
|
||||
|
@@ -10,6 +10,7 @@ macro_rules! ACCOUNT_STRING {
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_use]
|
||||
macro_rules! pubkey {
|
||||
($arg:expr, $help:expr) => {
|
||||
$arg.takes_value(true)
|
||||
@@ -18,6 +19,9 @@ macro_rules! pubkey {
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_use]
|
||||
extern crate const_format;
|
||||
|
||||
extern crate serde_derive;
|
||||
|
||||
pub mod checks;
|
||||
|
@@ -10,7 +10,7 @@ use solana_clap_utils::{
|
||||
};
|
||||
use solana_cli::cli::{
|
||||
app, parse_command, process_command, CliCommandInfo, CliConfig, SettingType,
|
||||
DEFAULT_RPC_TIMEOUT_SECONDS,
|
||||
DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS, DEFAULT_RPC_TIMEOUT_SECONDS,
|
||||
};
|
||||
use solana_cli_config::{Config, CONFIG_FILE};
|
||||
use solana_cli_output::{display::println_name_value, OutputFormat};
|
||||
@@ -167,6 +167,11 @@ pub fn parse_args<'a>(
|
||||
let rpc_timeout = value_t_or_exit!(matches, "rpc_timeout", u64);
|
||||
let rpc_timeout = Duration::from_secs(rpc_timeout);
|
||||
|
||||
let confirm_transaction_initial_timeout =
|
||||
value_t_or_exit!(matches, "confirm_transaction_initial_timeout", u64);
|
||||
let confirm_transaction_initial_timeout =
|
||||
Duration::from_secs(confirm_transaction_initial_timeout);
|
||||
|
||||
let (_, websocket_url) = CliConfig::compute_websocket_url_setting(
|
||||
matches.value_of("websocket_url").unwrap_or(""),
|
||||
&config.websocket_url,
|
||||
@@ -235,6 +240,7 @@ pub fn parse_args<'a>(
|
||||
preflight_commitment: Some(commitment.commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
confirm_transaction_initial_timeout,
|
||||
address_labels,
|
||||
},
|
||||
signers,
|
||||
@@ -350,6 +356,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
.hidden(true)
|
||||
.help("Timeout value for RPC requests"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("confirm_transaction_initial_timeout")
|
||||
.long("confirm-timeout")
|
||||
.value_name("SECONDS")
|
||||
.takes_value(true)
|
||||
.default_value(DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS)
|
||||
.global(true)
|
||||
.hidden(true)
|
||||
.help("Timeout value for initial transaction status"),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("config")
|
||||
.about("Solana command-line tool configuration settings")
|
||||
|
105
cli/src/nonce.rs
105
cli/src/nonce.rs
@@ -1,9 +1,10 @@
|
||||
use crate::{
|
||||
checks::{check_account_for_fee_with_commitment, check_unique_pubkeys},
|
||||
cli::{
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
|
||||
ProcessResult,
|
||||
log_instruction_custom_error, log_instruction_custom_error_ex, CliCommand, CliCommandInfo,
|
||||
CliConfig, CliError, ProcessResult,
|
||||
},
|
||||
feature::get_feature_is_active,
|
||||
memo::WithMemo,
|
||||
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
|
||||
};
|
||||
@@ -12,7 +13,7 @@ use solana_clap_utils::{
|
||||
input_parsers::*,
|
||||
input_validators::*,
|
||||
keypair::{DefaultSigner, SignerIndex},
|
||||
memo::MEMO_ARG,
|
||||
memo::{memo_arg, MEMO_ARG},
|
||||
nonce::*,
|
||||
};
|
||||
use solana_cli_output::CliNonceAccount;
|
||||
@@ -20,16 +21,19 @@ use solana_client::{nonce_utils::*, rpc_client::RpcClient};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
feature_set::merge_nonce_error_into_system_error,
|
||||
hash::Hash,
|
||||
instruction::InstructionError,
|
||||
message::Message,
|
||||
nonce::{self, State},
|
||||
pubkey::Pubkey,
|
||||
system_instruction::{
|
||||
advance_nonce_account, authorize_nonce_account, create_nonce_account,
|
||||
create_nonce_account_with_seed, withdraw_nonce_account, NonceError, SystemError,
|
||||
create_nonce_account_with_seed, instruction_to_nonce_error, withdraw_nonce_account,
|
||||
NonceError, SystemError,
|
||||
},
|
||||
system_program,
|
||||
transaction::Transaction,
|
||||
transaction::{Transaction, TransactionError},
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -56,7 +60,8 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"Account to be granted authority of the nonce account. "),
|
||||
)
|
||||
.arg(nonce_authority_arg()),
|
||||
.arg(nonce_authority_arg())
|
||||
.arg(memo_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("create-nonce-account")
|
||||
@@ -91,7 +96,8 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.value_name("STRING")
|
||||
.takes_value(true)
|
||||
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey")
|
||||
),
|
||||
)
|
||||
.arg(memo_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("nonce")
|
||||
@@ -115,7 +121,8 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"Address of the nonce account. "),
|
||||
)
|
||||
.arg(nonce_authority_arg()),
|
||||
.arg(nonce_authority_arg())
|
||||
.arg(memo_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("nonce-account")
|
||||
@@ -161,7 +168,8 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.validator(is_amount)
|
||||
.help("The amount to withdraw from the nonce account, in SOL"),
|
||||
)
|
||||
.arg(nonce_authority_arg()),
|
||||
.arg(nonce_authority_arg())
|
||||
.arg(memo_arg()),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -363,8 +371,21 @@ pub fn process_authorize_nonce_account(
|
||||
&tx.message,
|
||||
config.commitment,
|
||||
)?;
|
||||
let merge_errors =
|
||||
get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<NonceError>(result, &config)
|
||||
|
||||
if merge_errors {
|
||||
log_instruction_custom_error::<SystemError>(result, config)
|
||||
} else {
|
||||
log_instruction_custom_error_ex::<NonceError, _>(result, config, |ix_error| {
|
||||
if let InstructionError::Custom(_) = ix_error {
|
||||
instruction_to_nonce_error(ix_error, merge_errors)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_create_nonce_account(
|
||||
@@ -448,8 +469,40 @@ pub fn process_create_nonce_account(
|
||||
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
let merge_errors =
|
||||
get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<SystemError>(result, &config)
|
||||
|
||||
let err_ix_index = if let Err(err) = &result {
|
||||
err.get_transaction_error().and_then(|tx_err| {
|
||||
if let TransactionError::InstructionError(ix_index, _) = tx_err {
|
||||
Some(ix_index)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
match err_ix_index {
|
||||
// SystemInstruction::InitializeNonceAccount failed
|
||||
Some(1) => {
|
||||
if merge_errors {
|
||||
log_instruction_custom_error::<SystemError>(result, config)
|
||||
} else {
|
||||
log_instruction_custom_error_ex::<NonceError, _>(result, config, |ix_error| {
|
||||
if let InstructionError::Custom(_) = ix_error {
|
||||
instruction_to_nonce_error(ix_error, merge_errors)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
// SystemInstruction::CreateAccount{,WithSeed} failed
|
||||
_ => log_instruction_custom_error::<SystemError>(result, config),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_get_nonce(
|
||||
@@ -502,8 +555,21 @@ pub fn process_new_nonce(
|
||||
&tx.message,
|
||||
config.commitment,
|
||||
)?;
|
||||
let merge_errors =
|
||||
get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<SystemError>(result, &config)
|
||||
|
||||
if merge_errors {
|
||||
log_instruction_custom_error::<SystemError>(result, config)
|
||||
} else {
|
||||
log_instruction_custom_error_ex::<NonceError, _>(result, config, |ix_error| {
|
||||
if let InstructionError::Custom(_) = ix_error {
|
||||
instruction_to_nonce_error(ix_error, merge_errors)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_show_nonce_account(
|
||||
@@ -565,8 +631,21 @@ pub fn process_withdraw_from_nonce_account(
|
||||
&tx.message,
|
||||
config.commitment,
|
||||
)?;
|
||||
let merge_errors =
|
||||
get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<NonceError>(result, &config)
|
||||
|
||||
if merge_errors {
|
||||
log_instruction_custom_error::<SystemError>(result, config)
|
||||
} else {
|
||||
log_instruction_custom_error_ex::<NonceError, _>(result, config, |ix_error| {
|
||||
if let InstructionError::Custom(_) = ix_error {
|
||||
instruction_to_nonce_error(ix_error, merge_errors)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
@@ -150,7 +150,7 @@ impl ProgramSubCommands for App<'_, '_> {
|
||||
pubkey!(Arg::with_name("program_id")
|
||||
.long("program-id")
|
||||
.value_name("PROGRAM_ID"),
|
||||
"Executable program's address, must be a keypair for initial deploys, can be a pubkey for upgrades \
|
||||
"Executable program's address, must be a signer for initial deploys, can be a pubkey for upgrades \
|
||||
[default: address of keypair at /path/to/program-keypair.json if present, otherwise a random address]"),
|
||||
)
|
||||
.arg(
|
||||
@@ -886,11 +886,6 @@ fn process_program_deploy(
|
||||
)?;
|
||||
|
||||
let result = if do_deploy {
|
||||
if program_signer.is_none() {
|
||||
return Err(
|
||||
"Initial deployments require a keypair be provided for the program id".into(),
|
||||
);
|
||||
}
|
||||
do_process_program_write_and_deploy(
|
||||
rpc_client.clone(),
|
||||
config,
|
||||
|
@@ -14,7 +14,7 @@ use solana_clap_utils::{
|
||||
input_parsers::*,
|
||||
input_validators::*,
|
||||
keypair::{DefaultSigner, SignerIndex},
|
||||
memo::MEMO_ARG,
|
||||
memo::{memo_arg, MEMO_ARG},
|
||||
nonce::*,
|
||||
offline::*,
|
||||
ArgConstant,
|
||||
@@ -36,11 +36,6 @@ use solana_sdk::{
|
||||
feature, feature_set,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
stake::{
|
||||
self,
|
||||
instruction::{self as stake_instruction, LockupArgs, StakeError},
|
||||
state::{Authorized, Lockup, Meta, StakeAuthorize, StakeState},
|
||||
},
|
||||
system_instruction::SystemError,
|
||||
sysvar::{
|
||||
clock,
|
||||
@@ -48,6 +43,10 @@ use solana_sdk::{
|
||||
},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_stake_program::{
|
||||
stake_instruction::{self, LockupArgs, StakeError},
|
||||
stake_state::{Authorized, Lockup, Meta, StakeAuthorize, StakeState},
|
||||
};
|
||||
use solana_vote_program::vote_state::VoteState;
|
||||
use std::{ops::Deref, sync::Arc};
|
||||
|
||||
@@ -179,6 +178,7 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.offline_args()
|
||||
.nonce_args(false)
|
||||
.arg(fee_payer_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("delegate-stake")
|
||||
@@ -208,6 +208,7 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.offline_args()
|
||||
.nonce_args(false)
|
||||
.arg(fee_payer_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("stake-authorize")
|
||||
@@ -239,12 +240,7 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.nonce_args(false)
|
||||
.arg(fee_payer_arg())
|
||||
.arg(custodian_arg())
|
||||
.arg(
|
||||
Arg::with_name("no_wait")
|
||||
.long("no-wait")
|
||||
.takes_value(false)
|
||||
.help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("deactivate-stake")
|
||||
@@ -268,6 +264,7 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.offline_args()
|
||||
.nonce_args(false)
|
||||
.arg(fee_payer_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("split-stake")
|
||||
@@ -309,6 +306,7 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.offline_args()
|
||||
.nonce_args(false)
|
||||
.arg(fee_payer_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("merge-stake")
|
||||
@@ -332,6 +330,7 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.offline_args()
|
||||
.nonce_args(false)
|
||||
.arg(fee_payer_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("withdraw-stake")
|
||||
@@ -372,6 +371,7 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.nonce_args(false)
|
||||
.arg(fee_payer_arg())
|
||||
.arg(custodian_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("stake-set-lockup")
|
||||
@@ -419,6 +419,7 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.offline_args()
|
||||
.nonce_args(false)
|
||||
.arg(fee_payer_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("stake-account")
|
||||
@@ -634,7 +635,6 @@ pub fn parse_stake_authorize(
|
||||
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
|
||||
let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?;
|
||||
let (custodian, custodian_pubkey) = signer_of(matches, "custodian", wallet_manager)?;
|
||||
let no_wait = matches.is_present("no_wait");
|
||||
|
||||
bulk_signers.push(fee_payer);
|
||||
if nonce_account.is_some() {
|
||||
@@ -671,7 +671,6 @@ pub fn parse_stake_authorize(
|
||||
memo,
|
||||
fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(),
|
||||
custodian: custodian_pubkey.and_then(|_| signer_info.index_of(custodian_pubkey)),
|
||||
no_wait,
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
@@ -972,7 +971,7 @@ pub fn process_create_stake_account(
|
||||
) -> ProcessResult {
|
||||
let stake_account = config.signers[stake_account];
|
||||
let stake_account_address = if let Some(seed) = seed {
|
||||
Pubkey::create_with_seed(&stake_account.pubkey(), &seed, &stake::program::id())?
|
||||
Pubkey::create_with_seed(&stake_account.pubkey(), &seed, &solana_stake_program::id())?
|
||||
} else {
|
||||
stake_account.pubkey()
|
||||
};
|
||||
@@ -1040,7 +1039,7 @@ pub fn process_create_stake_account(
|
||||
|
||||
if !sign_only {
|
||||
if let Ok(stake_account) = rpc_client.get_account(&stake_account_address) {
|
||||
let err_msg = if stake_account.owner == stake::program::id() {
|
||||
let err_msg = if stake_account.owner == solana_stake_program::id() {
|
||||
format!("Stake account {} already exists", stake_account_address)
|
||||
} else {
|
||||
format!(
|
||||
@@ -1103,7 +1102,6 @@ pub fn process_stake_authorize(
|
||||
nonce_authority: SignerIndex,
|
||||
memo: Option<&String>,
|
||||
fee_payer: SignerIndex,
|
||||
no_wait: bool,
|
||||
) -> ProcessResult {
|
||||
let mut ixs = Vec::new();
|
||||
let custodian = custodian.map(|index| config.signers[index]);
|
||||
@@ -1167,11 +1165,7 @@ pub fn process_stake_authorize(
|
||||
&tx.message,
|
||||
config.commitment,
|
||||
)?;
|
||||
let result = if no_wait {
|
||||
rpc_client.send_transaction(&tx)
|
||||
} else {
|
||||
rpc_client.send_and_confirm_transaction_with_spinner(&tx)
|
||||
};
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<StakeError>(result, &config)
|
||||
}
|
||||
}
|
||||
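A hedged sketch contrasting the two send paths touched in the hunk above (fire-and-forget vs. confirm-with-spinner); the endpoint, keypairs, and lamport amount are illustrative and the `no_wait` value stands in for the removed flag:

use solana_client::rpc_client::RpcClient;
use solana_sdk::{
    signature::{Keypair, Signer},
    system_transaction,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let from = Keypair::new();
    let to = Keypair::new();
    let (blockhash, _fee_calculator) = rpc_client.get_recent_blockhash()?;
    let tx = system_transaction::transfer(&from, &to.pubkey(), 1, blockhash);

    let no_wait = false; // illustrative stand-in for the removed --no-wait flag
    let signature = if no_wait {
        // Returns as soon as the node accepts the transaction.
        rpc_client.send_transaction(&tx)?
    } else {
        // Blocks, polling with a progress spinner until the transaction is confirmed.
        rpc_client.send_and_confirm_transaction_with_spinner(&tx)?
    };
    println!("signature: {}", signature);
    Ok(())
}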
@@ -1196,7 +1190,7 @@ pub fn process_deactivate_stake_account(
|
||||
let stake_authority = config.signers[stake_authority];
|
||||
|
||||
let stake_account_address = if let Some(seed) = seed {
|
||||
Pubkey::create_with_seed(&stake_account_pubkey, seed, &stake::program::id())?
|
||||
Pubkey::create_with_seed(&stake_account_pubkey, seed, &solana_stake_program::id())?
|
||||
} else {
|
||||
*stake_account_pubkey
|
||||
};
|
||||
@@ -1274,7 +1268,7 @@ pub fn process_withdraw_stake(
|
||||
let custodian = custodian.map(|index| config.signers[index]);
|
||||
|
||||
let stake_account_address = if let Some(seed) = seed {
|
||||
Pubkey::create_with_seed(&stake_account_pubkey, seed, &stake::program::id())?
|
||||
Pubkey::create_with_seed(&stake_account_pubkey, seed, &solana_stake_program::id())?
|
||||
} else {
|
||||
*stake_account_pubkey
|
||||
};
|
||||
@@ -1395,14 +1389,18 @@ pub fn process_split_stake(
|
||||
let stake_authority = config.signers[stake_authority];
|
||||
|
||||
let split_stake_account_address = if let Some(seed) = split_stake_account_seed {
|
||||
Pubkey::create_with_seed(&split_stake_account.pubkey(), &seed, &stake::program::id())?
|
||||
Pubkey::create_with_seed(
|
||||
&split_stake_account.pubkey(),
|
||||
&seed,
|
||||
&solana_stake_program::id(),
|
||||
)?
|
||||
} else {
|
||||
split_stake_account.pubkey()
|
||||
};
|
||||
|
||||
if !sign_only {
|
||||
if let Ok(stake_account) = rpc_client.get_account(&split_stake_account_address) {
|
||||
let err_msg = if stake_account.owner == stake::program::id() {
|
||||
let err_msg = if stake_account.owner == solana_stake_program::id() {
|
||||
format!(
|
||||
"Stake account {} already exists",
|
||||
split_stake_account_address
|
||||
@@ -1537,7 +1535,7 @@ pub fn process_merge_stake(
|
||||
if !sign_only {
|
||||
for stake_account_address in &[stake_account_pubkey, source_stake_account_pubkey] {
|
||||
if let Ok(stake_account) = rpc_client.get_account(stake_account_address) {
|
||||
if stake_account.owner != stake::program::id() {
|
||||
if stake_account.owner != solana_stake_program::id() {
|
||||
return Err(CliError::BadParameter(format!(
|
||||
"Account {} is not a stake account",
|
||||
stake_account_address
|
||||
@@ -1824,6 +1822,7 @@ pub fn make_cli_reward(
|
||||
post_balance: reward.post_balance,
|
||||
percent_change: rate_change * 100.0,
|
||||
apr: Some(apr * 100.0),
|
||||
commission: reward.commission,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
@@ -1873,7 +1872,7 @@ pub fn process_show_stake_account(
|
||||
with_rewards: Option<usize>,
|
||||
) -> ProcessResult {
|
||||
let stake_account = rpc_client.get_account(stake_account_address)?;
|
||||
if stake_account.owner != stake::program::id() {
|
||||
if stake_account.owner != solana_stake_program::id() {
|
||||
return Err(CliError::RpcRequestError(format!(
|
||||
"{:?} is not a stake account",
|
||||
stake_account_address,
|
||||
@@ -2158,7 +2157,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),],
|
||||
},
|
||||
@@ -2196,7 +2194,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -2238,7 +2235,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -2269,7 +2265,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),],
|
||||
},
|
||||
@@ -2297,7 +2292,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -2331,7 +2325,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -2366,7 +2359,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),],
|
||||
},
|
||||
@@ -2398,7 +2390,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -2409,35 +2400,6 @@ mod tests {
|
||||
},
|
||||
);
|
||||
|
||||
// Test Authorize Subcommand w/ no-wait
|
||||
let test_authorize = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"stake-authorize",
|
||||
&stake_account_string,
|
||||
"--new-stake-authority",
|
||||
&stake_account_string,
|
||||
"--no-wait",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize, &default_signer, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, stake_account_pubkey, 0)],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: true,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
|
||||
}
|
||||
);
|
||||
|
||||
// Test Authorize Subcommand w/ sign-only
|
||||
let blockhash = Hash::default();
|
||||
let blockhash_string = format!("{}", blockhash);
|
||||
@@ -2465,7 +2427,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
|
||||
}
|
||||
@@ -2505,7 +2466,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 1,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -2555,7 +2515,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 1,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -2591,7 +2550,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
|
||||
}
|
||||
@@ -2632,7 +2590,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -2669,7 +2626,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 1,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@@ -2710,7 +2666,6 @@ mod tests {
|
||||
memo: None,
|
||||
fee_payer: 1,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
|
@@ -82,7 +82,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.takes_value(true)
|
||||
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the VOTE ACCOUNT pubkey")
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-authorize-voter")
|
||||
@@ -109,7 +109,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"New authorized vote signer. "),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-authorize-withdrawer")
|
||||
@@ -136,7 +136,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"New authorized withdrawer. "),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-update-validator")
|
||||
@@ -166,7 +166,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.validator(is_valid_signer)
|
||||
.help("Authorized withdrawer keypair"),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-update-commission")
|
||||
@@ -196,7 +196,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.validator(is_valid_signer)
|
||||
.help("Authorized withdrawer keypair"),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-account")
|
||||
@@ -266,7 +266,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.validator(is_valid_signer)
|
||||
.help("Authorized withdrawer [default: cli config keypair]"),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@@ -72,7 +72,7 @@ fn test_cli_program_deploy_non_upgradeable() {
|
||||
let account0 = rpc_client.get_account(&program_id).unwrap();
|
||||
assert_eq!(account0.lamports, minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account0.owner, bpf_loader::id());
|
||||
assert!(account0.executable);
|
||||
assert_eq!(account0.executable, true);
|
||||
let mut file = File::open(pathbuf.to_str().unwrap().to_string()).unwrap();
|
||||
let mut elf = Vec::new();
|
||||
file.read_to_end(&mut elf).unwrap();
|
||||
@@ -93,7 +93,7 @@ fn test_cli_program_deploy_non_upgradeable() {
|
||||
.unwrap();
|
||||
assert_eq!(account1.lamports, minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account1.owner, bpf_loader::id());
|
||||
assert!(account1.executable);
|
||||
assert_eq!(account1.executable, true);
|
||||
assert_eq!(account1.data, account0.data);
|
||||
|
||||
// Attempt to redeploy to the same address
|
||||
@@ -129,7 +129,7 @@ fn test_cli_program_deploy_non_upgradeable() {
|
||||
.unwrap();
|
||||
assert_eq!(account2.lamports, 2 * minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account2.owner, bpf_loader::id());
|
||||
assert!(account2.executable);
|
||||
assert_eq!(account2.executable, true);
|
||||
assert_eq!(account2.data, account0.data);
|
||||
}
|
||||
|
||||
@@ -289,7 +289,7 @@ fn test_cli_program_deploy_with_authority() {
|
||||
let program_account = rpc_client.get_account(&program_keypair.pubkey()).unwrap();
|
||||
assert_eq!(program_account.lamports, minimum_balance_for_program);
|
||||
assert_eq!(program_account.owner, bpf_loader_upgradeable::id());
|
||||
assert!(program_account.executable);
|
||||
assert_eq!(program_account.executable, true);
|
||||
let (programdata_pubkey, _) = Pubkey::find_program_address(
|
||||
&[program_keypair.pubkey().as_ref()],
|
||||
&bpf_loader_upgradeable::id(),
|
||||
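The programdata address asserted on in the test above is a program-derived address computed from the program id. A minimal sketch of that derivation, with a made-up program id:

use solana_sdk::{bpf_loader_upgradeable, pubkey::Pubkey};

fn main() {
    let program_id = Pubkey::new_unique(); // placeholder program id
    // The ProgramData account of an upgradeable program lives at a PDA seeded by the program id.
    let (programdata_address, bump) =
        Pubkey::find_program_address(&[program_id.as_ref()], &bpf_loader_upgradeable::id());
    println!("programdata: {} (bump {})", programdata_address, bump);
}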
@@ -300,7 +300,7 @@ fn test_cli_program_deploy_with_authority() {
|
||||
minimum_balance_for_programdata
|
||||
);
|
||||
assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id());
|
||||
assert!(!programdata_account.executable);
|
||||
assert_eq!(programdata_account.executable, false);
|
||||
assert_eq!(
|
||||
programdata_account.data[UpgradeableLoaderState::programdata_data_offset().unwrap()..],
|
||||
program_data[..]
|
||||
@@ -332,7 +332,7 @@ fn test_cli_program_deploy_with_authority() {
|
||||
let program_account = rpc_client.get_account(&program_pubkey).unwrap();
|
||||
assert_eq!(program_account.lamports, minimum_balance_for_program);
|
||||
assert_eq!(program_account.owner, bpf_loader_upgradeable::id());
|
||||
assert!(program_account.executable);
|
||||
assert_eq!(program_account.executable, true);
|
||||
let (programdata_pubkey, _) =
|
||||
Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id());
|
||||
let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap();
|
||||
@@ -341,7 +341,7 @@ fn test_cli_program_deploy_with_authority() {
|
||||
minimum_balance_for_programdata
|
||||
);
|
||||
assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id());
|
||||
assert!(!programdata_account.executable);
|
||||
assert_eq!(programdata_account.executable, false);
|
||||
assert_eq!(
|
||||
programdata_account.data[UpgradeableLoaderState::programdata_data_offset().unwrap()..],
|
||||
program_data[..]
|
||||
@@ -364,7 +364,7 @@ fn test_cli_program_deploy_with_authority() {
|
||||
let program_account = rpc_client.get_account(&program_pubkey).unwrap();
|
||||
assert_eq!(program_account.lamports, minimum_balance_for_program);
|
||||
assert_eq!(program_account.owner, bpf_loader_upgradeable::id());
|
||||
assert!(program_account.executable);
|
||||
assert_eq!(program_account.executable, true);
|
||||
let (programdata_pubkey, _) =
|
||||
Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id());
|
||||
let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap();
|
||||
@@ -373,7 +373,7 @@ fn test_cli_program_deploy_with_authority() {
|
||||
minimum_balance_for_programdata
|
||||
);
|
||||
assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id());
|
||||
assert!(!programdata_account.executable);
|
||||
assert_eq!(programdata_account.executable, false);
|
||||
assert_eq!(
|
||||
programdata_account.data[UpgradeableLoaderState::programdata_data_offset().unwrap()..],
|
||||
program_data[..]
|
||||
@@ -418,7 +418,7 @@ fn test_cli_program_deploy_with_authority() {
|
||||
let program_account = rpc_client.get_account(&program_pubkey).unwrap();
|
||||
assert_eq!(program_account.lamports, minimum_balance_for_program);
|
||||
assert_eq!(program_account.owner, bpf_loader_upgradeable::id());
|
||||
assert!(program_account.executable);
|
||||
assert_eq!(program_account.executable, true);
|
||||
let (programdata_pubkey, _) =
|
||||
Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id());
|
||||
let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap();
|
||||
@@ -427,7 +427,7 @@ fn test_cli_program_deploy_with_authority() {
|
||||
minimum_balance_for_programdata
|
||||
);
|
||||
assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id());
|
||||
assert!(!programdata_account.executable);
|
||||
assert_eq!(programdata_account.executable, false);
|
||||
assert_eq!(
|
||||
programdata_account.data[UpgradeableLoaderState::programdata_data_offset().unwrap()..],
|
||||
program_data[..]
|
||||
|
@@ -17,11 +17,10 @@ use solana_sdk::{
    nonce::State as NonceState,
    pubkey::Pubkey,
    signature::{keypair_from_seed, Keypair, Signer},
    stake::{
        self,
        instruction::LockupArgs,
        state::{Lockup, StakeAuthorize, StakeState},
    },
};
use solana_stake_program::{
    stake_instruction::LockupArgs,
    stake_state::{Lockup, StakeAuthorize, StakeState},
};

#[test]
|
||||
@@ -140,7 +139,7 @@ fn test_seed_stake_delegation_and_deactivation() {
|
||||
let stake_address = Pubkey::create_with_seed(
|
||||
&config_validator.signers[0].pubkey(),
|
||||
"hi there",
|
||||
&stake::program::id(),
|
||||
&solana_stake_program::id(),
|
||||
)
|
||||
.expect("bad seed");
|
||||
|
||||
@@ -609,7 +608,6 @@ fn test_stake_authorize() {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
@@ -640,7 +638,6 @@ fn test_stake_authorize() {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
@@ -666,7 +663,6 @@ fn test_stake_authorize() {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
@@ -692,7 +688,6 @@ fn test_stake_authorize() {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
config_offline.output_format = OutputFormat::JsonCompact;
|
||||
let sign_reply = process_command(&config_offline).unwrap();
|
||||
@@ -711,7 +706,6 @@ fn test_stake_authorize() {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
@@ -762,7 +756,6 @@ fn test_stake_authorize() {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
let sign_reply = process_command(&config_offline).unwrap();
|
||||
let sign_only = parse_sign_only_reply_string(&sign_reply);
|
||||
@@ -785,7 +778,6 @@ fn test_stake_authorize() {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
@@ -889,7 +881,6 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
memo: None,
|
||||
fee_payer: 1,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config` balance has not changed, despite submitting the TX
|
||||
@@ -911,7 +902,6 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
config_offline.output_format = OutputFormat::JsonCompact;
|
||||
let sign_reply = process_command(&config_offline).unwrap();
|
||||
@@ -930,7 +920,6 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config`'s balance again has not changed
|
||||
@@ -1558,6 +1547,6 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let seed_address =
|
||||
Pubkey::create_with_seed(&stake_pubkey, seed, &stake::program::id()).unwrap();
|
||||
Pubkey::create_with_seed(&stake_pubkey, seed, &solana_stake_program::id()).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &seed_address);
|
||||
}
|
||||
|
@@ -16,7 +16,6 @@ use solana_sdk::{
|
||||
nonce::State as NonceState,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, NullSigner, Signer},
|
||||
stake,
|
||||
};
|
||||
|
||||
#[test]
|
||||
@@ -514,7 +513,7 @@ fn test_transfer_with_seed() {
|
||||
let sender_pubkey = config.signers[0].pubkey();
|
||||
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
let derived_address_seed = "seed".to_string();
|
||||
let derived_address_program_id = stake::program::id();
|
||||
let derived_address_program_id = solana_stake_program::id();
|
||||
let derived_address = Pubkey::create_with_seed(
|
||||
&sender_pubkey,
|
||||
&derived_address_seed,
|
||||
|
@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.7.2"
version = "1.6.18"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -24,14 +24,14 @@ semver = "0.11.0"
serde = "1.0.122"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "=1.7.2" }
solana-clap-utils = { path = "../clap-utils", version = "=1.7.2" }
solana-faucet = { path = "../faucet", version = "=1.7.2" }
solana-net-utils = { path = "../net-utils", version = "=1.7.2" }
solana-sdk = { path = "../sdk", version = "=1.7.2" }
solana-transaction-status = { path = "../transaction-status", version = "=1.7.2" }
solana-version = { path = "../version", version = "=1.7.2" }
solana-vote-program = { path = "../programs/vote", version = "=1.7.2" }
solana-account-decoder = { path = "../account-decoder", version = "=1.6.18" }
solana-clap-utils = { path = "../clap-utils", version = "=1.6.18" }
solana-faucet = { path = "../faucet", version = "=1.6.18" }
solana-net-utils = { path = "../net-utils", version = "=1.6.18" }
solana-sdk = { path = "../sdk", version = "=1.6.18" }
solana-transaction-status = { path = "../transaction-status", version = "=1.6.18" }
solana-version = { path = "../version", version = "=1.6.18" }
solana-vote-program = { path = "../programs/vote", version = "=1.6.18" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tungstenite = "0.10.1"
@@ -40,7 +40,7 @@ url = "2.1.1"
[dev-dependencies]
assert_matches = "1.3.0"
jsonrpc-http-server = "17.0.0"
solana-logger = { path = "../logger", version = "=1.7.2" }
solana-logger = { path = "../logger", version = "=1.6.18" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
|
@@ -1,5 +1,5 @@
use {
    crate::rpc_request,
    crate::{rpc_request, rpc_response},
    solana_faucet::faucet::FaucetError,
    solana_sdk::{
        signature::SignerError, transaction::TransactionError, transport::TransportError,
@@ -30,6 +30,24 @@ pub enum ClientErrorKind {
    Custom(String),
}

impl ClientErrorKind {
    pub fn get_transaction_error(&self) -> Option<TransactionError> {
        match self {
            Self::RpcError(rpc_request::RpcError::RpcResponseError {
                data:
                    rpc_request::RpcResponseErrorData::SendTransactionPreflightFailure(
                        rpc_response::RpcSimulateTransactionResult {
                            err: Some(tx_err), ..
                        },
                    ),
                ..
            }) => Some(tx_err.clone()),
            Self::TransactionError(tx_err) => Some(tx_err.clone()),
            _ => None,
        }
    }
}

impl From<TransportError> for ClientErrorKind {
    fn from(err: TransportError) -> Self {
        match err {
@@ -86,6 +104,10 @@ impl ClientError {
    pub fn kind(&self) -> &ClientErrorKind {
        &self.kind
    }

    pub fn get_transaction_error(&self) -> Option<TransactionError> {
        self.kind.get_transaction_error()
    }
}

impl From<ClientErrorKind> for ClientError {
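A hedged sketch of how a caller might use the `get_transaction_error` helper added above to recover the underlying `TransactionError` (for example, from a preflight simulation failure). The endpoint, keypairs, and amount are illustrative:

use solana_client::rpc_client::RpcClient;
use solana_sdk::{
    signature::{Keypair, Signer},
    system_transaction,
};

fn main() {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let from = Keypair::new();
    let to = Keypair::new();
    let blockhash = rpc_client
        .get_recent_blockhash()
        .map(|(hash, _)| hash)
        .unwrap_or_default();
    let tx = system_transaction::transfer(&from, &to.pubkey(), 1, blockhash);

    if let Err(client_error) = rpc_client.send_and_confirm_transaction(&tx) {
        // Prefer the typed transaction error when one is available.
        if let Some(tx_err) = client_error.get_transaction_error() {
            eprintln!("transaction failed: {:?}", tx_err);
        } else {
            eprintln!("client error: {:?}", client_error);
        }
    }
}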
|
client/src/fees.rs (new file, 9 lines)
@@ -0,0 +1,9 @@
use crate::{fee_calculator::FeeCalculator, hash::Hash};

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Fees {
    pub blockhash: Hash,
    pub fee_calculator: FeeCalculator,
    pub last_valid_block_height: u64,
}
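Because of `rename_all = "camelCase"`, the new `Fees` struct serializes with camelCase field names. A simplified, hedged sketch of that behavior (the struct below is a stand-in that replaces `Hash` with a string and omits `fee_calculator` for brevity; it assumes `serde` with the derive feature and `serde_json` as dependencies):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Fees {
    blockhash: String,
    last_valid_block_height: u64,
}

fn main() {
    let fees = Fees {
        blockhash: "11111111111111111111111111111111".to_string(),
        last_valid_block_height: 123,
    };
    // Prints {"blockhash":"...","lastValidBlockHeight":123}: snake_case fields become camelCase.
    println!("{}", serde_json::to_string(&fees).unwrap());
}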
@@ -13,7 +13,6 @@ pub mod rpc_cache;
pub mod rpc_client;
pub mod rpc_config;
pub mod rpc_custom_error;
pub mod rpc_deprecated_config;
pub mod rpc_filter;
pub mod rpc_request;
pub mod rpc_response;
@@ -1,8 +1,3 @@
|
||||
#[allow(deprecated)]
|
||||
use crate::rpc_deprecated_config::{
|
||||
RpcConfirmedBlockConfig, RpcConfirmedTransactionConfig,
|
||||
RpcGetConfirmedSignaturesForAddress2Config,
|
||||
};
|
||||
use {
|
||||
crate::{
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
@@ -49,41 +44,36 @@ use {
|
||||
},
|
||||
};
|
||||
|
||||
pub struct RpcClient {
|
||||
sender: Box<dyn RpcSender + Send + Sync + 'static>,
|
||||
#[derive(Default)]
|
||||
pub struct RpcClientConfig {
|
||||
commitment_config: CommitmentConfig,
|
||||
node_version: RwLock<Option<semver::Version>>,
|
||||
confirm_transaction_initial_timeout: Option<Duration>,
|
||||
}
|
||||
|
||||
fn serialize_encode_transaction(
|
||||
transaction: &Transaction,
|
||||
encoding: UiTransactionEncoding,
|
||||
) -> ClientResult<String> {
|
||||
let serialized = serialize(transaction)
|
||||
.map_err(|e| ClientErrorKind::Custom(format!("transaction serialization failed: {}", e)))?;
|
||||
let encoded = match encoding {
|
||||
UiTransactionEncoding::Base58 => bs58::encode(serialized).into_string(),
|
||||
UiTransactionEncoding::Base64 => base64::encode(serialized),
|
||||
_ => {
|
||||
return Err(ClientErrorKind::Custom(format!(
|
||||
"unsupported transaction encoding: {}. Supported encodings: base58, base64",
|
||||
encoding
|
||||
))
|
||||
.into())
|
||||
impl RpcClientConfig {
|
||||
fn with_commitment(commitment_config: CommitmentConfig) -> Self {
|
||||
RpcClientConfig {
|
||||
commitment_config,
|
||||
..Self::default()
|
||||
}
|
||||
};
|
||||
Ok(encoded)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RpcClient {
|
||||
sender: Box<dyn RpcSender + Send + Sync + 'static>,
|
||||
config: RpcClientConfig,
|
||||
node_version: RwLock<Option<semver::Version>>,
|
||||
}
|
||||
|
||||
impl RpcClient {
|
||||
fn new_sender<T: RpcSender + Send + Sync + 'static>(
|
||||
sender: T,
|
||||
commitment_config: CommitmentConfig,
|
||||
config: RpcClientConfig,
|
||||
) -> Self {
|
||||
Self {
|
||||
sender: Box::new(sender),
|
||||
node_version: RwLock::new(None),
|
||||
commitment_config,
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,13 +82,16 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn new_with_commitment(url: String, commitment_config: CommitmentConfig) -> Self {
|
||||
Self::new_sender(HttpSender::new(url), commitment_config)
|
||||
Self::new_sender(
|
||||
HttpSender::new(url),
|
||||
RpcClientConfig::with_commitment(commitment_config),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
|
||||
Self::new_sender(
|
||||
HttpSender::new_with_timeout(url, timeout),
|
||||
CommitmentConfig::default(),
|
||||
RpcClientConfig::with_commitment(CommitmentConfig::default()),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -109,18 +102,36 @@ impl RpcClient {
|
||||
) -> Self {
|
||||
Self::new_sender(
|
||||
HttpSender::new_with_timeout(url, timeout),
|
||||
commitment_config,
|
||||
RpcClientConfig::with_commitment(commitment_config),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn new_with_timeouts_and_commitment(
|
||||
url: String,
|
||||
timeout: Duration,
|
||||
commitment_config: CommitmentConfig,
|
||||
confirm_transaction_initial_timeout: Duration,
|
||||
) -> Self {
|
||||
Self::new_sender(
|
||||
HttpSender::new_with_timeout(url, timeout),
|
||||
RpcClientConfig {
|
||||
commitment_config,
|
||||
confirm_transaction_initial_timeout: Some(confirm_transaction_initial_timeout),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn new_mock(url: String) -> Self {
|
||||
Self::new_sender(MockSender::new(url), CommitmentConfig::default())
|
||||
Self::new_sender(
|
||||
MockSender::new(url),
|
||||
RpcClientConfig::with_commitment(CommitmentConfig::default()),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
|
||||
Self::new_sender(
|
||||
MockSender::new_with_mocks(url, mocks),
|
||||
CommitmentConfig::default(),
|
||||
RpcClientConfig::with_commitment(CommitmentConfig::default()),
|
||||
)
|
||||
}
|
||||
|
||||
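The hunks above thread an `RpcClientConfig` through the constructors and add `new_with_timeouts_and_commitment`, which carries an initial confirmation timeout. A hedged usage sketch; the URL, timeouts, and commitment level are illustrative:

use std::time::Duration;

use solana_client::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;

fn main() {
    let rpc_client = RpcClient::new_with_timeouts_and_commitment(
        "http://localhost:8899".to_string(),
        Duration::from_secs(30),       // per-request HTTP timeout
        CommitmentConfig::confirmed(), // default commitment used by the helper methods
        Duration::from_secs(5),        // initial grace period when confirming a transaction
    );
    // The default commitment is now read through the config-backed getter.
    println!("commitment = {:?}", rpc_client.commitment());
}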
@@ -159,7 +170,7 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn commitment(&self) -> CommitmentConfig {
|
||||
self.commitment_config
|
||||
self.config.commitment_config
|
||||
}
|
||||
|
||||
fn use_deprecated_commitment(&self) -> Result<bool, RpcError> {
|
||||
@@ -182,26 +193,9 @@ impl RpcClient {
|
||||
Ok(requested_commitment)
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
fn maybe_map_request(&self, mut request: RpcRequest) -> Result<RpcRequest, RpcError> {
|
||||
if self.get_node_version()? < semver::Version::new(1, 7, 0) {
|
||||
request = match request {
|
||||
RpcRequest::GetBlock => RpcRequest::GetConfirmedBlock,
|
||||
RpcRequest::GetBlocks => RpcRequest::GetConfirmedBlocks,
|
||||
RpcRequest::GetBlocksWithLimit => RpcRequest::GetConfirmedBlocksWithLimit,
|
||||
RpcRequest::GetSignaturesForAddress => {
|
||||
RpcRequest::GetConfirmedSignaturesForAddress2
|
||||
}
|
||||
RpcRequest::GetTransaction => RpcRequest::GetConfirmedTransaction,
|
||||
_ => request,
|
||||
};
|
||||
}
|
||||
Ok(request)
|
||||
}
|
||||
|
||||
pub fn confirm_transaction(&self, signature: &Signature) -> ClientResult<bool> {
|
||||
Ok(self
|
||||
.confirm_transaction_with_commitment(signature, self.commitment_config)?
|
||||
.confirm_transaction_with_commitment(signature, self.commitment())?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -227,8 +221,7 @@ impl RpcClient {
|
||||
transaction,
|
||||
RpcSendTransactionConfig {
|
||||
preflight_commitment: Some(
|
||||
self.maybe_map_commitment(self.commitment_config)?
|
||||
.commitment,
|
||||
self.maybe_map_commitment(self.commitment())?.commitment,
|
||||
),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
@@ -317,7 +310,7 @@ impl RpcClient {
|
||||
self.simulate_transaction_with_config(
|
||||
transaction,
|
||||
RpcSimulateTransactionConfig {
|
||||
commitment: Some(self.commitment_config),
|
||||
commitment: Some(self.commitment()),
|
||||
..RpcSimulateTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
@@ -355,7 +348,7 @@ impl RpcClient {
|
||||
&self,
|
||||
signature: &Signature,
|
||||
) -> ClientResult<Option<transaction::Result<()>>> {
|
||||
self.get_signature_status_with_commitment(signature, self.commitment_config)
|
||||
self.get_signature_status_with_commitment(signature, self.commitment())
|
||||
}
|
||||
|
||||
pub fn get_signature_statuses(
|
||||
@@ -413,7 +406,7 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_slot(&self) -> ClientResult<Slot> {
|
||||
self.get_slot_with_commitment(self.commitment_config)
|
||||
self.get_slot_with_commitment(self.commitment())
|
||||
}
|
||||
|
||||
pub fn get_slot_with_commitment(
|
||||
@@ -427,7 +420,7 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_block_height(&self) -> ClientResult<u64> {
|
||||
self.get_block_height_with_commitment(self.commitment_config)
|
||||
self.get_block_height_with_commitment(self.commitment())
|
||||
}
|
||||
|
||||
pub fn get_block_height_with_commitment(
|
||||
@@ -481,14 +474,14 @@ impl RpcClient {
|
||||
stake_account.to_string(),
|
||||
RpcEpochConfig {
|
||||
epoch,
|
||||
commitment: Some(self.commitment_config),
|
||||
commitment: Some(self.commitment()),
|
||||
}
|
||||
]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn supply(&self) -> RpcResult<RpcSupply> {
|
||||
self.supply_with_commitment(self.commitment_config)
|
||||
self.supply_with_commitment(self.commitment())
|
||||
}
|
||||
|
||||
pub fn supply_with_commitment(
|
||||
@@ -501,6 +494,27 @@ impl RpcClient {
|
||||
)
|
||||
}
|
||||
|
||||
#[deprecated(since = "1.5.19", note = "Please use RpcClient::supply() instead")]
|
||||
#[allow(deprecated)]
|
||||
pub fn total_supply(&self) -> ClientResult<u64> {
|
||||
self.total_supply_with_commitment(self.commitment())
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.5.19",
|
||||
note = "Please use RpcClient::supply_with_commitment() instead"
|
||||
)]
|
||||
#[allow(deprecated)]
|
||||
pub fn total_supply_with_commitment(
|
||||
&self,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<u64> {
|
||||
self.send(
|
||||
RpcRequest::GetTotalSupply,
|
||||
json!([self.maybe_map_commitment(commitment_config)?]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_largest_accounts_with_config(
|
||||
&self,
|
||||
config: RpcLargestAccountsConfig,
|
||||
@@ -515,7 +529,7 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_vote_accounts(&self) -> ClientResult<RpcVoteAccountStatus> {
|
||||
self.get_vote_accounts_with_commitment(self.commitment_config)
|
||||
self.get_vote_accounts_with_commitment(self.commitment())
|
||||
}
|
||||
|
||||
pub fn get_vote_accounts_with_commitment(
|
||||
@@ -571,43 +585,10 @@ impl RpcClient {
|
||||
self.send(RpcRequest::GetClusterNodes, Value::Null)
|
||||
}
|
||||
|
||||
pub fn get_block(&self, slot: Slot) -> ClientResult<EncodedConfirmedBlock> {
|
||||
self.get_block_with_encoding(slot, UiTransactionEncoding::Json)
|
||||
}
|
||||
|
||||
pub fn get_block_with_encoding(
|
||||
&self,
|
||||
slot: Slot,
|
||||
encoding: UiTransactionEncoding,
|
||||
) -> ClientResult<EncodedConfirmedBlock> {
|
||||
self.send(
|
||||
self.maybe_map_request(RpcRequest::GetBlock)?,
|
||||
json!([slot, encoding]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_block_with_config(
|
||||
&self,
|
||||
slot: Slot,
|
||||
config: RpcBlockConfig,
|
||||
) -> ClientResult<UiConfirmedBlock> {
|
||||
self.send(
|
||||
self.maybe_map_request(RpcRequest::GetBlock)?,
|
||||
json!([slot, config]),
|
||||
)
|
||||
}
|
||||
|
||||
#[deprecated(since = "1.7.0", note = "Please use RpcClient::get_block() instead")]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<EncodedConfirmedBlock> {
|
||||
self.get_confirmed_block_with_encoding(slot, UiTransactionEncoding::Json)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcClient::get_block_with_encoding() instead"
|
||||
)]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_block_with_encoding(
|
||||
&self,
|
||||
slot: Slot,
|
||||
@@ -616,11 +597,6 @@ impl RpcClient {
|
||||
self.send(RpcRequest::GetConfirmedBlock, json!([slot, encoding]))
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcClient::get_block_with_config() instead"
|
||||
)]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_block_with_config(
|
||||
&self,
|
||||
slot: Slot,
|
||||
@@ -629,56 +605,6 @@ impl RpcClient {
|
||||
self.send(RpcRequest::GetConfirmedBlock, json!([slot, config]))
|
||||
}
|
||||
|
||||
pub fn get_blocks(&self, start_slot: Slot, end_slot: Option<Slot>) -> ClientResult<Vec<Slot>> {
|
||||
self.send(
|
||||
self.maybe_map_request(RpcRequest::GetBlocks)?,
|
||||
json!([start_slot, end_slot]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_blocks_with_commitment(
|
||||
&self,
|
||||
start_slot: Slot,
|
||||
end_slot: Option<Slot>,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<Vec<Slot>> {
|
||||
let json = if end_slot.is_some() {
|
||||
json!([
|
||||
start_slot,
|
||||
end_slot,
|
||||
self.maybe_map_commitment(commitment_config)?
|
||||
])
|
||||
} else {
|
||||
json!([start_slot, self.maybe_map_commitment(commitment_config)?])
|
||||
};
|
||||
self.send(self.maybe_map_request(RpcRequest::GetBlocks)?, json)
|
||||
}
|
||||
|
||||
pub fn get_blocks_with_limit(&self, start_slot: Slot, limit: usize) -> ClientResult<Vec<Slot>> {
|
||||
self.send(
|
||||
self.maybe_map_request(RpcRequest::GetBlocksWithLimit)?,
|
||||
json!([start_slot, limit]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_blocks_with_limit_and_commitment(
|
||||
&self,
|
||||
start_slot: Slot,
|
||||
limit: usize,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<Vec<Slot>> {
|
||||
self.send(
|
||||
self.maybe_map_request(RpcRequest::GetBlocksWithLimit)?,
|
||||
json!([
|
||||
start_slot,
|
||||
limit,
|
||||
self.maybe_map_commitment(commitment_config)?
|
||||
]),
|
||||
)
|
||||
}
|
||||
|
||||
#[deprecated(since = "1.7.0", note = "Please use RpcClient::get_blocks() instead")]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_blocks(
|
||||
&self,
|
||||
start_slot: Slot,
|
||||
@@ -690,11 +616,6 @@ impl RpcClient {
|
||||
)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcClient::get_blocks_with_commitment() instead"
|
||||
)]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_blocks_with_commitment(
|
||||
&self,
|
||||
start_slot: Slot,
|
||||
@@ -713,11 +634,6 @@ impl RpcClient {
|
||||
self.send(RpcRequest::GetConfirmedBlocks, json)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcClient::get_blocks_with_limit() instead"
|
||||
)]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_blocks_with_limit(
|
||||
&self,
|
||||
start_slot: Slot,
|
||||
@@ -729,11 +645,6 @@ impl RpcClient {
|
||||
)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcClient::get_blocks_with_limit_and_commitment() instead"
|
||||
)]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_blocks_with_limit_and_commitment(
|
||||
&self,
|
||||
start_slot: Slot,
|
||||
@@ -750,41 +661,33 @@ impl RpcClient {
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_signatures_for_address(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
|
||||
self.get_signatures_for_address_with_config(
|
||||
address,
|
||||
GetConfirmedSignaturesForAddress2Config::default(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_signatures_for_address_with_config(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
config: GetConfirmedSignaturesForAddress2Config,
|
||||
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
|
||||
let config = RpcSignaturesForAddressConfig {
|
||||
before: config.before.map(|signature| signature.to_string()),
|
||||
until: config.until.map(|signature| signature.to_string()),
|
||||
limit: config.limit,
|
||||
commitment: config.commitment,
|
||||
};
|
||||
|
||||
let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
|
||||
self.maybe_map_request(RpcRequest::GetSignaturesForAddress)?,
|
||||
json!([address.to_string(), config]),
|
||||
)?;
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcClient::get_signatures_for_address() instead"
|
||||
since = "1.5.19",
|
||||
note = "Please use RpcClient::get_confirmed_signatures_for_address2() instead"
|
||||
)]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_signatures_for_address(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
start_slot: Slot,
|
||||
end_slot: Slot,
|
||||
) -> ClientResult<Vec<Signature>> {
|
||||
let signatures_base58_str: Vec<String> = self.send(
|
||||
RpcRequest::GetConfirmedSignaturesForAddress,
|
||||
json!([address.to_string(), start_slot, end_slot]),
|
||||
)?;
|
||||
|
||||
let mut signatures = vec![];
|
||||
for signature_base58_str in signatures_base58_str {
|
||||
signatures.push(
|
||||
signature_base58_str.parse::<Signature>().map_err(|err| {
|
||||
Into::<ClientError>::into(RpcError::ParseError(err.to_string()))
|
||||
})?,
|
||||
);
|
||||
}
|
||||
Ok(signatures)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_signatures_for_address2(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
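For the signature-listing path kept above, a hedged sketch of fetching recent signatures for an address with an explicit limit; the endpoint and address are made up, and the config type with its `Default` derive is the one shown in this diff:

use solana_client::rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient};
use solana_sdk::pubkey::Pubkey;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let address = Pubkey::new_unique(); // illustrative address
    let config = GetConfirmedSignaturesForAddress2Config {
        limit: Some(10),
        ..GetConfirmedSignaturesForAddress2Config::default()
    };
    // Returns the most recent transaction signatures involving the address, newest first.
    let statuses = rpc_client.get_confirmed_signatures_for_address2_with_config(&address, config)?;
    for status in statuses {
        println!("{} (slot {})", status.signature, status.slot);
    }
    Ok(())
}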
@@ -795,11 +698,6 @@ impl RpcClient {
|
||||
)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcClient::get_signatures_for_address_with_config() instead"
|
||||
)]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_signatures_for_address2_with_config(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
@@ -820,33 +718,6 @@ impl RpcClient {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn get_transaction(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
encoding: UiTransactionEncoding,
|
||||
) -> ClientResult<EncodedConfirmedTransaction> {
|
||||
self.send(
|
||||
self.maybe_map_request(RpcRequest::GetTransaction)?,
|
||||
json!([signature.to_string(), encoding]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_transaction_with_config(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
config: RpcTransactionConfig,
|
||||
) -> ClientResult<EncodedConfirmedTransaction> {
|
||||
self.send(
|
||||
self.maybe_map_request(RpcRequest::GetTransaction)?,
|
||||
json!([signature.to_string(), config]),
|
||||
)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcClient::get_transaction() instead"
|
||||
)]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_transaction(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
@@ -858,11 +729,6 @@ impl RpcClient {
|
||||
)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcClient::get_transaction_with_config() instead"
|
||||
)]
|
||||
#[allow(deprecated)]
|
||||
pub fn get_confirmed_transaction_with_config(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
@@ -892,7 +758,7 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_epoch_info(&self) -> ClientResult<EpochInfo> {
|
||||
self.get_epoch_info_with_commitment(self.commitment_config)
|
||||
self.get_epoch_info_with_commitment(self.commitment())
|
||||
}
|
||||
|
||||
pub fn get_epoch_info_with_commitment(
|
||||
@@ -909,7 +775,7 @@ impl RpcClient {
|
||||
&self,
|
||||
slot: Option<Slot>,
|
||||
) -> ClientResult<Option<RpcLeaderSchedule>> {
|
||||
self.get_leader_schedule_with_commitment(slot, self.commitment_config)
|
||||
self.get_leader_schedule_with_commitment(slot, self.commitment())
|
||||
}
|
||||
|
||||
pub fn get_leader_schedule_with_commitment(
|
||||
@@ -979,7 +845,7 @@ impl RpcClient {
|
||||
addresses,
|
||||
RpcEpochConfig {
|
||||
epoch,
|
||||
commitment: Some(self.commitment_config),
|
||||
commitment: Some(self.commitment()),
|
||||
}
|
||||
]),
|
||||
)
|
||||
@@ -1045,7 +911,7 @@ impl RpcClient {
|
||||
/// Note that `get_account` returns `Err(..)` if the account does not exist whereas
|
||||
/// `get_account_with_commitment` returns `Ok(None)` if the account does not exist.
|
||||
pub fn get_account(&self, pubkey: &Pubkey) -> ClientResult<Account> {
|
||||
self.get_account_with_commitment(pubkey, self.commitment_config)?
|
||||
self.get_account_with_commitment(pubkey, self.commitment())?
|
||||
.value
|
||||
.ok_or_else(|| RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into())
|
||||
}
|
||||
@@ -1056,7 +922,7 @@ impl RpcClient {
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Option<Account>> {
|
||||
let config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::Base64Zstd),
|
||||
encoding: Some(UiAccountEncoding::Base64),
|
||||
commitment: Some(self.maybe_map_commitment(commitment_config)?),
|
||||
data_slice: None,
|
||||
};
|
||||
@@ -1101,7 +967,7 @@ impl RpcClient {
|
||||
|
||||
pub fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> ClientResult<Vec<Option<Account>>> {
|
||||
Ok(self
|
||||
.get_multiple_accounts_with_commitment(pubkeys, self.commitment_config)?
|
||||
.get_multiple_accounts_with_commitment(pubkeys, self.commitment())?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -1111,7 +977,7 @@ impl RpcClient {
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Vec<Option<Account>>> {
|
||||
let config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::Base64Zstd),
|
||||
encoding: Some(UiAccountEncoding::Base64),
|
||||
commitment: Some(self.maybe_map_commitment(commitment_config)?),
|
||||
data_slice: None,
|
||||
};
|
||||
@@ -1155,7 +1021,7 @@ impl RpcClient {
|
||||
/// Request the balance of the account `pubkey`.
|
||||
pub fn get_balance(&self, pubkey: &Pubkey) -> ClientResult<u64> {
|
||||
Ok(self
|
||||
.get_balance_with_commitment(pubkey, self.commitment_config)?
|
||||
.get_balance_with_commitment(pubkey, self.commitment())?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -1213,7 +1079,7 @@ impl RpcClient {
|
||||
|
||||
/// Request the transaction count.
|
||||
pub fn get_transaction_count(&self) -> ClientResult<u64> {
|
||||
self.get_transaction_count_with_commitment(self.commitment_config)
|
||||
self.get_transaction_count_with_commitment(self.commitment())
|
||||
}
|
||||
|
||||
pub fn get_transaction_count_with_commitment(
|
||||
@@ -1226,9 +1092,37 @@ impl RpcClient {
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_fees(&self) -> ClientResult<Fees> {
|
||||
Ok(self.get_fees_with_commitment(self.commitment())?.value)
|
||||
}
|
||||
|
||||
pub fn get_fees_with_commitment(&self, commitment_config: CommitmentConfig) -> RpcResult<Fees> {
|
||||
let Response {
|
||||
context,
|
||||
value: fees,
|
||||
} = self.send::<Response<RpcFees>>(
|
||||
RpcRequest::GetFees,
|
||||
json!([self.maybe_map_commitment(commitment_config)?]),
|
||||
)?;
|
||||
let blockhash = fees.blockhash.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Hash".to_string()).into(),
|
||||
RpcRequest::GetFees,
|
||||
)
|
||||
})?;
|
||||
Ok(Response {
|
||||
context,
|
||||
value: Fees {
|
||||
blockhash,
|
||||
fee_calculator: fees.fee_calculator,
|
||||
last_valid_block_height: fees.last_valid_block_height,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
|
||||
let (blockhash, fee_calculator, _last_valid_slot) = self
|
||||
.get_recent_blockhash_with_commitment(self.commitment_config)?
|
||||
.get_recent_blockhash_with_commitment(self.commitment())?
|
||||
.value;
|
||||
Ok((blockhash, fee_calculator))
|
||||
}
|
||||
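A short, hedged sketch of the new `get_fees()` helper added above, which bundles the blockhash, fee calculator, and last valid block height into one call; the endpoint is illustrative:

use solana_client::rpc_client::RpcClient;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new("http://localhost:8899".to_string());
    let fees = rpc_client.get_fees()?;
    println!(
        "blockhash {} is usable until block height {} ({} lamports per signature)",
        fees.blockhash,
        fees.last_valid_block_height,
        fees.fee_calculator.lamports_per_signature
    );
    Ok(())
}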
@@ -1301,7 +1195,7 @@ impl RpcClient {
|
||||
blockhash: &Hash,
|
||||
) -> ClientResult<Option<FeeCalculator>> {
|
||||
Ok(self
|
||||
.get_fee_calculator_for_blockhash_with_commitment(blockhash, self.commitment_config)?
|
||||
.get_fee_calculator_for_blockhash_with_commitment(blockhash, self.commitment())?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -1383,7 +1277,7 @@ impl RpcClient {
|
||||
|
||||
pub fn get_token_account(&self, pubkey: &Pubkey) -> ClientResult<Option<UiTokenAccount>> {
|
||||
Ok(self
|
||||
.get_token_account_with_commitment(pubkey, self.commitment_config)?
|
||||
.get_token_account_with_commitment(pubkey, self.commitment())?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -1444,7 +1338,7 @@ impl RpcClient {
|
||||
|
||||
pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<UiTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_account_balance_with_commitment(pubkey, self.commitment_config)?
|
||||
.get_token_account_balance_with_commitment(pubkey, self.commitment())?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -1471,7 +1365,7 @@ impl RpcClient {
|
||||
.get_token_accounts_by_delegate_with_commitment(
|
||||
delegate,
|
||||
token_account_filter,
|
||||
self.commitment_config,
|
||||
self.commitment(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
@@ -1510,7 +1404,7 @@ impl RpcClient {
|
||||
.get_token_accounts_by_owner_with_commitment(
|
||||
owner,
|
||||
token_account_filter,
|
||||
self.commitment_config,
|
||||
self.commitment(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
@@ -1542,7 +1436,7 @@ impl RpcClient {
|
||||
|
||||
pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_supply_with_commitment(mint, self.commitment_config)?
|
||||
.get_token_supply_with_commitment(mint, self.commitment())?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -1565,7 +1459,7 @@ impl RpcClient {
|
||||
pubkey,
|
||||
lamports,
|
||||
RpcRequestAirdropConfig {
|
||||
commitment: Some(self.commitment_config),
|
||||
commitment: Some(self.commitment()),
|
||||
..RpcRequestAirdropConfig::default()
|
||||
},
|
||||
)
|
||||
@@ -1581,7 +1475,7 @@ impl RpcClient {
|
||||
pubkey,
|
||||
lamports,
|
||||
RpcRequestAirdropConfig {
|
||||
commitment: Some(self.commitment_config),
|
||||
commitment: Some(self.commitment()),
|
||||
recent_blockhash: Some(recent_blockhash.to_string()),
|
||||
},
|
||||
)
|
||||
@@ -1684,7 +1578,7 @@ impl RpcClient {
|
||||
|
||||
/// Poll the server to confirm a transaction.
|
||||
pub fn poll_for_signature(&self, signature: &Signature) -> ClientResult<()> {
|
||||
self.poll_for_signature_with_commitment(signature, self.commitment_config)
|
||||
self.poll_for_signature_with_commitment(signature, self.commitment())
|
||||
}
|
||||
|
||||
/// Poll the server to confirm a transaction.
|
||||
@@ -1794,7 +1688,7 @@ impl RpcClient {
|
||||
) -> ClientResult<Signature> {
|
||||
self.send_and_confirm_transaction_with_spinner_and_commitment(
|
||||
transaction,
|
||||
self.commitment_config,
|
||||
self.commitment(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1850,19 +1744,25 @@ impl RpcClient {
|
||||
"[{}/{}] Finalizing transaction {}",
|
||||
confirmations, desired_confirmations, signature,
|
||||
));
|
||||
|
||||
let now = Instant::now();
|
||||
let confirm_transaction_initial_timeout = self
|
||||
.config
|
||||
.confirm_transaction_initial_timeout
|
||||
.unwrap_or_default();
|
||||
let (signature, status) = loop {
|
||||
// Get recent commitment in order to count confirmations for successful transactions
|
||||
let status = self
|
||||
.get_signature_status_with_commitment(&signature, CommitmentConfig::processed())?;
|
||||
if status.is_none() {
|
||||
if self
|
||||
let blockhash_not_found = self
|
||||
.get_fee_calculator_for_blockhash_with_commitment(
|
||||
&recent_blockhash,
|
||||
CommitmentConfig::processed(),
|
||||
)?
|
||||
.value
|
||||
.is_none()
|
||||
{
|
||||
.is_none();
|
||||
if blockhash_not_found && now.elapsed() >= confirm_transaction_initial_timeout {
|
||||
break (signature, status);
|
||||
}
|
||||
} else {
|
||||
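The hunk above changes the confirmation loop to tolerate a "blockhash not found yet" response for an initial grace period before giving up. A minimal standalone sketch of that timing pattern, with a hypothetical stand-in for the RPC check and illustrative durations:

use std::time::{Duration, Instant};

fn main() {
    let confirm_transaction_initial_timeout = Duration::from_secs(5);
    let now = Instant::now();

    loop {
        let blockhash_not_found = query_blockhash_is_missing();
        // Only give up on a missing blockhash once the initial grace period has elapsed.
        if blockhash_not_found && now.elapsed() >= confirm_transaction_initial_timeout {
            println!("giving up: blockhash still unknown after the grace period");
            break;
        }
        if !blockhash_not_found {
            println!("blockhash visible; keep waiting for confirmations");
            break;
        }
        std::thread::sleep(Duration::from_millis(500));
    }
}

// Hypothetical helper standing in for
// get_fee_calculator_for_blockhash_with_commitment(...)?.value.is_none() in the code above.
fn query_blockhash_is_missing() -> bool {
    false
}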
@@ -1933,6 +1833,26 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize_encode_transaction(
|
||||
transaction: &Transaction,
|
||||
encoding: UiTransactionEncoding,
|
||||
) -> ClientResult<String> {
|
||||
let serialized = serialize(transaction)
|
||||
.map_err(|e| ClientErrorKind::Custom(format!("transaction serialization failed: {}", e)))?;
|
||||
let encoded = match encoding {
|
||||
UiTransactionEncoding::Base58 => bs58::encode(serialized).into_string(),
|
||||
UiTransactionEncoding::Base64 => base64::encode(serialized),
|
||||
_ => {
|
||||
return Err(ClientErrorKind::Custom(format!(
|
||||
"unsupported transaction encoding: {}. Supported encodings: base58, base64",
|
||||
encoding
|
||||
))
|
||||
.into())
|
||||
}
|
||||
};
|
||||
Ok(encoded)
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct GetConfirmedSignaturesForAddress2Config {
|
||||
pub before: Option<Signature>,
|
||||
@@ -2062,7 +1982,7 @@ mod tests {
|
||||
// Send erroneous parameter
|
||||
let blockhash: ClientResult<String> =
|
||||
rpc_client.send(RpcRequest::GetRecentBlockhash, json!(["parameter"]));
|
||||
assert!(blockhash.is_err());
|
||||
assert_eq!(blockhash.is_err(), true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -81,6 +81,8 @@ pub struct RpcGetVoteAccountsConfig {
|
||||
pub vote_pubkey: Option<String>, // validator vote address, as a base-58 encoded string
|
||||
#[serde(flatten)]
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
pub keep_unstaked_delinquents: Option<bool>,
|
||||
pub delinquent_slot_distance: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
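The hunk above adds two optional knobs to `RpcGetVoteAccountsConfig`. A hedged sketch of filling them in; the values are illustrative and the field set matches the struct as shown in this diff:

use solana_client::rpc_config::RpcGetVoteAccountsConfig;
use solana_sdk::commitment_config::CommitmentConfig;

fn main() {
    let config = RpcGetVoteAccountsConfig {
        vote_pubkey: None,
        commitment: Some(CommitmentConfig::finalized()),
        keep_unstaked_delinquents: Some(false), // drop delinquent validators that carry no stake
        delinquent_slot_distance: Some(128),    // how far behind the tip counts as delinquent
    };
    println!(
        "keep_unstaked_delinquents = {:?}, delinquent_slot_distance = {:?}",
        config.keep_unstaked_delinquents, config.delinquent_slot_distance
    );
}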
@@ -172,7 +174,7 @@ pub struct RpcSignatureSubscribeConfig {
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcSignaturesForAddressConfig {
|
||||
pub struct RpcGetConfirmedSignaturesForAddress2Config {
|
||||
pub before: Option<String>, // Signature as base-58 string
|
||||
pub until: Option<String>, // Signature as base-58 string
|
||||
pub limit: Option<usize>,
|
||||
@@ -194,17 +196,6 @@ impl<T: EncodingConfig + Default + Copy> RpcEncodingConfigWrapper<T> {
|
||||
RpcEncodingConfigWrapper::Current(config) => config.unwrap_or_default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn convert<U: EncodingConfig + From<T>>(&self) -> RpcEncodingConfigWrapper<U> {
|
||||
match self {
|
||||
RpcEncodingConfigWrapper::Deprecated(encoding) => {
|
||||
RpcEncodingConfigWrapper::Deprecated(*encoding)
|
||||
}
|
||||
RpcEncodingConfigWrapper::Current(config) => {
|
||||
RpcEncodingConfigWrapper::Current(config.map(|config| config.into()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait EncodingConfig {
|
||||
@@ -213,7 +204,7 @@ pub trait EncodingConfig {
|
||||
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcBlockConfig {
|
||||
pub struct RpcConfirmedBlockConfig {
|
||||
pub encoding: Option<UiTransactionEncoding>,
|
||||
pub transaction_details: Option<TransactionDetails>,
|
||||
pub rewards: Option<bool>,
|
||||
@@ -221,7 +212,7 @@ pub struct RpcBlockConfig {
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
}
|
||||
|
||||
impl EncodingConfig for RpcBlockConfig {
|
||||
impl EncodingConfig for RpcConfirmedBlockConfig {
|
||||
fn new_with_encoding(encoding: &Option<UiTransactionEncoding>) -> Self {
|
||||
Self {
|
||||
encoding: *encoding,
|
||||
@@ -230,7 +221,7 @@ impl EncodingConfig for RpcBlockConfig {
|
||||
}
|
||||
}
|
||||
|
||||
impl RpcBlockConfig {
|
||||
impl RpcConfirmedBlockConfig {
|
||||
pub fn rewards_only() -> Self {
|
||||
Self {
|
||||
transaction_details: Some(TransactionDetails::None),
|
||||
@@ -247,21 +238,21 @@ impl RpcBlockConfig {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<RpcBlockConfig> for RpcEncodingConfigWrapper<RpcBlockConfig> {
|
||||
fn from(config: RpcBlockConfig) -> Self {
|
||||
impl From<RpcConfirmedBlockConfig> for RpcEncodingConfigWrapper<RpcConfirmedBlockConfig> {
|
||||
fn from(config: RpcConfirmedBlockConfig) -> Self {
|
||||
RpcEncodingConfigWrapper::Current(Some(config))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcTransactionConfig {
|
||||
pub struct RpcConfirmedTransactionConfig {
|
||||
pub encoding: Option<UiTransactionEncoding>,
|
||||
#[serde(flatten)]
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
}
|
||||
|
||||
impl EncodingConfig for RpcTransactionConfig {
|
||||
impl EncodingConfig for RpcConfirmedTransactionConfig {
|
||||
fn new_with_encoding(encoding: &Option<UiTransactionEncoding>) -> Self {
|
||||
Self {
|
||||
encoding: *encoding,
|
||||
@@ -272,16 +263,16 @@ impl EncodingConfig for RpcTransactionConfig {
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(untagged)]
|
||||
pub enum RpcBlocksConfigWrapper {
|
||||
pub enum RpcConfirmedBlocksConfigWrapper {
|
||||
EndSlotOnly(Option<Slot>),
|
||||
CommitmentOnly(Option<CommitmentConfig>),
|
||||
}
|
||||
|
||||
impl RpcBlocksConfigWrapper {
|
||||
impl RpcConfirmedBlocksConfigWrapper {
|
||||
pub fn unzip(&self) -> (Option<Slot>, Option<CommitmentConfig>) {
|
||||
match &self {
|
||||
RpcBlocksConfigWrapper::EndSlotOnly(end_slot) => (*end_slot, None),
|
||||
RpcBlocksConfigWrapper::CommitmentOnly(commitment) => (None, *commitment),
|
||||
RpcConfirmedBlocksConfigWrapper::EndSlotOnly(end_slot) => (*end_slot, None),
|
||||
RpcConfirmedBlocksConfigWrapper::CommitmentOnly(commitment) => (None, *commitment),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,120 +0,0 @@
#![allow(deprecated)]
use {
    crate::rpc_config::{
        EncodingConfig, RpcBlockConfig, RpcEncodingConfigWrapper, RpcTransactionConfig,
    },
    solana_sdk::{clock::Slot, commitment_config::CommitmentConfig},
    solana_transaction_status::{TransactionDetails, UiTransactionEncoding},
};

#[deprecated(
    since = "1.7.0",
    note = "Please use RpcSignaturesForAddressConfig instead"
)]
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcGetConfirmedSignaturesForAddress2Config {
    pub before: Option<String>, // Signature as base-58 string
    pub until: Option<String>,  // Signature as base-58 string
    pub limit: Option<usize>,
    #[serde(flatten)]
    pub commitment: Option<CommitmentConfig>,
}

#[deprecated(since = "1.7.0", note = "Please use RpcBlockConfig instead")]
#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedBlockConfig {
    pub encoding: Option<UiTransactionEncoding>,
    pub transaction_details: Option<TransactionDetails>,
    pub rewards: Option<bool>,
    #[serde(flatten)]
    pub commitment: Option<CommitmentConfig>,
}

impl EncodingConfig for RpcConfirmedBlockConfig {
    fn new_with_encoding(encoding: &Option<UiTransactionEncoding>) -> Self {
        Self {
            encoding: *encoding,
            ..Self::default()
        }
    }
}

impl RpcConfirmedBlockConfig {
    pub fn rewards_only() -> Self {
        Self {
            transaction_details: Some(TransactionDetails::None),
            ..Self::default()
        }
    }

    pub fn rewards_with_commitment(commitment: Option<CommitmentConfig>) -> Self {
        Self {
            transaction_details: Some(TransactionDetails::None),
            commitment,
            ..Self::default()
        }
    }
}

impl From<RpcConfirmedBlockConfig> for RpcEncodingConfigWrapper<RpcConfirmedBlockConfig> {
    fn from(config: RpcConfirmedBlockConfig) -> Self {
        RpcEncodingConfigWrapper::Current(Some(config))
    }
}

impl From<RpcConfirmedBlockConfig> for RpcBlockConfig {
    fn from(config: RpcConfirmedBlockConfig) -> Self {
        Self {
            encoding: config.encoding,
            transaction_details: config.transaction_details,
            rewards: config.rewards,
            commitment: config.commitment,
        }
    }
}

#[deprecated(since = "1.7.0", note = "Please use RpcTransactionConfig instead")]
#[derive(Debug, Clone, Copy, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcConfirmedTransactionConfig {
    pub encoding: Option<UiTransactionEncoding>,
    #[serde(flatten)]
    pub commitment: Option<CommitmentConfig>,
}

impl EncodingConfig for RpcConfirmedTransactionConfig {
    fn new_with_encoding(encoding: &Option<UiTransactionEncoding>) -> Self {
        Self {
            encoding: *encoding,
            ..Self::default()
        }
    }
}

impl From<RpcConfirmedTransactionConfig> for RpcTransactionConfig {
    fn from(config: RpcConfirmedTransactionConfig) -> Self {
        Self {
            encoding: config.encoding,
            commitment: config.commitment,
        }
    }
}

#[deprecated(since = "1.7.0", note = "Please use RpcBlocksConfigWrapper instead")]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum RpcConfirmedBlocksConfigWrapper {
    EndSlotOnly(Option<Slot>),
    CommitmentOnly(Option<CommitmentConfig>),
}

impl RpcConfirmedBlocksConfigWrapper {
    pub fn unzip(&self) -> (Option<Slot>, Option<CommitmentConfig>) {
        match &self {
            RpcConfirmedBlocksConfigWrapper::EndSlotOnly(end_slot) => (*end_slot, None),
            RpcConfirmedBlocksConfigWrapper::CommitmentOnly(commitment) => (None, *commitment),
        }
    }
}
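For readers following the rename, the `From` impls above are what make the deprecated 1.6-era config names drop-in compatible with the renamed 1.7-era types. A minimal sketch of that migration path; the module paths (`rpc_config` vs `rpc_deprecated_config`) and the use of `CommitmentConfig::finalized()` are assumptions for illustration, not taken from this diff:

#![allow(deprecated)]
// Assumed crate layout: deprecated types re-exported from solana_client::rpc_deprecated_config.
use solana_client::rpc_config::RpcBlockConfig;
use solana_client::rpc_deprecated_config::RpcConfirmedBlockConfig;
use solana_sdk::commitment_config::CommitmentConfig;

fn main() {
    // Config written against the old getConfirmedBlock API...
    let old = RpcConfirmedBlockConfig::rewards_with_commitment(Some(CommitmentConfig::finalized()));
    // ...converts field-for-field for use with the new getBlock API.
    let new: RpcBlockConfig = old.into();
    assert_eq!(new.rewards, old.rewards);
    assert_eq!(new.commitment, old.commitment);
}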
@@ -11,34 +11,22 @@ pub enum RpcRequest {
|
||||
DeregisterNode,
|
||||
GetAccountInfo,
|
||||
GetBalance,
|
||||
GetBlock,
|
||||
GetBlockHeight,
|
||||
GetBlockProduction,
|
||||
GetBlocks,
|
||||
GetBlocksWithLimit,
|
||||
GetBlockTime,
|
||||
GetClusterNodes,
|
||||
|
||||
#[deprecated(since = "1.7.0", note = "Please use RpcRequest::GetBlock instead")]
|
||||
GetConfirmedBlock,
|
||||
#[deprecated(since = "1.7.0", note = "Please use RpcRequest::GetBlocks instead")]
|
||||
GetConfirmedBlocks,
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcRequest::GetBlocksWithLimit instead"
|
||||
)]
|
||||
GetConfirmedBlocksWithLimit,
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcRequest::GetSignaturesForAddress instead"
|
||||
)]
|
||||
GetConfirmedSignaturesForAddress2,
|
||||
#[deprecated(
|
||||
since = "1.7.0",
|
||||
note = "Please use RpcRequest::GetTransaction instead"
|
||||
)]
|
||||
GetConfirmedTransaction,
|
||||
|
||||
#[deprecated(
|
||||
since = "1.5.19",
|
||||
note = "Please use RpcRequest::GetConfirmedSignaturesForAddress2 instead"
|
||||
)]
|
||||
GetConfirmedSignaturesForAddress,
|
||||
|
||||
GetConfirmedSignaturesForAddress2,
|
||||
GetConfirmedTransaction,
|
||||
GetEpochInfo,
|
||||
GetEpochSchedule,
|
||||
GetFeeCalculatorForBlockhash,
|
||||
@@ -61,7 +49,6 @@ pub enum RpcRequest {
|
||||
GetRecentBlockhash,
|
||||
GetRecentPerformanceSamples,
|
||||
GetSnapshotSlot,
|
||||
GetSignaturesForAddress,
|
||||
GetSignatureStatuses,
|
||||
GetSlot,
|
||||
GetSlotLeader,
|
||||
@@ -76,7 +63,10 @@ pub enum RpcRequest {
|
||||
GetTokenAccountsByDelegate,
|
||||
GetTokenAccountsByOwner,
|
||||
GetTokenSupply,
|
||||
GetTransaction,
|
||||
|
||||
#[deprecated(since = "1.5.19", note = "Please use RpcRequest::GetSupply instead")]
|
||||
GetTotalSupply,
|
||||
|
||||
GetTransactionCount,
|
||||
GetVersion,
|
||||
GetVoteAccounts,
|
||||
@@ -95,16 +85,14 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::DeregisterNode => "deregisterNode",
|
||||
RpcRequest::GetAccountInfo => "getAccountInfo",
|
||||
RpcRequest::GetBalance => "getBalance",
|
||||
RpcRequest::GetBlock => "getBlock",
|
||||
RpcRequest::GetBlockHeight => "getBlockHeight",
|
||||
RpcRequest::GetBlockProduction => "getBlockProduction",
|
||||
RpcRequest::GetBlocks => "getBlocks",
|
||||
RpcRequest::GetBlocksWithLimit => "getBlocksWithLimit",
|
||||
RpcRequest::GetBlockTime => "getBlockTime",
|
||||
RpcRequest::GetClusterNodes => "getClusterNodes",
|
||||
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
|
||||
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
|
||||
RpcRequest::GetConfirmedBlocksWithLimit => "getConfirmedBlocksWithLimit",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress2 => "getConfirmedSignaturesForAddress2",
|
||||
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
|
||||
RpcRequest::GetEpochInfo => "getEpochInfo",
|
||||
@@ -129,7 +117,6 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
|
||||
RpcRequest::GetRecentPerformanceSamples => "getRecentPerformanceSamples",
|
||||
RpcRequest::GetSnapshotSlot => "getSnapshotSlot",
|
||||
RpcRequest::GetSignaturesForAddress => "getSignaturesForAddress",
|
||||
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
|
||||
RpcRequest::GetSlot => "getSlot",
|
||||
RpcRequest::GetSlotLeader => "getSlotLeader",
|
||||
@@ -144,7 +131,7 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetTokenAccountsByDelegate => "getTokenAccountsByDelegate",
|
||||
RpcRequest::GetTokenAccountsByOwner => "getTokenAccountsByOwner",
|
||||
RpcRequest::GetTokenSupply => "getTokenSupply",
|
||||
RpcRequest::GetTransaction => "getTransaction",
|
||||
RpcRequest::GetTotalSupply => "getTotalSupply",
|
||||
RpcRequest::GetTransactionCount => "getTransactionCount",
|
||||
RpcRequest::GetVersion => "getVersion",
|
||||
RpcRequest::GetVoteAccounts => "getVoteAccounts",
|
||||
|
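The `Display` impl above is the single place where an `RpcRequest` variant becomes the JSON-RPC method string that goes over the wire. A rough, self-contained sketch of how such a string ends up in a request body; the `build_request` helper and the parameter values are illustrative, not part of this diff:

use serde_json::{json, Value};

// Hypothetical helper: wraps a method name and params in a JSON-RPC 2.0 envelope.
fn build_request(method: &str, id: u64, params: Value) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": id,
        "method": method, // e.g. RpcRequest::GetBlock.to_string() yields "getBlock"
        "params": params,
    })
}

fn main() {
    let request = build_request("getBlock", 1, json!([430, {"encoding": "json"}]));
    println!("{}", request);
}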
@@ -4,6 +4,7 @@ use {
    solana_sdk::{
        clock::{Epoch, Slot, UnixTimestamp},
        fee_calculator::{FeeCalculator, FeeRateGovernor},
        hash::Hash,
        inflation::Inflation,
        transaction::{Result, TransactionError},
    },
@@ -57,6 +58,14 @@ pub struct DeprecatedRpcFees {
    pub last_valid_slot: Slot,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Fees {
    pub blockhash: Hash,
    pub fee_calculator: FeeCalculator,
    pub last_valid_block_height: u64,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcFeeCalculator {
@@ -394,8 +403,9 @@ pub struct RpcPerfSample {
pub struct RpcInflationReward {
    pub epoch: Epoch,
    pub effective_slot: Slot,
    pub amount: u64, // lamports
    pub post_balance: u64, // lamports
    pub amount: u64, // lamports
    pub post_balance: u64, // lamports
    pub commission: Option<u8>, // Vote account commission when the reward was credited
}

impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
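Because the new `Fees` response type above derives `Serialize`/`Deserialize` with camelCase renaming, a fees payload maps onto it directly. A standalone sketch with the types mirrored locally so it runs without the solana crates; the field values and the `String` blockhash are made up for illustration (the real struct uses `solana_sdk::hash::Hash`):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Fees {
    blockhash: String,
    fee_calculator: FeeCalculator,
    last_valid_block_height: u64,
}

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct FeeCalculator {
    lamports_per_signature: u64,
}

fn main() {
    let json = r#"{
        "blockhash": "9sHcv6xwn9YkB8nxTUGKDwPwNnmqVp5oAXxU8Fdkm4J6",
        "feeCalculator": { "lamportsPerSignature": 5000 },
        "lastValidBlockHeight": 92
    }"#;
    let fees: Fees = serde_json::from_str(json).unwrap();
    println!("{:?}", fees);
}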
@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.7.2"
version = "1.6.18"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"
@@ -22,12 +22,19 @@ bv = { version = "0.11.1", features = ["serde"] }
|
||||
bs58 = "0.3.1"
|
||||
byteorder = "1.3.4"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
core_affinity = "0.5.10"
|
||||
crossbeam-channel = "0.4"
|
||||
ed25519-dalek = "=1.0.1"
|
||||
fs_extra = "1.2.0"
|
||||
flate2 = "1.0"
|
||||
indexmap = { version = "1.5", features = ["rayon"] }
|
||||
itertools = "0.9.0"
|
||||
jsonrpc-core = "17.1.0"
|
||||
jsonrpc-core-client = { version = "17.1.0", features = ["ipc", "ws"] }
|
||||
jsonrpc-derive = "17.1.0"
|
||||
jsonrpc-http-server = "17.1.0"
|
||||
jsonrpc-pubsub = "17.1.0"
|
||||
jsonrpc-ws-server = "17.1.0"
|
||||
libc = "0.2.81"
|
||||
log = "0.4.11"
|
||||
lru = "0.6.1"
|
||||
@@ -39,52 +46,52 @@ rand_chacha = "0.2.2"
|
||||
rand_core = "0.6.2"
|
||||
raptorq = "1.4.2"
|
||||
rayon = "1.5.0"
|
||||
regex = "1.3.9"
|
||||
retain_mut = "0.1.2"
|
||||
serde = "1.0.122"
|
||||
serde_bytes = "0.11"
|
||||
serde_derive = "1.0.103"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.2" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.7.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.2" }
|
||||
solana-client = { path = "../client", version = "=1.7.2" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.2" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.2" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "=1.7.2" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.2" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.2" }
|
||||
solana-perf = { path = "../perf", version = "=1.7.2" }
|
||||
solana-poh = { path = "../poh", version = "=1.7.2" }
|
||||
solana-program-test = { path = "../program-test", version = "=1.7.2" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.7.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.2" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.2" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.2" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.2" }
|
||||
solana-version = { path = "../version", version = "=1.7.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.2" }
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.6.18" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.6.18" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.6.18" }
|
||||
solana-client = { path = "../client", version = "=1.6.18" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.6.18" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.6.18" }
|
||||
solana-logger = { path = "../logger", version = "=1.6.18" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "=1.6.18" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.6.18" }
|
||||
solana-measure = { path = "../measure", version = "=1.6.18" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.6.18" }
|
||||
solana-perf = { path = "../perf", version = "=1.6.18" }
|
||||
solana-program-test = { path = "../program-test", version = "=1.6.18" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.6.18" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.6.18" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.6.18" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.6.18" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.6.18" }
|
||||
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.6.18" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.6.18" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "=1.6.18" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.6.18" }
|
||||
solana-version = { path = "../version", version = "=1.6.18" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.6.18" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.2" }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio_02 = { version = "0.2", package = "tokio", features = ["full"] }
|
||||
tokio-util = { version = "0.3", features = ["codec"] } # This crate needs to stay in sync with tokio_02, until that dependency can be removed
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.6.18" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
jsonrpc-core = "17.1.0"
|
||||
jsonrpc-core-client = { version = "17.1.0", features = ["ipc", "ws"] }
|
||||
matches = "0.1.6"
|
||||
num_cpus = "1.13.0"
|
||||
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serde_json = "1.0.56"
|
||||
serial_test = "0.4.0"
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.7.2" }
|
||||
solana-version = { path = "../version", version = "=1.7.2" }
|
||||
symlink = "0.1.0"
|
||||
systemstat = "0.1.5"
|
||||
tokio_02 = { version = "0.2", package = "tokio", features = ["full"] }
|
||||
|
||||
[build-dependencies]
|
||||
rustc_version = "0.2"
|
||||
@@ -96,7 +103,13 @@ name = "banking_stage"
|
||||
name = "blockstore"
|
||||
|
||||
[[bench]]
|
||||
name = "cluster_info"
|
||||
name = "crds"
|
||||
|
||||
[[bench]]
|
||||
name = "crds_gossip_pull"
|
||||
|
||||
[[bench]]
|
||||
name = "crds_shards"
|
||||
|
||||
[[bench]]
|
||||
name = "gen_keys"
|
||||
@@ -104,8 +117,14 @@ name = "gen_keys"
|
||||
[[bench]]
|
||||
name = "sigverify_stage"
|
||||
|
||||
[[bench]]
|
||||
name = "poh"
|
||||
|
||||
[[bench]]
|
||||
name = "retransmit_stage"
|
||||
|
||||
[[bench]]
|
||||
name = "cluster_info"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -7,16 +7,16 @@ use crossbeam_channel::unbounded;
|
||||
use log::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::prelude::*;
|
||||
use solana_core::banking_stage::{BankingStage, BankingStageStats};
|
||||
use solana_gossip::cluster_info::ClusterInfo;
|
||||
use solana_gossip::cluster_info::Node;
|
||||
use solana_core::banking_stage::{create_test_recorder, BankingStage, BankingStageStats};
|
||||
use solana_core::cluster_info::ClusterInfo;
|
||||
use solana_core::cluster_info::Node;
|
||||
use solana_core::poh_recorder::WorkingBankEntry;
|
||||
use solana_ledger::blockstore_processor::process_entries;
|
||||
use solana_ledger::entry::{next_hash, Entry};
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
|
||||
use solana_perf::packet::to_packets_chunked;
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::genesis_config::GenesisConfig;
|
||||
use solana_sdk::hash::Hash;
|
||||
|
@@ -5,8 +5,8 @@ extern crate test;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_core::broadcast_stage::broadcast_metrics::TransmitShredsStats;
|
||||
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
|
||||
use solana_gossip::cluster_info::{ClusterInfo, Node};
|
||||
use solana_gossip::contact_info::ContactInfo;
|
||||
use solana_core::cluster_info::{ClusterInfo, Node};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_ledger::shred::Shred;
|
||||
use solana_sdk::pubkey;
|
||||
use solana_sdk::timing::timestamp;
|
||||
|
@@ -2,16 +2,14 @@
|
||||
|
||||
extern crate test;
|
||||
|
||||
use {
|
||||
rand::{thread_rng, Rng},
|
||||
rayon::ThreadPoolBuilder,
|
||||
solana_gossip::{
|
||||
crds::Crds, crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS, crds_value::CrdsValue,
|
||||
},
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::collections::HashMap,
|
||||
test::Bencher,
|
||||
};
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::ThreadPoolBuilder;
|
||||
use solana_core::crds::Crds;
|
||||
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
|
||||
use solana_core::crds_value::CrdsValue;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::collections::HashMap;
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
fn bench_find_old_labels(bencher: &mut Bencher) {
|
@@ -2,18 +2,14 @@
|
||||
|
||||
extern crate test;
|
||||
|
||||
use {
|
||||
rand::{thread_rng, Rng},
|
||||
rayon::ThreadPoolBuilder,
|
||||
solana_gossip::{
|
||||
cluster_info::MAX_BLOOM_SIZE,
|
||||
crds::Crds,
|
||||
crds_gossip_pull::{CrdsFilter, CrdsGossipPull},
|
||||
crds_value::CrdsValue,
|
||||
},
|
||||
solana_sdk::hash,
|
||||
test::Bencher,
|
||||
};
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::ThreadPoolBuilder;
|
||||
use solana_core::cluster_info::MAX_BLOOM_SIZE;
|
||||
use solana_core::crds::Crds;
|
||||
use solana_core::crds_gossip_pull::{CrdsFilter, CrdsGossipPull};
|
||||
use solana_core::crds_value::CrdsValue;
|
||||
use solana_sdk::hash;
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
fn bench_hash_as_u64(bencher: &mut Bencher) {
|
@@ -2,17 +2,15 @@
|
||||
|
||||
extern crate test;
|
||||
|
||||
use {
|
||||
rand::{thread_rng, Rng},
|
||||
solana_gossip::{
|
||||
crds::{Crds, VersionedCrdsValue},
|
||||
crds_shards::CrdsShards,
|
||||
crds_value::CrdsValue,
|
||||
},
|
||||
solana_sdk::timing::timestamp,
|
||||
std::iter::repeat_with,
|
||||
test::Bencher,
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_core::{
|
||||
crds::{Crds, VersionedCrdsValue},
|
||||
crds_shards::CrdsShards,
|
||||
crds_value::CrdsValue,
|
||||
};
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::iter::repeat_with;
|
||||
use test::Bencher;
|
||||
|
||||
const CRDS_SHARDS_BITS: u32 = 8;
|
||||
|
@@ -3,16 +3,12 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use {
|
||||
solana_ledger::poh::Poh,
|
||||
solana_poh::poh_service::DEFAULT_HASHES_PER_BATCH,
|
||||
solana_sdk::hash::Hash,
|
||||
std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc, Mutex,
|
||||
},
|
||||
test::Bencher,
|
||||
};
|
||||
use solana_core::poh_service::DEFAULT_HASHES_PER_BATCH;
|
||||
use solana_ledger::poh::Poh;
|
||||
use solana_sdk::hash::Hash;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use test::Bencher;
|
||||
|
||||
const NUM_HASHES: u64 = 30_000; // Should require ~10ms on a 2017 MacBook Pro
|
||||
|
@@ -1,15 +1,11 @@
|
||||
#![feature(test)]
|
||||
extern crate test;
|
||||
|
||||
use {
|
||||
solana_ledger::entry::{next_entry_mut, Entry, EntrySlice},
|
||||
solana_sdk::{
|
||||
hash::{hash, Hash},
|
||||
signature::{Keypair, Signer},
|
||||
system_transaction,
|
||||
},
|
||||
test::Bencher,
|
||||
};
|
||||
use solana_ledger::entry::{next_entry_mut, Entry, EntrySlice};
|
||||
use solana_sdk::hash::{hash, Hash};
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::system_transaction;
|
||||
use test::Bencher;
|
||||
|
||||
const NUM_HASHES: u64 = 400;
|
||||
const NUM_ENTRIES: usize = 800;
|
@@ -4,16 +4,16 @@ extern crate solana_core;
|
||||
extern crate test;
|
||||
|
||||
use log::*;
|
||||
use solana_core::cluster_info::{ClusterInfo, Node};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_core::max_slots::MaxSlots;
|
||||
use solana_core::retransmit_stage::retransmitter;
|
||||
use solana_gossip::cluster_info::{ClusterInfo, Node};
|
||||
use solana_gossip::contact_info::ContactInfo;
|
||||
use solana_ledger::entry::Entry;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
|
||||
use solana_ledger::shred::Shredder;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_perf::packet::{Packet, Packets};
|
||||
use solana_rpc::max_slots::MaxSlots;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::bank_forks::BankForks;
|
||||
use solana_sdk::hash::Hash;
|
||||
@@ -39,12 +39,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
const NUM_PEERS: usize = 4;
|
||||
let mut peer_sockets = Vec::new();
|
||||
for _ in 0..NUM_PEERS {
|
||||
// This ensures that cluster_info.id() is the root of turbine
// retransmit tree and so the shreds are retransmitted to all other
// nodes in the cluster.
|
||||
let id = std::iter::repeat_with(pubkey::new_rand)
|
||||
.find(|pk| cluster_info.id() < *pk)
|
||||
.unwrap();
|
||||
let id = pubkey::new_rand();
|
||||
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let mut contact_info = ContactInfo::new_localhost(&id, timestamp());
|
||||
contact_info.tvu = socket.local_addr().unwrap();
|
||||
|
@@ -8,7 +8,7 @@ use raptorq::{Decoder, Encoder};
|
||||
use solana_ledger::entry::{create_ticks, Entry};
|
||||
use solana_ledger::shred::{
|
||||
max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, Shred, Shredder,
|
||||
MAX_DATA_SHREDS_PER_FEC_BLOCK, SHRED_PAYLOAD_SIZE, SIZE_OF_CODING_SHRED_HEADERS,
|
||||
MAX_DATA_SHREDS_PER_FEC_BLOCK, SHRED_PAYLOAD_SIZE, SIZE_OF_DATA_SHRED_IGNORED_TAIL,
|
||||
SIZE_OF_DATA_SHRED_PAYLOAD,
|
||||
};
|
||||
use solana_perf::test_tx;
|
||||
@@ -55,7 +55,7 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
|
||||
|
||||
fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
|
||||
let data_shreds = make_shreds(num_shreds);
|
||||
let valid_shred_data_len = (SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS) as usize;
|
||||
let valid_shred_data_len = (SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL) as usize;
|
||||
let mut data: Vec<u8> = vec![0; num_shreds * valid_shred_data_len];
|
||||
for (i, shred) in (data_shreds[0..num_shreds]).iter().enumerate() {
|
||||
data[i * valid_shred_data_len..(i + 1) * valid_shred_data_len]
|
||||
@@ -163,7 +163,7 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
|
||||
fn bench_shredder_coding_raptorq(bencher: &mut Bencher) {
|
||||
let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK;
|
||||
let data = make_concatenated_shreds(symbol_count as usize);
|
||||
let valid_shred_data_len = (SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS) as usize;
|
||||
let valid_shred_data_len = (SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL) as usize;
|
||||
bencher.iter(|| {
|
||||
let encoder = Encoder::with_defaults(&data, valid_shred_data_len as u16);
|
||||
encoder.get_encoded_packets(symbol_count);
|
||||
@@ -174,7 +174,7 @@ fn bench_shredder_coding_raptorq(bencher: &mut Bencher) {
|
||||
fn bench_shredder_decoding_raptorq(bencher: &mut Bencher) {
|
||||
let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK;
|
||||
let data = make_concatenated_shreds(symbol_count as usize);
|
||||
let valid_shred_data_len = (SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS) as usize;
|
||||
let valid_shred_data_len = (SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL) as usize;
|
||||
let encoder = Encoder::with_defaults(&data, valid_shred_data_len as u16);
|
||||
let mut packets = encoder.get_encoded_packets(symbol_count as u32);
|
||||
packets.shuffle(&mut rand::thread_rng());
|
||||
|
@@ -4,9 +4,11 @@
|
||||
// hash on gossip. Monitor gossip for messages from validators in the --trusted-validators
|
||||
// set and halt the node if a mismatch is detected.
|
||||
|
||||
use crate::snapshot_packager_service::PendingSnapshotPackage;
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES},
|
||||
snapshot_packager_service::PendingSnapshotPackage,
|
||||
};
|
||||
use rayon::ThreadPool;
|
||||
use solana_gossip::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES};
|
||||
use solana_runtime::{
|
||||
accounts_db,
|
||||
snapshot_package::{AccountsPackage, AccountsPackagePre, AccountsPackageReceiver},
|
||||
@@ -41,7 +43,7 @@ impl AccountsHashVerifier {
|
||||
let exit = exit.clone();
|
||||
let cluster_info = cluster_info.clone();
|
||||
let t_accounts_hash_verifier = Builder::new()
|
||||
.name("solana-hash-accounts".to_string())
|
||||
.name("solana-accounts-hash".to_string())
|
||||
.spawn(move || {
|
||||
let mut hashes = vec![];
|
||||
let mut thread_pool_storage = None;
|
||||
@@ -216,7 +218,8 @@ impl AccountsHashVerifier {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_gossip::{cluster_info::make_accounts_hashes_message, contact_info::ContactInfo};
|
||||
use crate::cluster_info::make_accounts_hashes_message;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use solana_runtime::bank_forks::ArchiveFormat;
|
||||
use solana_runtime::snapshot_utils::SnapshotVersion;
|
||||
use solana_sdk::{
|
||||
|
@@ -1,21 +1,27 @@
|
||||
//! The `banking_stage` processes Transaction messages. It is intended to be used
//! to construct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
|
||||
use crate::packet_hasher::PacketHasher;
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
packet_hasher::PacketHasher,
|
||||
poh_recorder::{PohRecorder, PohRecorderError, TransactionRecorder, WorkingBankEntry},
|
||||
poh_service::{self, PohService},
|
||||
};
|
||||
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
|
||||
use itertools::Itertools;
|
||||
use lru::LruCache;
|
||||
use retain_mut::RetainMut;
|
||||
use solana_gossip::cluster_info::ClusterInfo;
|
||||
use solana_ledger::{blockstore_processor::TransactionStatusSender, entry::hash_transactions};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_ledger::{
|
||||
blockstore::Blockstore, blockstore_processor::TransactionStatusSender,
|
||||
entry::hash_transactions, leader_schedule_cache::LeaderScheduleCache,
|
||||
};
|
||||
use solana_measure::{measure::Measure, thread_mem_usage};
|
||||
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
|
||||
use solana_perf::{
|
||||
cuda_runtime::PinnedVec,
|
||||
packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
|
||||
perf_libs,
|
||||
};
|
||||
use solana_poh::poh_recorder::{PohRecorder, PohRecorderError, TransactionRecorder};
|
||||
use solana_runtime::{
|
||||
accounts_db::ErrorCounters,
|
||||
bank::{
|
||||
@@ -33,6 +39,7 @@ use solana_sdk::{
|
||||
MAX_TRANSACTION_FORWARDING_DELAY_GPU,
|
||||
},
|
||||
message::Message,
|
||||
poh_config::PohConfig,
|
||||
pubkey::Pubkey,
|
||||
short_vec::decode_shortu16_len,
|
||||
signature::Signature,
|
||||
@@ -50,7 +57,8 @@ use std::{
|
||||
mem::size_of,
|
||||
net::UdpSocket,
|
||||
ops::DerefMut,
|
||||
sync::atomic::{AtomicU64, AtomicUsize, Ordering},
|
||||
sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
|
||||
sync::mpsc::Receiver,
|
||||
sync::{Arc, Mutex},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
@@ -271,6 +279,7 @@ impl BankingStage {
|
||||
Builder::new()
|
||||
.name("solana-banking-stage-tx".to_string())
|
||||
.spawn(move || {
|
||||
thread_mem_usage::datapoint("solana-banking-stage-tx");
|
||||
Self::process_loop(
|
||||
my_pubkey,
|
||||
&verified_receiver,
|
||||
@@ -976,16 +985,15 @@ impl BankingStage {
|
||||
fn transactions_from_packets(
|
||||
msgs: &Packets,
|
||||
transaction_indexes: &[usize],
|
||||
secp256k1_program_enabled: bool,
|
||||
libsecp256k1_0_5_upgrade_enabled: bool,
|
||||
) -> (Vec<HashedTransaction<'static>>, Vec<usize>) {
|
||||
transaction_indexes
|
||||
.iter()
|
||||
.filter_map(|tx_index| {
|
||||
let p = &msgs.packets[*tx_index];
|
||||
let tx: Transaction = limited_deserialize(&p.data[0..p.meta.size]).ok()?;
|
||||
if secp256k1_program_enabled {
|
||||
tx.verify_precompiles().ok()?;
|
||||
}
|
||||
tx.verify_precompiles(libsecp256k1_0_5_upgrade_enabled)
|
||||
.ok()?;
|
||||
let message_bytes = Self::packet_message(p)?;
|
||||
let message_hash = Message::hash_raw_message(message_bytes);
|
||||
Some((
|
||||
@@ -1049,7 +1057,7 @@ impl BankingStage {
|
||||
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
|
||||
msgs,
|
||||
&packet_indexes,
|
||||
bank.secp256k1_program_enabled(),
|
||||
bank.libsecp256k1_0_5_upgrade_enabled(),
|
||||
);
|
||||
packet_conversion_time.stop();
|
||||
|
||||
@@ -1120,7 +1128,7 @@ impl BankingStage {
|
||||
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
|
||||
msgs,
|
||||
&transaction_indexes,
|
||||
bank.secp256k1_program_enabled(),
|
||||
bank.libsecp256k1_0_5_upgrade_enabled(),
|
||||
);
|
||||
|
||||
let tx_count = transaction_to_packet_indexes.len();
|
||||
@@ -1384,29 +1392,65 @@ fn next_leader_tpu_forwards(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_test_recorder(
|
||||
bank: &Arc<Bank>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
poh_config: Option<PohConfig>,
|
||||
) -> (
|
||||
Arc<AtomicBool>,
|
||||
Arc<Mutex<PohRecorder>>,
|
||||
PohService,
|
||||
Receiver<WorkingBankEntry>,
|
||||
) {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let poh_config = Arc::new(poh_config.unwrap_or_default());
|
||||
let (mut poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
bank.slot(),
|
||||
Some((4, 4)),
|
||||
bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
blockstore,
|
||||
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
|
||||
&poh_config,
|
||||
exit.clone(),
|
||||
);
|
||||
poh_recorder.set_bank(&bank);
|
||||
|
||||
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
||||
let poh_service = PohService::new(
|
||||
poh_recorder.clone(),
|
||||
&poh_config,
|
||||
&exit,
|
||||
bank.ticks_per_slot(),
|
||||
poh_service::DEFAULT_PINNED_CPU_CORE,
|
||||
poh_service::DEFAULT_HASHES_PER_BATCH,
|
||||
record_receiver,
|
||||
);
|
||||
|
||||
(exit, poh_recorder, poh_service, entry_receiver)
|
||||
}
|
||||
|
||||
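The `create_test_recorder` helper above, which one side of this diff defines in `banking_stage` and the other imports from `solana_poh::poh_recorder`, wires a test `Bank` to a `PohRecorder`/`PohService` pair. A rough usage sketch mirroring the setup the benches in this diff already use; constructor signatures for `Bank` and `Blockstore` are assumed from that context:

use solana_ledger::{
    blockstore::Blockstore,
    genesis_utils::{create_genesis_config, GenesisConfigInfo},
    get_tmp_ledger_path,
};
use solana_runtime::bank::Bank;
use std::sync::{atomic::Ordering, Arc};

fn poh_smoke_test() {
    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
    let bank = Arc::new(Bank::new(&genesis_config));
    let blockstore = Arc::new(Blockstore::open(&get_tmp_ledger_path!()).expect("ledger should open"));
    // `create_test_recorder` is the helper shown above; default PohConfig.
    // Returns an exit flag, the recorder, the PoH service thread, and the
    // channel on which working-bank entries arrive.
    let (exit, _poh_recorder, poh_service, _entry_receiver) =
        create_test_recorder(&bank, &blockstore, None);
    // ...drive BankingStage or record entries here...
    exit.store(true, Ordering::Relaxed);
    poh_service.join().unwrap();
}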
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
cluster_info::Node, poh_recorder::Record, poh_recorder::WorkingBank,
|
||||
transaction_status_service::TransactionStatusService,
|
||||
};
|
||||
use crossbeam_channel::unbounded;
|
||||
use itertools::Itertools;
|
||||
use solana_gossip::cluster_info::Node;
|
||||
use solana_ledger::{
|
||||
blockstore::{entries_to_test_shreds, Blockstore},
|
||||
blockstore::entries_to_test_shreds,
|
||||
entry::{next_entry, Entry, EntrySlice},
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
};
|
||||
use solana_perf::packet::to_packets_chunked;
|
||||
use solana_poh::{
|
||||
poh_recorder::{create_test_recorder, Record, WorkingBank, WorkingBankEntry},
|
||||
poh_service::PohService,
|
||||
};
|
||||
use solana_rpc::transaction_status_service::TransactionStatusService;
|
||||
use solana_sdk::{
|
||||
hash::Hash,
|
||||
instruction::InstructionError,
|
||||
poh_config::PohConfig,
|
||||
signature::{Keypair, Signer},
|
||||
system_instruction::SystemError,
|
||||
system_transaction,
|
||||
@@ -1416,10 +1460,7 @@ mod tests {
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
path::Path,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::Receiver,
|
||||
},
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
thread::sleep,
|
||||
};
|
||||
|
||||
@@ -1507,7 +1548,7 @@ mod tests {
|
||||
.collect();
|
||||
trace!("done");
|
||||
assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize);
|
||||
assert!(entries.verify(&start_hash));
|
||||
assert_eq!(entries.verify(&start_hash), true);
|
||||
assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
|
||||
banking_stage.join().unwrap();
|
||||
}
|
||||
@@ -1616,7 +1657,7 @@ mod tests {
|
||||
.map(|(_bank, (entry, _tick_height))| entry)
|
||||
.collect();
|
||||
|
||||
assert!(entries.verify(&blockhash));
|
||||
assert_eq!(entries.verify(&blockhash), true);
|
||||
if !entries.is_empty() {
|
||||
blockhash = entries.last().unwrap().hash;
|
||||
for entry in entries {
|
||||
@@ -2084,7 +2125,7 @@ mod tests {
|
||||
}
|
||||
trace!("done ticking");
|
||||
|
||||
assert!(done);
|
||||
assert_eq!(done, true);
|
||||
|
||||
let transactions = vec![system_transaction::transfer(
|
||||
&mint_keypair,
|
||||
|
@@ -1,13 +1,11 @@
use {
crate::{bigtable_upload, blockstore::Blockstore},
solana_runtime::commitment::BlockCommitmentCache,
std::{
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
},
tokio::runtime::Runtime,
use solana_ledger::blockstore::Blockstore;
use solana_runtime::commitment::BlockCommitmentCache;
use std::{
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
};
use tokio::runtime::Runtime;

// Delay uploading the largest confirmed root for this many slots. This is done in an attempt to
// ensure that the `CacheBlockMetaService` has had enough time to add the block time for the root
@@ -70,7 +68,7 @@ impl BigTableUploadService {
continue;
}

let result = runtime.block_on(bigtable_upload::upload_confirmed_blocks(
let result = runtime.block_on(solana_ledger::bigtable_upload::upload_confirmed_blocks(
blockstore.clone(),
bigtable_ledger_storage.clone(),
start_slot,
@@ -1,30 +1,29 @@
|
||||
//! A stage to broadcast data from a leader node to validators
|
||||
#![allow(clippy::rc_buffer)]
|
||||
use self::{
|
||||
broadcast_duplicates_run::BroadcastDuplicatesRun,
|
||||
broadcast_fake_shreds_run::BroadcastFakeShredsRun, broadcast_metrics::*,
|
||||
fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
|
||||
standard_broadcast_run::StandardBroadcastRun,
|
||||
};
|
||||
use crate::result::{Error, Result};
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
|
||||
use crate::weighted_shuffle::weighted_best;
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, ClusterInfoError},
|
||||
poh_recorder::WorkingBankEntry,
|
||||
result::{Error, Result},
|
||||
};
|
||||
use crossbeam_channel::{
|
||||
Receiver as CrossbeamReceiver, RecvTimeoutError as CrossbeamRecvTimeoutError,
|
||||
Sender as CrossbeamSender,
|
||||
};
|
||||
use solana_gossip::{
|
||||
cluster_info::{self, ClusterInfo, ClusterInfoError},
|
||||
contact_info::ContactInfo,
|
||||
crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
|
||||
weighted_shuffle::weighted_best,
|
||||
};
|
||||
use solana_ledger::{blockstore::Blockstore, shred::Shred};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
|
||||
use solana_poh::poh_recorder::WorkingBankEntry;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey};
|
||||
use solana_streamer::sendmmsg::send_mmsg;
|
||||
use solana_streamer::{sendmmsg::send_mmsg, socket::is_global};
|
||||
use std::sync::atomic::AtomicU64;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
@@ -36,7 +35,6 @@ use std::{
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
mod broadcast_duplicates_run;
|
||||
mod broadcast_fake_shreds_run;
|
||||
pub mod broadcast_metrics;
|
||||
pub(crate) mod broadcast_utils;
|
||||
@@ -54,20 +52,11 @@ pub enum BroadcastStageReturnType {
|
||||
ChannelDisconnected,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Clone, Debug)]
|
||||
pub struct BroadcastDuplicatesConfig {
|
||||
/// Percentage of stake to send different version of slots to
|
||||
pub stake_partition: u8,
|
||||
/// Number of slots to wait before sending duplicate shreds
|
||||
pub duplicate_send_delay: usize,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Clone, Debug)]
|
||||
pub enum BroadcastStageType {
|
||||
Standard,
|
||||
FailEntryVerification,
|
||||
BroadcastFakeShreds,
|
||||
BroadcastDuplicates(BroadcastDuplicatesConfig),
|
||||
}
|
||||
|
||||
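To make the two knobs documented above concrete, a small illustrative fragment; it is not part of the diff and assumes the surrounding crate's `BroadcastDuplicatesConfig` and `BroadcastStageType` are in scope, with made-up values:

// Partition roughly 10% of stake onto the duplicate version of each slot, and
// hold the duplicate shreds back for 5 slots before they are broadcast.
let config = BroadcastDuplicatesConfig {
    stake_partition: 10,
    duplicate_send_delay: 5,
};
let broadcast_type = BroadcastStageType::BroadcastDuplicates(config);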
impl BroadcastStageType {
|
||||
@@ -112,16 +101,6 @@ impl BroadcastStageType {
|
||||
blockstore,
|
||||
BroadcastFakeShredsRun::new(keypair, 0, shred_version),
|
||||
),
|
||||
|
||||
BroadcastStageType::BroadcastDuplicates(config) => BroadcastStage::new(
|
||||
sock,
|
||||
cluster_info,
|
||||
receiver,
|
||||
retransmit_slots_receiver,
|
||||
exit_sender,
|
||||
blockstore,
|
||||
BroadcastDuplicatesRun::new(keypair, shred_version, config.clone()),
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -384,6 +363,7 @@ pub fn get_broadcast_peers(
|
||||
cluster_info: &ClusterInfo,
|
||||
stakes: Option<&HashMap<Pubkey, u64>>,
|
||||
) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
|
||||
use crate::cluster_info;
|
||||
let mut peers = cluster_info.tvu_peers();
|
||||
let peers_and_stakes = cluster_info::stake_weight_peers(&mut peers, stakes);
|
||||
(peers, peers_and_stakes)
|
||||
@@ -407,10 +387,15 @@ pub fn broadcast_shreds(
|
||||
let mut shred_select = Measure::start("shred_select");
|
||||
let packets: Vec<_> = shreds
|
||||
.iter()
|
||||
.map(|shred| {
|
||||
.filter_map(|shred| {
|
||||
let broadcast_index = weighted_best(&peers_and_stakes, shred.seed());
|
||||
let node = &peers[broadcast_index];
|
||||
|
||||
(&shred.payload, &peers[broadcast_index].tvu)
|
||||
if is_global(&node.tvu) {
|
||||
Some((&shred.payload, &node.tvu))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
shred_select.stop();
|
||||
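The change above swaps `.map(...)` for `.filter_map(...)` so that shreds are only addressed to peers whose TVU socket looks globally routable. A rough standalone illustration of that filter; the exact rules of `solana_streamer::socket::is_global` are assumed and simplified here:

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// Simplified stand-in for the real is_global check: reject unspecified,
// loopback, private, and link-local addresses.
fn is_global(addr: &SocketAddr) -> bool {
    match addr.ip() {
        IpAddr::V4(ip) => {
            !(ip.is_unspecified() || ip.is_loopback() || ip.is_private() || ip.is_link_local())
        }
        IpAddr::V6(ip) => !(ip.is_unspecified() || ip.is_loopback()),
    }
}

fn main() {
    let public = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(34, 83, 10, 7)), 8001);
    let private = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 5)), 8001);
    assert!(is_global(&public));
    assert!(!is_global(&private));
}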
@@ -460,8 +445,8 @@ fn num_live_peers(peers: &[ContactInfo]) -> i64 {
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
use super::*;
|
||||
use crate::cluster_info::{ClusterInfo, Node};
|
||||
use crossbeam_channel::unbounded;
|
||||
use solana_gossip::cluster_info::{ClusterInfo, Node};
|
||||
use solana_ledger::{
|
||||
blockstore::{make_slot_entries, Blockstore},
|
||||
entry::create_ticks,
|
||||
|
@@ -1,333 +0,0 @@
|
||||
use super::broadcast_utils::ReceiveResults;
|
||||
use super::*;
|
||||
use log::*;
|
||||
use solana_ledger::entry::{create_ticks, Entry, EntrySlice};
|
||||
use solana_ledger::shred::Shredder;
|
||||
use solana_runtime::blockhash_queue::BlockhashQueue;
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_sdk::fee_calculator::FeeCalculator;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::transaction::Transaction;
|
||||
use std::collections::VecDeque;
|
||||
use std::sync::Mutex;
|
||||
|
||||
// Queue which facilitates delivering shreds with a delay
|
||||
type DelayedQueue = VecDeque<(Option<Pubkey>, Option<Vec<Shred>>)>;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(super) struct BroadcastDuplicatesRun {
|
||||
config: BroadcastDuplicatesConfig,
|
||||
// Local queue for broadcast to track which duplicate blockhashes we've sent
|
||||
duplicate_queue: BlockhashQueue,
|
||||
// Shared queue between broadcast and transmit threads
|
||||
delayed_queue: Arc<Mutex<DelayedQueue>>,
|
||||
// Buffer for duplicate entries
|
||||
duplicate_entries_buffer: Vec<Entry>,
|
||||
last_duplicate_entry_hash: Hash,
|
||||
last_broadcast_slot: Slot,
|
||||
next_shred_index: u32,
|
||||
shred_version: u16,
|
||||
keypair: Arc<Keypair>,
|
||||
}
|
||||
|
||||
impl BroadcastDuplicatesRun {
|
||||
pub(super) fn new(
|
||||
keypair: Arc<Keypair>,
|
||||
shred_version: u16,
|
||||
config: BroadcastDuplicatesConfig,
|
||||
) -> Self {
|
||||
let mut delayed_queue = DelayedQueue::new();
|
||||
delayed_queue.resize(config.duplicate_send_delay, (None, None));
|
||||
Self {
|
||||
config,
|
||||
delayed_queue: Arc::new(Mutex::new(delayed_queue)),
|
||||
duplicate_queue: BlockhashQueue::default(),
|
||||
duplicate_entries_buffer: vec![],
|
||||
next_shred_index: u32::MAX,
|
||||
last_broadcast_slot: 0,
|
||||
last_duplicate_entry_hash: Hash::default(),
|
||||
shred_version,
|
||||
keypair,
|
||||
}
|
||||
}
|
||||
|
||||
fn queue_or_create_duplicate_entries(
|
||||
&mut self,
|
||||
bank: &Arc<Bank>,
|
||||
receive_results: &ReceiveResults,
|
||||
) -> (Vec<Entry>, u32) {
|
||||
// If the last entry hash is default, grab the last blockhash from the parent bank
|
||||
if self.last_duplicate_entry_hash == Hash::default() {
|
||||
self.last_duplicate_entry_hash = bank.last_blockhash();
|
||||
}
|
||||
|
||||
// Create duplicate entries by..
|
||||
// 1) rearranging real entries so that all transaction entries are moved to
|
||||
// the front and tick entries are moved to the back.
|
||||
// 2) setting all transaction entries to zero hashes and all tick entries to `hashes_per_tick`.
|
||||
// 3) removing any transactions which reference blockhashes which aren't in the
|
||||
// duplicate blockhash queue.
|
||||
let (duplicate_entries, next_shred_index) = if bank.slot() > MINIMUM_DUPLICATE_SLOT {
|
||||
let mut tx_entries: Vec<Entry> = receive_results
|
||||
.entries
|
||||
.iter()
|
||||
.filter_map(|entry| {
|
||||
if entry.is_tick() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let transactions: Vec<Transaction> = entry
|
||||
.transactions
|
||||
.iter()
|
||||
.filter(|tx| {
|
||||
self.duplicate_queue
|
||||
.get_hash_age(&tx.message.recent_blockhash)
|
||||
.is_some()
|
||||
})
|
||||
.cloned()
|
||||
.collect();
|
||||
if !transactions.is_empty() {
|
||||
Some(Entry::new_mut(
|
||||
&mut self.last_duplicate_entry_hash,
|
||||
&mut 0,
|
||||
transactions,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
let mut tick_entries = create_ticks(
|
||||
receive_results.entries.tick_count(),
|
||||
bank.hashes_per_tick().unwrap_or_default(),
|
||||
self.last_duplicate_entry_hash,
|
||||
);
|
||||
self.duplicate_entries_buffer.append(&mut tx_entries);
|
||||
self.duplicate_entries_buffer.append(&mut tick_entries);
|
||||
|
||||
// Only send out duplicate entries when the block is finished otherwise the
|
||||
// recipient will start repairing for shreds they haven't received yet and
|
||||
// hit duplicate slot issues before we want them to.
|
||||
let entries = if receive_results.last_tick_height == bank.max_tick_height() {
|
||||
self.duplicate_entries_buffer.drain(..).collect()
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
// Set next shred index to 0 since we are sending the full slot
|
||||
(entries, 0)
|
||||
} else {
|
||||
// Send real entries until we hit min duplicate slot
|
||||
(receive_results.entries.clone(), self.next_shred_index)
|
||||
};
|
||||
|
||||
// Save last duplicate entry hash to avoid invalid entry hash errors
|
||||
if let Some(last_duplicate_entry) = duplicate_entries.last() {
|
||||
self.last_duplicate_entry_hash = last_duplicate_entry.hash;
|
||||
}
|
||||
|
||||
(duplicate_entries, next_shred_index)
|
||||
}
|
||||
}
|
||||
|
||||
/// Duplicate slots should only be sent once all validators have started.
|
||||
/// This constant is intended to be used as a buffer so that all validators
|
||||
/// are live before sending duplicate slots.
|
||||
pub const MINIMUM_DUPLICATE_SLOT: Slot = 20;
|
||||
|
||||
impl BroadcastRun for BroadcastDuplicatesRun {
|
||||
fn run(
|
||||
&mut self,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
receiver: &Receiver<WorkingBankEntry>,
|
||||
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
|
||||
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
|
||||
) -> Result<()> {
|
||||
// 1) Pull entries from banking stage
|
||||
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
|
||||
let bank = receive_results.bank.clone();
|
||||
let last_tick_height = receive_results.last_tick_height;
|
||||
|
||||
if self.next_shred_index == u32::MAX {
|
||||
self.next_shred_index = blockstore
|
||||
.meta(bank.slot())
|
||||
.expect("Database error")
|
||||
.map(|meta| meta.consumed)
|
||||
.unwrap_or(0) as u32
|
||||
}
|
||||
|
||||
// We were not the leader, but just became leader again
|
||||
if bank.slot() > self.last_broadcast_slot + 1 {
|
||||
self.last_duplicate_entry_hash = Hash::default();
|
||||
}
|
||||
self.last_broadcast_slot = bank.slot();
|
||||
|
||||
let shredder = Shredder::new(
|
||||
bank.slot(),
|
||||
bank.parent().unwrap().slot(),
|
||||
self.keypair.clone(),
|
||||
(bank.tick_height() % bank.ticks_per_slot()) as u8,
|
||||
self.shred_version,
|
||||
)
|
||||
.expect("Expected to create a new shredder");
|
||||
|
||||
let (data_shreds, coding_shreds, last_shred_index) = shredder.entries_to_shreds(
|
||||
&receive_results.entries,
|
||||
last_tick_height == bank.max_tick_height(),
|
||||
self.next_shred_index,
|
||||
);
|
||||
|
||||
let (duplicate_entries, next_duplicate_shred_index) =
|
||||
self.queue_or_create_duplicate_entries(&bank, &receive_results);
|
||||
let (duplicate_data_shreds, duplicate_coding_shreds, _) = if !duplicate_entries.is_empty() {
|
||||
shredder.entries_to_shreds(
|
||||
&duplicate_entries,
|
||||
last_tick_height == bank.max_tick_height(),
|
||||
next_duplicate_shred_index,
|
||||
)
|
||||
} else {
|
||||
(vec![], vec![], 0)
|
||||
};
|
||||
|
||||
// Manually track the shred index because relying on slot meta consumed is racy
|
||||
if last_tick_height == bank.max_tick_height() {
|
||||
self.next_shred_index = 0;
|
||||
self.duplicate_queue
|
||||
.register_hash(&self.last_duplicate_entry_hash, &FeeCalculator::default());
|
||||
} else {
|
||||
self.next_shred_index = last_shred_index;
|
||||
}
|
||||
|
||||
// Partition network with duplicate and real shreds based on stake
|
||||
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
let mut duplicate_recipients = HashMap::new();
|
||||
let mut real_recipients = HashMap::new();
|
||||
|
||||
let mut stakes: Vec<(Pubkey, u64)> = bank
|
||||
.epoch_staked_nodes(bank_epoch)
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.filter(|(pubkey, _)| *pubkey != self.keypair.pubkey())
|
||||
.collect();
|
||||
stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| {
|
||||
if r_stake == l_stake {
|
||||
l_key.cmp(&r_key)
|
||||
} else {
|
||||
r_stake.cmp(&l_stake)
|
||||
}
|
||||
});
|
||||
|
||||
let highest_staked_node = stakes.first().cloned().map(|x| x.0);
|
||||
let stake_total: u64 = stakes.iter().map(|(_, stake)| *stake).sum();
|
||||
let mut cumulative_stake: u64 = 0;
|
||||
for (pubkey, stake) in stakes.into_iter().rev() {
|
||||
cumulative_stake += stake;
|
||||
if (100 * cumulative_stake / stake_total) as u8 <= self.config.stake_partition {
|
||||
duplicate_recipients.insert(pubkey, stake);
|
||||
} else {
|
||||
real_recipients.insert(pubkey, stake);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(highest_staked_node) = highest_staked_node {
|
||||
if bank.slot() > MINIMUM_DUPLICATE_SLOT && last_tick_height == bank.max_tick_height() {
|
||||
warn!(
|
||||
"{} sent duplicate slot {} to nodes: {:?}",
|
||||
self.keypair.pubkey(),
|
||||
bank.slot(),
|
||||
&duplicate_recipients,
|
||||
);
|
||||
warn!(
|
||||
"Duplicate shreds for slot {} will be broadcast in {} slot(s)",
|
||||
bank.slot(),
|
||||
self.config.duplicate_send_delay
|
||||
);
|
||||
|
||||
let delayed_shreds: Option<Vec<Shred>> = vec![
|
||||
duplicate_data_shreds.last().cloned(),
|
||||
data_shreds.last().cloned(),
|
||||
]
|
||||
.into_iter()
|
||||
.collect();
|
||||
self.delayed_queue
|
||||
.lock()
|
||||
.unwrap()
|
||||
.push_back((Some(highest_staked_node), delayed_shreds));
|
||||
}
|
||||
}
|
||||
|
||||
let duplicate_recipients = Arc::new(duplicate_recipients);
|
||||
let real_recipients = Arc::new(real_recipients);
|
||||
|
||||
let data_shreds = Arc::new(data_shreds);
|
||||
blockstore_sender.send((data_shreds.clone(), None))?;
|
||||
|
||||
// 3) Start broadcast step
|
||||
socket_sender.send((
|
||||
(
|
||||
Some(duplicate_recipients.clone()),
|
||||
Arc::new(duplicate_data_shreds),
|
||||
),
|
||||
None,
|
||||
))?;
|
||||
socket_sender.send((
|
||||
(
|
||||
Some(duplicate_recipients),
|
||||
Arc::new(duplicate_coding_shreds),
|
||||
),
|
||||
None,
|
||||
))?;
|
||||
socket_sender.send(((Some(real_recipients.clone()), data_shreds), None))?;
|
||||
socket_sender.send(((Some(real_recipients), Arc::new(coding_shreds)), None))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
fn transmit(
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<TransmitReceiver>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
sock: &UdpSocket,
|
||||
) -> Result<()> {
|
||||
// Check the delay queue for shreds that are ready to be sent
|
||||
let (delayed_recipient, delayed_shreds) = {
|
||||
let mut delayed_deque = self.delayed_queue.lock().unwrap();
|
||||
if delayed_deque.len() > self.config.duplicate_send_delay {
|
||||
delayed_deque.pop_front().unwrap()
|
||||
} else {
|
||||
(None, None)
|
||||
}
|
||||
};
|
||||
|
||||
let ((stakes, shreds), _) = receiver.lock().unwrap().recv()?;
|
||||
let stakes = stakes.unwrap();
|
||||
for peer in cluster_info.tvu_peers() {
|
||||
// Forward shreds to circumvent gossip
|
||||
if stakes.get(&peer.id).is_some() {
|
||||
shreds.iter().for_each(|shred| {
|
||||
sock.send_to(&shred.payload, &peer.tvu_forwards).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
// After a delay, broadcast duplicate shreds to a single node
|
||||
if let Some(shreds) = delayed_shreds.as_ref() {
|
||||
if Some(peer.id) == delayed_recipient {
|
||||
shreds.iter().for_each(|shred| {
|
||||
sock.send_to(&shred.payload, &peer.tvu).unwrap();
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
fn record(
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<RecordReceiver>>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
) -> Result<()> {
|
||||
let (data_shreds, _) = receiver.lock().unwrap().recv()?;
|
||||
blockstore.insert_shreds(data_shreds.to_vec(), None, true)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
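The stake-partition loop in the `run` method above decides which validators receive the duplicate shreds: nodes are walked from lowest to highest stake and assigned to the duplicate set until the configured percentage of total stake is covered. A standalone sketch of that rule with made-up node names and stakes (the real code keys the maps by `Pubkey` and also breaks stake ties by key):

fn partition_by_stake(
    mut stakes: Vec<(String, u64)>,
    stake_partition: u8,
) -> (Vec<String>, Vec<String>) {
    // Sort descending by stake, then walk from the back (lowest stake first),
    // mirroring `stakes.into_iter().rev()` in the diff above.
    stakes.sort_by(|a, b| b.1.cmp(&a.1));
    let stake_total: u64 = stakes.iter().map(|(_, s)| *s).sum();
    let (mut duplicate, mut real) = (Vec::new(), Vec::new());
    let mut cumulative = 0u64;
    for (node, stake) in stakes.into_iter().rev() {
        cumulative += stake;
        if (100 * cumulative / stake_total) as u8 <= stake_partition {
            duplicate.push(node);
        } else {
            real.push(node);
        }
    }
    (duplicate, real)
}

fn main() {
    let stakes = vec![("a".into(), 70), ("b".into(), 20), ("c".into(), 10)];
    // With stake_partition = 30, the nodes covering the bottom 30% of stake
    // ("c", then "b") get the duplicate shreds; "a" keeps the real ones.
    let (dup, real) = partition_by_stake(stakes, 30);
    assert_eq!(dup, vec!["c".to_string(), "b".to_string()]);
    assert_eq!(real, vec!["a".to_string()]);
}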
@@ -138,7 +138,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
#[cfg(test)]
mod tests {
use super::*;
use solana_gossip::contact_info::ContactInfo;
use crate::contact_info::ContactInfo;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

#[test]

@@ -1,6 +1,6 @@
use crate::poh_recorder::WorkingBankEntry;
use crate::result::Result;
use solana_ledger::{entry::Entry, shred::Shred};
use solana_poh::poh_recorder::WorkingBankEntry;
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use std::{

@@ -494,7 +494,7 @@ impl BroadcastRun for StandardBroadcastRun {
#[cfg(test)]
mod test {
use super::*;
use solana_gossip::cluster_info::{ClusterInfo, Node};
use crate::cluster_info::{ClusterInfo, Node};
use solana_ledger::genesis_utils::create_genesis_config;
use solana_ledger::{
blockstore::Blockstore, entry::create_ticks, get_tmp_ledger_path,

@@ -12,78 +12,77 @@
|
||||
//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
|
||||
//!
|
||||
//! Bank needs to provide an interface for us to query the stake weight
|
||||
use {
|
||||
crate::{
|
||||
cluster_info_metrics::{submit_gossip_stats, Counter, GossipStats, ScopedTimer},
|
||||
contact_info::ContactInfo,
|
||||
crds::Cursor,
|
||||
crds_gossip::CrdsGossip,
|
||||
crds_gossip_error::CrdsGossipError,
|
||||
crds_gossip_pull::{CrdsFilter, ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
|
||||
crds_value::{
|
||||
self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot, NodeInstance,
|
||||
SnapshotHash, Version, Vote, MAX_WALLCLOCK,
|
||||
},
|
||||
data_budget::DataBudget,
|
||||
epoch_slots::EpochSlots,
|
||||
gossip_error::GossipError,
|
||||
ping_pong::{self, PingCache, Pong},
|
||||
socketaddr, socketaddr_any,
|
||||
weighted_shuffle::weighted_shuffle,
|
||||
use crate::{
|
||||
cluster_info_metrics::{submit_gossip_stats, Counter, GossipStats, ScopedTimer},
|
||||
contact_info::ContactInfo,
|
||||
crds::Cursor,
|
||||
crds_gossip::CrdsGossip,
|
||||
crds_gossip_error::CrdsGossipError,
|
||||
crds_gossip_pull::{CrdsFilter, ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
|
||||
crds_value::{
|
||||
self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlotsIndex, LowestSlot, NodeInstance,
|
||||
SnapshotHash, Version, Vote, MAX_WALLCLOCK,
|
||||
},
|
||||
bincode::{serialize, serialized_size},
|
||||
itertools::Itertools,
|
||||
rand::{seq::SliceRandom, thread_rng, CryptoRng, Rng},
|
||||
rayon::{prelude::*, ThreadPool, ThreadPoolBuilder},
|
||||
serde::ser::Serialize,
|
||||
solana_ledger::shred::Shred,
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::{inc_new_counter_debug, inc_new_counter_error},
|
||||
solana_net_utils::{
|
||||
bind_common, bind_common_in_range, bind_in_range, find_available_port_in_range,
|
||||
multi_bind_in_range, PortRange,
|
||||
},
|
||||
solana_perf::packet::{
|
||||
limited_deserialize, to_packets_with_destination, Packet, Packets, PacketsRecycler,
|
||||
PACKET_DATA_SIZE,
|
||||
},
|
||||
solana_rayon_threadlimit::get_thread_count,
|
||||
solana_runtime::bank_forks::BankForks,
|
||||
solana_sdk::{
|
||||
clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_SLOTS_PER_EPOCH},
|
||||
feature_set::{self, FeatureSet},
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
sanitize::{Sanitize, SanitizeError},
|
||||
signature::{Keypair, Signable, Signature, Signer},
|
||||
timing::timestamp,
|
||||
transaction::Transaction,
|
||||
},
|
||||
solana_streamer::{
|
||||
packet,
|
||||
sendmmsg::multicast,
|
||||
streamer::{PacketReceiver, PacketSender},
|
||||
},
|
||||
solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY,
|
||||
std::{
|
||||
borrow::Cow,
|
||||
collections::{hash_map::Entry, HashMap, HashSet, VecDeque},
|
||||
fmt::Debug,
|
||||
fs::{self, File},
|
||||
io::BufReader,
|
||||
iter::repeat,
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket},
|
||||
ops::{Deref, DerefMut, Div},
|
||||
path::{Path, PathBuf},
|
||||
result::Result,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::{Receiver, RecvTimeoutError, Sender},
|
||||
{Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard},
|
||||
},
|
||||
thread::{sleep, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
data_budget::DataBudget,
|
||||
epoch_slots::EpochSlots,
|
||||
ping_pong::{self, PingCache, Pong},
|
||||
result::{Error, Result},
|
||||
weighted_shuffle::weighted_shuffle,
|
||||
};
|
||||
use rand::{seq::SliceRandom, CryptoRng, Rng};
|
||||
use solana_ledger::shred::Shred;
|
||||
use solana_sdk::sanitize::{Sanitize, SanitizeError};
|
||||
|
||||
use bincode::{serialize, serialized_size};
|
||||
use itertools::Itertools;
|
||||
use rand::thread_rng;
|
||||
use rayon::prelude::*;
|
||||
use rayon::{ThreadPool, ThreadPoolBuilder};
|
||||
use serde::ser::Serialize;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_measure::thread_mem_usage;
|
||||
use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
|
||||
use solana_net_utils::{
|
||||
bind_common, bind_common_in_range, bind_in_range, find_available_port_in_range,
|
||||
multi_bind_in_range, PortRange,
|
||||
};
|
||||
use solana_perf::packet::{
|
||||
limited_deserialize, to_packets_with_destination, Packet, Packets, PacketsRecycler,
|
||||
PACKET_DATA_SIZE,
|
||||
};
|
||||
use solana_rayon_threadlimit::get_thread_count;
|
||||
use solana_runtime::bank_forks::BankForks;
|
||||
use solana_sdk::{
|
||||
clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_SLOTS_PER_EPOCH},
|
||||
feature_set::{self, FeatureSet},
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signable, Signature, Signer},
|
||||
timing::timestamp,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_streamer::{
|
||||
sendmmsg::multicast,
|
||||
socket::is_global,
|
||||
streamer::{PacketReceiver, PacketSender},
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
collections::{hash_map::Entry, HashMap, HashSet, VecDeque},
|
||||
fmt::Debug,
|
||||
fs::{self, File},
|
||||
io::BufReader,
|
||||
iter::repeat,
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket},
|
||||
ops::{Deref, DerefMut, Div},
|
||||
path::{Path, PathBuf},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
{Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard},
|
||||
},
|
||||
thread::{sleep, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
pub const VALIDATOR_PORT_RANGE: PortRange = (8000, 10_000);
|
||||
@@ -213,7 +212,7 @@ pub struct ClusterInfo {
|
||||
/// The network
|
||||
pub gossip: RwLock<CrdsGossip>,
|
||||
/// The keypair that will be used to sign crds values this node generates. It is unset only in tests.
|
||||
pub keypair: Arc<Keypair>,
|
||||
pub(crate) keypair: Arc<Keypair>,
|
||||
/// Network entrypoints
|
||||
entrypoints: RwLock<Vec<ContactInfo>>,
|
||||
outbound_budget: DataBudget,
|
||||
@@ -236,7 +235,7 @@ impl Default for ClusterInfo {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)]
|
||||
pub(crate) struct PruneData {
|
||||
struct PruneData {
|
||||
/// Pubkey of the node that sent this prune data
|
||||
pubkey: Pubkey,
|
||||
/// Pubkeys of nodes that should be pruned
|
||||
@@ -271,7 +270,7 @@ impl PruneData {
|
||||
}
|
||||
|
||||
impl Sanitize for PruneData {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
|
||||
if self.wallclock >= MAX_WALLCLOCK {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
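The sanitize hook above rejects gossip values whose wallclock exceeds MAX_WALLCLOCK before they ever reach the CRDS table. A tiny, self-contained sketch of the same bounds check; the bound and types here are placeholders, not the real crds_value definitions:

// Placeholder bound; the real MAX_WALLCLOCK lives in crds_value.
const MAX_WALLCLOCK: u64 = 1_000_000_000_000_000;

#[derive(Debug)]
enum SanitizeError {
    ValueOutOfBounds,
}

struct PruneDataSketch {
    wallclock: u64,
}

impl PruneDataSketch {
    // Reject out-of-range wallclocks before the value is processed further.
    fn sanitize(&self) -> Result<(), SanitizeError> {
        if self.wallclock >= MAX_WALLCLOCK {
            return Err(SanitizeError::ValueOutOfBounds);
        }
        Ok(())
    }
}

fn main() {
    assert!(PruneDataSketch { wallclock: 1 }.sanitize().is_ok());
    assert!(PruneDataSketch { wallclock: u64::MAX }.sanitize().is_err());
    println!("sanitize checks passed");
}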
|
||||
@@ -327,10 +326,10 @@ pub fn make_accounts_hashes_message(
|
||||
pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>;
|
||||
|
||||
// TODO These messages should go through the gpu pipeline for spam filtering
|
||||
#[frozen_abi(digest = "GANv3KVkTYF84kmg1bAuWEZd9MaiYzPquuu13hup3379")]
|
||||
#[frozen_abi(digest = "CH5BWuhAyvUiUQYgu2Lcwu7eoiW6bQitvtLS1yFsdmrE")]
|
||||
#[derive(Serialize, Deserialize, Debug, AbiEnumVisitor, AbiExample)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
pub(crate) enum Protocol {
|
||||
enum Protocol {
|
||||
/// Gossip protocol messages
|
||||
PullRequest(CrdsFilter, CrdsValue),
|
||||
PullResponse(Pubkey, Vec<CrdsValue>),
|
||||
@@ -412,7 +411,7 @@ impl Protocol {
|
||||
}
|
||||
|
||||
impl Sanitize for Protocol {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
|
||||
match self {
|
||||
Protocol::PullRequest(filter, val) => {
|
||||
filter.sanitize()?;
|
||||
@@ -898,26 +897,29 @@ impl ClusterInfo {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: If two threads call into this function then epoch_slot_index has a
// race condition and the threads will overwrite each other in the crds table.
|
||||
pub fn push_epoch_slots(&self, mut update: &[Slot]) {
|
||||
let current_slots: Vec<_> = {
|
||||
let gossip =
|
||||
self.time_gossip_read_lock("lookup_epoch_slots", &self.stats.epoch_slots_lookup);
|
||||
(0..crds_value::MAX_EPOCH_SLOTS)
|
||||
.filter_map(|ix| {
|
||||
let label = CrdsValueLabel::EpochSlots(ix, self.id());
|
||||
let epoch_slots = gossip.crds.get(&label)?.value.epoch_slots()?;
|
||||
let first_slot = epoch_slots.first_slot()?;
|
||||
Some((epoch_slots.wallclock, first_slot, ix))
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
pub fn push_epoch_slots(&self, update: &[Slot]) {
|
||||
let mut num = 0;
|
||||
let mut current_slots: Vec<_> = (0..crds_value::MAX_EPOCH_SLOTS)
|
||||
.filter_map(|ix| {
|
||||
Some((
|
||||
self.time_gossip_read_lock(
|
||||
"lookup_epoch_slots",
|
||||
&self.stats.epoch_slots_lookup,
|
||||
)
|
||||
.crds
|
||||
.get(&CrdsValueLabel::EpochSlots(ix, self.id()))
|
||||
.and_then(|v| v.value.epoch_slots())
|
||||
.and_then(|x| Some((x.wallclock, x.first_slot()?)))?,
|
||||
ix,
|
||||
))
|
||||
})
|
||||
.collect();
|
||||
current_slots.sort_unstable();
|
||||
let min_slot: Slot = current_slots
|
||||
.iter()
|
||||
.map(|(_wallclock, slot, _index)| *slot)
|
||||
.map(|((_, s), _)| *s)
|
||||
.min()
|
||||
.unwrap_or_default();
|
||||
.unwrap_or(0);
|
||||
let max_slot: Slot = update.iter().max().cloned().unwrap_or(0);
|
||||
let total_slots = max_slot as isize - min_slot as isize;
|
||||
// WARN if CRDS is not storing at least a full epoch's worth of slots
|
||||
@@ -932,32 +934,29 @@ impl ClusterInfo {
|
||||
);
|
||||
}
|
||||
let mut reset = false;
|
||||
let mut epoch_slot_index = match current_slots.iter().max() {
|
||||
Some((_wallclock, _slot, index)) => *index,
|
||||
None => 0,
|
||||
};
|
||||
let self_pubkey = self.id();
|
||||
let mut entries = Vec::default();
|
||||
while !update.is_empty() {
|
||||
let mut epoch_slot_index = current_slots.last().map(|(_, x)| *x).unwrap_or(0);
|
||||
while num < update.len() {
|
||||
let ix = (epoch_slot_index % crds_value::MAX_EPOCH_SLOTS) as u8;
|
||||
let now = timestamp();
|
||||
let mut slots = if !reset {
|
||||
self.lookup_epoch_slots(ix)
|
||||
} else {
|
||||
EpochSlots::new(self_pubkey, now)
|
||||
EpochSlots::new(self.id(), now)
|
||||
};
|
||||
let n = slots.fill(update, now);
|
||||
update = &update[n..];
|
||||
let n = slots.fill(&update[num..], now);
|
||||
if n > 0 {
|
||||
let epoch_slots = CrdsData::EpochSlots(ix, slots);
|
||||
let entry = CrdsValue::new_signed(epoch_slots, &self.keypair);
|
||||
entries.push(entry);
|
||||
let entry = CrdsValue::new_signed(CrdsData::EpochSlots(ix, slots), &self.keypair);
|
||||
self.local_message_pending_push_queue
|
||||
.lock()
|
||||
.unwrap()
|
||||
.push(entry);
|
||||
}
|
||||
num += n;
|
||||
if num < update.len() {
|
||||
epoch_slot_index += 1;
|
||||
reset = true;
|
||||
}
|
||||
epoch_slot_index += 1;
|
||||
reset = true;
|
||||
}
|
||||
let mut gossip = self.gossip.write().unwrap();
|
||||
gossip.process_push_message(&self_pubkey, entries, timestamp());
|
||||
}
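push_epoch_slots above packs the incoming slot list into as many EpochSlots CRDS entries as it needs, bumping the index whenever one entry fills up. Below is a minimal, std-only sketch of that chunking loop; EpochSlotsEntry, CAP and fill() are illustrative stand-ins, not the real crate API:

// Hypothetical stand-in for how many slots one EpochSlots entry can hold.
const CAP: usize = 4;

#[derive(Debug)]
struct EpochSlotsEntry {
    index: u8,
    slots: Vec<u64>,
}

impl EpochSlotsEntry {
    fn new(index: u8) -> Self {
        Self { index, slots: Vec::new() }
    }
    // Returns how many slots were consumed from `update`.
    fn fill(&mut self, update: &[u64]) -> usize {
        let n = update.len().min(CAP - self.slots.len());
        self.slots.extend_from_slice(&update[..n]);
        n
    }
}

fn push_epoch_slots(mut update: &[u64], mut index: u8) -> Vec<EpochSlotsEntry> {
    let mut entries = Vec::new();
    while !update.is_empty() {
        let mut entry = EpochSlotsEntry::new(index);
        let n = entry.fill(update);
        update = &update[n..];
        if n > 0 {
            entries.push(entry);
        }
        // Move to the next CRDS index for the next chunk, as the loop above does.
        index = index.wrapping_add(1);
    }
    entries
}

fn main() {
    let slots: Vec<u64> = (100..110).collect();
    for entry in push_epoch_slots(&slots, 0) {
        println!("index {} holds {:?}", entry.index, entry.slots);
    }
}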
|
||||
|
||||
fn time_gossip_read_lock<'a>(
|
||||
@@ -976,7 +975,7 @@ impl ClusterInfo {
|
||||
GossipWriteLock::new(self.gossip.write().unwrap(), label, counter)
|
||||
}
|
||||
|
||||
pub fn push_message(&self, message: CrdsValue) {
|
||||
pub(crate) fn push_message(&self, message: CrdsValue) {
|
||||
self.local_message_pending_push_queue
|
||||
.lock()
|
||||
.unwrap()
|
||||
@@ -1102,11 +1101,7 @@ impl ClusterInfo {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send_vote(
|
||||
&self,
|
||||
vote: &Transaction,
|
||||
tpu: Option<SocketAddr>,
|
||||
) -> Result<(), GossipError> {
|
||||
pub fn send_vote(&self, vote: &Transaction, tpu: Option<SocketAddr>) -> Result<()> {
|
||||
let tpu = tpu.unwrap_or_else(|| self.my_contact_info().tpu);
|
||||
let buf = serialize(vote)?;
|
||||
self.socket.send_to(&buf, &tpu)?;
|
||||
@@ -1131,11 +1126,7 @@ impl ClusterInfo {
|
||||
(labels, txs)
|
||||
}
|
||||
|
||||
pub fn push_duplicate_shred(
|
||||
&self,
|
||||
shred: &Shred,
|
||||
other_payload: &[u8],
|
||||
) -> Result<(), GossipError> {
|
||||
pub(crate) fn push_duplicate_shred(&self, shred: &Shred, other_payload: &[u8]) -> Result<()> {
|
||||
self.gossip.write().unwrap().push_duplicate_shred(
|
||||
&self.keypair,
|
||||
shred,
|
||||
@@ -1170,19 +1161,10 @@ impl ClusterInfo {
|
||||
.map(map)
|
||||
}
|
||||
|
||||
/// Returns epoch-slots inserted since the given cursor.
/// Excludes entries from nodes with unknown or different shred version.
|
||||
pub fn get_epoch_slots(&self, cursor: &mut Cursor) -> Vec<EpochSlots> {
|
||||
let self_shred_version = self.my_shred_version();
|
||||
pub(crate) fn get_epoch_slots(&self, cursor: &mut Cursor) -> Vec<EpochSlots> {
|
||||
let gossip = self.gossip.read().unwrap();
|
||||
let entries = gossip.crds.get_epoch_slots(cursor);
|
||||
entries
|
||||
.filter(
|
||||
|entry| match gossip.crds.get_contact_info(entry.value.pubkey()) {
|
||||
Some(node) => node.shred_version == self_shred_version,
|
||||
None => false,
|
||||
},
|
||||
)
|
||||
.map(|entry| match &entry.value.data {
|
||||
CrdsData::EpochSlots(_, slots) => slots.clone(),
|
||||
_ => panic!("this should not happen!"),
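A hedged sketch of the filter shown above: keep only epoch-slots whose originating node advertises the same shred version as ours. The message type and the version lookup are simplified stand-ins for the CRDS table, not the real API:

use std::collections::HashMap;

#[derive(Clone, Debug)]
struct EpochSlotsMsg {
    from: &'static str, // originating node, reduced to a label
    slots: Vec<u64>,
}

fn get_epoch_slots(
    entries: &[EpochSlotsMsg],
    shred_versions: &HashMap<&'static str, u16>, // node -> advertised shred version
    self_shred_version: u16,
) -> Vec<EpochSlotsMsg> {
    entries
        .iter()
        // Drop entries from nodes we cannot match to our own shred version.
        .filter(|e| shred_versions.get(e.from) == Some(&self_shred_version))
        .cloned()
        .collect()
}

fn main() {
    let entries = vec![
        EpochSlotsMsg { from: "a", slots: vec![1, 2] },
        EpochSlotsMsg { from: "b", slots: vec![3] },
    ];
    let versions: HashMap<_, _> = [("a", 42u16), ("b", 7u16)].into_iter().collect();
    // Only node "a" matches our shred version, so only its slots survive.
    println!("{:?}", get_epoch_slots(&entries, &versions, 42));
}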
|
||||
@@ -1228,7 +1210,7 @@ impl ClusterInfo {
|
||||
}
|
||||
|
||||
// All nodes in gossip (including spy nodes) and the last time we heard about them
|
||||
pub fn all_peers(&self) -> Vec<(ContactInfo, u64)> {
|
||||
pub(crate) fn all_peers(&self) -> Vec<(ContactInfo, u64)> {
|
||||
self.gossip
|
||||
.read()
|
||||
.unwrap()
|
||||
@@ -1271,7 +1253,7 @@ impl ClusterInfo {
|
||||
.filter(|node| {
|
||||
node.id != self_pubkey
|
||||
&& node.shred_version == self_shred_version
|
||||
&& ContactInfo::is_valid_address(&node.tvu)
|
||||
&& ContactInfo::is_valid_tvu_address(&node.tvu)
|
||||
})
|
||||
.cloned()
|
||||
.collect()
|
||||
@@ -1403,9 +1385,14 @@ impl ClusterInfo {
|
||||
.iter()
|
||||
.map(|peer| &peer.tvu_forwards)
|
||||
.filter(|addr| ContactInfo::is_valid_address(addr))
|
||||
.filter(|addr| is_global(addr))
|
||||
.collect()
|
||||
} else {
|
||||
peers.iter().map(|peer| &peer.tvu).collect()
|
||||
peers
|
||||
.iter()
|
||||
.map(|peer| &peer.tvu)
|
||||
.filter(|addr| is_global(addr))
|
||||
.collect()
|
||||
};
|
||||
let mut dests = &dests[..];
|
||||
let data = &packet.data[..packet.meta.size];
|
||||
@@ -1587,7 +1574,7 @@ impl ClusterInfo {
|
||||
let mut push_queue = self.local_message_pending_push_queue.lock().unwrap();
|
||||
std::mem::take(&mut *push_queue)
|
||||
}
|
||||
// Used in tests
|
||||
#[cfg(test)]
|
||||
pub fn flush_push_queue(&self) {
|
||||
let pending_push_messages = self.drain_push_queue();
|
||||
let mut gossip = self.gossip.write().unwrap();
|
||||
@@ -1675,7 +1662,7 @@ impl ClusterInfo {
|
||||
sender: &PacketSender,
|
||||
generate_pull_requests: bool,
|
||||
require_stake_for_gossip: bool,
|
||||
) -> Result<(), GossipError> {
|
||||
) -> Result<()> {
|
||||
let reqs = self.generate_new_gossip_requests(
|
||||
thread_pool,
|
||||
gossip_validators,
|
||||
@@ -1799,7 +1786,7 @@ impl ClusterInfo {
|
||||
let mut last_contact_info_trace = timestamp();
|
||||
let mut last_contact_info_save = timestamp();
|
||||
let mut entrypoints_processed = false;
|
||||
let recycler = PacketsRecycler::default();
|
||||
let recycler = PacketsRecycler::new_without_limit("gossip-recycler-shrink-stats");
|
||||
let crds_data = vec![
|
||||
CrdsData::Version(Version::new(self.id())),
|
||||
CrdsData::NodeInstance(self.instance.with_wallclock(timestamp())),
|
||||
@@ -1811,6 +1798,7 @@ impl ClusterInfo {
|
||||
let mut generate_pull_requests = true;
|
||||
loop {
|
||||
let start = timestamp();
|
||||
thread_mem_usage::datapoint("solana-gossip");
|
||||
if self.contact_debug_interval != 0
|
||||
&& start - last_contact_info_trace > self.contact_debug_interval
|
||||
{
|
||||
@@ -2051,8 +2039,7 @@ impl ClusterInfo {
|
||||
.process_pull_requests(callers.cloned(), timestamp());
|
||||
let output_size_limit =
|
||||
self.update_data_budget(stakes.len()) / PULL_RESPONSE_MIN_SERIALIZED_SIZE;
|
||||
let mut packets =
|
||||
Packets::new_unpinned_with_recycler(recycler.clone(), 64, "handle_pull_requests");
|
||||
let mut packets = Packets::new_with_recycler(recycler.clone(), 64).unwrap();
|
||||
let (caller_and_filters, addrs): (Vec<_>, Vec<_>) = {
|
||||
let mut rng = rand::thread_rng();
|
||||
let check_pull_request =
|
||||
@@ -2323,8 +2310,7 @@ impl ClusterInfo {
|
||||
if packets.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let packets =
|
||||
Packets::new_unpinned_with_recycler_data(recycler, "handle_ping_messages", packets);
|
||||
let packets = Packets::new_with_recycler_data(recycler, packets).unwrap();
|
||||
Some(packets)
|
||||
}
|
||||
}
|
||||
@@ -2343,7 +2329,6 @@ impl ClusterInfo {
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::needless_collect)]
|
||||
fn handle_batch_push_messages(
|
||||
&self,
|
||||
messages: Vec<(Pubkey, Vec<CrdsValue>)>,
|
||||
@@ -2510,7 +2495,7 @@ impl ClusterInfo {
|
||||
|
||||
fn process_packets(
|
||||
&self,
|
||||
packets: VecDeque<(/*from:*/ SocketAddr, Protocol)>,
|
||||
packets: VecDeque<Packet>,
|
||||
thread_pool: &ThreadPool,
|
||||
recycler: &PacketsRecycler,
|
||||
response_sender: &PacketSender,
|
||||
@@ -2518,15 +2503,33 @@ impl ClusterInfo {
|
||||
feature_set: Option<&FeatureSet>,
|
||||
epoch_duration: Duration,
|
||||
should_check_duplicate_instance: bool,
|
||||
) -> Result<(), GossipError> {
|
||||
) -> Result<()> {
|
||||
let _st = ScopedTimer::from(&self.stats.process_gossip_packets_time);
|
||||
self.stats
|
||||
.packets_received_count
|
||||
.add_relaxed(packets.len() as u64);
|
||||
let packets: Vec<_> = thread_pool.install(|| {
|
||||
packets
|
||||
.into_par_iter()
|
||||
.filter_map(|packet| {
|
||||
let protocol: Protocol =
|
||||
limited_deserialize(&packet.data[..packet.meta.size]).ok()?;
|
||||
protocol.sanitize().ok()?;
|
||||
let protocol = protocol.par_verify()?;
|
||||
Some((packet.meta.addr(), protocol))
|
||||
})
|
||||
.collect()
|
||||
});
|
||||
self.stats
|
||||
.packets_received_verified_count
|
||||
.add_relaxed(packets.len() as u64);
|
||||
// Check if there is a duplicate instance of
// this node with a more recent timestamp.
|
||||
let check_duplicate_instance = |values: &[CrdsValue]| {
|
||||
if should_check_duplicate_instance {
|
||||
for value in values {
|
||||
if self.instance.check_duplicate(value) {
|
||||
return Err(GossipError::DuplicateNodeInstance);
|
||||
return Err(Error::DuplicateNodeInstance);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2604,64 +2607,23 @@ impl ClusterInfo {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Consumes packets received from the socket, deserializing, sanitizing and
|
||||
// verifying them and then sending them down the channel for the actual
|
||||
// handling of requests/messages.
|
||||
fn run_socket_consume(
|
||||
&self,
|
||||
receiver: &PacketReceiver,
|
||||
sender: &Sender<Vec<(/*from:*/ SocketAddr, Protocol)>>,
|
||||
thread_pool: &ThreadPool,
|
||||
) -> Result<(), GossipError> {
|
||||
const RECV_TIMEOUT: Duration = Duration::from_secs(1);
|
||||
let packets: Vec<_> = receiver.recv_timeout(RECV_TIMEOUT)?.packets.into();
|
||||
let mut packets = VecDeque::from(packets);
|
||||
for payload in receiver.try_iter() {
|
||||
packets.extend(payload.packets.iter().cloned());
|
||||
let excess_count = packets.len().saturating_sub(MAX_GOSSIP_TRAFFIC);
|
||||
if excess_count > 0 {
|
||||
packets.drain(0..excess_count);
|
||||
self.stats
|
||||
.gossip_packets_dropped_count
|
||||
.add_relaxed(excess_count as u64);
|
||||
}
|
||||
}
|
||||
self.stats
|
||||
.packets_received_count
|
||||
.add_relaxed(packets.len() as u64);
|
||||
let verify_packet = |packet: Packet| {
|
||||
let data = &packet.data[..packet.meta.size];
|
||||
let protocol: Protocol = limited_deserialize(data).ok()?;
|
||||
protocol.sanitize().ok()?;
|
||||
let protocol = protocol.par_verify()?;
|
||||
Some((packet.meta.addr(), protocol))
|
||||
};
|
||||
let packets: Vec<_> = {
|
||||
let _st = ScopedTimer::from(&self.stats.verify_gossip_packets_time);
|
||||
thread_pool.install(|| packets.into_par_iter().filter_map(verify_packet).collect())
|
||||
};
|
||||
self.stats
|
||||
.packets_received_verified_count
|
||||
.add_relaxed(packets.len() as u64);
|
||||
Ok(sender.send(packets)?)
|
||||
}
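run_socket_consume above drains the channel, trims the backlog to MAX_GOSSIP_TRAFFIC by dropping the oldest packets first, and only then verifies in parallel. A std-only sketch of the trimming step; the constant is a placeholder and packets are reduced to integers:

use std::collections::VecDeque;

// Placeholder; the real MAX_GOSSIP_TRAFFIC is much larger.
const MAX_GOSSIP_TRAFFIC: usize = 8;

// Drop the oldest packets so at most MAX_GOSSIP_TRAFFIC remain; returns how many were dropped.
fn trim_backlog(packets: &mut VecDeque<u64>) -> usize {
    let excess = packets.len().saturating_sub(MAX_GOSSIP_TRAFFIC);
    if excess > 0 {
        packets.drain(0..excess);
    }
    excess
}

fn main() {
    let mut packets: VecDeque<u64> = (0..12).collect();
    let dropped = trim_backlog(&mut packets);
    assert_eq!(dropped, 4);
    assert_eq!(packets.front(), Some(&4)); // oldest survivors start at 4
    println!("dropped {dropped}, kept {:?}", packets);
}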
|
||||
|
||||
/// Process messages from the network
|
||||
fn run_listen(
|
||||
&self,
|
||||
recycler: &PacketsRecycler,
|
||||
bank_forks: Option<&RwLock<BankForks>>,
|
||||
receiver: &Receiver<Vec<(/*from:*/ SocketAddr, Protocol)>>,
|
||||
requests_receiver: &PacketReceiver,
|
||||
response_sender: &PacketSender,
|
||||
thread_pool: &ThreadPool,
|
||||
last_print: &mut Instant,
|
||||
should_check_duplicate_instance: bool,
|
||||
) -> Result<(), GossipError> {
|
||||
) -> Result<()> {
|
||||
const RECV_TIMEOUT: Duration = Duration::from_secs(1);
|
||||
const SUBMIT_GOSSIP_STATS_INTERVAL: Duration = Duration::from_secs(2);
|
||||
let mut packets = VecDeque::from(receiver.recv_timeout(RECV_TIMEOUT)?);
|
||||
for payload in receiver.try_iter() {
|
||||
packets.extend(payload);
|
||||
let packets: Vec<_> = requests_receiver.recv_timeout(RECV_TIMEOUT)?.packets.into();
|
||||
let mut packets = VecDeque::from(packets);
|
||||
while let Ok(packet) = requests_receiver.try_recv() {
|
||||
packets.extend(packet.packets.into_iter());
|
||||
let excess_count = packets.len().saturating_sub(MAX_GOSSIP_TRAFFIC);
|
||||
if excess_count > 0 {
|
||||
packets.drain(0..excess_count);
|
||||
@@ -2698,41 +2660,17 @@ impl ClusterInfo {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn start_socket_consume_thread(
|
||||
self: Arc<Self>,
|
||||
receiver: PacketReceiver,
|
||||
sender: Sender<Vec<(/*from:*/ SocketAddr, Protocol)>>,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> JoinHandle<()> {
|
||||
let thread_pool = ThreadPoolBuilder::new()
|
||||
.num_threads(get_thread_count().min(8))
|
||||
.thread_name(|i| format!("gossip-consume-{}", i))
|
||||
.build()
|
||||
.unwrap();
|
||||
let run_consume = move || {
|
||||
while !exit.load(Ordering::Relaxed) {
|
||||
match self.run_socket_consume(&receiver, &sender, &thread_pool) {
|
||||
Err(GossipError::RecvTimeoutError(RecvTimeoutError::Disconnected)) => break,
|
||||
Err(GossipError::RecvTimeoutError(RecvTimeoutError::Timeout)) => (),
|
||||
Err(err) => error!("gossip consume: {}", err),
|
||||
Ok(()) => (),
|
||||
}
|
||||
}
|
||||
};
|
||||
let thread_name = String::from("gossip-consume");
|
||||
Builder::new().name(thread_name).spawn(run_consume).unwrap()
|
||||
}
|
||||
|
||||
pub(crate) fn listen(
|
||||
pub fn listen(
|
||||
self: Arc<Self>,
|
||||
bank_forks: Option<Arc<RwLock<BankForks>>>,
|
||||
requests_receiver: Receiver<Vec<(/*from:*/ SocketAddr, Protocol)>>,
|
||||
requests_receiver: PacketReceiver,
|
||||
response_sender: PacketSender,
|
||||
should_check_duplicate_instance: bool,
|
||||
exit: &Arc<AtomicBool>,
|
||||
) -> JoinHandle<()> {
|
||||
let exit = exit.clone();
|
||||
let recycler = PacketsRecycler::default();
|
||||
let recycler =
|
||||
PacketsRecycler::new_without_limit("cluster-info-listen-recycler-shrink-stats");
|
||||
Builder::new()
|
||||
.name("solana-listen".to_string())
|
||||
.spawn(move || {
|
||||
@@ -2753,8 +2691,7 @@ impl ClusterInfo {
|
||||
should_check_duplicate_instance,
|
||||
) {
|
||||
match err {
|
||||
GossipError::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
|
||||
GossipError::RecvTimeoutError(RecvTimeoutError::Timeout) => {
|
||||
Error::RecvTimeoutError(_) => {
|
||||
let table_size = self.gossip.read().unwrap().crds.len();
|
||||
debug!(
|
||||
"{}: run_listen timeout, table size: {}",
|
||||
@@ -2762,19 +2699,20 @@ impl ClusterInfo {
|
||||
table_size,
|
||||
);
|
||||
}
|
||||
GossipError::DuplicateNodeInstance => {
|
||||
Error::DuplicateNodeInstance => {
|
||||
error!(
|
||||
"duplicate running instances of the same validator node: {}",
|
||||
self.id()
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
// TODO: Pass through Exit here so
|
||||
// TODO: Pass through ValidatorExit here so
|
||||
// that this will exit cleanly.
|
||||
std::process::exit(1);
|
||||
}
|
||||
_ => error!("gossip run_listen failed: {}", err),
|
||||
}
|
||||
}
|
||||
thread_mem_usage::datapoint("solana-listen");
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
@@ -3085,20 +3023,6 @@ impl Node {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push_messages_to_peer(
|
||||
messages: Vec<CrdsValue>,
|
||||
self_id: Pubkey,
|
||||
peer_gossip: SocketAddr,
|
||||
) -> Result<(), GossipError> {
|
||||
let reqs: Vec<_> = ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, messages)
|
||||
.map(move |payload| (peer_gossip, Protocol::PushMessage(self_id, payload)))
|
||||
.collect();
|
||||
let packets = to_packets_with_destination(PacketsRecycler::default(), &reqs);
|
||||
let sock = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
packet::send_to(&packets, &sock)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn stake_weight_peers(
|
||||
peers: &mut Vec<ContactInfo>,
|
||||
stakes: Option<&HashMap<Pubkey, u64>>,
|
||||
@@ -3109,25 +3033,21 @@ pub fn stake_weight_peers(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use {
|
||||
super::*,
|
||||
crate::{
|
||||
crds_gossip_pull::tests::MIN_NUM_BLOOM_FILTERS,
|
||||
crds_value::{CrdsValue, CrdsValueLabel, Vote as CrdsVote},
|
||||
duplicate_shred::{self, tests::new_rand_shred, MAX_DUPLICATE_SHREDS},
|
||||
},
|
||||
itertools::izip,
|
||||
rand::{seq::SliceRandom, SeedableRng},
|
||||
rand_chacha::ChaChaRng,
|
||||
solana_ledger::shred::Shredder,
|
||||
solana_sdk::signature::{Keypair, Signer},
|
||||
solana_vote_program::{vote_instruction, vote_state::Vote},
|
||||
std::{
|
||||
iter::repeat_with,
|
||||
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV4},
|
||||
sync::Arc,
|
||||
},
|
||||
use super::*;
|
||||
use crate::{
|
||||
crds_gossip_pull::tests::MIN_NUM_BLOOM_FILTERS,
|
||||
crds_value::{CrdsValue, CrdsValueLabel, Vote as CrdsVote},
|
||||
duplicate_shred::{self, tests::new_rand_shred, MAX_DUPLICATE_SHREDS},
|
||||
};
|
||||
use itertools::izip;
|
||||
use rand::{seq::SliceRandom, SeedableRng};
|
||||
use rand_chacha::ChaChaRng;
|
||||
use solana_ledger::shred::Shredder;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_vote_program::{vote_instruction, vote_state::Vote};
|
||||
use std::iter::repeat_with;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV4};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[test]
|
||||
fn test_gossip_node() {
|
||||
@@ -3250,7 +3170,6 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(clippy::needless_collect)]
|
||||
fn test_handle_ping_messages() {
|
||||
let mut rng = rand::thread_rng();
|
||||
let this_node = Arc::new(Keypair::new());
|
||||
@@ -3270,7 +3189,7 @@ mod tests {
|
||||
.iter()
|
||||
.map(|ping| Pong::new(ping, &this_node).unwrap())
|
||||
.collect();
|
||||
let recycler = PacketsRecycler::default();
|
||||
let recycler = PacketsRecycler::new_without_limit("");
|
||||
let packets = cluster_info
|
||||
.handle_ping_messages(
|
||||
remote_nodes
|
||||
@@ -3600,7 +3519,7 @@ mod tests {
|
||||
.unwrap()
|
||||
.new_push_messages(cluster_info.drain_push_queue(), timestamp());
|
||||
// there should be some pushes ready
|
||||
assert!(!push_messages.is_empty());
|
||||
assert_eq!(push_messages.is_empty(), false);
|
||||
push_messages
|
||||
.values()
|
||||
.for_each(|v| v.par_iter().for_each(|v| assert!(v.verify())));
|
||||
@@ -3842,6 +3761,7 @@ mod tests {
|
||||
let slots = cluster_info.get_epoch_slots(&mut Cursor::default());
|
||||
assert!(slots.is_empty());
|
||||
cluster_info.push_epoch_slots(&[0]);
|
||||
cluster_info.flush_push_queue();
|
||||
|
||||
let mut cursor = Cursor::default();
|
||||
let slots = cluster_info.get_epoch_slots(&mut cursor);
|
||||
@@ -3849,43 +3769,6 @@ mod tests {
|
||||
|
||||
let slots = cluster_info.get_epoch_slots(&mut cursor);
|
||||
assert!(slots.is_empty());
|
||||
|
||||
// Test with different shred versions.
|
||||
let mut rng = rand::thread_rng();
|
||||
let node_pubkey = Pubkey::new_unique();
|
||||
let mut node = ContactInfo::new_rand(&mut rng, Some(node_pubkey));
|
||||
node.shred_version = 42;
|
||||
let epoch_slots = EpochSlots::new_rand(&mut rng, Some(node_pubkey));
|
||||
let entries = vec![
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(node)),
|
||||
CrdsValue::new_unsigned(CrdsData::EpochSlots(0, epoch_slots)),
|
||||
];
|
||||
{
|
||||
let mut gossip = cluster_info.gossip.write().unwrap();
|
||||
for entry in entries {
|
||||
assert!(gossip.crds.insert(entry, /*now=*/ 0).is_ok());
|
||||
}
|
||||
}
|
||||
// Should exclude other node's epoch-slot because of different
|
||||
// shred-version.
|
||||
let slots = cluster_info.get_epoch_slots(&mut Cursor::default());
|
||||
assert_eq!(slots.len(), 1);
|
||||
assert_eq!(slots[0].from, cluster_info.id);
|
||||
// Match shred versions.
|
||||
{
|
||||
let mut node = cluster_info.my_contact_info.write().unwrap();
|
||||
node.shred_version = 42;
|
||||
}
|
||||
cluster_info.push_self(
|
||||
&HashMap::default(), // stakes
|
||||
None, // gossip validators
|
||||
);
|
||||
cluster_info.flush_push_queue();
|
||||
// Should now include both epoch slots.
|
||||
let slots = cluster_info.get_epoch_slots(&mut Cursor::default());
|
||||
assert_eq!(slots.len(), 2);
|
||||
assert_eq!(slots[0].from, cluster_info.id);
|
||||
assert_eq!(slots[1].from, node_pubkey);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -3989,7 +3872,6 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[allow(clippy::needless_collect)]
|
||||
fn test_split_messages_packet_size() {
|
||||
// Test that if a value is smaller than payload size but too large to be wrapped in a vec
|
||||
// that it is still dropped
|
||||
@@ -4023,10 +3905,9 @@ mod tests {
|
||||
let expected_len = (NUM_VALUES + num_values_per_payload - 1) / num_values_per_payload;
|
||||
let msgs = vec![value; NUM_VALUES as usize];
|
||||
|
||||
assert!(
|
||||
ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, msgs).count() as u64
|
||||
<= expected_len
|
||||
);
|
||||
let split: Vec<_> =
|
||||
ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, msgs).collect();
|
||||
assert!(split.len() as u64 <= expected_len);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -4235,7 +4116,9 @@ mod tests {
|
||||
range.push(last + rand::thread_rng().gen_range(1, 32));
|
||||
}
|
||||
cluster_info.push_epoch_slots(&range[..16000]);
|
||||
cluster_info.flush_push_queue();
|
||||
cluster_info.push_epoch_slots(&range[16000..]);
|
||||
cluster_info.flush_push_queue();
|
||||
let slots = cluster_info.get_epoch_slots(&mut Cursor::default());
|
||||
let slots: Vec<_> = slots.iter().flat_map(|x| x.to_slots(0)).collect();
|
||||
assert_eq!(slots, range);
|
@@ -1,15 +1,13 @@
|
||||
use {
|
||||
crate::crds_gossip::CrdsGossip,
|
||||
solana_measure::measure::Measure,
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::{
|
||||
collections::HashMap,
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
RwLock,
|
||||
},
|
||||
time::Instant,
|
||||
use crate::crds_gossip::CrdsGossip;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
RwLock,
|
||||
},
|
||||
time::Instant,
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
@@ -118,7 +116,6 @@ pub(crate) struct GossipStats {
|
||||
pub(crate) trim_crds_table_failed: Counter,
|
||||
pub(crate) trim_crds_table_purged_values_count: Counter,
|
||||
pub(crate) tvu_peers: Counter,
|
||||
pub(crate) verify_gossip_packets_time: Counter,
|
||||
}
|
||||
|
||||
pub(crate) fn submit_gossip_stats(
|
||||
@@ -172,11 +169,6 @@ pub(crate) fn submit_gossip_stats(
|
||||
stats.process_gossip_packets_time.clear(),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"verify_gossip_packets_time",
|
||||
stats.verify_gossip_packets_time.clear(),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"handle_batch_ping_messages_time",
|
||||
stats.handle_batch_ping_messages_time.clear(),
|
@@ -1,7 +1,13 @@
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
|
||||
crds::Cursor,
|
||||
crds_value::CrdsValueLabel,
|
||||
optimistic_confirmation_verifier::OptimisticConfirmationVerifier,
|
||||
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
|
||||
poh_recorder::PohRecorder,
|
||||
replay_stage::DUPLICATE_THRESHOLD,
|
||||
result::{Error, Result},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
sigverify,
|
||||
verified_vote_packets::VerifiedVotePackets,
|
||||
vote_stake_tracker::VoteStakeTracker,
|
||||
@@ -11,19 +17,9 @@ use crossbeam_channel::{
|
||||
};
|
||||
use itertools::izip;
|
||||
use log::*;
|
||||
use solana_gossip::{
|
||||
cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
|
||||
crds::Cursor,
|
||||
crds_value::CrdsValueLabel,
|
||||
};
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_metrics::inc_new_counter_debug;
|
||||
use solana_perf::packet::{self, Packets};
|
||||
use solana_poh::poh_recorder::PohRecorder;
|
||||
use solana_rpc::{
|
||||
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
};
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
bank_forks::BankForks,
|
||||
@@ -33,7 +29,7 @@ use solana_runtime::{
|
||||
vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
|
||||
};
|
||||
use solana_sdk::{
|
||||
clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT},
|
||||
clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT},
|
||||
epoch_schedule::EpochSchedule,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
@@ -384,14 +380,9 @@ impl ClusterInfoVoteListener {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let would_be_leader = poh_recorder
|
||||
.lock()
|
||||
.unwrap()
|
||||
.would_be_leader(20 * DEFAULT_TICKS_PER_SLOT);
|
||||
if let Err(e) = verified_vote_packets.receive_and_process_vote_packets(
|
||||
&verified_vote_label_packets_receiver,
|
||||
&mut update_version,
|
||||
would_be_leader,
|
||||
) {
|
||||
match e {
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
||||
@@ -833,8 +824,8 @@ impl ClusterInfoVoteListener {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank;
|
||||
use solana_perf::packet;
|
||||
use solana_rpc::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank;
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
commitment::BlockCommitmentCache,
|
||||
|
@@ -3,7 +3,7 @@ use crate::{
|
||||
progress_map::ProgressMap,
|
||||
};
|
||||
use solana_sdk::{clock::Slot, hash::Hash};
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
|
||||
|
||||
pub(crate) type DuplicateSlotsTracker = BTreeSet<Slot>;
|
||||
pub(crate) type GossipDuplicateConfirmedSlots = BTreeMap<Slot, Hash>;
|
||||
@@ -201,12 +201,19 @@ fn get_cluster_duplicate_confirmed_hash<'a>(
|
||||
|
||||
fn apply_state_changes(
|
||||
slot: Slot,
|
||||
progress: &mut ProgressMap,
|
||||
fork_choice: &mut HeaviestSubtreeForkChoice,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
descendants: &HashMap<Slot, HashSet<Slot>>,
|
||||
state_changes: Vec<ResultingStateChange>,
|
||||
) {
|
||||
for state_change in state_changes {
|
||||
match state_change {
|
||||
ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash) => {
|
||||
progress.set_unconfirmed_duplicate_slot(
|
||||
slot,
|
||||
descendants.get(&slot).unwrap_or(&HashSet::default()),
|
||||
);
|
||||
fork_choice.mark_fork_invalid_candidate(&(slot, bank_frozen_hash));
|
||||
}
|
||||
ResultingStateChange::RepairDuplicateConfirmedVersion(
|
||||
@@ -217,6 +224,11 @@ fn apply_state_changes(
|
||||
repair_correct_version(slot, &cluster_duplicate_confirmed_hash);
|
||||
}
|
||||
ResultingStateChange::DuplicateConfirmedSlotMatchesCluster(bank_frozen_hash) => {
|
||||
progress.set_confirmed_duplicate_slot(
|
||||
slot,
|
||||
ancestors.get(&slot).unwrap_or(&HashSet::default()),
|
||||
descendants.get(&slot).unwrap_or(&HashSet::default()),
|
||||
);
|
||||
fork_choice.mark_fork_valid_candidate(&(slot, bank_frozen_hash));
|
||||
}
|
||||
}
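apply_state_changes above is a plain enum dispatch: each ResultingStateChange variant mutates fork choice and, in the new version, the progress map. A stripped-down sketch of that pattern with illustrative variants and side effects only:

// Stripped-down version of the state-change dispatch (illustrative names, not the real types).
#[derive(Debug)]
enum StateChange {
    MarkSlotDuplicate(u64),   // payload: bank hash, simplified to a number
    DuplicateConfirmed(u64),
}

#[derive(Default, Debug)]
struct ForkChoice {
    invalid: Vec<u64>,
    confirmed: Vec<u64>,
}

fn apply_state_changes(slot: u64, fork_choice: &mut ForkChoice, changes: Vec<StateChange>) {
    for change in changes {
        match change {
            StateChange::MarkSlotDuplicate(hash) => {
                // The real code also updates the progress map for the slot's descendants.
                fork_choice.invalid.push(hash);
                println!("slot {slot}: marked duplicate");
            }
            StateChange::DuplicateConfirmed(hash) => {
                fork_choice.confirmed.push(hash);
                println!("slot {slot}: duplicate confirmed matches cluster");
            }
        }
    }
}

fn main() {
    let mut fc = ForkChoice::default();
    apply_state_changes(2, &mut fc, vec![StateChange::MarkSlotDuplicate(42)]);
    apply_state_changes(2, &mut fc, vec![StateChange::DuplicateConfirmed(42)]);
    println!("{:?}", fc);
}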
|
||||
@@ -230,7 +242,9 @@ pub(crate) fn check_slot_agrees_with_cluster(
|
||||
frozen_hash: Option<Hash>,
|
||||
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
|
||||
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
|
||||
progress: &ProgressMap,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
descendants: &HashMap<Slot, HashSet<Slot>>,
|
||||
progress: &mut ProgressMap,
|
||||
fork_choice: &mut HeaviestSubtreeForkChoice,
|
||||
slot_state_update: SlotStateUpdate,
|
||||
) {
|
||||
@@ -266,11 +280,7 @@ pub(crate) fn check_slot_agrees_with_cluster(
|
||||
let frozen_hash = frozen_hash.unwrap();
|
||||
let gossip_duplicate_confirmed_hash = gossip_duplicate_confirmed_slots.get(&slot);
|
||||
|
||||
// If the bank hasn't been frozen yet, then we haven't duplicate confirmed a local version
// of this slot through replay yet.
|
||||
let is_local_replay_duplicate_confirmed = fork_choice
|
||||
.is_duplicate_confirmed(&(slot, frozen_hash))
|
||||
.unwrap_or(false);
|
||||
let is_local_replay_duplicate_confirmed = progress.is_duplicate_confirmed(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map");
|
||||
let cluster_duplicate_confirmed_hash = get_cluster_duplicate_confirmed_hash(
|
||||
slot,
|
||||
gossip_duplicate_confirmed_hash,
|
||||
@@ -300,7 +310,14 @@ pub(crate) fn check_slot_agrees_with_cluster(
|
||||
is_slot_duplicate,
|
||||
is_dead,
|
||||
);
|
||||
apply_state_changes(slot, fork_choice, state_changes);
|
||||
apply_state_changes(
|
||||
slot,
|
||||
progress,
|
||||
fork_choice,
|
||||
ancestors,
|
||||
descendants,
|
||||
state_changes,
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -308,16 +325,15 @@ mod test {
|
||||
use super::*;
|
||||
use crate::consensus::test::VoteSimulator;
|
||||
use solana_runtime::bank_forks::BankForks;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::RwLock,
|
||||
};
|
||||
use std::sync::RwLock;
|
||||
use trees::tr;
|
||||
|
||||
struct InitialState {
|
||||
heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice,
|
||||
progress: ProgressMap,
|
||||
ancestors: HashMap<Slot, HashSet<Slot>>,
|
||||
descendants: HashMap<Slot, HashSet<Slot>>,
|
||||
slot: Slot,
|
||||
bank_forks: RwLock<BankForks>,
|
||||
}
|
||||
|
||||
@@ -326,6 +342,7 @@ mod test {
|
||||
let forks = tr(0) / (tr(1) / (tr(2) / tr(3)));
|
||||
let mut vote_simulator = VoteSimulator::new(1);
|
||||
vote_simulator.fill_bank_forks(forks, &HashMap::new());
|
||||
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
|
||||
|
||||
let descendants = vote_simulator
|
||||
.bank_forks
|
||||
@@ -337,7 +354,9 @@ mod test {
|
||||
InitialState {
|
||||
heaviest_subtree_fork_choice: vote_simulator.heaviest_subtree_fork_choice,
|
||||
progress: vote_simulator.progress,
|
||||
ancestors,
|
||||
descendants,
|
||||
slot: 0,
|
||||
bank_forks: vote_simulator.bank_forks,
|
||||
}
|
||||
}
|
||||
@@ -608,68 +627,64 @@ mod test {
|
||||
// Common state
|
||||
let InitialState {
|
||||
mut heaviest_subtree_fork_choice,
|
||||
mut progress,
|
||||
ancestors,
|
||||
descendants,
|
||||
slot,
|
||||
bank_forks,
|
||||
..
|
||||
} = setup();
|
||||
|
||||
// MarkSlotDuplicate should mark progress map and remove
|
||||
// the slot from fork choice
|
||||
let duplicate_slot = bank_forks.read().unwrap().root() + 1;
|
||||
let duplicate_slot_hash = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(duplicate_slot)
|
||||
.unwrap()
|
||||
.hash();
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
apply_state_changes(
|
||||
duplicate_slot,
|
||||
slot,
|
||||
&mut progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
vec![ResultingStateChange::MarkSlotDuplicate(duplicate_slot_hash)],
|
||||
&ancestors,
|
||||
&descendants,
|
||||
vec![ResultingStateChange::MarkSlotDuplicate(slot_hash)],
|
||||
);
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_candidate(&(duplicate_slot, duplicate_slot_hash))
|
||||
.is_candidate_slot(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
for child_slot in descendants
|
||||
.get(&duplicate_slot)
|
||||
.get(&slot)
|
||||
.unwrap()
|
||||
.iter()
|
||||
.chain(std::iter::once(&duplicate_slot))
|
||||
.chain(std::iter::once(&slot))
|
||||
{
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(
|
||||
*child_slot,
|
||||
bank_forks.read().unwrap().get(*child_slot).unwrap().hash()
|
||||
))
|
||||
progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*child_slot)
|
||||
.unwrap(),
|
||||
duplicate_slot
|
||||
slot
|
||||
);
|
||||
}
|
||||
|
||||
// DuplicateConfirmedSlotMatchesCluster should re-enable fork choice
|
||||
apply_state_changes(
|
||||
duplicate_slot,
|
||||
slot,
|
||||
&mut progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster(
|
||||
duplicate_slot_hash,
|
||||
slot_hash,
|
||||
)],
|
||||
);
|
||||
for child_slot in descendants
|
||||
.get(&duplicate_slot)
|
||||
.get(&slot)
|
||||
.unwrap()
|
||||
.iter()
|
||||
.chain(std::iter::once(&duplicate_slot))
|
||||
.chain(std::iter::once(&slot))
|
||||
{
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(
|
||||
*child_slot,
|
||||
bank_forks.read().unwrap().get(*child_slot).unwrap().hash()
|
||||
))
|
||||
assert!(progress
|
||||
.latest_unconfirmed_duplicate_ancestor(*child_slot)
|
||||
.is_none());
|
||||
}
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_candidate(&(duplicate_slot, duplicate_slot_hash))
|
||||
.is_candidate_slot(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
}
|
||||
|
||||
@@ -677,7 +692,9 @@ mod test {
|
||||
// Common state
|
||||
let InitialState {
|
||||
mut heaviest_subtree_fork_choice,
|
||||
progress,
|
||||
mut progress,
|
||||
ancestors,
|
||||
descendants,
|
||||
bank_forks,
|
||||
..
|
||||
} = setup();
|
||||
@@ -696,18 +713,12 @@ mod test {
|
||||
initial_bank_hash,
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&progress,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
);
|
||||
assert!(duplicate_slots_tracker.contains(&duplicate_slot));
|
||||
// Nothing should be applied yet to fork choice, since bank was not yet frozen
|
||||
for slot in 2..=3 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.is_none());
|
||||
}
|
||||
|
||||
// Now freeze the bank
|
||||
let frozen_duplicate_slot_hash = bank_forks
|
||||
@@ -722,16 +733,16 @@ mod test {
|
||||
Some(frozen_duplicate_slot_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&progress,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Frozen,
|
||||
);
|
||||
|
||||
// Progress map should have the correct updates, fork choice should mark duplicate
|
||||
// as unvotable
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_unconfirmed_duplicate(&(duplicate_slot, frozen_duplicate_slot_hash))
|
||||
.unwrap());
|
||||
assert!(progress.is_unconfirmed_duplicate(duplicate_slot).unwrap());
|
||||
|
||||
// The ancestor of the duplicate slot should be the best slot now
|
||||
let (duplicate_ancestor, duplicate_parent_hash) = {
|
||||
@@ -760,7 +771,9 @@ mod test {
|
||||
// Common state
|
||||
let InitialState {
|
||||
mut heaviest_subtree_fork_choice,
|
||||
progress,
|
||||
mut progress,
|
||||
ancestors,
|
||||
descendants,
|
||||
bank_forks,
|
||||
..
|
||||
} = setup();
|
||||
@@ -783,26 +796,17 @@ mod test {
|
||||
Some(slot2_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&progress,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::DuplicateConfirmed,
|
||||
);
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(2, slot2_hash))
|
||||
.unwrap());
|
||||
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(3, slot3_hash)
|
||||
);
|
||||
for slot in 0..=2 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.is_none());
|
||||
}
|
||||
|
||||
// Mark 3 as duplicate, should not remove the duplicate confirmed slot 2 from
|
||||
// fork choice
|
||||
@@ -812,36 +816,17 @@ mod test {
|
||||
Some(slot3_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&progress,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
);
|
||||
assert!(duplicate_slots_tracker.contains(&3));
|
||||
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(2, slot2_hash)
|
||||
);
|
||||
for slot in 0..=3 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
if slot <= 2 {
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.is_none());
|
||||
} else {
|
||||
assert!(!heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.unwrap(),
|
||||
3
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -849,7 +834,9 @@ mod test {
|
||||
// Common state
|
||||
let InitialState {
|
||||
mut heaviest_subtree_fork_choice,
|
||||
progress,
|
||||
mut progress,
|
||||
ancestors,
|
||||
descendants,
|
||||
bank_forks,
|
||||
..
|
||||
} = setup();
|
||||
@@ -870,20 +857,12 @@ mod test {
|
||||
Some(bank_forks.read().unwrap().get(2).unwrap().hash()),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&progress,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
);
|
||||
assert!(duplicate_slots_tracker.contains(&2));
|
||||
for slot in 2..=3 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.unwrap(),
|
||||
2
|
||||
);
|
||||
}
|
||||
|
||||
let slot1_hash = bank_forks.read().unwrap().get(1).unwrap().hash();
|
||||
assert_eq!(
|
||||
@@ -899,91 +878,13 @@ mod test {
|
||||
Some(slot3_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&progress,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&mut progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::DuplicateConfirmed,
|
||||
);
|
||||
for slot in 0..=3 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.is_none());
|
||||
}
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(3, slot3_hash)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_state_descendant_confirmed_ancestor_duplicate() {
|
||||
// Common state
|
||||
let InitialState {
|
||||
mut heaviest_subtree_fork_choice,
|
||||
progress,
|
||||
bank_forks,
|
||||
..
|
||||
} = setup();
|
||||
|
||||
let slot3_hash = bank_forks.read().unwrap().get(3).unwrap().hash();
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(3, slot3_hash)
|
||||
);
|
||||
let root = 0;
|
||||
let mut duplicate_slots_tracker = DuplicateSlotsTracker::default();
|
||||
let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();
|
||||
|
||||
// Mark 3 as duplicate confirmed
|
||||
gossip_duplicate_confirmed_slots.insert(3, slot3_hash);
|
||||
check_slot_agrees_with_cluster(
|
||||
3,
|
||||
root,
|
||||
Some(slot3_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::DuplicateConfirmed,
|
||||
);
|
||||
let verify_all_slots_duplicate_confirmed =
|
||||
|bank_forks: &RwLock<BankForks>,
|
||||
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice| {
|
||||
for slot in 0..=3 {
|
||||
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.is_duplicate_confirmed(&(slot, slot_hash))
|
||||
.unwrap());
|
||||
assert!(heaviest_subtree_fork_choice
|
||||
.latest_invalid_ancestor(&(slot, slot_hash))
|
||||
.is_none());
|
||||
}
|
||||
};
|
||||
verify_all_slots_duplicate_confirmed(&bank_forks, &heaviest_subtree_fork_choice);
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(3, slot3_hash)
|
||||
);
|
||||
|
||||
// Mark ancestor 1 as duplicate, fork choice should be unaffected since
|
||||
// slot 1 was duplicate confirmed by the confirmation on its
|
||||
// descendant, 3.
|
||||
let slot1_hash = bank_forks.read().unwrap().get(1).unwrap().hash();
|
||||
check_slot_agrees_with_cluster(
|
||||
1,
|
||||
root,
|
||||
Some(slot1_hash),
|
||||
&mut duplicate_slots_tracker,
|
||||
&gossip_duplicate_confirmed_slots,
|
||||
&progress,
|
||||
&mut heaviest_subtree_fork_choice,
|
||||
SlotStateUpdate::Duplicate,
|
||||
);
|
||||
assert!(duplicate_slots_tracker.contains(&1));
|
||||
verify_all_slots_duplicate_confirmed(&bank_forks, &heaviest_subtree_fork_choice);
|
||||
assert_eq!(
|
||||
heaviest_subtree_fork_choice.best_overall_slot(),
|
||||
(3, slot3_hash)
|
||||
|
@@ -1,8 +1,8 @@
|
||||
use crate::serve_repair::RepairType;
|
||||
use itertools::Itertools;
|
||||
use solana_gossip::{
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo, contact_info::ContactInfo, crds::Cursor, epoch_slots::EpochSlots,
|
||||
serve_repair::RepairType,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
use solana_runtime::{bank_forks::BankForks, epoch_stakes::NodeIdToVoteAccounts};
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey};
|
||||
use std::{
|
||||
|
@@ -1,32 +1,28 @@
|
||||
use crate::cluster_slots::ClusterSlots;
|
||||
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
|
||||
use solana_gossip::cluster_info::ClusterInfo;
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use crate::{cluster_info::ClusterInfo, cluster_slots::ClusterSlots};
|
||||
use solana_ledger::blockstore::{Blockstore, CompletedSlotsReceiver};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::bank_forks::BankForks;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey};
|
||||
use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::RecvTimeoutError,
|
||||
{Arc, RwLock},
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
pub type ClusterSlotsUpdateReceiver = Receiver<Vec<Slot>>;
|
||||
pub type ClusterSlotsUpdateSender = Sender<Vec<Slot>>;
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
struct ClusterSlotsServiceTiming {
|
||||
pub lowest_slot_elapsed: u64,
|
||||
pub process_cluster_slots_updates_elapsed: u64,
|
||||
pub update_completed_slots_elapsed: u64,
|
||||
}
|
||||
|
||||
impl ClusterSlotsServiceTiming {
|
||||
fn update(&mut self, lowest_slot_elapsed: u64, process_cluster_slots_updates_elapsed: u64) {
|
||||
fn update(&mut self, lowest_slot_elapsed: u64, update_completed_slots_elapsed: u64) {
|
||||
self.lowest_slot_elapsed += lowest_slot_elapsed;
|
||||
self.process_cluster_slots_updates_elapsed += process_cluster_slots_updates_elapsed;
|
||||
self.update_completed_slots_elapsed += update_completed_slots_elapsed;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,12 +36,12 @@ impl ClusterSlotsService {
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
|
||||
completed_slots_receiver: CompletedSlotsReceiver,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let id = cluster_info.id();
|
||||
Self::initialize_lowest_slot(id, &blockstore, &cluster_info);
|
||||
Self::initialize_epoch_slots(&bank_forks, &cluster_info);
|
||||
Self::initialize_epoch_slots(&blockstore, &cluster_info, &completed_slots_receiver);
|
||||
let t_cluster_slots_service = Builder::new()
|
||||
.name("solana-cluster-slots-service".to_string())
|
||||
.spawn(move || {
|
||||
@@ -54,7 +50,7 @@ impl ClusterSlotsService {
|
||||
cluster_slots,
|
||||
bank_forks,
|
||||
cluster_info,
|
||||
cluster_slots_update_receiver,
|
||||
completed_slots_receiver,
|
||||
exit,
|
||||
)
|
||||
})
|
||||
@@ -74,7 +70,7 @@ impl ClusterSlotsService {
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
|
||||
completed_slots_receiver: CompletedSlotsReceiver,
|
||||
exit: Arc<AtomicBool>,
|
||||
) {
|
||||
let mut cluster_slots_service_timing = ClusterSlotsServiceTiming::default();
|
||||
@@ -83,8 +79,7 @@ impl ClusterSlotsService {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let slots = match cluster_slots_update_receiver.recv_timeout(Duration::from_millis(200))
|
||||
{
|
||||
let slots = match completed_slots_receiver.recv_timeout(Duration::from_millis(200)) {
|
||||
Ok(slots) => Some(slots),
|
||||
Err(RecvTimeoutError::Timeout) => None,
|
||||
Err(RecvTimeoutError::Disconnected) => {
|
||||
@@ -98,21 +93,17 @@ impl ClusterSlotsService {
|
||||
let lowest_slot = blockstore.lowest_slot();
|
||||
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
|
||||
lowest_slot_elapsed.stop();
|
||||
let mut process_cluster_slots_updates_elapsed =
|
||||
Measure::start("process_cluster_slots_updates_elapsed");
|
||||
let mut update_completed_slots_elapsed =
|
||||
Measure::start("update_completed_slots_elapsed");
|
||||
if let Some(slots) = slots {
|
||||
Self::process_cluster_slots_updates(
|
||||
slots,
|
||||
&cluster_slots_update_receiver,
|
||||
&cluster_info,
|
||||
);
|
||||
Self::update_completed_slots(slots, &completed_slots_receiver, &cluster_info);
|
||||
}
|
||||
cluster_slots.update(new_root, &cluster_info, &bank_forks);
|
||||
process_cluster_slots_updates_elapsed.stop();
|
||||
update_completed_slots_elapsed.stop();
|
||||
|
||||
cluster_slots_service_timing.update(
|
||||
lowest_slot_elapsed.as_us(),
|
||||
process_cluster_slots_updates_elapsed.as_us(),
|
||||
update_completed_slots_elapsed.as_us(),
|
||||
);
|
||||
|
||||
if last_stats.elapsed().as_secs() > 2 {
|
||||
@@ -124,8 +115,8 @@ impl ClusterSlotsService {
|
||||
i64
|
||||
),
|
||||
(
|
||||
"process_cluster_slots_updates_elapsed",
|
||||
cluster_slots_service_timing.process_cluster_slots_updates_elapsed,
|
||||
"update_completed_slots_elapsed",
|
||||
cluster_slots_service_timing.update_completed_slots_elapsed,
|
||||
i64
|
||||
),
|
||||
);
|
||||
@@ -135,12 +126,12 @@ impl ClusterSlotsService {
|
||||
}
|
||||
}
|
||||
|
||||
fn process_cluster_slots_updates(
|
||||
fn update_completed_slots(
|
||||
mut slots: Vec<Slot>,
|
||||
cluster_slots_update_receiver: &ClusterSlotsUpdateReceiver,
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
cluster_info: &ClusterInfo,
|
||||
) {
|
||||
while let Ok(mut more) = cluster_slots_update_receiver.try_recv() {
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
#[allow(clippy::stable_sort_primitive)]
|
||||
@@ -163,16 +154,30 @@ impl ClusterSlotsService {
|
||||
cluster_info.push_lowest_slot(*id, lowest_slot);
|
||||
}
|
||||
|
||||
fn initialize_epoch_slots(bank_forks: &RwLock<BankForks>, cluster_info: &ClusterInfo) {
|
||||
// TODO: Should probably incorporate slots that were replayed on startup,
|
||||
// and maybe some that were frozen < snapshot root in case validators restart
|
||||
// from newer snapshots and lose history.
|
||||
let frozen_banks = bank_forks.read().unwrap().frozen_banks();
|
||||
let mut frozen_bank_slots: Vec<Slot> = frozen_banks.keys().cloned().collect();
|
||||
frozen_bank_slots.sort_unstable();
|
||||
fn initialize_epoch_slots(
|
||||
blockstore: &Blockstore,
|
||||
cluster_info: &ClusterInfo,
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
) {
|
||||
let root = blockstore.last_root();
|
||||
let mut slots: Vec<_> = blockstore
|
||||
.live_slots_iterator(root)
|
||||
.filter_map(|(slot, slot_meta)| {
|
||||
if slot_meta.is_full() {
|
||||
Some(slot)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
if !frozen_bank_slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&frozen_bank_slots);
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
slots.sort_unstable();
|
||||
slots.dedup();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
}
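The new initialize_epoch_slots merges the slots already full on disk with anything still pending on the completed-slots channel, then sorts and dedups before advertising. A minimal sketch where the blockstore scan and CompletedSlotsReceiver are faked with a Vec and an mpsc channel (names are hypothetical):

use std::sync::mpsc::{channel, Receiver};

// `full_slots_on_disk` stands in for the blockstore live_slots_iterator scan,
// the channel stands in for CompletedSlotsReceiver.
fn initialize_epoch_slots(full_slots_on_disk: Vec<u64>, completed_slots: Receiver<Vec<u64>>) -> Vec<u64> {
    let mut slots = full_slots_on_disk;
    while let Ok(mut more) = completed_slots.try_recv() {
        slots.append(&mut more);
    }
    slots.sort_unstable();
    slots.dedup();
    slots
}

fn main() {
    let (tx, rx) = channel();
    tx.send(vec![7, 5]).unwrap();
    tx.send(vec![5, 9]).unwrap();
    let slots = initialize_epoch_slots(vec![3, 5], rx);
    assert_eq!(slots, vec![3, 5, 7, 9]);
    println!("{:?}", slots);
}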
|
||||
@@ -180,7 +185,7 @@ impl ClusterSlotsService {
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_gossip::{cluster_info::Node, crds_value::CrdsValueLabel};
|
||||
use crate::{cluster_info::Node, crds_value::CrdsValueLabel};
|
||||
|
||||
#[test]
|
||||
pub fn test_update_lowest_slot() {
|
||||
|
@@ -1,7 +1,6 @@
|
||||
use crate::consensus::Stake;
|
||||
use crate::{consensus::Stake, rpc_subscriptions::RpcSubscriptions};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::datapoint_info;
|
||||
use solana_rpc::rpc_subscriptions::RpcSubscriptions;
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
commitment::{BlockCommitment, BlockCommitmentCache, CommitmentSlots, VOTE_THRESHOLD_SIZE},
|
||||
|
@@ -1,7 +1,7 @@
|
||||
use crate::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions};
|
||||
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
|
||||
use solana_ledger::blockstore::{Blockstore, CompletedDataSetInfo};
|
||||
use solana_ledger::entry::Entry;
|
||||
use solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions};
|
||||
use solana_sdk::signature::Signature;
|
||||
use std::{
|
||||
sync::{
|
||||
|
@@ -1,5 +1,4 @@
|
||||
use crate::{
|
||||
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
|
||||
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
|
||||
progress_map::{LockoutIntervals, ProgressMap},
|
||||
};
|
||||
@@ -92,8 +91,6 @@ pub type Stake = u64;
|
||||
pub type VotedStakes = HashMap<Slot, Stake>;
|
||||
pub type PubkeyVotes = Vec<(Pubkey, Slot)>;
|
||||
|
||||
// lint warning "bank_weight is never read"
|
||||
#[allow(dead_code)]
|
||||
pub(crate) struct ComputedBankState {
|
||||
pub voted_stakes: VotedStakes,
|
||||
pub total_stake: Stake,
|
||||
@@ -267,7 +264,7 @@ impl Tower {
|
||||
};
|
||||
for vote in &vote_state.votes {
|
||||
lockout_intervals
|
||||
.entry(vote.last_locked_out_slot())
|
||||
.entry(vote.expiration_slot())
|
||||
.or_insert_with(Vec::new)
|
||||
.push((vote.slot, key));
|
||||
}
|
||||
@@ -530,31 +527,31 @@ impl Tower {
false
}

pub fn is_locked_out(&self, slot: Slot, ancestors: &HashSet<Slot>) -> bool {
pub fn is_locked_out(&self, slot: Slot, ancestors: &HashMap<Slot, HashSet<Slot>>) -> bool {
assert!(ancestors.contains_key(&slot));

if !self.is_recent(slot) {
return true;
}

// Check if a slot is locked out by simulating adding a vote for that
// slot to the current lockouts to pop any expired votes. If any of the
// remaining voted slots are on a different fork from the checked slot,
// it's still locked out.
let mut lockouts = self.lockouts.clone();
lockouts.process_slot_vote_unchecked(slot);
for vote in &lockouts.votes {
if slot != vote.slot && !ancestors.contains(&vote.slot) {
if vote.slot == slot {
continue;
}
if !ancestors[&slot].contains(&vote.slot) {
return true;
}
}

if let Some(root_slot) = lockouts.root_slot {
// This case should never happen because bank forks purges all
// non-descendants of the root every time root is set
if slot != root_slot {
// This case should never happen because bank forks purges all
// non-descendants of the root every time root is set
assert!(
ancestors.contains(&root_slot),
ancestors[&slot].contains(&root_slot),
"ancestors: {:?}, slot: {} root: {}",
ancestors,
ancestors[&slot],
slot,
root_slot
);
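The `is_locked_out` change above swaps the single `HashSet<Slot>` of ancestors for a `HashMap<Slot, HashSet<Slot>>` keyed by slot, so the caller passes the whole ancestors map and the check indexes `ancestors[&slot]` for the slot under consideration (with an assert that the entry exists). A minimal, self-contained sketch of that map shape, using only `std` types rather than the real `Tower`/`BankForks` code:

```rust
use std::collections::{HashMap, HashSet};

type Slot = u64;

// Hypothetical helper: derive a `slot -> set of ancestor slots` map from
// child -> parent links, the shape the new `is_locked_out` signature expects.
fn build_ancestors(parents: &HashMap<Slot, Slot>) -> HashMap<Slot, HashSet<Slot>> {
    let mut ancestors = HashMap::new();
    for &slot in parents.keys() {
        let mut set = HashSet::new();
        let mut cur = slot;
        // Walk parent links all the way up, collecting every ancestor.
        while let Some(&parent) = parents.get(&cur) {
            set.insert(parent);
            cur = parent;
        }
        ancestors.insert(slot, set);
    }
    ancestors
}

fn main() {
    // Fork structure: 0 <- 1 <- 2, and 0 <- 3 on a different fork.
    let parents: HashMap<Slot, Slot> = vec![(1, 0), (2, 1), (3, 0)].into_iter().collect();
    let ancestors = build_ancestors(&parents);
    // A prior vote on slot 2 is not an ancestor of slot 3, so checking slot 3
    // against that vote would report the tower as still locked out.
    assert!(!ancestors[&3].contains(&2));
    assert!(ancestors[&2].contains(&0));
}
```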
@@ -564,17 +561,6 @@ impl Tower {
false
}

fn is_candidate_slot_descendant_of_last_vote(
candidate_slot: Slot,
last_voted_slot: Slot,
ancestors: &HashMap<Slot, HashSet<u64>>,
) -> Option<bool> {
ancestors
.get(&candidate_slot)
.map(|candidate_slot_ancestors| candidate_slot_ancestors.contains(&last_voted_slot))
}

#[allow(clippy::too_many_arguments)]
fn make_check_switch_threshold_decision(
&self,
switch_slot: u64,
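The new `is_candidate_slot_descendant_of_last_vote` helper above deliberately returns `Option<bool>` rather than `bool`: `None` means the candidate slot is simply not present in the ancestors map. The two call sites later in this diff treat that case differently, `.expect(...)` where the slot is guaranteed to be tracked and `.unwrap_or(true)` where the answer may be unknowable (pruned banks or a stray last vote) and the code errs on the side of ignoring the vote. A hedged, std-only sketch of that distinction (names here are illustrative, not the crate's API):

```rust
use std::collections::{HashMap, HashSet};

type Slot = u64;

fn is_descendant_of_last_vote(
    candidate_slot: Slot,
    last_voted_slot: Slot,
    ancestors: &HashMap<Slot, HashSet<Slot>>,
) -> Option<bool> {
    // Some(true/false) when the candidate is known, None when it is absent
    // from the map (e.g. its bank was pruned from the fork structure).
    ancestors
        .get(&candidate_slot)
        .map(|set| set.contains(&last_voted_slot))
}

fn main() {
    let ancestors: HashMap<Slot, HashSet<Slot>> =
        vec![(2, vec![0, 1].into_iter().collect())].into_iter().collect();

    // Call-site style 1: the slot must be tracked, so absence would be a bug.
    let on_same_fork = is_descendant_of_last_vote(2, 1, &ancestors)
        .expect("slot 2 must exist in the ancestors map");
    assert!(on_same_fork);

    // Call-site style 2: an unknown (possibly pruned) slot is conservatively
    // treated as if it were a descendant, so its vote is NOT counted.
    let treat_as_descendant = is_descendant_of_last_vote(9, 1, &ancestors).unwrap_or(true);
    assert!(treat_as_descendant);
}
```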
@@ -583,11 +569,9 @@ impl Tower {
progress: &ProgressMap,
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
) -> SwitchForkDecision {
self.last_voted_slot_hash()
.map(|(last_voted_slot, last_voted_hash)| {
self.last_voted_slot()
.map(|last_voted_slot| {
let root = self.root();
let empty_ancestors = HashSet::default();
let empty_ancestors_due_to_minor_unsynced_ledger = || {
@@ -676,7 +660,7 @@ impl Tower {
if last_vote_ancestors.contains(&switch_slot) {
if self.is_stray_last_vote() {
return suspended_decision_due_to_major_unsynced_ledger();
} else if let Some(latest_duplicate_ancestor) = heaviest_subtree_fork_choice.latest_invalid_ancestor(&(last_voted_slot, last_voted_hash)) {
} else if let Some(latest_duplicate_ancestor) = progress.latest_unconfirmed_duplicate_ancestor(last_voted_slot) {
// We're rolling back because one of the ancestors of the last vote was a duplicate. In this
// case, it's acceptable if the switch candidate is one of ancestors of the previous vote,
// just fail the switch check because there's no point in voting on an ancestor. ReplayStage
@@ -718,9 +702,13 @@ impl Tower {
// then use this bank as a representative for the fork.
|| descendants.iter().any(|d| progress.get_fork_stats(*d).map(|stats| stats.computed).unwrap_or(false))
|| *candidate_slot == last_voted_slot
// Ignore if the `candidate_slot` is a descendant of the `last_voted_slot`, since we do not
// want to count votes on the same fork.
|| Self::is_candidate_slot_descendant_of_last_vote(*candidate_slot, last_voted_slot, ancestors).expect("exists in descendants map, so must exist in ancestors map")
|| ancestors
.get(&candidate_slot)
.expect(
"empty descendants implies this is a child, not parent of root, so must
exist in the ancestors map",
)
.contains(&last_voted_slot)
|| *candidate_slot <= root
{
continue;
@@ -740,9 +728,8 @@ impl Tower {
.unwrap()
.fork_stats
.lockout_intervals;
// Find any locked out intervals for vote accounts in this bank with
// `lockout_interval_end` >= `last_vote`, which implies they are locked out at
// `last_vote` on another fork.
// Find any locked out intervals in this bank with endpoint >= last_vote,
// implies they are locked out at last_vote
for (_lockout_interval_end, intervals_keyed_by_end) in lockout_intervals.range((Included(last_voted_slot), Unbounded)) {
for (lockout_interval_start, vote_account_pubkey) in intervals_keyed_by_end {
if locked_out_vote_accounts.contains(vote_account_pubkey) {
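The comment rewrite in this hunk spells out what the interval query does: `lockout_intervals` is keyed by the slot at which each vote's lockout expires, so taking every entry whose endpoint is at or beyond `last_voted_slot` yields exactly the votes that are still locked out there on another fork. A small illustrative sketch of that range query with plain `std` collections (the real `LockoutIntervals` type lives in `progress_map`):

```rust
use std::collections::BTreeMap;
use std::ops::Bound::{Included, Unbounded};

type Slot = u64;

fn main() {
    // Keyed by lockout expiration slot, mirroring the
    // `.entry(vote.expiration_slot()).or_insert_with(Vec::new).push(...)` pattern above.
    // Values are (vote slot, voting account) pairs; &str stands in for a real pubkey.
    let mut lockout_intervals: BTreeMap<Slot, Vec<(Slot, &str)>> = BTreeMap::new();
    lockout_intervals.entry(40).or_insert_with(Vec::new).push((38, "validator-a"));
    lockout_intervals.entry(60).or_insert_with(Vec::new).push((44, "validator-b"));
    lockout_intervals.entry(100).or_insert_with(Vec::new).push((36, "validator-c"));

    let last_voted_slot: Slot = 47;
    // Everything whose lockout expires at or after `last_voted_slot` is still
    // locked out there, and its stake can count toward a switching proof.
    for (end, entries) in lockout_intervals.range((Included(last_voted_slot), Unbounded)) {
        for (start, pubkey) in entries {
            println!("{pubkey}: locked out over [{start}, {end}]");
        }
    }
}
```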
@@ -765,66 +752,21 @@ impl Tower {
.map(|(stake, _)| *stake)
.unwrap_or(0);
locked_out_stake += stake;
if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
return SwitchForkDecision::SwitchProof(switch_proof);
}
locked_out_vote_accounts.insert(vote_account_pubkey);
}
}
}
}

// Check the latest votes for potentially gossip votes that haven't landed yet
for (vote_account_pubkey, (candidate_latest_frozen_vote, _candidate_latest_frozen_vote_hash)) in latest_validator_votes_for_frozen_banks.max_gossip_frozen_votes() {
if locked_out_vote_accounts.contains(&vote_account_pubkey) {
continue;
}

if *candidate_latest_frozen_vote > last_voted_slot
&&
// Because `candidate_latest_frozen_vote` is the last vote made by some validator
// in the cluster for a frozen bank `B` observed through gossip, we may have cleared
// that frozen bank `B` because we `set_root(root)` for a `root` on a different fork,
// like so:
//
// |----------X ------candidate_latest_frozen_vote (frozen)
// old root
// |----------new root ----last_voted_slot
//
// In most cases, because `last_voted_slot` must be a descendant of `root`, then
// if `candidate_latest_frozen_vote` is not found in the ancestors/descendants map (recall these
// directly reflect the state of BankForks), this implies that `B` was pruned from BankForks
// because it was on a different fork than `last_voted_slot`, and thus this vote for `candidate_latest_frozen_vote`
// should be safe to count towards the switching proof:
//
// However, there is also the possibility that `last_voted_slot` is a stray, in which
// case we cannot make this conclusion as we do not know the ancestors/descendants
// of strays. Hence we err on the side of caution here and ignore this vote. This
// is ok because validators voting on different unrooted forks should eventually vote
// on some descendant of the root, at which time they can be included in switching proofs.
!Self::is_candidate_slot_descendant_of_last_vote(
*candidate_latest_frozen_vote, last_voted_slot, ancestors)
.unwrap_or(true) {
let stake = epoch_vote_accounts
.get(vote_account_pubkey)
.map(|(stake, _)| *stake)
.unwrap_or(0);
locked_out_stake += stake;
if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
return SwitchForkDecision::SwitchProof(switch_proof);
}
locked_out_vote_accounts.insert(vote_account_pubkey);
}
if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
SwitchForkDecision::SwitchProof(switch_proof)
} else {
SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
}

// We have not detected sufficient lockout past the last voted slot to generate
// a switching proof
SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
})
.unwrap_or(SwitchForkDecision::SameFork)
}

#[allow(clippy::too_many_arguments)]
pub(crate) fn check_switch_threshold(
&mut self,
switch_slot: u64,
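The block above folds a second stake source into the switching proof: besides on-chain lockout intervals, the latest frozen-bank votes seen over gossip are added, and the function returns `SwitchProof` as soon as the locked-out fraction clears `SWITCH_FORK_THRESHOLD` (otherwise `FailedSwitchThreshold(locked_out_stake, total_stake)`). A hedged sketch of just that accumulate-and-compare step; the threshold value and names here are stand-ins, not read from the crate:

```rust
// Illustrative stand-ins for the real constant and enum in consensus.rs.
const SWITCH_FORK_THRESHOLD: f64 = 0.38; // assumed value for this sketch

#[derive(Debug, PartialEq)]
enum SwitchForkDecision {
    SwitchProof,
    FailedSwitchThreshold(u64, u64),
}

// Accumulate stake from validators locked out on other forks (landed votes
// and qualifying gossip votes alike) and compare against the threshold.
fn decide(locked_out_stakes: &[u64], total_stake: u64) -> SwitchForkDecision {
    let mut locked_out_stake = 0u64;
    for stake in locked_out_stakes {
        locked_out_stake += stake;
        if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
            return SwitchForkDecision::SwitchProof;
        }
    }
    SwitchForkDecision::FailedSwitchThreshold(locked_out_stake, total_stake)
}

fn main() {
    // Two validators with 10_000 stake each, as in the tests further down.
    assert_eq!(
        decide(&[], 20_000),
        SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
    );
    // One of them locked out on another fork: 10_000 / 20_000 > threshold.
    assert_eq!(decide(&[10_000], 20_000), SwitchForkDecision::SwitchProof);
}
```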
@@ -833,8 +775,6 @@ impl Tower {
progress: &ProgressMap,
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
) -> SwitchForkDecision {
let decision = self.make_check_switch_threshold_decision(
switch_slot,
@@ -843,8 +783,6 @@ impl Tower {
progress,
total_stake,
epoch_vote_accounts,
latest_validator_votes_for_frozen_banks,
heaviest_subtree_fork_choice,
);
let new_check = Some((switch_slot, decision.clone()));
if new_check != self.last_switch_threshold_check {
@@ -1366,9 +1304,9 @@ pub mod test {
cluster_info_vote_listener::VoteTracker,
cluster_slot_state_verifier::{DuplicateSlotsTracker, GossipDuplicateConfirmedSlots},
cluster_slots::ClusterSlots,
fork_choice::{ForkChoice, SelectVoteAndResetForkResult},
heaviest_subtree_fork_choice::SlotHashKey,
progress_map::ForkProgress,
fork_choice::SelectVoteAndResetForkResult,
heaviest_subtree_fork_choice::{HeaviestSubtreeForkChoice, SlotHashKey},
progress_map::{DuplicateStats, ForkProgress},
replay_stage::{HeaviestForkFailures, ReplayStage},
unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,
};
@@ -1382,7 +1320,7 @@ pub mod test {
},
};
use solana_sdk::{
account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
account::{Account, AccountSharedData, WritableAccount},
clock::Slot,
hash::Hash,
pubkey::Pubkey,
@@ -1445,9 +1383,9 @@ pub mod test {

while let Some(visit) = walk.get() {
let slot = visit.node().data;
self.progress
.entry(slot)
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0));
self.progress.entry(slot).or_insert_with(|| {
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0)
});
if self.bank_forks.read().unwrap().get(slot).is_some() {
walk.forward();
continue;
@@ -1535,8 +1473,6 @@ pub mod test {
&descendants,
&self.progress,
tower,
&self.latest_validator_votes_for_frozen_banks,
&self.heaviest_subtree_fork_choice,
);

// Make sure this slot isn't locked out or failing threshold
@@ -1600,7 +1536,9 @@ pub mod test {
) {
self.progress
.entry(slot)
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0))
.or_insert_with(|| {
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0)
})
.fork_stats
.lockout_intervals
.entry(lockout_interval.1)
@@ -1707,7 +1645,14 @@ pub mod test {
let mut progress = ProgressMap::default();
progress.insert(
0,
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
ForkProgress::new(
bank0.last_blockhash(),
None,
DuplicateStats::default(),
None,
0,
0,
),
);
let bank_forks = BankForks::new(bank0);
let heaviest_subtree_fork_choice =
@@ -1718,11 +1663,11 @@ pub mod test {
fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, ArcVoteAccount))> {
let mut stakes = vec![];
for (lamports, votes) in stake_votes {
let mut account = AccountSharedData::from(Account {
let mut account = AccountSharedData {
data: vec![0; VoteState::size_of()],
lamports: *lamports,
..Account::default()
});
..AccountSharedData::default()
};
let mut vote_state = VoteState::default();
for slot in *votes {
vote_state.process_slot_vote_unchecked(*slot);
@@ -1842,8 +1787,7 @@ pub mod test {
/ (tr(44)
// Minor fork 2
/ (tr(45) / (tr(46) / (tr(47) / (tr(48) / (tr(49) / (tr(50)))))))
/ (tr(110)))
/ tr(112))));
/ (tr(110))))));

// Fill the BankForks according to the above fork structure
vote_simulator.fill_bank_forks(forks, &HashMap::new());
@@ -1866,46 +1810,21 @@ pub mod test {
let mut tower = Tower::new_with_key(&vote_simulator.node_pubkeys[0]);

// Last vote is 47
tower.record_vote(
47,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(47)
.unwrap()
.hash(),
);
tower.record_vote(47, Hash::default());

// Trying to switch to an ancestor of last vote should only not panic
// if the current vote has a duplicate ancestor
let ancestor_of_voted_slot = 43;
let duplicate_ancestor1 = 44;
let duplicate_ancestor2 = 45;
vote_simulator
.heaviest_subtree_fork_choice
.mark_fork_invalid_candidate(&(
duplicate_ancestor1,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(duplicate_ancestor1)
.unwrap()
.hash(),
));
vote_simulator
.heaviest_subtree_fork_choice
.mark_fork_invalid_candidate(&(
duplicate_ancestor2,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(duplicate_ancestor2)
.unwrap()
.hash(),
));
vote_simulator.progress.set_unconfirmed_duplicate_slot(
duplicate_ancestor1,
&descendants.get(&duplicate_ancestor1).unwrap(),
);
vote_simulator.progress.set_unconfirmed_duplicate_slot(
duplicate_ancestor2,
&descendants.get(&duplicate_ancestor2).unwrap(),
);
assert_eq!(
tower.check_switch_threshold(
ancestor_of_voted_slot,
@@ -1914,8 +1833,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
);
@@ -1928,18 +1845,11 @@ pub mod test {
confirm_ancestors.push(duplicate_ancestor2);
}
for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() {
vote_simulator
.heaviest_subtree_fork_choice
.mark_fork_valid_candidate(&(
duplicate_ancestor,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(duplicate_ancestor)
.unwrap()
.hash(),
));
vote_simulator.progress.set_confirmed_duplicate_slot(
duplicate_ancestor,
ancestors.get(&duplicate_ancestor).unwrap(),
&descendants.get(&duplicate_ancestor).unwrap(),
);
let res = tower.check_switch_threshold(
ancestor_of_voted_slot,
&ancestors,
@@ -1947,8 +1857,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
);
if i == 0 {
assert_eq!(
@@ -1984,8 +1892,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SameFork
);
@@ -1999,8 +1905,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
@@ -2016,8 +1920,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
@@ -2033,8 +1935,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
@@ -2050,8 +1950,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
@@ -2069,8 +1967,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
@@ -2086,8 +1982,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SwitchProof(Hash::default())
);
@@ -2104,8 +1998,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SwitchProof(Hash::default())
);
@@ -2131,115 +2023,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
}

#[test]
fn test_switch_threshold_use_gossip_votes() {
let num_validators = 2;
let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2);
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
let mut tower = Tower::new_with_key(&vote_simulator.node_pubkeys[0]);
let other_vote_account = vote_simulator.vote_pubkeys[1];

// Last vote is 47
tower.record_vote(47, Hash::default());

// Trying to switch to another fork at 110 should fail
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, num_validators * 10000)
);

// Adding a vote on the descendant shouldn't count toward the switch threshold
vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);

// Adding a later vote from gossip that isn't on the same fork should count toward the
// switch threshold
vote_simulator
.latest_validator_votes_for_frozen_banks
.check_add_vote(
other_vote_account,
112,
Some(
vote_simulator
.bank_forks
.read()
.unwrap()
.get(112)
.unwrap()
.hash(),
),
false,
);

assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SwitchProof(Hash::default())
);

// If we now set a root that causes slot 112 to be purged from BankForks, then
// the switch proof will now fail since that validator's vote can no longer be
// included in the switching proof
vote_simulator.set_root(44);
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
let descendants = vote_simulator
.bank_forks
.read()
.unwrap()
.descendants()
.clone();
assert_eq!(
tower.check_switch_threshold(
110,
&ancestors,
&descendants,
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
@@ -2521,14 +2304,16 @@ pub mod test {
#[test]
fn test_is_locked_out_empty() {
let tower = Tower::new_for_tests(0, 0.67);
let ancestors = HashSet::new();
let ancestors = vec![(0, HashSet::new())].into_iter().collect();
assert!(!tower.is_locked_out(0, &ancestors));
}

#[test]
fn test_is_locked_out_root_slot_child_pass() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
let ancestors = vec![(1, vec![0].into_iter().collect())]
.into_iter()
.collect();
tower.lockouts.root_slot = Some(0);
assert!(!tower.is_locked_out(1, &ancestors));
}
@@ -2536,7 +2321,9 @@ pub mod test {
#[test]
fn test_is_locked_out_root_slot_sibling_fail() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
let ancestors = vec![(2, vec![0].into_iter().collect())]
.into_iter()
.collect();
tower.lockouts.root_slot = Some(0);
tower.record_vote(1, Hash::default());
assert!(tower.is_locked_out(2, &ancestors));
@@ -2567,7 +2354,9 @@ pub mod test {
#[test]
fn test_is_locked_out_double_vote() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
.into_iter()
.collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
assert!(tower.is_locked_out(0, &ancestors));
@@ -2576,7 +2365,9 @@ pub mod test {
#[test]
fn test_is_locked_out_child() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
let ancestors = vec![(1, vec![0].into_iter().collect())]
.into_iter()
.collect();
tower.record_vote(0, Hash::default());
assert!(!tower.is_locked_out(1, &ancestors));
}
@@ -2584,7 +2375,13 @@ pub mod test {
#[test]
fn test_is_locked_out_sibling() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
let ancestors = vec![
(0, HashSet::new()),
(1, vec![0].into_iter().collect()),
(2, vec![0].into_iter().collect()),
]
.into_iter()
.collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
assert!(tower.is_locked_out(2, &ancestors));
@@ -2593,7 +2390,13 @@ pub mod test {
#[test]
fn test_is_locked_out_last_vote_expired() {
let mut tower = Tower::new_for_tests(0, 0.67);
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
let ancestors = vec![
(0, HashSet::new()),
(1, vec![0].into_iter().collect()),
(4, vec![0].into_iter().collect()),
]
.into_iter()
.collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
assert!(!tower.is_locked_out(4, &ancestors));
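Each of the rewritten `is_locked_out` tests above now builds the `slot -> ancestor set` map inline with `vec![...].into_iter().collect()`. If many fixtures need that shape, a small hypothetical helper (not part of this diff) keeps them readable:

```rust
use std::collections::{HashMap, HashSet};

type Slot = u64;

// Hypothetical test helper: turn (slot, ancestors) pairs into the map shape
// the updated `is_locked_out` expects.
fn ancestors(entries: Vec<(Slot, Vec<Slot>)>) -> HashMap<Slot, HashSet<Slot>> {
    entries
        .into_iter()
        .map(|(slot, ancs)| (slot, ancs.into_iter().collect()))
        .collect()
}

fn main() {
    // Equivalent to the inline literal used in test_is_locked_out_last_vote_expired.
    let map = ancestors(vec![(0, vec![]), (1, vec![0]), (4, vec![0])]);
    assert!(map[&1].contains(&0));
    assert!(map[&0].is_empty());
}
```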
@@ -2657,7 +2460,7 @@ pub mod test {
});
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].iter().cloned().collect();
Tower::update_ancestor_voted_stakes(&mut voted_stakes, 2, account.lamports(), &ancestors);
Tower::update_ancestor_voted_stakes(&mut voted_stakes, 2, account.lamports, &ancestors);
assert_eq!(voted_stakes[&0], 1);
assert_eq!(voted_stakes[&1], 1);
assert_eq!(voted_stakes[&2], 1);
@@ -2918,8 +2721,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SameFork
);
@@ -2933,8 +2734,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
@@ -2949,8 +2748,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SwitchProof(Hash::default())
);
@@ -3020,8 +2817,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
@@ -3036,8 +2831,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);
@@ -3052,8 +2845,6 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SwitchProof(Hash::default())
);
Some files were not shown because too many files have changed in this diff.